============================================================================================================================================================
SOURCE CODE FILE: pixelshuffle.h
LINES: 1
SIZE: 3.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\pixelshuffle.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/cloneable.h>
#include <torch/nn/functional/pixelshuffle.h>
#include <torch/nn/options/pixelshuffle.h>
#include <torch/csrc/Export.h>
namespace torch::nn {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PixelShuffle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
/// to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an
/// upscale factor. See
/// https://pytorch.org/docs/main/nn.html#torch.nn.PixelShuffle to learn about
/// the exact behavior of this module.
///
/// See the documentation for `torch::nn::PixelShuffleOptions` class to learn
/// what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// PixelShuffle model(PixelShuffleOptions(5));
/// ```
struct TORCH_API PixelShuffleImpl
: public torch::nn::Cloneable<PixelShuffleImpl> {
explicit PixelShuffleImpl(const PixelShuffleOptions& options_);
/// Pretty prints the `PixelShuffle` module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
Tensor forward(const Tensor& input);
void reset() override;
/// The options with which this `Module` was constructed.
PixelShuffleOptions options;
};
/// A `ModuleHolder` subclass for `PixelShuffleImpl`.
/// See the documentation for `PixelShuffleImpl` class to learn what methods it
/// provides, and examples of how to use `PixelShuffle` with
/// `torch::nn::PixelShuffleOptions`. See the documentation for `ModuleHolder`
/// to learn about PyTorch's module storage semantics.
TORCH_MODULE(PixelShuffle);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PixelUnshuffle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Reverses the PixelShuffle operation by rearranging elements in a tensor of
/// shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape :math:`(*,
/// C \times r^2, H, W)`, where r is a downscale factor. See
/// https://pytorch.org/docs/main/nn.html#torch.nn.PixelUnshuffle to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::PixelUnshuffleOptions` class to learn
/// what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// PixelUnshuffle model(PixelUnshuffleOptions(5));
/// ```
struct TORCH_API PixelUnshuffleImpl
: public torch::nn::Cloneable<PixelUnshuffleImpl> {
explicit PixelUnshuffleImpl(const PixelUnshuffleOptions& options_);
/// Pretty prints the `PixelUnshuffle` module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
Tensor forward(const Tensor& input);
void reset() override;
/// The options with which this `Module` was constructed.
PixelUnshuffleOptions options;
};
/// A `ModuleHolder` subclass for `PixelUnshuffleImpl`.
/// See the documentation for `PixelUnshuffleImpl` class to learn what methods
/// it provides, and examples of how to use `PixelUnshuffle` with
/// `torch::nn::PixelUnshuffleOptions`. See the documentation for `ModuleHolder`
/// to learn about PyTorch's module storage semantics.
TORCH_MODULE(PixelUnshuffle);
} // namespace torch::nn
```
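A minimal usage sketch for the two modules above (assuming a standard libtorch build; the shapes follow the doc comments, and the round trip through both modules is lossless since each is a pure rearrangement):
```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Upscale factor r = 3: (N, C*r^2, H, W) -> (N, C, H*r, W*r).
  torch::nn::PixelShuffle shuffle(torch::nn::PixelShuffleOptions(3));
  // The inverse rearrangement with the same factor.
  torch::nn::PixelUnshuffle unshuffle(torch::nn::PixelUnshuffleOptions(3));

  auto x = torch::randn({1, 9, 4, 4});
  auto y = shuffle(x);    // -> (1, 1, 12, 12)
  auto z = unshuffle(y);  // -> (1, 9, 4, 4), element-wise equal to x
  std::cout << y.sizes() << " " << z.sizes() << " " << z.equal(x) << std::endl;
}
```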
=======================================================================================================================================================
SOURCE CODE FILE: pooling.h
LINES: 1
SIZE: 29.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\pooling.h
ENCODING: utf-8
```h
#pragma once
#include <torch/expanding_array.h>
#include <torch/nn/cloneable.h>
#include <torch/nn/functional/pooling.h>
#include <torch/nn/modules/common.h>
#include <torch/nn/options/pooling.h>
#include <torch/csrc/Export.h>
namespace torch::nn {
/// Base class for all (dimension-specialized) avgpool modules.
template <size_t D, typename Derived>
class TORCH_API AvgPoolImpl : public torch::nn::Cloneable<Derived> {
public:
AvgPoolImpl(ExpandingArray<D> kernel_size)
: AvgPoolImpl(AvgPoolOptions<D>(kernel_size)) {}
explicit AvgPoolImpl(const AvgPoolOptions<D>& options_);
void reset() override;
/// Pretty prints the `AvgPool{1,2,3}d` module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
/// The options with which this `Module` was constructed.
AvgPoolOptions<D> options;
};
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AvgPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies avgpool over a 1-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.AvgPool1d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::AvgPool1dOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// AvgPool1d model(AvgPool1dOptions(3).stride(2));
/// ```
class TORCH_API AvgPool1dImpl : public AvgPoolImpl<1, AvgPool1dImpl> {
public:
using AvgPoolImpl<1, AvgPool1dImpl>::AvgPoolImpl;
Tensor forward(const Tensor& input);
};
/// A `ModuleHolder` subclass for `AvgPool1dImpl`.
/// See the documentation for `AvgPool1dImpl` class to learn what methods it
/// provides, and examples of how to use `AvgPool1d` with
/// `torch::nn::AvgPool1dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(AvgPool1d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AvgPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies avgpool over a 2-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.AvgPool2d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::AvgPool2dOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// AvgPool2d model(AvgPool2dOptions({3, 2}).stride({2, 2}));
/// ```
class TORCH_API AvgPool2dImpl : public AvgPoolImpl<2, AvgPool2dImpl> {
public:
using AvgPoolImpl<2, AvgPool2dImpl>::AvgPoolImpl;
Tensor forward(const Tensor& input);
};
/// A `ModuleHolder` subclass for `AvgPool2dImpl`.
/// See the documentation for `AvgPool2dImpl` class to learn what methods it
/// provides, and examples of how to use `AvgPool2d` with
/// `torch::nn::AvgPool2dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(AvgPool2d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AvgPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies avgpool over a 3-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.AvgPool3d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::AvgPool3dOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// AvgPool3d model(AvgPool3dOptions(5).stride(2));
/// ```
class TORCH_API AvgPool3dImpl : public AvgPoolImpl<3, AvgPool3dImpl> {
public:
using AvgPoolImpl<3, AvgPool3dImpl>::AvgPoolImpl;
Tensor forward(const Tensor& input);
};
/// A `ModuleHolder` subclass for `AvgPool3dImpl`.
/// See the documentation for `AvgPool3dImpl` class to learn what methods it
/// provides, and examples of how to use `AvgPool3d` with
/// `torch::nn::AvgPool3dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(AvgPool3d);
// ============================================================================
/// Base class for all (dimension-specialized) maxpool modules.
template <size_t D, typename Derived>
class TORCH_API MaxPoolImpl : public torch::nn::Cloneable<Derived> {
public:
MaxPoolImpl(ExpandingArray<D> kernel_size)
: MaxPoolImpl(MaxPoolOptions<D>(kernel_size)) {}
explicit MaxPoolImpl(const MaxPoolOptions<D>& options_);
void reset() override;
/// Pretty prints the `MaxPool{1,2,3}d` module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
/// The options with which this `Module` was constructed.
MaxPoolOptions<D> options;
};
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies maxpool over a 1-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.MaxPool1d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::MaxPool1dOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// MaxPool1d model(MaxPool1dOptions(3).stride(2));
/// ```
class TORCH_API MaxPool1dImpl : public MaxPoolImpl<1, MaxPool1dImpl> {
public:
using MaxPoolImpl<1, MaxPool1dImpl>::MaxPoolImpl;
Tensor forward(const Tensor& input);
/// Returns the outputs and the indices of the max values.
/// Useful for `torch::nn::MaxUnpool1d` later.
std::tuple<Tensor, Tensor> forward_with_indices(const Tensor& input);
};
/// A `ModuleHolder` subclass for `MaxPool1dImpl`.
/// See the documentation for `MaxPool1dImpl` class to learn what methods it
/// provides, and examples of how to use `MaxPool1d` with
/// `torch::nn::MaxPool1dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(MaxPool1d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies maxpool over a 2-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.MaxPool2d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::MaxPool2dOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// MaxPool2d model(MaxPool2dOptions({3, 2}).stride({2, 2}));
/// ```
class TORCH_API MaxPool2dImpl : public MaxPoolImpl<2, MaxPool2dImpl> {
public:
using MaxPoolImpl<2, MaxPool2dImpl>::MaxPoolImpl;
Tensor forward(const Tensor& input);
/// Returns the outputs and the indices of the max values.
/// Useful for `torch::nn::MaxUnpool2d` later.
std::tuple<Tensor, Tensor> forward_with_indices(const Tensor& input);
};
/// A `ModuleHolder` subclass for `MaxPool2dImpl`.
/// See the documentation for `MaxPool2dImpl` class to learn what methods it
/// provides, and examples of how to use `MaxPool2d` with
/// `torch::nn::MaxPool2dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(MaxPool2d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies maxpool over a 3-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.MaxPool3d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::MaxPool3dOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// MaxPool3d model(MaxPool3dOptions(3).stride(2));
/// ```
class TORCH_API MaxPool3dImpl : public MaxPoolImpl<3, MaxPool3dImpl> {
public:
using MaxPoolImpl<3, MaxPool3dImpl>::MaxPoolImpl;
Tensor forward(const Tensor& input);
/// Returns the outputs and the indices of the max values.
/// Useful for `torch::nn::MaxUnpool3d` later.
std::tuple<Tensor, Tensor> forward_with_indices(const Tensor& input);
};
/// A `ModuleHolder` subclass for `MaxPool3dImpl`.
/// See the documentation for `MaxPool3dImpl` class to learn what methods it
/// provides, and examples of how to use `MaxPool3d` with
/// `torch::nn::MaxPool3dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(MaxPool3d);
// ============================================================================
/// Base class for all (dimension-specialized) adaptive maxpool modules.
template <size_t D, typename output_size_t, typename Derived>
class TORCH_API AdaptiveMaxPoolImpl : public torch::nn::Cloneable<Derived> {
public:
AdaptiveMaxPoolImpl(output_size_t output_size)
: AdaptiveMaxPoolImpl(
AdaptiveMaxPoolOptions<output_size_t>(output_size)) {}
explicit AdaptiveMaxPoolImpl(
const AdaptiveMaxPoolOptions<output_size_t>& options_)
: options(options_) {}
void reset() override {}
/// Pretty prints the `AdaptiveMaxPool{1,2,3}d` module into the given
/// `stream`.
void pretty_print(std::ostream& stream) const override {
stream << "torch::nn::AdaptiveMaxPool" << D << "d"
<< "(output_size=" << options.output_size() << ")";
}
/// The options with which this `Module` was constructed.
AdaptiveMaxPoolOptions<output_size_t> options;
};
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveMaxPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies adaptive maxpool over a 1-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveMaxPool1d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::AdaptiveMaxPool1dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// AdaptiveMaxPool1d model(AdaptiveMaxPool1dOptions(3));
/// ```
class TORCH_API AdaptiveMaxPool1dImpl
: public AdaptiveMaxPoolImpl<1, ExpandingArray<1>, AdaptiveMaxPool1dImpl> {
public:
using AdaptiveMaxPoolImpl<1, ExpandingArray<1>, AdaptiveMaxPool1dImpl>::
AdaptiveMaxPoolImpl;
Tensor forward(const Tensor& input);
/// Returns the indices along with the outputs.
/// Useful to pass to nn.MaxUnpool1d.
std::tuple<Tensor, Tensor> forward_with_indices(const Tensor& input);
};
/// A `ModuleHolder` subclass for `AdaptiveMaxPool1dImpl`.
/// See the documentation for `AdaptiveMaxPool1dImpl` class to learn what
/// methods it provides, and examples of how to use `AdaptiveMaxPool1d` with
/// `torch::nn::AdaptiveMaxPool1dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(AdaptiveMaxPool1d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveMaxPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies adaptive maxpool over a 2-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveMaxPool2d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::AdaptiveMaxPool2dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2}));
/// ```
class TORCH_API AdaptiveMaxPool2dImpl : public AdaptiveMaxPoolImpl<
2,
ExpandingArrayWithOptionalElem<2>,
AdaptiveMaxPool2dImpl> {
public:
using AdaptiveMaxPoolImpl<
2,
ExpandingArrayWithOptionalElem<2>,
AdaptiveMaxPool2dImpl>::AdaptiveMaxPoolImpl;
Tensor forward(const Tensor& input);
/// Returns the indices along with the outputs.
/// Useful to pass to nn.MaxUnpool2d.
std::tuple<Tensor, Tensor> forward_with_indices(const Tensor& input);
};
/// A `ModuleHolder` subclass for `AdaptiveMaxPool2dImpl`.
/// See the documentation for `AdaptiveMaxPool2dImpl` class to learn what
/// methods it provides, and examples of how to use `AdaptiveMaxPool2d` with
/// `torch::nn::AdaptiveMaxPool2dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(AdaptiveMaxPool2d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveMaxPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies adaptive maxpool over a 3-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveMaxPool3d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::AdaptiveMaxPool3dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// AdaptiveMaxPool3d model(AdaptiveMaxPool3dOptions(3));
/// ```
class TORCH_API AdaptiveMaxPool3dImpl : public AdaptiveMaxPoolImpl<
3,
ExpandingArrayWithOptionalElem<3>,
AdaptiveMaxPool3dImpl> {
public:
using AdaptiveMaxPoolImpl<
3,
ExpandingArrayWithOptionalElem<3>,
AdaptiveMaxPool3dImpl>::AdaptiveMaxPoolImpl;
Tensor forward(const Tensor& input);
/// Returns the indices along with the outputs.
/// Useful to pass to nn.MaxUnpool3d.
std::tuple<Tensor, Tensor> forward_with_indices(const Tensor& input);
};
/// A `ModuleHolder` subclass for `AdaptiveMaxPool3dImpl`.
/// See the documentation for `AdaptiveMaxPool3dImpl` class to learn what
/// methods it provides, and examples of how to use `AdaptiveMaxPool3d` with
/// `torch::nn::AdaptiveMaxPool3dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(AdaptiveMaxPool3d);
// ============================================================================
/// Base class for all (dimension-specialized) adaptive avgpool modules.
template <size_t D, typename output_size_t, typename Derived>
class TORCH_API AdaptiveAvgPoolImpl : public torch::nn::Cloneable<Derived> {
public:
AdaptiveAvgPoolImpl(output_size_t output_size)
: AdaptiveAvgPoolImpl(
AdaptiveAvgPoolOptions<output_size_t>(output_size)) {}
explicit AdaptiveAvgPoolImpl(
const AdaptiveAvgPoolOptions<output_size_t>& options_)
: options(options_) {}
void reset() override {}
/// Pretty prints the `AdaptiveAvgPool{1,2,3}d` module into the given
/// `stream`.
void pretty_print(std::ostream& stream) const override {
stream << "torch::nn::AdaptiveAvgPool" << D << "d"
<< "(output_size=" << options.output_size() << ")";
}
/// The options with which this `Module` was constructed.
AdaptiveAvgPoolOptions<output_size_t> options;
};
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveAvgPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies adaptive avgpool over a 1-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveAvgPool1d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::AdaptiveAvgPool1dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// AdaptiveAvgPool1d model(AdaptiveAvgPool1dOptions(5));
/// ```
class TORCH_API AdaptiveAvgPool1dImpl
: public AdaptiveAvgPoolImpl<1, ExpandingArray<1>, AdaptiveAvgPool1dImpl> {
public:
using AdaptiveAvgPoolImpl<1, ExpandingArray<1>, AdaptiveAvgPool1dImpl>::
AdaptiveAvgPoolImpl;
Tensor forward(const Tensor& input);
};
/// A `ModuleHolder` subclass for `AdaptiveAvgPool1dImpl`.
/// See the documentation for `AdaptiveAvgPool1dImpl` class to learn what
/// methods it provides, and examples of how to use `AdaptiveAvgPool1d` with
/// `torch::nn::AdaptiveAvgPool1dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(AdaptiveAvgPool1d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveAvgPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies adaptive avgpool over a 2-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveAvgPool2d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::AdaptiveAvgPool2dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// AdaptiveAvgPool2d model(AdaptiveAvgPool2dOptions({3, 2}));
/// ```
class TORCH_API AdaptiveAvgPool2dImpl : public AdaptiveAvgPoolImpl<
2,
ExpandingArrayWithOptionalElem<2>,
AdaptiveAvgPool2dImpl> {
public:
using AdaptiveAvgPoolImpl<
2,
ExpandingArrayWithOptionalElem<2>,
AdaptiveAvgPool2dImpl>::AdaptiveAvgPoolImpl;
Tensor forward(const Tensor& input);
};
/// A `ModuleHolder` subclass for `AdaptiveAvgPool2dImpl`.
/// See the documentation for `AdaptiveAvgPool2dImpl` class to learn what
/// methods it provides, and examples of how to use `AdaptiveAvgPool2d` with
/// `torch::nn::AdaptiveAvgPool2dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(AdaptiveAvgPool2d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveAvgPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies adaptive avgpool over a 3-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveAvgPool3d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::AdaptiveAvgPool3dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// AdaptiveAvgPool3d model(AdaptiveAvgPool3dOptions(3));
/// ```
class TORCH_API AdaptiveAvgPool3dImpl : public AdaptiveAvgPoolImpl<
3,
ExpandingArrayWithOptionalElem<3>,
AdaptiveAvgPool3dImpl> {
public:
using AdaptiveAvgPoolImpl<
3,
ExpandingArrayWithOptionalElem<3>,
AdaptiveAvgPool3dImpl>::AdaptiveAvgPoolImpl;
Tensor forward(const Tensor& input);
};
/// A `ModuleHolder` subclass for `AdaptiveAvgPool3dImpl`.
/// See the documentation for `AdaptiveAvgPool3dImpl` class to learn what
/// methods it provides, and examples of how to use `AdaptiveAvgPool3d` with
/// `torch::nn::AdaptiveAvgPool3dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(AdaptiveAvgPool3d);
// ============================================================================
/// Base class for all (dimension-specialized) maxunpool modules.
template <size_t D, typename Derived>
class TORCH_API MaxUnpoolImpl : public torch::nn::Cloneable<Derived> {
public:
MaxUnpoolImpl(ExpandingArray<D> kernel_size)
: MaxUnpoolImpl(MaxUnpoolOptions<D>(kernel_size)) {}
explicit MaxUnpoolImpl(const MaxUnpoolOptions<D>& options_);
void reset() override;
/// Pretty prints the `MaxUnpool{1,2,3}d` module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
/// The options with which this `Module` was constructed.
MaxUnpoolOptions<D> options;
};
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxUnpool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies maxunpool over a 1-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.MaxUnpool1d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::MaxUnpool1dOptions` class to learn
/// what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// MaxUnpool1d model(MaxUnpool1dOptions(3).stride(2).padding(1));
/// ```
class TORCH_API MaxUnpool1dImpl : public MaxUnpoolImpl<1, MaxUnpool1dImpl> {
public:
using MaxUnpoolImpl<1, MaxUnpool1dImpl>::MaxUnpoolImpl;
Tensor forward(
const Tensor& input,
const Tensor& indices,
const std::optional<std::vector<int64_t>>& output_size = std::nullopt);
protected:
FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional<std::vector<int64_t>>())})
};
/// A `ModuleHolder` subclass for `MaxUnpool1dImpl`.
/// See the documentation for `MaxUnpool1dImpl` class to learn what methods it
/// provides, and examples of how to use `MaxUnpool1d` with
/// `torch::nn::MaxUnpool1dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(MaxUnpool1d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxUnpool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies maxunpool over a 2-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.MaxUnpool2d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::MaxUnpool2dOptions` class to learn
/// what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// MaxUnpool2d model(MaxUnpool2dOptions(3).stride(2).padding(1));
/// ```
class TORCH_API MaxUnpool2dImpl : public MaxUnpoolImpl<2, MaxUnpool2dImpl> {
public:
using MaxUnpoolImpl<2, MaxUnpool2dImpl>::MaxUnpoolImpl;
Tensor forward(
const Tensor& input,
const Tensor& indices,
const std::optional<std::vector<int64_t>>& output_size = std::nullopt);
protected:
FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional<std::vector<int64_t>>())})
};
/// A `ModuleHolder` subclass for `MaxUnpool2dImpl`.
/// See the documentation for `MaxUnpool2dImpl` class to learn what methods it
/// provides, and examples of how to use `MaxUnpool2d` with
/// `torch::nn::MaxUnpool2dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(MaxUnpool2d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MaxUnpool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies maxunpool over a 3-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.MaxUnpool3d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::MaxUnpool3dOptions` class to learn
/// what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// MaxUnpool3d model(MaxUnpool3dOptions(3).stride(2).padding(1));
/// ```
class TORCH_API MaxUnpool3dImpl : public MaxUnpoolImpl<3, MaxUnpool3dImpl> {
public:
using MaxUnpoolImpl<3, MaxUnpool3dImpl>::MaxUnpoolImpl;
Tensor forward(
const Tensor& input,
const Tensor& indices,
const std::optional<std::vector<int64_t>>& output_size = std::nullopt);
protected:
FORWARD_HAS_DEFAULT_ARGS({2, AnyValue(std::optional<std::vector<int64_t>>())})
};
/// A `ModuleHolder` subclass for `MaxUnpool3dImpl`.
/// See the documentation for `MaxUnpool3dImpl` class to learn what methods it
/// provides, and examples of how to use `MaxUnpool3d` with
/// `torch::nn::MaxUnpool3dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(MaxUnpool3d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FractionalMaxPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies fractional maxpool over a 2-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.FractionalMaxPool2d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::FractionalMaxPool2dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// FractionalMaxPool2d model(FractionalMaxPool2dOptions(5).output_size(1));
/// ```
class TORCH_API FractionalMaxPool2dImpl
: public torch::nn::Cloneable<FractionalMaxPool2dImpl> {
public:
FractionalMaxPool2dImpl(ExpandingArray<2> kernel_size)
: FractionalMaxPool2dImpl(FractionalMaxPool2dOptions(kernel_size)) {}
explicit FractionalMaxPool2dImpl(FractionalMaxPool2dOptions options_);
void reset() override;
/// Pretty prints the `FractionalMaxPool2d` module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
Tensor forward(const Tensor& input);
/// Returns the outputs and the indices of the max values.
/// Useful for `torch::nn::MaxUnpool2d` later.
std::tuple<Tensor, Tensor> forward_with_indices(const Tensor& input);
/// The options with which this `Module` was constructed.
FractionalMaxPool2dOptions options;
Tensor _random_samples;
};
/// A `ModuleHolder` subclass for `FractionalMaxPool2dImpl`.
/// See the documentation for `FractionalMaxPool2dImpl` class to learn what
/// methods it provides, and examples of how to use `FractionalMaxPool2d` with
/// `torch::nn::FractionalMaxPool2dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(FractionalMaxPool2d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FractionalMaxPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies fractional maxpool over a 3-D input.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.FractionalMaxPool3d to
/// learn about the exact behavior of this module.
///
/// See the documentation for `torch::nn::FractionalMaxPool3dOptions` class to
/// learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// FractionalMaxPool3d model(FractionalMaxPool3dOptions(5).output_size(1));
/// ```
class TORCH_API FractionalMaxPool3dImpl
: public torch::nn::Cloneable<FractionalMaxPool3dImpl> {
public:
FractionalMaxPool3dImpl(ExpandingArray<3> kernel_size)
: FractionalMaxPool3dImpl(FractionalMaxPool3dOptions(kernel_size)) {}
explicit FractionalMaxPool3dImpl(FractionalMaxPool3dOptions options_);
void reset() override;
/// Pretty prints the `FractionalMaxPool3d` module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
Tensor forward(const Tensor& input);
/// Returns the outputs and the indices of the max values.
/// Useful for `torch::nn::MaxUnpool3d` later.
std::tuple<Tensor, Tensor> forward_with_indices(const Tensor& input);
/// The options with which this `Module` was constructed.
FractionalMaxPool3dOptions options;
Tensor _random_samples;
};
/// A `ModuleHolder` subclass for `FractionalMaxPool3dImpl`.
/// See the documentation for `FractionalMaxPool3dImpl` class to learn what
/// methods it provides, and examples of how to use `FractionalMaxPool3d` with
/// `torch::nn::FractionalMaxPool3dOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(FractionalMaxPool3d);
// ============================================================================
/// Base class for all (dimension-specialized) lppool modules.
template <size_t D, typename Derived>
class TORCH_API LPPoolImpl : public torch::nn::Cloneable<Derived> {
public:
LPPoolImpl(double norm_type, ExpandingArray<D> kernel_size)
: LPPoolImpl(LPPoolOptions<D>(norm_type, kernel_size)) {}
explicit LPPoolImpl(const LPPoolOptions<D>& options_);
void reset() override;
/// Pretty prints the `LPPool{1,2}d` module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
LPPoolOptions<D> options;
};
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LPPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies the LPPool1d function element-wise.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.LPPool1d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::LPPool1dOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// LPPool1d model(LPPool1dOptions(1, 2).stride(5).ceil_mode(true));
/// ```
class TORCH_API LPPool1dImpl : public LPPoolImpl<1, LPPool1dImpl> {
public:
using LPPoolImpl<1, LPPool1dImpl>::LPPoolImpl;
Tensor forward(const Tensor& input);
};
/// A `ModuleHolder` subclass for `LPPool1dImpl`.
/// See the documentation for `LPPool1dImpl` class to learn what methods it
/// provides, and examples of how to use `LPPool1d` with
/// `torch::nn::LPPool1dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(LPPool1d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LPPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies the LPPool2d function element-wise.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.LPPool2d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::LPPool2dOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// LPPool2d model(LPPool2dOptions(1, std::vector<int64_t>({3, 4})).stride({5,
/// 6}).ceil_mode(true));
/// ```
class TORCH_API LPPool2dImpl : public LPPoolImpl<2, LPPool2dImpl> {
public:
using LPPoolImpl<2, LPPool2dImpl>::LPPoolImpl;
Tensor forward(const Tensor& input);
};
/// A `ModuleHolder` subclass for `LPPool2dImpl`.
/// See the documentation for `LPPool2dImpl` class to learn what methods it
/// provides, and examples of how to use `LPPool2d` with
/// `torch::nn::LPPool2dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(LPPool2d);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LPPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Applies the LPPool3d function element-wise.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.LPPool3d to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::LPPool3dOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// LPPool3d model(LPPool3dOptions(1, std::vector<int64_t>({3, 4, 5})).stride(
/// {5, 6, 7}).ceil_mode(true));
/// ```
class TORCH_API LPPool3dImpl : public LPPoolImpl<3, LPPool3dImpl> {
public:
using LPPoolImpl<3, LPPool3dImpl>::LPPoolImpl;
Tensor forward(const Tensor& input);
};
/// A `ModuleHolder` subclass for `LPPool3dImpl`.
/// See the documentation for `LPPool3dImpl` class to learn what methods it
/// provides, and examples of how to use `LPPool3d` with
/// `torch::nn::LPPool3dOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(LPPool3d);
} // namespace torch::nn
```
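As a hedged sketch of how the pooling modules above compose, the indices returned by `forward_with_indices` can be fed straight into the matching `MaxUnpool` module (standard libtorch assumed; shapes are illustrative):
```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::MaxPool2d pool(torch::nn::MaxPool2dOptions(2).stride(2));
  torch::nn::MaxUnpool2d unpool(torch::nn::MaxUnpool2dOptions(2).stride(2));

  auto x = torch::randn({1, 1, 4, 4});
  // forward_with_indices returns (pooled values, argmax indices).
  auto [out, indices] = pool->forward_with_indices(x);  // both (1, 1, 2, 2)
  // MaxUnpool2d scatters the values back; non-max positions become zero.
  auto restored = unpool(out, indices);                 // (1, 1, 4, 4)
  std::cout << restored.sizes() << std::endl;
}
```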
===================================================================================================================================================
SOURCE CODE FILE: rnn.h
LINES: 1
SIZE: 13.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\rnn.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/cloneable.h>
#include <torch/nn/modules/common.h>
#include <torch/nn/modules/dropout.h>
#include <torch/nn/options/rnn.h>
#include <torch/nn/pimpl.h>
#include <torch/nn/utils/rnn.h>
#include <torch/types.h>
#include <ATen/ATen.h>
#include <c10/util/Exception.h>
#include <cstddef>
#include <functional>
#include <memory>
#include <vector>
namespace torch::nn {
namespace detail {
/// Base class for all RNN implementations (intended for code sharing).
template <typename Derived>
class TORCH_API RNNImplBase : public torch::nn::Cloneable<Derived> {
public:
explicit RNNImplBase(const RNNOptionsBase& options_);
/// Initializes the parameters of the RNN module.
void reset() override;
void reset_parameters();
/// Overrides `nn::Module::to()` to call `flatten_parameters()` after the
/// original operation.
void to(torch::Device device, torch::Dtype dtype, bool non_blocking = false)
override;
void to(torch::Dtype dtype, bool non_blocking = false) override;
void to(torch::Device device, bool non_blocking = false) override;
/// Pretty prints the RNN module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
/// Modifies the internal storage of weights for optimization purposes.
///
/// On CPU, this method should be called if any of the weight or bias vectors
/// are changed (i.e. weights are added or removed). On GPU, it should be
/// called __any time the storage of any parameter is modified__, e.g. any
/// time a parameter is assigned a new value. This allows using the fast path
/// in cuDNN implementations of respective RNN `forward()` methods. It is
/// called once upon construction, inside `reset()`.
void flatten_parameters();
std::vector<Tensor> all_weights() const;
/// The RNN's options.
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
RNNOptionsBase options_base;
protected:
// Resets flat_weights_
// Note: be very careful before removing this, as 3rd party device types
// likely rely on this behavior to properly .to() modules like LSTM.
void reset_flat_weights();
void check_input(const Tensor& input, const Tensor& batch_sizes) const;
std::tuple<int64_t, int64_t, int64_t> get_expected_hidden_size(
const Tensor& input,
const Tensor& batch_sizes) const;
void check_hidden_size(
const Tensor& hx,
std::tuple<int64_t, int64_t, int64_t> expected_hidden_size,
std::string msg = "Expected hidden size {1}, got {2}") const;
void check_forward_args(Tensor input, Tensor hidden, Tensor batch_sizes)
const;
Tensor permute_hidden(Tensor hx, const Tensor& permutation) const;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::vector<std::string> flat_weights_names_;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::vector<std::vector<std::string>> all_weights_;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::vector<Tensor> flat_weights_;
};
} // namespace detail
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RNN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// A multi-layer Elman RNN module with Tanh or ReLU activation.
/// See https://pytorch.org/docs/main/generated/torch.nn.RNN.html to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::RNNOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// RNN model(RNNOptions(128,
/// 64).num_layers(3).dropout(0.2).nonlinearity(torch::kTanh));
/// ```
class TORCH_API RNNImpl : public detail::RNNImplBase<RNNImpl> {
public:
RNNImpl(int64_t input_size, int64_t hidden_size)
: RNNImpl(RNNOptions(input_size, hidden_size)) {}
explicit RNNImpl(const RNNOptions& options_);
std::tuple<Tensor, Tensor> forward(const Tensor& input, Tensor hx = {});
protected:
FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())})
public:
std::tuple<torch::nn::utils::rnn::PackedSequence, Tensor>
forward_with_packed_input(
const torch::nn::utils::rnn::PackedSequence& packed_input,
Tensor hx = {});
RNNOptions options;
protected:
std::tuple<Tensor, Tensor> forward_helper(
const Tensor& input,
const Tensor& batch_sizes,
const Tensor& sorted_indices,
int64_t max_batch_size,
Tensor hx);
};
/// A `ModuleHolder` subclass for `RNNImpl`.
/// See the documentation for `RNNImpl` class to learn what methods it
/// provides, and examples of how to use `RNN` with `torch::nn::RNNOptions`.
/// See the documentation for `ModuleHolder` to learn about PyTorch's
/// module storage semantics.
TORCH_MODULE(RNN);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LSTM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// A multi-layer long-short-term-memory (LSTM) module.
/// See https://pytorch.org/docs/main/generated/torch.nn.LSTM.html to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::LSTMOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// LSTM model(LSTMOptions(2,
/// 4).num_layers(3).batch_first(false).bidirectional(true));
/// ```
class TORCH_API LSTMImpl : public detail::RNNImplBase<LSTMImpl> {
public:
LSTMImpl(int64_t input_size, int64_t hidden_size)
: LSTMImpl(LSTMOptions(input_size, hidden_size)) {}
explicit LSTMImpl(const LSTMOptions& options_);
std::tuple<Tensor, std::tuple<Tensor, Tensor>> forward(
const Tensor& input,
std::optional<std::tuple<Tensor, Tensor>> hx_opt = {});
protected:
FORWARD_HAS_DEFAULT_ARGS(
{1, AnyValue(std::optional<std::tuple<Tensor, Tensor>>())})
public:
std::tuple<torch::nn::utils::rnn::PackedSequence, std::tuple<Tensor, Tensor>>
forward_with_packed_input(
const torch::nn::utils::rnn::PackedSequence& packed_input,
std::optional<std::tuple<Tensor, Tensor>> hx_opt = {});
LSTMOptions options;
protected:
void check_forward_args(
const Tensor& input,
std::tuple<Tensor, Tensor> hidden,
const Tensor& batch_sizes) const;
std::tuple<int64_t, int64_t, int64_t> get_expected_cell_size(
const Tensor& input,
const Tensor& batch_sizes) const;
std::tuple<Tensor, Tensor> permute_hidden(
std::tuple<Tensor, Tensor> hx,
const Tensor& permutation) const;
std::tuple<Tensor, std::tuple<Tensor, Tensor>> forward_helper(
const Tensor& input,
const Tensor& batch_sizes,
const Tensor& sorted_indices,
int64_t max_batch_size,
std::optional<std::tuple<Tensor, Tensor>> hx_opt);
};
/// A `ModuleHolder` subclass for `LSTMImpl`.
/// See the documentation for `LSTMImpl` class to learn what methods it
/// provides, and examples of how to use `LSTM` with `torch::nn::LSTMOptions`.
/// See the documentation for `ModuleHolder` to learn about PyTorch's
/// module storage semantics.
TORCH_MODULE(LSTM);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GRU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// A multi-layer gated recurrent unit (GRU) module.
/// See https://pytorch.org/docs/main/generated/torch.nn.GRU.html to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::GRUOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// GRU model(GRUOptions(2,
/// 4).num_layers(3).batch_first(false).bidirectional(true));
/// ```
class TORCH_API GRUImpl : public detail::RNNImplBase<GRUImpl> {
public:
GRUImpl(int64_t input_size, int64_t hidden_size)
: GRUImpl(GRUOptions(input_size, hidden_size)) {}
explicit GRUImpl(const GRUOptions& options_);
std::tuple<Tensor, Tensor> forward(const Tensor& input, Tensor hx = {});
protected:
FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(torch::Tensor())})
public:
std::tuple<torch::nn::utils::rnn::PackedSequence, Tensor>
forward_with_packed_input(
const torch::nn::utils::rnn::PackedSequence& packed_input,
Tensor hx = {});
GRUOptions options;
protected:
std::tuple<Tensor, Tensor> forward_helper(
const Tensor& input,
const Tensor& batch_sizes,
const Tensor& sorted_indices,
int64_t max_batch_size,
Tensor hx);
};
/// A `ModuleHolder` subclass for `GRUImpl`.
/// See the documentation for `GRUImpl` class to learn what methods it
/// provides, and examples of how to use `GRU` with `torch::nn::GRUOptions`.
/// See the documentation for `ModuleHolder` to learn about PyTorch's
/// module storage semantics.
TORCH_MODULE(GRU);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RNNCellImplBase ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace detail {
/// Base class for all RNNCell implementations (intended for code sharing).
template <typename Derived>
class TORCH_API RNNCellImplBase : public torch::nn::Cloneable<Derived> {
public:
explicit RNNCellImplBase(const RNNCellOptionsBase& options_);
/// Initializes the parameters of the RNNCell module.
void reset() override;
void reset_parameters();
/// Pretty prints the RNN module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
RNNCellOptionsBase options_base;
Tensor weight_ih;
Tensor weight_hh;
Tensor bias_ih;
Tensor bias_hh;
protected:
void check_forward_input(const Tensor& input, const std::string& name) const;
virtual std::string get_nonlinearity_str() const;
};
} // namespace detail
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RNNCell ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// An Elman RNN cell with tanh or ReLU non-linearity.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.RNNCell to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::RNNCellOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// RNNCell model(RNNCellOptions(20,
/// 10).bias(false).nonlinearity(torch::kReLU));
/// ```
class TORCH_API RNNCellImpl : public detail::RNNCellImplBase<RNNCellImpl> {
public:
RNNCellImpl(int64_t input_size, int64_t hidden_size)
: RNNCellImpl(RNNCellOptions(input_size, hidden_size)) {}
explicit RNNCellImpl(const RNNCellOptions& options_);
Tensor forward(const Tensor& input, const Tensor& hx = {});
protected:
FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())})
public:
RNNCellOptions options;
protected:
std::string get_nonlinearity_str() const override;
};
/// A `ModuleHolder` subclass for `RNNCellImpl`.
/// See the documentation for `RNNCellImpl` class to learn what methods it
/// provides, and examples of how to use `RNNCell` with
/// `torch::nn::RNNCellOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(RNNCell);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LSTMCell ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// A long short-term memory (LSTM) cell.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.LSTMCell to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::LSTMCellOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// LSTMCell model(LSTMCellOptions(20, 10).bias(false));
/// ```
class TORCH_API LSTMCellImpl : public detail::RNNCellImplBase<LSTMCellImpl> {
public:
LSTMCellImpl(int64_t input_size, int64_t hidden_size)
: LSTMCellImpl(LSTMCellOptions(input_size, hidden_size)) {}
explicit LSTMCellImpl(const LSTMCellOptions& options_);
std::tuple<Tensor, Tensor> forward(
const Tensor& input,
std::optional<std::tuple<Tensor, Tensor>> hx_opt = {});
protected:
FORWARD_HAS_DEFAULT_ARGS(
{1, AnyValue(std::optional<std::tuple<Tensor, Tensor>>())})
public:
LSTMCellOptions options;
};
/// A `ModuleHolder` subclass for `LSTMCellImpl`.
/// See the documentation for `LSTMCellImpl` class to learn what methods it
/// provides, and examples of how to use `LSTMCell` with
/// `torch::nn::LSTMCellOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(LSTMCell);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GRUCell ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// A gated recurrent unit (GRU) cell.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.GRUCell to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::GRUCellOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// GRUCell model(GRUCellOptions(20, 10).bias(false));
/// ```
class TORCH_API GRUCellImpl : public detail::RNNCellImplBase<GRUCellImpl> {
public:
GRUCellImpl(int64_t input_size, int64_t hidden_size)
: GRUCellImpl(GRUCellOptions(input_size, hidden_size)) {}
explicit GRUCellImpl(const GRUCellOptions& options_);
Tensor forward(const Tensor& input, const Tensor& hx = {});
protected:
FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())})
public:
GRUCellOptions options;
};
/// A `ModuleHolder` subclass for `GRUCellImpl`.
/// See the documentation for `GRUCellImpl` class to learn what methods it
/// provides, and examples of how to use `GRUCell` with
/// `torch::nn::GRUCellOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(GRUCell);
} // namespace torch::nn
```
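A short sketch of driving the `LSTM` module above (standard libtorch assumed; passing no initial state lets `hx_opt` default to zeros):
```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // 2-layer LSTM: input_size = 10, hidden_size = 20.
  torch::nn::LSTM lstm(torch::nn::LSTMOptions(10, 20).num_layers(2));

  auto input = torch::randn({5, 3, 10});  // (seq_len, batch, input_size)
  // forward returns (output, (h_n, c_n)); with no hx_opt, state starts at zero.
  auto [output, state] = lstm(input);
  auto [h_n, c_n] = state;
  std::cout << output.sizes() << " "      // (5, 3, 20)
            << h_n.sizes() << std::endl;  // (2, 3, 20)
}
```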
===========================================================================================================================================================
SOURCE CODE FILE: transformer.h
LINES: 1
SIZE: 5.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\transformer.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/cloneable.h>
#include <torch/nn/module.h>
#include <torch/nn/modules/common.h>
#include <torch/nn/options/transformer.h>
#include <torch/nn/pimpl.h>
#include <torch/types.h>
#include <ostream>
namespace torch::nn {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Transformer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// A transformer model. Users may modify the attributes as needed. The
/// architecture is based on the paper "Attention Is All You Need". Ashish
/// Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N
/// Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need.
/// In Advances in Neural Information Processing Systems, pages 6000-6010.
///
/// See https://pytorch.org/docs/stable/generated/torch.nn.Transformer.html to
/// learn about the exact behavior of this transformer model.
///
/// See the documentation for `torch::nn::TransformerOptions` class to learn
/// what constructor arguments are supported for this transformer model.
///
/// Example:
/// ```
/// Transformer trans(TransformerOptions(512, 8));
/// ```
class TORCH_API TransformerImpl : public Cloneable<TransformerImpl> {
public:
explicit TransformerImpl(TransformerOptions options_);
/// Forward function for the `Transformer` module.
/// Args:
/// src: the sequence to the encoder (required).
/// tgt: the sequence to the decoder (required).
/// src_mask: the additive mask for the src sequence (optional).
/// tgt_mask: the additive mask for the tgt sequence (optional).
/// memory_mask: the additive mask for the encoder output (optional).
/// src_key_padding_mask: the ByteTensor mask for src keys per batch
/// (optional).
/// tgt_key_padding_mask: the ByteTensor mask for tgt keys per batch
/// (optional).
/// memory_key_padding_mask: the ByteTensor mask for memory keys per batch
/// (optional).
///
/// Shape:
/// src: `(S, N, E)`
/// tgt: `(T, N, E)`
/// src_mask: `(S, S)`
/// tgt_mask: `(T, T)`
/// memory_mask: `(T, S)`
/// src_key_padding_mask: `(N, S)`
/// tgt_key_padding_mask: `(N, T)`
/// memory_key_padding_mask: `(N, S)`
///
/// Note:
/// [src/tgt/memory]_mask ensures that position i is allowed to attend to
/// the unmasked positions. If a ByteTensor is provided, the non-zero
/// positions are not allowed to attend while the zero positions will be
/// unchanged. If a BoolTensor is provided, positions with `True` are not
/// allowed to attend while `False` values will be unchanged. If a
/// FloatTensor is provided, it will be added to the attention weight.
///
/// [src/tgt/memory]_key_padding_mask marks specified elements in the
/// key to be ignored by the attention. If a ByteTensor is provided, the
/// non-zero positions will be ignored while the zero positions will be
/// unchanged. If a BoolTensor is provided, the positions with the value
/// of `True` will be ignored while the positions with the value of `False`
/// will be unchanged.
///
/// output: `(T, N, E)`
///
/// Note:
/// Due to the multi-head attention architecture in the transformer model,
/// the output sequence length of a transformer is the same as the input
/// sequence (i.e. target) length of the decoder.
///
/// where
/// S is the source sequence length,
/// T is the target sequence length,
/// N is the batch size,
/// E is the feature number.
Tensor forward(
const Tensor& src,
const Tensor& tgt,
const Tensor& src_mask = {},
const Tensor& tgt_mask = {},
const Tensor& memory_mask = {},
const Tensor& src_key_padding_mask = {},
const Tensor& tgt_key_padding_mask = {},
const Tensor& memory_key_padding_mask = {});
void reset() override;
void reset_parameters();
/// Generate a square mask for the sequence.
/// The masked positions are filled with `-inf` in float type.
/// Unmasked positions are filled with `0.0` in float type.
/// Note:
/// 1. This function will always return a CPU tensor.
/// 2. This function requires that the platform support IEEE 754, since
/// `-inf` is guaranteed to be valid only when IEEE 754 is supported. If
/// the platform doesn't support IEEE 754, this function fills the mask
/// with the smallest representable float number instead of `-inf`, and a
/// one-time warning is emitted as well.
static Tensor generate_square_subsequent_mask(int64_t sz);
protected:
FORWARD_HAS_DEFAULT_ARGS(
{2, AnyValue(Tensor())},
{3, AnyValue(Tensor())},
{4, AnyValue(Tensor())},
{5, AnyValue(Tensor())},
{6, AnyValue(Tensor())},
{7, AnyValue(Tensor())})
public:
/// options with which this `Transformer` was constructed
TransformerOptions options;
/// encoder module
AnyModule encoder;
/// decoder module
AnyModule decoder;
};
/// A `ModuleHolder` subclass for `TransformerImpl`.
/// See the documentation for `TransformerImpl` class to learn what
/// methods it provides, and examples of how to use `Transformer` with
/// `torch::nn::TransformerOptions`.
/// See the documentation for `ModuleHolder` to learn about PyTorch's
/// module storage semantics.
TORCH_MODULE(Transformer);
} // namespace torch::nn
```
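A hedged end-to-end sketch of the `Transformer` module above, including the static `generate_square_subsequent_mask` helper for causal decoding (standard libtorch assumed; shapes match the `(S, N, E)` / `(T, N, E)` convention in the doc comment):
```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::Transformer model(torch::nn::TransformerOptions(512, 8));

  auto src = torch::rand({10, 32, 512});  // (S, N, E)
  auto tgt = torch::rand({20, 32, 512});  // (T, N, E)
  // Causal mask so each target position only attends to earlier positions.
  auto tgt_mask =
      torch::nn::TransformerImpl::generate_square_subsequent_mask(20);

  auto out = model(src, tgt, /*src_mask=*/torch::Tensor(), tgt_mask);
  std::cout << out.sizes() << std::endl;  // (20, 32, 512) = (T, N, E)
}
```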
================================================================================================================================================================
SOURCE CODE FILE: transformercoder.h
LINES: 1
SIZE: 5.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\transformercoder.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/cloneable.h>
#include <torch/nn/module.h>
#include <torch/nn/modules/common.h>
#include <torch/nn/modules/container/any.h>
#include <torch/nn/modules/container/modulelist.h>
#include <torch/nn/options/transformercoder.h>
#include <torch/nn/pimpl.h>
#include <torch/types.h>
#include <utility>
namespace torch::nn {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TransformerEncoder ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// TransformerEncoder module.
/// See
/// https://pytorch.org/docs/main/generated/torch.nn.TransformerEncoder.html
/// to learn about the exact behavior of this encoder module.
///
/// See the documentation for `torch::nn::TransformerEncoderOptions` class to
/// learn what constructor arguments are supported for this encoder module.
///
/// Example:
/// ```
/// TransformerEncoderLayer encoderLayer(
///     TransformerEncoderLayerOptions(512, 8).dropout(0.1));
/// TransformerEncoder encoder(TransformerEncoderOptions(encoderLayer, 6)
///     .norm(LayerNorm(LayerNormOptions({2}))));
/// ```
class TORCH_API TransformerEncoderImpl
: public Cloneable<TransformerEncoderImpl> {
public:
TransformerEncoderImpl(
TransformerEncoderLayer encoder_layer,
int64_t num_layers)
: TransformerEncoderImpl(
TransformerEncoderOptions(std::move(encoder_layer), num_layers)) {}
explicit TransformerEncoderImpl(TransformerEncoderOptions options_);
Tensor forward(
const Tensor& src,
const Tensor& src_mask = {},
const Tensor& src_key_padding_mask = {});
void reset() override;
void reset_parameters();
protected:
FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())}, {2, AnyValue(Tensor())})
public:
/// options with which this `TransformerEncoder` was constructed
TransformerEncoderOptions options;
/// module list that contains all the encoder layers
ModuleList layers = nullptr;
/// optional normalization module
AnyModule norm;
};
/// A `ModuleHolder` subclass for `TransformerEncoderImpl`.
/// See the documentation for `TransformerEncoderImpl` class to learn what
/// methods it provides, and examples of how to use `TransformerEncoder` with
/// `torch::nn::TransformerEncoderOptions`.
/// See the documentation for `ModuleHolder` to learn about PyTorch's
/// module storage semantics.
TORCH_MODULE(TransformerEncoder);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TransformerDecoder ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// TransformerDecoder is a stack of N decoder layers.
/// See
/// https://pytorch.org/docs/main/generated/torch.nn.TransformerDecoder.html
/// to learn about the exact behavior of this decoder module.
///
/// See the documentation for `torch::nn::TransformerDecoderOptions` class to
/// learn what constructor arguments are supported for this decoder module
///
/// Example:
/// ```
/// TransformerDecoderLayer decoder_layer(
///     TransformerDecoderLayerOptions(512, 8).dropout(0.1));
/// TransformerDecoder transformer_decoder(
///     TransformerDecoderOptions(decoder_layer, 6)
///         .norm(LayerNorm(LayerNormOptions({2}))));
/// const auto memory = torch::rand({10, 32, 512});
/// const auto tgt = torch::rand({20, 32, 512});
/// auto out = transformer_decoder(tgt, memory);
/// ```
class TORCH_API TransformerDecoderImpl
: public Cloneable<TransformerDecoderImpl> {
public:
TransformerDecoderImpl(
TransformerDecoderLayer decoder_layer,
int64_t num_layers)
: TransformerDecoderImpl(
TransformerDecoderOptions(std::move(decoder_layer), num_layers)) {}
explicit TransformerDecoderImpl(TransformerDecoderOptions options_);
void reset() override;
void reset_parameters();
/// Pass the inputs (and mask) through the decoder layer in turn.
/// Args:
/// tgt: the sequence to the decoder layer (required).
/// memory: the sequence from the last layer of the encoder (required).
/// tgt_mask: the mask for the tgt sequence (optional).
/// memory_mask: the mask for the memory sequence (optional).
/// tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
/// memory_key_padding_mask: the mask for the memory keys per batch
/// (optional).
Tensor forward(
const Tensor& tgt,
const Tensor& memory,
const Tensor& tgt_mask = {},
const Tensor& memory_mask = {},
const Tensor& tgt_key_padding_mask = {},
const Tensor& memory_key_padding_mask = {});
/// The options used to configure this module.
TransformerDecoderOptions options;
/// Cloned layers of decoder layers
ModuleList layers{nullptr};
/// optional layer normalization module
AnyModule norm;
protected:
FORWARD_HAS_DEFAULT_ARGS(
{2, AnyValue(Tensor())},
{3, AnyValue(Tensor())},
{4, AnyValue(Tensor())},
{5, AnyValue(Tensor())})
};
/// A `ModuleHolder` subclass for `TransformerDecoderImpl`.
/// See the documentation for `TransformerDecoderImpl` class to learn what
/// methods it provides, and examples of how to use `TransformerDecoder` with
/// `torch::nn::TransformerDecoderOptions`.
/// See the documentation for `ModuleHolder` to learn about PyTorch's
/// module storage semantics.
TORCH_MODULE(TransformerDecoder);
} // namespace torch::nn
```
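A brief sketch of stacking the encoder defined above (standard libtorch assumed; `TransformerEncoderLayer` is declared in transformerlayer.h below):
```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // One layer definition, cloned num_layers = 6 times inside the stack.
  torch::nn::TransformerEncoderLayer layer(
      torch::nn::TransformerEncoderLayerOptions(512, 8).dropout(0.1));
  torch::nn::TransformerEncoder encoder(
      torch::nn::TransformerEncoderOptions(layer, 6));

  auto src = torch::rand({10, 32, 512});  // (S, N, E)
  auto out = encoder(src);                // same shape: (10, 32, 512)
  std::cout << out.sizes() << std::endl;
}
```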
================================================================================================================================================================
SOURCE CODE FILE: transformerlayer.h
LINES: 1
SIZE: 6.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\transformerlayer.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/cloneable.h>
#include <torch/nn/module.h>
#include <torch/nn/modules/activation.h>
#include <torch/nn/modules/common.h>
#include <torch/nn/modules/dropout.h>
#include <torch/nn/modules/linear.h>
#include <torch/nn/modules/normalization.h>
#include <torch/nn/options/transformerlayer.h>
#include <torch/nn/pimpl.h>
#include <torch/types.h>
#include <ostream>
namespace torch::nn {
// ~~~~~~~~~~~~~~~~~~~~~~~~~ TransformerEncoderLayer ~~~~~~~~~~~~~~~~~~~~~~~~~
/// TransformerEncoderLayer module.
/// See
/// https://pytorch.org/docs/main/generated/torch.nn.TransformerEncoderLayer.html
/// to learn about the exact behavior of this encoder layer module.
///
/// See the documentation for `torch::nn::TransformerEncoderLayerOptions` class
/// to learn what constructor arguments are supported for this encoder layer
/// module.
///
/// Example:
/// ```
/// TransformerEncoderLayer encoderLayer(TransformerEncoderLayerOptions(512,
/// 8).dropout(0.1));
/// ```
class TORCH_API TransformerEncoderLayerImpl
: public Cloneable<TransformerEncoderLayerImpl> {
public:
TransformerEncoderLayerImpl(int64_t d_model, int64_t nhead)
: TransformerEncoderLayerImpl(
TransformerEncoderLayerOptions(d_model, nhead)) {}
explicit TransformerEncoderLayerImpl(TransformerEncoderLayerOptions options_);
Tensor forward(
const Tensor& src,
const Tensor& src_mask = {},
const Tensor& src_key_padding_mask = {});
void reset() override;
void reset_parameters();
protected:
FORWARD_HAS_DEFAULT_ARGS({1, AnyValue(Tensor())}, {2, AnyValue(Tensor())})
public:
/// options with which this `TransformerEncoderLayer` was constructed
TransformerEncoderLayerOptions options;
/// self attention
MultiheadAttention self_attn = nullptr;
/// feedforward first linear layer
Linear linear1 = nullptr;
/// feedforward dropout layer
Dropout dropout = nullptr;
/// feedforward second linear layer
Linear linear2 = nullptr;
/// normalization layer applied before the feedforward block
LayerNorm norm1 = nullptr;
/// normalization layer applied after the feedforward block
LayerNorm norm2 = nullptr;
/// dropout layer applied before the feedforward block
Dropout dropout1 = nullptr;
/// dropout layer applied after the feedforward block
Dropout dropout2 = nullptr;
};
/// A `ModuleHolder` subclass for `TransformerEncoderLayerImpl`.
/// See the documentation for `TransformerEncoderLayerImpl` class to learn what
/// methods it provides, and examples of how to use `TransformerEncoderLayer`
/// with `torch::nn::TransformerEncoderLayerOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(TransformerEncoderLayer);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TransformerDecoderLayer
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// TransformerDecoderLayer is made up of self-attn, multi-head-attn and
/// feedforward network. This standard decoder layer is based on the paper
/// "Attention Is All You Need". Ashish Vaswani, Noam Shazeer, Niki Parmar,
/// Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia
/// Polosukhin. 2017. Attention is all you need. In Advances in Neural
/// Information Processing Systems, pages 6000-6010. Users may modify or
/// implement in a different way during application. See
/// https://pytorch.org/docs/main/nn.html#transformer-layers to learn about
/// the exact behavior of this module.
///
/// See the documentation for `torch::nn::TransformerDecoderLayerOptions` class
/// to learn what constructor arguments are supported for this module.
///
/// Example:
/// ```
/// TransformerDecoderLayer model(TransformerDecoderLayerOptions(512,
/// 8).dropout(0.2));
/// ```
class TORCH_API TransformerDecoderLayerImpl
: public Cloneable<TransformerDecoderLayerImpl> {
public:
TransformerDecoderLayerImpl(int64_t d_model, int64_t nhead)
: TransformerDecoderLayerImpl(
TransformerDecoderLayerOptions(d_model, nhead)) {}
explicit TransformerDecoderLayerImpl(TransformerDecoderLayerOptions options_);
void reset() override;
void reset_parameters();
/// Pass the inputs (and mask) through the decoder layer.
/// Args:
/// tgt: the sequence to the decoder layer (required).
/// memory: the sequence from the last layer of the encoder (required).
/// tgt_mask: the mask for the tgt sequence (optional).
/// memory_mask: the mask for the memory sequence (optional).
/// tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
/// memory_key_padding_mask: the mask for the memory keys per batch
/// (optional).
Tensor forward(
Tensor tgt,
const Tensor& memory,
const Tensor& tgt_mask = {},
const Tensor& memory_mask = {},
const Tensor& tgt_key_padding_mask = {},
const Tensor& memory_key_padding_mask = {});
/// The options used to configure this module.
TransformerDecoderLayerOptions options;
/// self attention
MultiheadAttention self_attn{nullptr};
/// Dropout, post self attention
Dropout dropout1{nullptr};
/// Normalization, post self attention
LayerNorm norm1{nullptr};
/// Multi-headed attention
MultiheadAttention multihead_attn{nullptr};
/// Dropout, post multi-headed attention
Dropout dropout2{nullptr};
/// Normalization, post multi-headed attention
LayerNorm norm2{nullptr};
/// Feed forward first linear layer
Linear linear1{nullptr};
/// Feed forward dropout layer
Dropout dropout{nullptr};
/// Feed forward second linear layer
Linear linear2{nullptr};
/// Dropout, post feed forward
Dropout dropout3{nullptr};
/// Normalization, post feed forward
LayerNorm norm3{nullptr};
protected:
FORWARD_HAS_DEFAULT_ARGS(
{2, AnyValue(Tensor())},
{3, AnyValue(Tensor())},
{4, AnyValue(Tensor())},
{5, AnyValue(Tensor())})
/// Apply activation based on configuration
Tensor activation(const Tensor& input);
};
/// A `ModuleHolder` subclass for `TransformerDecoderLayerImpl`.
/// See the documentation for `TransformerDecoderLayerImpl` class to learn what
/// methods it provides, and examples of how to use `TransformerDecoderLayer`
/// with `torch::nn::TransformerDecoderLayerOptions`. See the documentation for
/// `ModuleHolder` to learn about PyTorch's module storage semantics.
TORCH_MODULE(TransformerDecoderLayer);
} // namespace torch::nn
```
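A minimal sketch exercising one encoder block and one decoder block from this header (illustrative; assumes LibTorch is linked):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;

  TransformerEncoderLayer enc_layer(
      TransformerEncoderLayerOptions(/*d_model=*/512, /*nhead=*/8).dropout(0.1));
  TransformerDecoderLayer dec_layer(
      TransformerDecoderLayerOptions(/*d_model=*/512, /*nhead=*/8).dropout(0.1));

  auto src = torch::rand({10, 32, 512});  // (S, N, E)
  auto tgt = torch::rand({20, 32, 512});  // (T, N, E)

  auto memory = enc_layer(src);       // one encoder block
  auto out = dec_layer(tgt, memory);  // one decoder block attending to memory
  std::cout << out.sizes() << '\n';   // [20, 32, 512]
}
```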
|
==========================================================================================================================================================
SOURCE CODE FILE: upsampling.h
LINES: 1
SIZE: 1.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\upsampling.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/cloneable.h>
#include <torch/nn/functional/upsampling.h>
#include <torch/nn/options/upsampling.h>
#include <torch/nn/pimpl.h>
#include <torch/types.h>
#include <torch/csrc/Export.h>
#include <cstddef>
#include <ostream>
namespace torch::nn {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Upsample ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D
/// (volumetric) data.
/// See https://pytorch.org/docs/main/nn.html#torch.nn.Upsample to learn
/// about the exact behavior of this module.
///
/// See the documentation for `torch::nn::UpsampleOptions` class to learn what
/// constructor arguments are supported for this module.
///
/// Example:
/// ```
/// Upsample
/// model(UpsampleOptions().scale_factor({3}).mode(torch::kLinear).align_corners(false));
/// ```
class TORCH_API UpsampleImpl : public Cloneable<UpsampleImpl> {
public:
explicit UpsampleImpl(UpsampleOptions options_ = {});
void reset() override;
/// Pretty prints the `Upsample` module into the given `stream`.
void pretty_print(std::ostream& stream) const override;
Tensor forward(const Tensor& input);
/// The options with which this `Module` was constructed.
UpsampleOptions options;
};
/// A `ModuleHolder` subclass for `UpsampleImpl`.
/// See the documentation for `UpsampleImpl` class to learn what methods it
/// provides, and examples of how to use `Upsample` with
/// `torch::nn::UpsampleOptions`. See the documentation for `ModuleHolder` to
/// learn about PyTorch's module storage semantics.
TORCH_MODULE(Upsample);
} // namespace torch::nn
```
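A short sketch of `Upsample` on a 4-D `(N, C, H, W)` input (illustrative; the two-element `scale_factor` and `torch::kBilinear` mode are one plausible configuration):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;

  // Double the spatial resolution of an (N, C, H, W) input.
  Upsample up(UpsampleOptions()
                  .scale_factor(std::vector<double>{2.0, 2.0})
                  .mode(torch::kBilinear)
                  .align_corners(false));

  auto x = torch::rand({1, 3, 16, 16});
  auto y = up(x);
  std::cout << y.sizes() << '\n';  // [1, 3, 32, 32]
}
```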
|
=====================================================================================================================================================
SOURCE CODE FILE: utils.h
LINES: 1
SIZE: 1.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\modules\utils.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/ArrayRef.h>
#include <c10/util/irange.h>
#include <optional>
#include <vector>
namespace torch::nn::modules::utils {
// Reverse the order of `t` and repeat each element `n` times.
// This can be used to translate the padding arg used by Conv and Pooling
// modules into the form expected by `F::pad`.
//
// This mirrors `_reverse_repeat_tuple` in `torch/nn/modules/utils.py`.
inline std::vector<int64_t> _reverse_repeat_vector(
c10::ArrayRef<int64_t> t,
int64_t n) {
TORCH_INTERNAL_ASSERT(n >= 0);
std::vector<int64_t> ret;
ret.reserve(t.size() * n);
for (auto rit = t.rbegin(); rit != t.rend(); ++rit) {
for ([[maybe_unused]] const auto i : c10::irange(n)) {
ret.emplace_back(*rit);
}
}
return ret;
}
inline std::vector<int64_t> _list_with_default(
c10::ArrayRef<std::optional<int64_t>> out_size,
c10::IntArrayRef defaults) {
TORCH_CHECK(
defaults.size() > out_size.size(),
"Input dimension should be at least ",
out_size.size() + 1);
std::vector<int64_t> ret;
c10::IntArrayRef defaults_slice =
defaults.slice(defaults.size() - out_size.size(), out_size.size());
for (const auto i : c10::irange(out_size.size())) {
auto v = out_size.at(i);
auto d = defaults_slice.at(i);
ret.emplace_back(v.has_value() ? v.value() : d);
}
return ret;
}
} // namespace torch::nn::modules::utils
```
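These are internal helpers (note the leading underscores), but a small sketch illustrates their behavior, with the expected outputs worked out from the definitions above:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace utils = torch::nn::modules::utils;

  // {1, 2} reversed, each element repeated twice -> {2, 2, 1, 1}:
  // the layout `F::pad` expects for a 2-D padding argument.
  auto pad = utils::_reverse_repeat_vector(/*t=*/{1, 2}, /*n=*/2);
  for (auto v : pad) std::cout << v << ' ';  // 2 2 1 1
  std::cout << '\n';

  // Unset output sizes fall back to the trailing defaults:
  // {nullopt, 7} with defaults {1, 2, 3} -> {2, 7}.
  auto sizes = utils::_list_with_default({std::nullopt, 7}, {1, 2, 3});
  for (auto v : sizes) std::cout << v << ' ';  // 2 7
  std::cout << '\n';
}
```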
|
===============================================================================================================================================
SOURCE CODE FILE: options.h
LINES: 1
SIZE: 0.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/options/batchnorm.h>
#include <torch/nn/options/conv.h>
#include <torch/nn/options/dropout.h>
#include <torch/nn/options/fold.h>
#include <torch/nn/options/linear.h>
#include <torch/nn/options/loss.h>
#include <torch/nn/options/normalization.h>
#include <torch/nn/options/padding.h>
#include <torch/nn/options/pixelshuffle.h>
#include <torch/nn/options/pooling.h>
#include <torch/nn/options/rnn.h>
#include <torch/nn/options/transformer.h>
#include <torch/nn/options/transformercoder.h>
#include <torch/nn/options/transformerlayer.h>
#include <torch/nn/options/upsampling.h>
#include <torch/nn/options/vision.h>
```
|
==========================================================================================================================================================
SOURCE CODE FILE: activation.h
LINES: 1
SIZE: 19.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\activation.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `ELU` module.
///
/// Example:
/// ```
/// ELU model(ELUOptions().alpha(42.42).inplace(true));
/// ```
struct TORCH_API ELUOptions {
/// The `alpha` value for the ELU formulation. Default: 1.0
TORCH_ARG(double, alpha) = 1.0;
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace) = false;
};
namespace functional {
/// Options for `torch::nn::functional::elu`.
///
/// See the documentation for `torch::nn::ELUOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true));
/// ```
using ELUFuncOptions = ELUOptions;
} // namespace functional
// ============================================================================
/// Options for the `SELU` module.
///
/// Example:
/// ```
/// SELU model(SELUOptions().inplace(true));
/// ```
struct TORCH_API SELUOptions {
/* implicit */ SELUOptions(bool inplace = false);
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace);
};
namespace functional {
/// Options for `torch::nn::functional::selu`.
///
/// See the documentation for `torch::nn::SELUOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::selu(input, F::SELUFuncOptions(false));
/// ```
using SELUFuncOptions = SELUOptions;
} // namespace functional
// ============================================================================
/// Options for the `GLU` module.
///
/// Example:
/// ```
/// GLU model(GLUOptions(1));
/// ```
struct TORCH_API GLUOptions {
/* implicit */ GLUOptions(int64_t dim = -1);
/// the dimension on which to split the input. Default: -1
TORCH_ARG(int64_t, dim);
};
namespace functional {
/// Options for `torch::nn::functional::glu`.
///
/// See the documentation for `torch::nn::GLUOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::glu(input, GLUFuncOptions(1));
/// ```
using GLUFuncOptions = GLUOptions;
} // namespace functional
// ============================================================================
/// Options for the `GELU` module.
///
/// Example:
/// ```
/// GELU model(GELUOptions().approximate("none"));
/// ```
struct TORCH_API GELUOptions {
/// Specifies the approximation to apply to the output.
TORCH_ARG(std::string, approximate) = "none";
};
namespace functional {
/// Options for `torch::nn::functional::gelu`.
///
/// See the documentation for `torch::nn::GELUOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::gelu(input, F::GELUFuncOptions().approximate("none"));
/// ```
using GELUFuncOptions = GELUOptions;
} // namespace functional
// ============================================================================
/// Options for the `Hardshrink` module.
///
/// Example:
/// ```
/// Hardshrink model(HardshrinkOptions().lambda(42.42));
/// ```
struct TORCH_API HardshrinkOptions {
/* implicit */ HardshrinkOptions(double lambda = 0.5);
/// the `lambda` value for the Hardshrink formulation. Default: 0.5
TORCH_ARG(double, lambda);
};
namespace functional {
/// Options for `torch::nn::functional::hardshrink`.
///
/// See the documentation for `torch::nn::HardshrinkOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42));
/// ```
using HardshrinkFuncOptions = HardshrinkOptions;
} // namespace functional
// ============================================================================
/// Options for the `Hardtanh` module.
///
/// Example:
/// ```
/// Hardtanh
/// model(HardtanhOptions().min_val(-42.42).max_val(0.42).inplace(true));
/// ```
struct TORCH_API HardtanhOptions {
/// minimum value of the linear region range. Default: -1
TORCH_ARG(double, min_val) = -1.0;
/// maximum value of the linear region range. Default: 1
TORCH_ARG(double, max_val) = 1.0;
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace) = false;
};
namespace functional {
/// Options for `torch::nn::functional::hardtanh`.
///
/// See the documentation for `torch::nn::HardtanhOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::hardtanh(x,
/// F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true));
/// ```
using HardtanhFuncOptions = HardtanhOptions;
} // namespace functional
// ============================================================================
/// Options for the `LeakyReLU` module.
///
/// Example:
/// ```
/// LeakyReLU model(LeakyReLUOptions().negative_slope(0.42).inplace(true));
/// ```
struct TORCH_API LeakyReLUOptions {
/// Controls the angle of the negative slope. Default: 1e-2
TORCH_ARG(double, negative_slope) = 1e-2;
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace) = false;
};
namespace functional {
/// Options for `torch::nn::functional::leaky_relu`.
///
/// See the documentation for `torch::nn::LeakyReLUOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::leaky_relu(x,
/// F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true));
/// ```
using LeakyReLUFuncOptions = LeakyReLUOptions;
} // namespace functional
// ============================================================================
/// Options for the `Softmax` module.
///
/// Example:
/// ```
/// Softmax model(SoftmaxOptions(1));
/// ```
struct TORCH_API SoftmaxOptions {
SoftmaxOptions(int64_t dim);
/// Dimension along which Softmax will be computed.
TORCH_ARG(int64_t, dim);
};
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::softmax`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::softmax(input, F::SoftmaxFuncOptions(1));
/// ```
struct TORCH_API SoftmaxFuncOptions {
SoftmaxFuncOptions(int64_t dim);
/// Dimension along which Softmax will be computed.
TORCH_ARG(int64_t, dim);
/// the desired data type of the returned tensor.
/// If specified, the input tensor is cast to `dtype` before the operation
/// is performed. This is useful for preventing data type overflows. Default:
/// None.
TORCH_ARG(std::optional<torch::Dtype>, dtype) = std::nullopt;
};
} // namespace functional
// ============================================================================
/// Options for the `Softmin` module.
///
/// Example:
/// ```
/// Softmin model(SoftminOptions(1));
/// ```
struct TORCH_API SoftminOptions {
SoftminOptions(int64_t dim);
/// Dimension along which Softmin will be computed.
TORCH_ARG(int64_t, dim);
};
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::softmin`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::softmin(input, F::SoftminFuncOptions(1));
/// ```
struct TORCH_API SoftminFuncOptions {
SoftminFuncOptions(int64_t dim);
/// Dimension along which Softmin will be computed.
TORCH_ARG(int64_t, dim);
/// the desired data type of the returned tensor.
/// If specified, the input tensor is cast to `dtype` before the operation
/// is performed. This is useful for preventing data type overflows. Default:
/// None.
TORCH_ARG(std::optional<torch::Dtype>, dtype) = std::nullopt;
};
} // namespace functional
// ============================================================================
/// Options for the `LogSoftmax` module.
///
/// Example:
/// ```
/// LogSoftmax model(LogSoftmaxOptions(1));
/// ```
struct TORCH_API LogSoftmaxOptions {
LogSoftmaxOptions(int64_t dim);
/// Dimension along which LogSoftmax will be computed.
TORCH_ARG(int64_t, dim);
};
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::log_softmax`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::log_softmax(input, LogSoftmaxFuncOptions(1));
/// ```
struct TORCH_API LogSoftmaxFuncOptions {
LogSoftmaxFuncOptions(int64_t dim);
/// Dimension along which LogSoftmax will be computed.
TORCH_ARG(int64_t, dim);
/// the desired data type of the returned tensor.
/// If specified, the input tensor is cast to `dtype` before the operation
/// is performed. This is useful for preventing data type overflows. Default:
/// None.
TORCH_ARG(std::optional<torch::Dtype>, dtype) = std::nullopt;
};
} // namespace functional
// ============================================================================
/// Options for the `PReLU` module.
///
/// Example:
/// ```
/// PReLU model(PReLUOptions().num_parameters(42));
/// ```
struct TORCH_API PReLUOptions {
/// number of `a` to learn. Although it takes an int as input, only two
/// values are legitimate: 1, or the number of channels of the input. Default:
/// 1
TORCH_ARG(int64_t, num_parameters) = 1;
/// the initial value of `a`. Default: 0.25
TORCH_ARG(double, init) = 0.25;
};
// ============================================================================
/// Options for the `ReLU` module.
///
/// Example:
/// ```
/// ReLU model(ReLUOptions().inplace(true));
/// ```
struct TORCH_API ReLUOptions {
/* implicit */ ReLUOptions(bool inplace = false);
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace);
};
namespace functional {
/// Options for `torch::nn::functional::relu`.
///
/// See the documentation for `torch::nn::ReLUOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::relu(x, F::ReLUFuncOptions().inplace(true));
/// ```
using ReLUFuncOptions = ReLUOptions;
} // namespace functional
// ============================================================================
/// Options for the `ReLU6` module.
///
/// Example:
/// ```
/// ReLU6 model(ReLU6Options().inplace(true));
/// ```
struct TORCH_API ReLU6Options {
/* implicit */ ReLU6Options(bool inplace = false);
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace);
};
namespace functional {
/// Options for `torch::nn::functional::relu6`.
///
/// See the documentation for `torch::nn::ReLU6Options` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::relu6(x, F::ReLU6FuncOptions().inplace(true));
/// ```
using ReLU6FuncOptions = ReLU6Options;
} // namespace functional
// ============================================================================
/// Options for the `RReLU` module.
///
/// Example:
/// ```
/// RReLU model(RReLUOptions().lower(0.24).upper(0.42).inplace(true));
/// ```
struct TORCH_API RReLUOptions {
/// lower bound of the uniform distribution. Default: 1/8
TORCH_ARG(double, lower) = 1.0 / 8.0;
/// upper bound of the uniform distribution. Default: 1/3
TORCH_ARG(double, upper) = 1.0 / 3.0;
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace) = false;
};
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::rrelu`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true));
/// ```
struct TORCH_API RReLUFuncOptions {
/// lower bound of the uniform distribution. Default: 1/8
TORCH_ARG(double, lower) = 1.0 / 8.0;
/// upper bound of the uniform distribution. Default: 1/3
TORCH_ARG(double, upper) = 1.0 / 3.0;
TORCH_ARG(bool, training) = false;
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace) = false;
};
} // namespace functional
// ============================================================================
/// Options for the `CELU` module.
///
/// Example:
/// ```
/// CELU model(CELUOptions().alpha(42.42).inplace(true));
/// ```
struct TORCH_API CELUOptions {
/// The `alpha` value for the CELU formulation. Default: 1.0
TORCH_ARG(double, alpha) = 1.0;
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace) = false;
};
namespace functional {
/// Options for `torch::nn::functional::celu`.
///
/// See the documentation for `torch::nn::CELUOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true));
/// ```
using CELUFuncOptions = CELUOptions;
} // namespace functional
// ============================================================================
/// Options for the `Softplus` module.
///
/// Example:
/// ```
/// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42));
/// ```
struct TORCH_API SoftplusOptions {
/// the `beta` value for the Softplus formulation. Default: 1
TORCH_ARG(double, beta) = 1.0;
/// values above this revert to a linear function. Default: 20
TORCH_ARG(double, threshold) = 20.0;
};
namespace functional {
/// Options for `torch::nn::functional::softplus`.
///
/// See the documentation for `torch::nn::SoftplusOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
/// ```
using SoftplusFuncOptions = SoftplusOptions;
} // namespace functional
// ============================================================================
/// Options for the `Softshrink` module.
///
/// Example:
/// ```
/// Softshrink model(SoftshrinkOptions(42.42));
/// ```
struct TORCH_API SoftshrinkOptions {
/* implicit */ SoftshrinkOptions(double lambda = 0.5);
/// the `lambda` value for the Softshrink formulation. Default: 0.5
TORCH_ARG(double, lambda);
};
namespace functional {
/// Options for `torch::nn::functional::softshrink`.
///
/// See the documentation for `torch::nn::SoftshrinkOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::softshrink(x, F::SoftshrinkFuncOptions(0.42));
/// ```
using SoftshrinkFuncOptions = SoftshrinkOptions;
} // namespace functional
// ============================================================================
/// Options for the `Threshold` module.
///
/// Example:
/// ```
/// Threshold model(ThresholdOptions(42.42, 24.24).inplace(true));
/// ```
struct TORCH_API ThresholdOptions {
ThresholdOptions(double threshold, double value)
: threshold_(threshold), value_(value) {}
/// The value to threshold at
TORCH_ARG(double, threshold);
/// The value to replace with
TORCH_ARG(double, value);
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace) = false;
};
namespace functional {
/// Options for `torch::nn::functional::threshold`.
///
/// See the documentation for `torch::nn::ThresholdOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true));
/// ```
using ThresholdFuncOptions = ThresholdOptions;
} // namespace functional
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::gumbel_softmax`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1));
/// ```
struct TORCH_API GumbelSoftmaxFuncOptions {
/// non-negative scalar temperature
TORCH_ARG(double, tau) = 1.0;
/// if ``true``, the returned samples will be discretized as one-hot vectors,
/// but will be differentiated as if they were the soft samples in autograd.
/// Default: False
TORCH_ARG(bool, hard) = false;
/// dimension along which softmax will be computed. Default: -1
TORCH_ARG(int, dim) = -1;
};
} // namespace functional
// ============================================================================
/// Options for the `MultiheadAttention` module.
///
/// Example:
/// ```
/// MultiheadAttention model(MultiheadAttentionOptions(20, 10).bias(false));
/// ```
struct TORCH_API MultiheadAttentionOptions {
MultiheadAttentionOptions(int64_t embed_dim, int64_t num_heads);
/// total dimension of the model.
TORCH_ARG(int64_t, embed_dim);
/// parallel attention heads.
TORCH_ARG(int64_t, num_heads);
/// a Dropout layer on attn_output_weights. Default: 0.0.
TORCH_ARG(double, dropout) = 0.0;
/// add bias as module parameter. Default: true.
TORCH_ARG(bool, bias) = true;
/// add bias to the key and value sequences at dim=0.
TORCH_ARG(bool, add_bias_kv) = false;
/// add a new batch of zeros to the key and value sequences at dim=1.
TORCH_ARG(bool, add_zero_attn) = false;
/// total number of features in key. Default: std::nullopt.
TORCH_ARG(int64_t, kdim);
/// total number of features in value. Default: std::nullopt.
TORCH_ARG(int64_t, vdim);
};
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::multi_head_attention_forward`
struct TORCH_API MultiheadAttentionForwardFuncOptions {
MultiheadAttentionForwardFuncOptions(
int64_t embed_dim_to_check,
int64_t num_heads,
Tensor in_proj_weight,
Tensor in_proj_bias,
Tensor bias_k,
Tensor bias_v,
bool add_zero_attn,
double dropout_p,
Tensor out_proj_weight,
Tensor out_proj_bias);
TORCH_ARG(int64_t, embed_dim_to_check);
TORCH_ARG(int64_t, num_heads);
TORCH_ARG(Tensor, in_proj_weight);
TORCH_ARG(Tensor, in_proj_bias);
TORCH_ARG(Tensor, bias_k);
TORCH_ARG(Tensor, bias_v);
TORCH_ARG(bool, add_zero_attn);
TORCH_ARG(double, dropout_p);
TORCH_ARG(Tensor, out_proj_weight);
TORCH_ARG(Tensor, out_proj_bias);
TORCH_ARG(bool, training) = true;
TORCH_ARG(Tensor, key_padding_mask) = {};
TORCH_ARG(bool, need_weights) = true;
TORCH_ARG(Tensor, attn_mask) = {};
TORCH_ARG(bool, use_separate_proj_weight) = false;
TORCH_ARG(Tensor, q_proj_weight) = {};
TORCH_ARG(Tensor, k_proj_weight) = {};
TORCH_ARG(Tensor, v_proj_weight) = {};
TORCH_ARG(Tensor, static_k) = {};
TORCH_ARG(Tensor, static_v) = {};
TORCH_ARG(bool, average_attn_weights) = true;
};
} // namespace functional
} // namespace torch::nn
```
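A brief sketch contrasting the module-style and functional-style options above (illustrative; all tensor values are arbitrary):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;
  namespace F = torch::nn::functional;

  auto x = torch::randn({2, 3});

  // Module style: options fixed at construction.
  ELU elu(ELUOptions().alpha(1.0));
  auto a = elu(x);

  // Functional style: options passed per call; `dtype` casts before softmax.
  auto p = F::softmax(x, F::SoftmaxFuncOptions(/*dim=*/1).dtype(torch::kFloat64));

  // MultiheadAttention returns (output, attention weights).
  MultiheadAttention mha(
      MultiheadAttentionOptions(/*embed_dim=*/512, /*num_heads=*/8).dropout(0.1));
  auto q = torch::rand({10, 32, 512});  // (L, N, E)
  auto [out, weights] = mha(q, q, q);   // self-attention
  std::cout << a.sizes() << ' ' << p.dtype() << ' ' << out.sizes() << '\n';
}
```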
|
========================================================================================================================================================
SOURCE CODE FILE: adaptive.h
LINES: 1
SIZE: 1.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\adaptive.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `AdaptiveLogSoftmaxWithLoss` module.
///
/// Example:
/// ```
/// AdaptiveLogSoftmaxWithLoss model(AdaptiveLogSoftmaxWithLossOptions(8, 10,
/// {4, 8}).div_value(2.).head_bias(true));
/// ```
struct TORCH_API AdaptiveLogSoftmaxWithLossOptions {
/* implicit */ AdaptiveLogSoftmaxWithLossOptions(
int64_t in_features,
int64_t n_classes,
std::vector<int64_t> cutoffs);
/// Number of features in the input tensor
TORCH_ARG(int64_t, in_features);
/// Number of classes in the dataset
TORCH_ARG(int64_t, n_classes);
/// Cutoffs used to assign targets to their buckets
TORCH_ARG(std::vector<int64_t>, cutoffs);
/// value used as an exponent to compute sizes of the clusters. Default: 4.0
TORCH_ARG(double, div_value) = 4.;
/// If ``true``, adds a bias term to the 'head' of
/// the adaptive softmax. Default: false
TORCH_ARG(bool, head_bias) = false;
};
} // namespace torch::nn
```
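A minimal sketch of these options in use (illustrative; the comment on how cutoffs `{4, 8}` partition the 10 classes reflects the usual adaptive-softmax bucketing):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;

  // 8 input features, 10 classes, cutoffs {4, 8}: classes 0-3 stay in the
  // head, 4-7 go to the first tail cluster, 8-9 to the second.
  AdaptiveLogSoftmaxWithLoss asfm(
      AdaptiveLogSoftmaxWithLossOptions(8, 10, {4, 8}).div_value(2.));

  auto input = torch::randn({4, 8});
  auto target = torch::randint(0, 10, {4});  // int64 class indices

  auto result = asfm(input, target);  // carries both the output and the loss
  std::cout << result.output.sizes() << '\n';
}
```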
|
=========================================================================================================================================================
SOURCE CODE FILE: batchnorm.h
LINES: 1
SIZE: 2.79 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\batchnorm.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `BatchNorm` module.
struct TORCH_API BatchNormOptions {
/* implicit */ BatchNormOptions(int64_t num_features);
/// The number of features of the input tensor.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(int64_t, num_features);
/// The epsilon value added for numerical stability.
/// Changing this parameter after construction __is effective__.
TORCH_ARG(double, eps) = 1e-5;
/// A momentum multiplier for the mean and variance.
/// Changing this parameter after construction __is effective__.
TORCH_ARG(std::optional<double>, momentum) = 0.1;
/// Whether to learn a scale and bias that are applied in an affine
/// transformation on the input.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(bool, affine) = true;
/// Whether to store and update batch statistics (mean and variance) in the
/// module.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(bool, track_running_stats) = true;
};
/// Options for the `BatchNorm1d` module.
///
/// Example:
/// ```
/// BatchNorm1d
/// model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
/// ```
using BatchNorm1dOptions = BatchNormOptions;
/// Options for the `BatchNorm2d` module.
///
/// Example:
/// ```
/// BatchNorm2d
/// model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
/// ```
using BatchNorm2dOptions = BatchNormOptions;
/// Options for the `BatchNorm3d` module.
///
/// Example:
/// ```
/// BatchNorm3d
/// model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
/// ```
using BatchNorm3dOptions = BatchNormOptions;
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::batch_norm`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::batch_norm(input, mean, variance,
/// F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false));
/// ```
struct TORCH_API BatchNormFuncOptions {
TORCH_ARG(Tensor, weight) = Tensor();
TORCH_ARG(Tensor, bias) = Tensor();
TORCH_ARG(bool, training) = false;
/// A momentum multiplier for the mean and variance.
/// Changing this parameter after construction __is effective__.
TORCH_ARG(double, momentum) = 0.1;
/// The epsilon value added for numerical stability.
/// Changing this parameter after construction __is effective__.
TORCH_ARG(double, eps) = 1e-5;
};
} // namespace functional
} // namespace torch::nn
```
|
====================================================================================================================================================
SOURCE CODE FILE: conv.h
LINES: 1
SIZE: 13.53 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\conv.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/expanding_array.h>
#include <torch/types.h>
namespace torch::nn {
namespace detail {
typedef std::variant<
enumtype::kZeros,
enumtype::kReflect,
enumtype::kReplicate,
enumtype::kCircular>
conv_padding_mode_t;
template <size_t D>
using conv_padding_t =
std::variant<ExpandingArray<D>, enumtype::kValid, enumtype::kSame>;
/// Options for a `D`-dimensional convolution or convolution transpose module.
template <size_t D>
struct ConvNdOptions {
using padding_t = conv_padding_t<D>;
ConvNdOptions(
int64_t in_channels,
int64_t out_channels,
ExpandingArray<D> kernel_size)
: in_channels_(in_channels),
out_channels_(out_channels),
kernel_size_(std::move(kernel_size)) {}
/// The number of channels the input volumes will have.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(int64_t, in_channels);
/// The number of output channels the convolution should produce.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(int64_t, out_channels);
/// The kernel size to use.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, kernel_size);
/// The stride of the convolution.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, stride) = 1;
/// The padding to add to the input volumes.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(padding_t, padding) = 0;
public:
decltype(auto) padding(std::initializer_list<int64_t> il) {
return padding(IntArrayRef{il});
}
/// The kernel dilation.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
/// If true, convolutions will be transpose convolutions (a.k.a.
/// deconvolutions).
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(bool, transposed) = false;
/// For transpose convolutions, the padding to add to output volumes.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, output_padding) = 0;
/// The number of convolution groups.
/// This parameter __can__ be changed after construction.
TORCH_ARG(int64_t, groups) = 1;
/// Whether to add a bias after individual applications of the kernel.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(bool, bias) = true;
/// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or
/// `torch::kCircular`. Default: `torch::kZeros`
TORCH_ARG(conv_padding_mode_t, padding_mode) = torch::kZeros;
};
} // namespace detail
// ============================================================================
/// Options for a `D`-dimensional convolution module.
template <size_t D>
struct ConvOptions {
using padding_mode_t = detail::conv_padding_mode_t;
using padding_t = detail::conv_padding_t<D>;
ConvOptions(
int64_t in_channels,
int64_t out_channels,
ExpandingArray<D> kernel_size)
: in_channels_(in_channels),
out_channels_(out_channels),
kernel_size_(std::move(kernel_size)) {}
/// The number of channels the input volumes will have.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(int64_t, in_channels);
/// The number of output channels the convolution should produce.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(int64_t, out_channels);
/// The kernel size to use.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, kernel_size);
/// The stride of the convolution.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, stride) = 1;
/// The padding to add to the input volumes.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(padding_t, padding) = 0;
public:
decltype(auto) padding(std::initializer_list<int64_t> il) {
return padding(IntArrayRef{il});
}
/// The kernel dilation.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
/// The number of convolution groups.
/// This parameter __can__ be changed after construction.
TORCH_ARG(int64_t, groups) = 1;
/// Whether to add a bias after individual applications of the kernel.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(bool, bias) = true;
/// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or
/// `torch::kCircular`. Default: `torch::kZeros`
TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros;
};
/// `ConvOptions` specialized for the `Conv1d` module.
///
/// Example:
/// ```
/// Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false));
/// ```
using Conv1dOptions = ConvOptions<1>;
/// `ConvOptions` specialized for the `Conv2d` module.
///
/// Example:
/// ```
/// Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false));
/// ```
using Conv2dOptions = ConvOptions<2>;
/// `ConvOptions` specialized for the `Conv3d` module.
///
/// Example:
/// ```
/// Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false));
/// ```
using Conv3dOptions = ConvOptions<3>;
// ============================================================================
namespace functional {
/// Options for a `D`-dimensional convolution functional.
template <size_t D>
struct ConvFuncOptions {
using padding_t = torch::nn::detail::conv_padding_t<D>;
/// optional bias of shape `(out_channels)`. Default: ``None``
TORCH_ARG(torch::Tensor, bias) = Tensor();
/// The stride of the convolving kernel.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
TORCH_ARG(ExpandingArray<D>, stride) = 1;
/// Implicit paddings on both sides of the input.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
TORCH_ARG(padding_t, padding) = 0;
public:
decltype(auto) padding(std::initializer_list<int64_t> il) {
return padding(IntArrayRef{il});
}
/// The spacing between kernel elements.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
/// Split input into groups, `in_channels` should be divisible by
/// the number of groups.
TORCH_ARG(int64_t, groups) = 1;
};
/// `ConvFuncOptions` specialized for `torch::nn::functional::conv1d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1));
/// ```
using Conv1dFuncOptions = ConvFuncOptions<1>;
/// `ConvFuncOptions` specialized for `torch::nn::functional::conv2d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1));
/// ```
using Conv2dFuncOptions = ConvFuncOptions<2>;
/// `ConvFuncOptions` specialized for `torch::nn::functional::conv3d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1));
/// ```
using Conv3dFuncOptions = ConvFuncOptions<3>;
} // namespace functional
// ============================================================================
template <size_t D>
struct ConvTransposeOptions {
using padding_mode_t = detail::conv_padding_mode_t;
ConvTransposeOptions(
int64_t in_channels,
int64_t out_channels,
ExpandingArray<D> kernel_size)
: in_channels_(in_channels),
out_channels_(out_channels),
kernel_size_(std::move(kernel_size)) {}
/// The number of channels the input volumes will have.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(int64_t, in_channels);
/// The number of output channels the convolution should produce.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(int64_t, out_channels);
/// The kernel size to use.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, kernel_size);
/// The stride of the convolution.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, stride) = 1;
/// The padding to add to the input volumes.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, padding) = 0;
/// For transpose convolutions, the padding to add to output volumes.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, output_padding) = 0;
/// The number of convolution groups.
/// This parameter __can__ be changed after construction.
TORCH_ARG(int64_t, groups) = 1;
/// Whether to add a bias after individual applications of the kernel.
/// Changing this parameter after construction __has no effect__.
TORCH_ARG(bool, bias) = true;
/// The kernel dilation.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
/// This parameter __can__ be changed after construction.
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
/// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or
/// `torch::kCircular`. Default: `torch::kZeros`
TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros;
};
/// `ConvTransposeOptions` specialized for the `ConvTranspose1d` module.
///
/// Example:
/// ```
/// ConvTranspose1d model(ConvTranspose1dOptions(3, 2,
/// 3).stride(1).bias(false));
/// ```
using ConvTranspose1dOptions = ConvTransposeOptions<1>;
/// `ConvTransposeOptions` specialized for the `ConvTranspose2d` module.
///
/// Example:
/// ```
/// ConvTranspose2d model(ConvTranspose2dOptions(3, 2,
/// 3).stride(1).bias(false));
/// ```
using ConvTranspose2dOptions = ConvTransposeOptions<2>;
/// `ConvTransposeOptions` specialized for the `ConvTranspose3d` module.
///
/// Example:
/// ```
/// ConvTranspose3d model(ConvTranspose3dOptions(2, 2,
/// 2).stride(1).bias(false));
/// ```
using ConvTranspose3dOptions = ConvTransposeOptions<3>;
// ============================================================================
namespace functional {
/// Options for a `D`-dimensional convolution transpose functional.
template <size_t D>
struct ConvTransposeFuncOptions {
/// optional bias of shape `(out_channels)`. Default: ``None``
TORCH_ARG(torch::Tensor, bias) = Tensor();
/// The stride of the convolving kernel.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
TORCH_ARG(ExpandingArray<D>, stride) = 1;
/// Implicit paddings on both sides of the input.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
TORCH_ARG(ExpandingArray<D>, padding) = 0;
/// Additional size added to one side of each dimension in the output shape.
/// Default: 0
TORCH_ARG(ExpandingArray<D>, output_padding) = 0;
/// Split input into groups, `in_channels` should be divisible by
/// the number of groups.
TORCH_ARG(int64_t, groups) = 1;
/// The spacing between kernel elements.
/// For a `D`-dim convolution, must be a single number or a list of `D`
/// numbers.
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
};
/// `ConvTransposeFuncOptions` specialized for
/// `torch::nn::functional::conv_transpose1d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1));
/// ```
using ConvTranspose1dFuncOptions = ConvTransposeFuncOptions<1>;
/// `ConvTransposeFuncOptions` specialized for
/// `torch::nn::functional::conv_transpose2d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1));
/// ```
using ConvTranspose2dFuncOptions = ConvTransposeFuncOptions<2>;
/// `ConvTransposeFuncOptions` specialized for
/// `torch::nn::functional::conv_transpose3d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1));
/// ```
using ConvTranspose3dFuncOptions = ConvTransposeFuncOptions<3>;
} // namespace functional
} // namespace torch::nn
```
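A minimal sketch of the padding variants and the transpose-convolution geometry (illustrative; the shape comments assume dilation 1 and no output padding):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;

  // `padding` accepts a number, a per-dimension list, or the enums
  // torch::kValid / torch::kSame (kSame requires stride 1).
  Conv2d conv(Conv2dOptions(/*in_channels=*/3, /*out_channels=*/16,
                            /*kernel_size=*/3)
                  .stride(1)
                  .padding(torch::kSame));

  // Transpose convolution; for dilation 1 and no output_padding:
  //   out = (in - 1) * stride - 2 * padding + kernel_size
  ConvTranspose2d deconv(
      ConvTranspose2dOptions(16, 3, /*kernel_size=*/4).stride(2).padding(1));

  auto x = torch::rand({1, 3, 32, 32});
  auto h = conv(x);    // [1, 16, 32, 32]
  auto y = deconv(h);  // [1, 3, 64, 64]
  std::cout << y.sizes() << '\n';
}
```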
|
========================================================================================================================================================
SOURCE CODE FILE: distance.h
LINES: 1
SIZE: 2.01 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\distance.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `CosineSimilarity` module.
///
/// Example:
/// ```
/// CosineSimilarity model(CosineSimilarityOptions().dim(0).eps(0.5));
/// ```
struct TORCH_API CosineSimilarityOptions {
/// Dimension where cosine similarity is computed. Default: 1
TORCH_ARG(int64_t, dim) = 1;
/// Small value to avoid division by zero. Default: 1e-8
TORCH_ARG(double, eps) = 1e-8;
};
namespace functional {
/// Options for `torch::nn::functional::cosine_similarity`.
///
/// See the documentation for `torch::nn::CosineSimilarityOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::cosine_similarity(input1, input2,
/// F::CosineSimilarityFuncOptions().dim(1));
/// ```
using CosineSimilarityFuncOptions = CosineSimilarityOptions;
} // namespace functional
// ============================================================================
/// Options for the `PairwiseDistance` module.
///
/// Example:
/// ```
/// PairwiseDistance
/// model(PairwiseDistanceOptions().p(3).eps(0.5).keepdim(true));
/// ```
struct TORCH_API PairwiseDistanceOptions {
/// The norm degree. Default: 2
TORCH_ARG(double, p) = 2.0;
/// Small value to avoid division by zero. Default: 1e-6
TORCH_ARG(double, eps) = 1e-6;
/// Determines whether or not to keep the vector dimension. Default: false
TORCH_ARG(bool, keepdim) = false;
};
namespace functional {
/// Options for `torch::nn::functional::pairwise_distance`.
///
/// See the documentation for `torch::nn::PairwiseDistanceOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1));
/// ```
using PairwiseDistanceFuncOptions = PairwiseDistanceOptions;
} // namespace functional
} // namespace torch::nn
```
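A small sketch of both distance modules (illustrative):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;

  auto a = torch::rand({4, 8});
  auto b = torch::rand({4, 8});

  // Cosine similarity along dim 1: one value per row, in [-1, 1].
  CosineSimilarity cos(CosineSimilarityOptions().dim(1));
  auto sim = cos(a, b);  // shape [4]

  // Euclidean (p = 2) distance per row.
  PairwiseDistance dist(PairwiseDistanceOptions().p(2.0));
  auto d = dist(a, b);   // shape [4]
  std::cout << sim.sizes() << ' ' << d.sizes() << '\n';
}
```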
|
=======================================================================================================================================================
SOURCE CODE FILE: dropout.h
LINES: 1
SIZE: 3.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\dropout.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `Dropout` module.
///
/// Example:
/// ```
/// Dropout model(DropoutOptions().p(0.42).inplace(true));
/// ```
struct TORCH_API DropoutOptions {
/* implicit */ DropoutOptions(double p = 0.5);
/// The probability of an element to be zeroed. Default: 0.5
TORCH_ARG(double, p) = 0.5;
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace) = false;
};
/// Options for the `Dropout2d` module.
///
/// Example:
/// ```
/// Dropout2d model(Dropout2dOptions().p(0.42).inplace(true));
/// ```
using Dropout2dOptions = DropoutOptions;
/// Options for the `Dropout3d` module.
///
/// Example:
/// ```
/// Dropout3d model(Dropout3dOptions().p(0.42).inplace(true));
/// ```
using Dropout3dOptions = DropoutOptions;
/// Options for the `AlphaDropout` module.
///
/// Example:
/// ```
/// AlphaDropout model(AlphaDropoutOptions(0.2).inplace(true));
/// ```
using AlphaDropoutOptions = DropoutOptions;
/// Options for the `FeatureAlphaDropout` module.
///
/// Example:
/// ```
/// FeatureAlphaDropout model(FeatureAlphaDropoutOptions(0.2).inplace(true));
/// ```
using FeatureAlphaDropoutOptions = DropoutOptions;
namespace functional {
/// Options for `torch::nn::functional::dropout`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::dropout(input, F::DropoutFuncOptions().p(0.5));
/// ```
struct TORCH_API DropoutFuncOptions {
/// The probability of an element to be zeroed. Default: 0.5
TORCH_ARG(double, p) = 0.5;
TORCH_ARG(bool, training) = true;
/// can optionally do the operation in-place. Default: False
TORCH_ARG(bool, inplace) = false;
};
/// Options for `torch::nn::functional::dropout2d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5));
/// ```
using Dropout2dFuncOptions = DropoutFuncOptions;
/// Options for `torch::nn::functional::dropout3d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5));
/// ```
using Dropout3dFuncOptions = DropoutFuncOptions;
/// Options for `torch::nn::functional::alpha_dropout`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::alpha_dropout(input,
/// F::AlphaDropoutFuncOptions().p(0.5).training(false));
/// ```
struct TORCH_API AlphaDropoutFuncOptions {
TORCH_ARG(double, p) = 0.5;
TORCH_ARG(bool, training) = false;
TORCH_ARG(bool, inplace) = false;
};
/// Options for `torch::nn::functional::feature_alpha_dropout`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::feature_alpha_dropout(input,
/// F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false));
/// ```
struct TORCH_API FeatureAlphaDropoutFuncOptions {
TORCH_ARG(double, p) = 0.5;
TORCH_ARG(bool, training) = false;
TORCH_ARG(bool, inplace) = false;
};
} // namespace functional
} // namespace torch::nn
```
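A short sketch showing how the `training` state changes dropout's behavior (illustrative):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  using namespace torch::nn;

  Dropout drop(DropoutOptions().p(0.5));

  drop->train();
  auto y = drop(torch::ones({8}));  // ~half zeroed, survivors scaled by 1/(1-p)

  drop->eval();
  auto z = drop(torch::ones({8}));  // identity: dropout is a no-op in eval mode
  std::cout << y << z;
}
```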
|
=========================================================================================================================================================
SOURCE CODE FILE: embedding.h
LINES: 1
SIZE: 11.60 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\embedding.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `Embedding` module.
///
/// Example:
/// ```
/// Embedding model(EmbeddingOptions(10,
/// 2).padding_idx(3).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true));
/// ```
struct TORCH_API EmbeddingOptions {
EmbeddingOptions(int64_t num_embeddings, int64_t embedding_dim);
/// The size of the dictionary of embeddings.
TORCH_ARG(int64_t, num_embeddings);
/// The size of each embedding vector.
TORCH_ARG(int64_t, embedding_dim);
/// If specified, the entries at `padding_idx` do not contribute to the
/// gradient; therefore, the embedding vector at `padding_idx` is not updated
/// during training, i.e. it remains as a fixed "pad". For a newly constructed
/// Embedding, the embedding vector at `padding_idx` will default to all
/// zeros, but can be updated to another value to be used as the padding
/// vector.
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
/// If given, each embedding vector with norm larger than `max_norm` is
/// renormalized to have norm `max_norm`.
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
TORCH_ARG(double, norm_type) = 2.;
/// If given, this will scale gradients by the inverse of frequency of the
/// words in the mini-batch. Default ``false``.
TORCH_ARG(bool, scale_grad_by_freq) = false;
/// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
TORCH_ARG(bool, sparse) = false;
/// The learnable weights of the module of shape (num_embeddings,
/// embedding_dim)
TORCH_ARG(torch::Tensor, _weight) = Tensor();
};
// ============================================================================
/// Options for the `Embedding::from_pretrained` function.
struct TORCH_API EmbeddingFromPretrainedOptions {
/// If ``true``, the tensor does not get updated in the learning process.
/// Equivalent to ``embedding.weight.requires_grad_(false)``. Default:
/// ``true``
TORCH_ARG(bool, freeze) = true;
/// If specified, the entries at `padding_idx` do not contribute to the
/// gradient; therefore, the embedding vector at `padding_idx` is not updated
/// during training, i.e. it remains as a fixed "pad".
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
/// If given, each embedding vector with norm larger than `max_norm` is
/// renormalized to have norm `max_norm`.
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
TORCH_ARG(double, norm_type) = 2.;
/// If given, this will scale gradients by the inverse of frequency of the
/// words in the mini-batch. Default ``false``.
TORCH_ARG(bool, scale_grad_by_freq) = false;
/// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
TORCH_ARG(bool, sparse) = false;
};
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::embedding`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::embedding(input, weight,
/// F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true));
/// ```
struct TORCH_API EmbeddingFuncOptions {
/// If specified, the entries at `padding_idx` do not contribute to the
/// gradient; therefore, the embedding vector at `padding_idx` is not updated
/// during training, i.e. it remains as a fixed "pad".
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
/// If given, each embedding vector with norm larger than `max_norm` is
/// renormalized to have norm `max_norm`.
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
TORCH_ARG(double, norm_type) = 2.;
/// If given, this will scale gradients by the inverse of frequency of the
/// words in the mini-batch. Default ``false``.
TORCH_ARG(bool, scale_grad_by_freq) = false;
/// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
TORCH_ARG(bool, sparse) = false;
};
} // namespace functional
// ============================================================================
typedef std::variant<enumtype::kSum, enumtype::kMean, enumtype::kMax>
EmbeddingBagMode;
/// Options for the `EmbeddingBag` module.
///
/// Example:
/// ```
/// EmbeddingBag model(EmbeddingBagOptions(10,
/// 2).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true).mode(torch::kSum));
/// ```
struct TORCH_API EmbeddingBagOptions {
EmbeddingBagOptions(int64_t num_embeddings, int64_t embedding_dim);
/// The size of the dictionary of embeddings.
TORCH_ARG(int64_t, num_embeddings);
/// The size of each embedding vector.
TORCH_ARG(int64_t, embedding_dim);
/// If given, each embedding vector with norm larger than `max_norm` is
/// renormalized to have norm `max_norm`.
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
TORCH_ARG(double, norm_type) = 2.;
/// If given, this will scale gradients by the inverse of the frequency of the
/// words in the mini-batch. Default ``false``. Note: this option is not
/// supported when ``mode="kMax"``.
TORCH_ARG(bool, scale_grad_by_freq) = false;
/// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the
/// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights`
/// into consideration. ``"kMean"`` computes the average of the values in the
/// bag, ``"kMax"`` computes the max value over each bag.
TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean;
/// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
/// Note: this option is not supported when ``mode="kMax"``.
TORCH_ARG(bool, sparse) = false;
/// The learnable weights of the module of shape (num_embeddings,
/// embedding_dim).
TORCH_ARG(torch::Tensor, _weight) = Tensor();
/// If ``true``, `offsets` has one additional element, where the last element
/// is equivalent to the size of `indices`. This matches the CSR format.
TORCH_ARG(bool, include_last_offset) = false;
/// If specified, the entries at `padding_idx` do not contribute to the
/// gradient; therefore, the embedding vector at padding_idx is not updated
/// during training, i.e. it remains as a fixed "pad". For a newly constructed
/// EmbeddingBag, the embedding vector at `padding_idx` will default to all
/// zeros, but can be updated to another value to be used as the padding
/// vector. Note that the embedding vector at `padding_idx` is excluded from
/// the reduction.
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
};
// ============================================================================
/// Options for the `EmbeddingBag::from_pretrained` function.
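///
/// Example (a minimal usage sketch; `weights` is an assumed, user-supplied
/// 2-D tensor, not from the original source):
/// ```
/// auto weights = torch::rand({10, 3});
/// EmbeddingBag model = EmbeddingBag::from_pretrained(
///     weights, EmbeddingBagFromPretrainedOptions().mode(torch::kSum));
/// ```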
struct TORCH_API EmbeddingBagFromPretrainedOptions {
/// If ``true``, the tensor does not get updated in the learning process.
/// Equivalent to ``embeddingbag.weight.requires_grad_(false)``. Default:
/// ``true``
TORCH_ARG(bool, freeze) = true;
/// If given, each embedding vector with norm larger than `max_norm` is
/// renormalized to have norm `max_norm`.
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
TORCH_ARG(double, norm_type) = 2.;
/// If given, this will scale gradients by the inverse of the frequency of the
/// words in the mini-batch. Default ``false``. Note: this option is not
/// supported when ``mode="kMax"``.
TORCH_ARG(bool, scale_grad_by_freq) = false;
/// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the
/// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights`
/// into consideration. ``"kMean"`` computes the average of the values in the
/// bag, ``"kMax"`` computes the max value over each bag.
TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean;
/// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
/// Note: this option is not supported when ``mode="kMax"``.
TORCH_ARG(bool, sparse) = false;
/// If ``true``, `offsets` has one additional element, where the last element
/// is equivalent to the size of `indices`. This matches the CSR format. Note:
/// this option is currently only supported when ``mode="kSum"``.
TORCH_ARG(bool, include_last_offset) = false;
/// If specified, the entries at `padding_idx` do not contribute to the
/// gradient; therefore, the embedding vector at padding_idx is not updated
/// during training, i.e. it remains as a fixed "pad". Note that the embedding
/// vector at `padding_idx` is excluded from the reduction.
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
};
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::embedding_bag`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::embedding_bag(input, weight,
/// F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets));
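/// // Hedged shape note (illustrative values): for a 1-D `input` of N
/// // indices, `offsets` is a 1-D tensor of bag start positions, e.g.
/// // torch::tensor({0, 4}) splits 8 indices into two bags of 4.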
/// ```
struct TORCH_API EmbeddingBagFuncOptions {
/// Only used when `input` is 1D. `offsets` determines
/// the starting index position of each bag (sequence) in `input`.
TORCH_ARG(torch::Tensor, offsets) = Tensor();
/// If given, each embedding vector with norm larger than `max_norm` is
/// renormalized to have norm `max_norm`.
TORCH_ARG(std::optional<double>, max_norm) = std::nullopt;
/// The p of the p-norm to compute for the `max_norm` option. Default ``2``.
TORCH_ARG(double, norm_type) = 2.;
/// If given, this will scale gradients by the inverse of the frequency of the
/// words in the mini-batch. Default ``false``. Note: this option is not
/// supported when ``mode="kMax"``.
TORCH_ARG(bool, scale_grad_by_freq) = false;
/// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the
/// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights`
/// into consideration. ``"kMean"`` computes the average of the values in the
/// bag, ``"kMax"`` computes the max value over each bag.
TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean;
/// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor.
/// Note: this option is not supported when ``mode="kMax"``.
TORCH_ARG(bool, sparse) = false;
/// A tensor of float/double weights, or an undefined tensor to indicate that
/// all weights should be taken to be 1. If specified, `per_sample_weights`
/// must have exactly the same shape as `input` and is treated as having the
/// same `offsets`, if `offsets` is given.
TORCH_ARG(torch::Tensor, per_sample_weights) = Tensor();
/// If ``true``, `offsets` has one additional element, where the last element
/// is equivalent to the size of `indices`. This matches the CSR format. Note:
/// this option is currently only supported when ``mode="kSum"``.
TORCH_ARG(bool, include_last_offset) = false;
/// If specified, the entries at `padding_idx` do not contribute to the
/// gradient; therefore, the embedding vector at padding_idx is not updated
/// during training, i.e. it remains as a fixed "pad". Note that the embedding
/// vector at `padding_idx` is excluded from the reduction.
TORCH_ARG(std::optional<int64_t>, padding_idx) = std::nullopt;
};
} // namespace functional
} // namespace torch::nn
```
|
====================================================================================================================================================
SOURCE CODE FILE: fold.h
LINES: 1
SIZE: 2.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\fold.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/expanding_array.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `Fold` module.
///
/// Example:
/// ```
/// Fold model(FoldOptions({8, 8}, {3, 3}).dilation(2).padding({2,
/// 1}).stride(2));
/// ```
struct TORCH_API FoldOptions {
FoldOptions(ExpandingArray<2> output_size, ExpandingArray<2> kernel_size)
: output_size_(output_size), kernel_size_(kernel_size) {}
/// describes the spatial shape of the large containing tensor of the sliding
/// local blocks. It is useful to resolve the ambiguity when multiple input
/// shapes map to the same number of sliding blocks, e.g., with stride > 0.
TORCH_ARG(ExpandingArray<2>, output_size);
/// the size of the sliding blocks
TORCH_ARG(ExpandingArray<2>, kernel_size);
/// controls the spacing between the kernel points; also known as the à trous
/// algorithm.
TORCH_ARG(ExpandingArray<2>, dilation) = 1;
/// controls the amount of implicit zero padding added to both sides of the
/// input, `padding` points for each dimension, before reshaping.
TORCH_ARG(ExpandingArray<2>, padding) = 0;
/// controls the stride for the sliding blocks.
TORCH_ARG(ExpandingArray<2>, stride) = 1;
};
namespace functional {
/// Options for `torch::nn::functional::fold`.
///
/// See the documentation for `torch::nn::FoldOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2}));
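/// // A hedged round-trip sketch (illustrative shapes): unfold extracts 2x2
/// // sliding blocks and fold reassembles them, summing overlapping values.
/// auto input = torch::randn({1, 1, 4, 4});
/// auto blocks = F::unfold(input, F::UnfoldFuncOptions({2, 2})); // (1, 4, 9)
/// auto output =
///     F::fold(blocks, F::FoldFuncOptions({4, 4}, {2, 2})); // (1, 1, 4, 4)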
/// ```
using FoldFuncOptions = FoldOptions;
} // namespace functional
// ============================================================================
/// Options for the `Unfold` module.
///
/// Example:
/// ```
/// Unfold model(UnfoldOptions({2, 4}).dilation(2).padding({2, 1}).stride(2));
/// ```
struct TORCH_API UnfoldOptions {
UnfoldOptions(ExpandingArray<2> kernel_size) : kernel_size_(kernel_size) {}
/// the size of the sliding blocks
TORCH_ARG(ExpandingArray<2>, kernel_size);
/// controls the spacing between the kernel points; also known as the à trous
/// algorithm.
TORCH_ARG(ExpandingArray<2>, dilation) = 1;
/// controls the amount of implicit zero padding added to both sides of the
/// input, `padding` points for each dimension, before reshaping.
TORCH_ARG(ExpandingArray<2>, padding) = 0;
/// controls the stride for the sliding blocks.
TORCH_ARG(ExpandingArray<2>, stride) = 1;
};
namespace functional {
/// Options for `torch::nn::functional::unfold`.
///
/// See the documentation for `torch::nn::UnfoldOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2));
/// ```
using UnfoldFuncOptions = UnfoldOptions;
} // namespace functional
} // namespace torch::nn
```
|
============================================================================================================================================================
SOURCE CODE FILE: instancenorm.h
LINES: 1
SIZE: 2.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\instancenorm.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/nn/options/batchnorm.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `InstanceNorm` module.
struct TORCH_API InstanceNormOptions {
/* implicit */ InstanceNormOptions(int64_t num_features);
/// The number of features of the input tensor.
TORCH_ARG(int64_t, num_features);
/// The epsilon value added for numerical stability.
TORCH_ARG(double, eps) = 1e-5;
/// A momentum multiplier for the mean and variance.
TORCH_ARG(double, momentum) = 0.1;
/// Whether to learn a scale and bias that are applied in an affine
/// transformation on the input.
TORCH_ARG(bool, affine) = false;
/// Whether to store and update batch statistics (mean and variance) in the
/// module.
TORCH_ARG(bool, track_running_stats) = false;
};
/// Options for the `InstanceNorm1d` module.
///
/// Example:
/// ```
/// InstanceNorm1d
/// model(InstanceNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
/// ```
using InstanceNorm1dOptions = InstanceNormOptions;
/// Options for the `InstanceNorm2d` module.
///
/// Example:
/// ```
/// InstanceNorm2d
/// model(InstanceNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
/// ```
using InstanceNorm2dOptions = InstanceNormOptions;
/// Options for the `InstanceNorm3d` module.
///
/// Example:
/// ```
/// InstanceNorm3d
/// model(InstanceNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
/// ```
using InstanceNorm3dOptions = InstanceNormOptions;
namespace functional {
/// Options for `torch::nn::functional::instance_norm`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::instance_norm(input,
/// F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5));
/// ```
struct TORCH_API InstanceNormFuncOptions {
/// The running mean statistics (optional).
TORCH_ARG(Tensor, running_mean) = Tensor();
/// The running variance statistics (optional).
TORCH_ARG(Tensor, running_var) = Tensor();
/// The learnable per-channel weight (optional).
TORCH_ARG(Tensor, weight) = Tensor();
/// The learnable per-channel bias (optional).
TORCH_ARG(Tensor, bias) = Tensor();
/// Whether to compute the mean and variance from the input rather than from
/// the running statistics. Default: ``true``.
TORCH_ARG(bool, use_input_stats) = true;
/// A momentum multiplier for the mean and variance. Default: 0.1
TORCH_ARG(double, momentum) = 0.1;
/// The epsilon value added for numerical stability. Default: 1e-5
TORCH_ARG(double, eps) = 1e-5;
};
} // namespace functional
} // namespace torch::nn
```
|
======================================================================================================================================================
SOURCE CODE FILE: linear.h
LINES: 1
SIZE: 2.80 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\linear.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `Linear` module.
///
/// Example:
/// ```
/// Linear model(LinearOptions(5, 2).bias(false));
/// ```
struct TORCH_API LinearOptions {
LinearOptions(int64_t in_features, int64_t out_features);
/// size of each input sample
TORCH_ARG(int64_t, in_features);
/// size of each output sample
TORCH_ARG(int64_t, out_features);
/// If set to false, the layer will not learn an additive bias. Default: true
TORCH_ARG(bool, bias) = true;
};
// ============================================================================
/// Options for the `Flatten` module.
///
/// Example:
/// ```
/// Flatten model(FlattenOptions().start_dim(2).end_dim(4));
/// ```
struct TORCH_API FlattenOptions {
/// first dim to flatten
TORCH_ARG(int64_t, start_dim) = 1;
/// last dim to flatten
TORCH_ARG(int64_t, end_dim) = -1;
};
// ============================================================================
/// Options for the `Unflatten` module.
///
/// Note: If input tensor is named, use dimname and namedshape arguments.
///
/// Example:
/// ```
/// Unflatten unnamed_model(UnflattenOptions(0, {2, 2}));
/// Unflatten named_model(UnflattenOptions("B", {{"B1", 2}, {"B2", 2}}));
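/// // Hedged shape note: unnamed_model maps a (4, 5) input to (2, 2, 5) by
/// // splitting dim 0 into the sizes {2, 2}.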
/// ```
struct TORCH_API UnflattenOptions {
typedef std::vector<std::pair<std::string, int64_t>> namedshape_t;
UnflattenOptions(int64_t dim, std::vector<int64_t> sizes);
UnflattenOptions(const char* dimname, namedshape_t namedshape);
UnflattenOptions(std::string dimname, namedshape_t namedshape);
/// dim to unflatten
TORCH_ARG(int64_t, dim);
/// name of dim to unflatten, for use with named tensors
TORCH_ARG(std::string, dimname);
/// new shape of unflattened dim
TORCH_ARG(std::vector<int64_t>, sizes);
/// new shape of unflattened dim with names, for use with named tensors
TORCH_ARG(namedshape_t, namedshape);
};
// ============================================================================
/// Options for the `Bilinear` module.
///
/// Example:
/// ```
/// Bilinear model(BilinearOptions(3, 2, 4).bias(false));
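/// // A hedged forward sketch: y = x1^T A x2 (+ bias), batched over dim 0.
/// auto x1 = torch::randn({8, 3});
/// auto x2 = torch::randn({8, 2});
/// auto y = model(x1, x2); // (8, 4)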
/// ```
struct TORCH_API BilinearOptions {
BilinearOptions(
int64_t in1_features,
int64_t in2_features,
int64_t out_features);
/// The number of features in input 1 (columns of the input1 matrix).
TORCH_ARG(int64_t, in1_features);
/// The number of features in input 2 (columns of the input2 matrix).
TORCH_ARG(int64_t, in2_features);
/// The number of output features to produce (columns of the output matrix).
TORCH_ARG(int64_t, out_features);
/// Whether to learn and add a bias after the bilinear transformation.
TORCH_ARG(bool, bias) = true;
};
} // namespace torch::nn
```
|
====================================================================================================================================================
SOURCE CODE FILE: loss.h
LINES: 1
SIZE: 26.86 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\loss.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `L1Loss` module.
///
/// Example:
/// ```
/// L1Loss model(L1LossOptions(torch::kNone));
/// ```
struct TORCH_API L1LossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
TORCH_OPTIONS_CTOR_VARIANT_ARG3(L1LossOptions, reduction, kNone, kMean, kSum)
/// Specifies the reduction to apply to the output.
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::l1_loss`.
///
/// See the documentation for `torch::nn::L1LossOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone));
/// ```
using L1LossFuncOptions = L1LossOptions;
} // namespace functional
// ============================================================================
/// Options for the `KLDivLoss` module.
///
/// Example:
/// ```
/// KLDivLoss
/// model(KLDivLossOptions().reduction(torch::kNone).log_target(false));
/// ```
struct TORCH_API KLDivLossOptions {
typedef std::variant<
enumtype::kNone,
enumtype::kBatchMean,
enumtype::kSum,
enumtype::kMean>
reduction_t;
TORCH_OPTIONS_CTOR_VARIANT_ARG4(
KLDivLossOptions,
reduction,
kNone,
kBatchMean,
kSum,
kMean)
/// Specifies the reduction to apply to the output.
/// ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. Default: ``'mean'``
TORCH_ARG(reduction_t, reduction) = torch::kMean;
/// Specifies whether `target` is passed in the log space. Default: ``false``
TORCH_ARG(bool, log_target) = false;
};
namespace functional {
/// Options for `torch::nn::functional::kl_div`.
///
/// See the documentation for `torch::nn::KLDivLossOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::kl_div(input, target,
/// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false));
/// ```
using KLDivFuncOptions = KLDivLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `MSELoss` module.
///
/// Example:
/// ```
/// MSELoss model(MSELossOptions(torch::kNone));
/// ```
struct TORCH_API MSELossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
TORCH_OPTIONS_CTOR_VARIANT_ARG3(MSELossOptions, reduction, kNone, kMean, kSum)
/// Specifies the reduction to apply to the output.
/// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::mse_loss`.
///
/// See the documentation for `torch::nn::MSELossOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone));
/// ```
using MSELossFuncOptions = MSELossOptions;
} // namespace functional
// ============================================================================
/// Options for the `BCELoss` module.
///
/// Example:
/// ```
/// BCELoss model(BCELossOptions().reduction(torch::kNone).weight(weight));
/// ```
struct TORCH_API BCELossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// A manual rescaling weight given to the loss of each batch element.
TORCH_ARG(Tensor, weight) = {};
/// Specifies the reduction to apply to the output.
/// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::binary_cross_entropy`.
///
/// See the documentation for `torch::nn::BCELossOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::binary_cross_entropy(input, target,
/// F::BinaryCrossEntropyFuncOptions().weight(weight));
/// ```
using BinaryCrossEntropyFuncOptions = BCELossOptions;
} // namespace functional
// ============================================================================
/// Options for the `HingeEmbeddingLoss` module.
///
/// Example:
/// ```
/// HingeEmbeddingLoss
/// model(HingeEmbeddingLossOptions().margin(4).reduction(torch::kNone));
/// ```
struct TORCH_API HingeEmbeddingLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// Specifies the threshold that the distance of a negative sample must
/// reach in order to incur zero loss. Default: 1
TORCH_ARG(double, margin) = 1.0;
/// Specifies the reduction to apply to the output. Default: Mean
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::hinge_embedding_loss`.
///
/// See the documentation for `torch::nn::HingeEmbeddingLossOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::hinge_embedding_loss(input, target,
/// F::HingeEmbeddingLossFuncOptions().margin(2));
/// ```
using HingeEmbeddingLossFuncOptions = HingeEmbeddingLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `MultiMarginLoss` module.
///
/// Example:
/// ```
/// MultiMarginLoss model(MultiMarginLossOptions().margin(2).weight(weight));
/// ```
struct TORCH_API MultiMarginLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// Has a default value of :math:`1`. :math:`1` and :math:`2`
/// are the only supported values.
TORCH_ARG(int64_t, p) = 1;
/// Has a default value of :math:`1`.
TORCH_ARG(double, margin) = 1.0;
/// A manual rescaling weight given to each
/// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
/// treated as if having all ones.
TORCH_ARG(Tensor, weight) = Tensor();
/// Specifies the reduction to apply to the output:
/// ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be
/// applied,
/// ``'mean'``: the sum of the output will be divided by the number of
/// elements in the output, ``'sum'``: the output will be summed. Default:
/// ``'mean'``
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::multi_margin_loss`.
///
/// See the documentation for `torch::nn::MultiMarginLossOptions` class to learn
/// what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::multi_margin_loss(input, target,
/// F::MultiMarginLossFuncOptions().margin(2).weight(weight));
/// ```
using MultiMarginLossFuncOptions = MultiMarginLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `CosineEmbeddingLoss` module.
///
/// Example:
/// ```
/// CosineEmbeddingLoss model(CosineEmbeddingLossOptions().margin(0.5));
/// ```
struct TORCH_API CosineEmbeddingLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// Specifies the threshold that the distance of a negative sample must
/// reach in order to incur zero loss. Should be a number from -1 to 1;
/// 0 to 0.5 is suggested. Default: 0.0
TORCH_ARG(double, margin) = 0.0;
/// Specifies the reduction to apply to the output. Default: Mean
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::cosine_embedding_loss`.
///
/// See the documentation for `torch::nn::CosineEmbeddingLossOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::cosine_embedding_loss(input1, input2, target,
/// F::CosineEmbeddingLossFuncOptions().margin(0.5));
/// ```
using CosineEmbeddingLossFuncOptions = CosineEmbeddingLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `MultiLabelMarginLoss` module.
///
/// Example:
/// ```
/// MultiLabelMarginLoss model(MultiLabelMarginLossOptions(torch::kNone));
/// ```
struct TORCH_API MultiLabelMarginLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
MultiLabelMarginLossOptions,
reduction,
kNone,
kMean,
kSum)
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
/// 'none': no reduction will be applied, 'mean': the sum of the output will
/// be divided by the number of elements in the output, 'sum': the output will
/// be summed. Default: 'mean'
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::multilabel_margin_loss`.
///
/// See the documentation for `torch::nn::MultiLabelMarginLossOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::multilabel_margin_loss(input, target,
/// F::MultilabelMarginLossFuncOptions(torch::kNone));
/// ```
using MultilabelMarginLossFuncOptions = MultiLabelMarginLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `SoftMarginLoss` module.
///
/// Example:
/// ```
/// SoftMarginLoss model(SoftMarginLossOptions(torch::kNone));
/// ```
struct TORCH_API SoftMarginLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
SoftMarginLossOptions,
reduction,
kNone,
kMean,
kSum)
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
/// 'none': no reduction will be applied, 'mean': the sum of the output will
/// be divided by the number of elements in the output, 'sum': the output will
/// be summed. Default: 'mean'
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::soft_margin_loss`.
///
/// See the documentation for `torch::nn::SoftMarginLossOptions` class to learn
/// what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::soft_margin_loss(input, target,
/// F::SoftMarginLossFuncOptions(torch::kNone));
/// ```
using SoftMarginLossFuncOptions = SoftMarginLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `MultiLabelSoftMarginLoss` module.
///
/// Example:
/// ```
/// MultiLabelSoftMarginLoss
/// model(MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight));
/// ```
struct TORCH_API MultiLabelSoftMarginLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// A manual rescaling weight given to each
/// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
/// treated as if having all ones.
TORCH_ARG(Tensor, weight) = Tensor();
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
/// 'none': no reduction will be applied, 'mean': the sum of the output will
/// be divided by the number of elements in the output, 'sum': the output will
/// be summed. Default: 'mean'
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::multilabel_soft_margin_loss`.
///
/// See the documentation for `torch::nn::MultiLabelSoftMarginLossOptions` class
/// to learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::multilabel_soft_margin_loss(input, target,
/// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight));
/// ```
using MultilabelSoftMarginLossFuncOptions = MultiLabelSoftMarginLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `TripletMarginLoss` module.
///
/// Example:
/// ```
/// TripletMarginLoss
/// model(TripletMarginLossOptions().margin(3).p(2).eps(1e-06).swap(false));
/// ```
struct TORCH_API TripletMarginLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// Specifies the threshold that the distance of a negative sample must
/// reach in order to incur zero loss. Default: 1
TORCH_ARG(double, margin) = 1.0;
/// Specifies the norm degree for pairwise distance. Default: 2
TORCH_ARG(double, p) = 2.0;
TORCH_ARG(double, eps) = 1e-6;
/// The distance swap is described in detail in the paper Learning shallow
/// convolutional feature descriptors with triplet losses by V. Balntas,
/// E. Riba et al. Default: False
TORCH_ARG(bool, swap) = false;
/// Specifies the reduction to apply to the output. Default: Mean
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::triplet_margin_loss`.
///
/// See the documentation for `torch::nn::TripletMarginLossOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::triplet_margin_loss(anchor, positive, negative,
/// F::TripletMarginLossFuncOptions().margin(1.0));
/// ```
using TripletMarginLossFuncOptions = TripletMarginLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `TripletMarginWithDistanceLoss` module.
///
/// Example:
/// ```
/// TripletMarginWithDistanceLoss
/// model(TripletMarginWithDistanceLossOptions().margin(3).swap(false));
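/// // A hedged sketch of a custom distance function (cosine distance is an
/// // illustrative choice, not from the original source):
/// auto cosine_distance = [](const torch::Tensor& x, const torch::Tensor& y) {
///   return 1.0 - torch::nn::functional::cosine_similarity(x, y);
/// };
/// TripletMarginWithDistanceLoss model2(
///     TripletMarginWithDistanceLossOptions().distance_function(
///         cosine_distance));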
/// ```
struct TORCH_API TripletMarginWithDistanceLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
typedef std::function<Tensor(const Tensor&, const Tensor&)>
distance_function_t;
/// Specifies a nonnegative, real-valued function that quantifies the
/// closeness of two tensors. If not specified, `F::pairwise_distance` will
/// be used. Default: nullopt
TORCH_ARG(std::optional<distance_function_t>, distance_function) =
std::nullopt;
/// Specifies a nonnegative margin representing the minimum difference
/// between the positive and negative distances required for the loss to be 0.
/// Larger margins penalize cases where the negative examples are not distant
/// enough from the anchors, relative to the positives. Default: 1
TORCH_ARG(double, margin) = 1.0;
/// Whether to use the distance swap described in the paper Learning shallow
/// convolutional feature descriptors with triplet losses by V. Balntas,
/// E. Riba et al. If True, and if the positive example is closer to the
/// negative example than the anchor is, swaps the positive example and the
/// anchor in the loss computation. Default: False
TORCH_ARG(bool, swap) = false;
/// Specifies the reduction to apply to the output. Default: Mean
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::triplet_margin_with_distance_loss`.
///
/// See the documentation for `torch::nn::TripletMarginWithDistanceLossOptions`
/// class to learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::triplet_margin_with_distance_loss(anchor, positive, negative,
/// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0));
/// ```
using TripletMarginWithDistanceLossFuncOptions =
TripletMarginWithDistanceLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `CTCLoss` module.
///
/// Example:
/// ```
/// CTCLoss
/// model(CTCLossOptions().blank(42).zero_infinity(false).reduction(torch::kSum));
/// ```
struct TORCH_API CTCLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// blank label. Default `0`.
TORCH_ARG(int64_t, blank) = 0;
/// Specifies the reduction to apply to the output. Default: Mean
TORCH_ARG(reduction_t, reduction) = torch::kMean;
/// Whether to zero infinite losses and the associated gradients.
/// Default: `false`. Infinite losses mainly occur when the inputs are
/// too short to be aligned to the targets.
TORCH_ARG(bool, zero_infinity) = false;
};
namespace functional {
/// Options for `torch::nn::functional::ctc_loss`.
///
/// See the documentation for `torch::nn::CTCLossOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::ctc_loss(log_probs, targets, input_lengths, target_lengths,
/// F::CTCLossFuncOptions().reduction(torch::kNone));
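/// // Hedged shape note: `log_probs` is (T, N, C) log-softmax output,
/// // `targets` holds (N, S) class indices, and the two length tensors are
/// // 1-D of size N.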
/// ```
using CTCLossFuncOptions = CTCLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `SmoothL1Loss` module.
///
/// Example:
/// ```
/// SmoothL1Loss model(SmoothL1LossOptions().reduction(torch::kNone).beta(0.5));
/// ```
struct TORCH_API SmoothL1LossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
SmoothL1LossOptions,
reduction,
kNone,
kMean,
kSum)
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
/// 'none': no reduction will be applied, 'mean': the sum of the output will
/// be divided by the number of elements in the output, 'sum': the output will
/// be summed. Default: 'mean'
TORCH_ARG(reduction_t, reduction) = torch::kMean;
/// Specifies the threshold at which to change between L1 and L2 loss.
/// If beta is not specified, a value of 1.0 will be used.
/// Default: nullopt
TORCH_ARG(std::optional<double>, beta) = std::nullopt;
};
namespace functional {
/// Options for `torch::nn::functional::smooth_l1_loss`.
///
/// See the documentation for `torch::nn::SmoothL1LossOptions` class to learn
/// what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone));
/// ```
using SmoothL1LossFuncOptions = SmoothL1LossOptions;
} // namespace functional
// ============================================================================
/// Options for the `HuberLoss` module.
///
/// Example:
/// ```
/// HuberLoss model(HuberLossOptions().reduction(torch::kNone).delta(0.5));
/// ```
struct TORCH_API HuberLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
TORCH_OPTIONS_CTOR_VARIANT_ARG3(
HuberLossOptions,
reduction,
kNone,
kMean,
kSum)
/// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
/// 'none': no reduction will be applied, 'mean': the sum of the output will
/// be divided by the number of elements in the output, 'sum': the output will
/// be summed. Default: 'mean'
TORCH_ARG(reduction_t, reduction) = torch::kMean;
/// Specifies the threshold at which to change between L1 and L2 loss.
/// Default: 1.0
TORCH_ARG(double, delta) = 1.0;
};
namespace functional {
/// Options for `torch::nn::functional::huber_loss`.
///
/// See the documentation for `torch::nn::HuberLossOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::huber_loss(input, target, F::HuberLossFuncOptions(torch::kNone));
/// ```
using HuberLossFuncOptions = HuberLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `PoissonNLLLoss` module.
///
/// Example:
/// ```
/// PoissonNLLLoss
/// model(PoissonNLLLossOptions().log_input(false).full(true).eps(0.42).reduction(torch::kSum));
/// ```
struct TORCH_API PoissonNLLLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// if ``true`` the loss is computed as `exp(input) - target * input`;
/// if ``false`` the loss is `input - target * log(input + eps)`.
TORCH_ARG(bool, log_input) = true;
/// whether to compute the full loss, i.e. to add the Stirling approximation
/// term: target * log(target) - target + 0.5 * log(2 * pi * target).
TORCH_ARG(bool, full) = false;
/// Small value to avoid evaluation of `log(0)` when `log_input = false`.
/// Default: 1e-8
TORCH_ARG(double, eps) = 1e-8;
/// Specifies the reduction to apply to the output. Default: Mean
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::poisson_nll_loss`.
///
/// See the documentation for `torch::nn::PoissonNLLLossOptions` class to learn
/// what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::poisson_nll_loss(input, target,
/// F::PoissonNLLLossFuncOptions().reduction(torch::kNone));
/// ```
using PoissonNLLLossFuncOptions = PoissonNLLLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `MarginRankingLoss` module.
///
/// Example:
/// ```
/// MarginRankingLoss
/// model(MarginRankingLossOptions().margin(0.5).reduction(torch::kSum));
/// ```
struct TORCH_API MarginRankingLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// Has a default value of `0`.
TORCH_ARG(double, margin) = 0;
/// Specifies the reduction to apply to the output. Default: Mean
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::margin_ranking_loss`.
///
/// See the documentation for `torch::nn::MarginRankingLossOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::margin_ranking_loss(input1, input2, target,
/// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum));
/// ```
using MarginRankingLossFuncOptions = MarginRankingLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `NLLLoss` module.
///
/// Example:
/// ```
/// NLLLoss model(NLLLossOptions().ignore_index(-100).reduction(torch::kMean));
/// ```
struct TORCH_API NLLLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// A manual rescaling weight given to each
/// class. If given, it has to be a Tensor of size `C`. Otherwise, it is
/// treated as if having all ones.
TORCH_ARG(Tensor, weight) = {};
/// Specifies a target value that is ignored
/// and does not contribute to the input gradient.
TORCH_ARG(int64_t, ignore_index) = -100;
/// Specifies the reduction to apply to the output. Default: Mean
TORCH_ARG(reduction_t, reduction) = torch::kMean;
};
namespace functional {
/// Options for `torch::nn::functional::nll_loss`.
///
/// See the documentation for `torch::nn::NLLLossOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::nll_loss(input, target,
/// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean));
/// ```
using NLLLossFuncOptions = NLLLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `CrossEntropyLoss` module.
///
/// Example:
/// ```
/// CrossEntropyLoss
/// model(CrossEntropyLossOptions().ignore_index(-100).reduction(torch::kMean));
/// ```
struct TORCH_API CrossEntropyLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// A manual rescaling weight given to each class. If given, has to be a
/// Tensor of size C
TORCH_ARG(Tensor, weight) = {};
/// Specifies a target value that is ignored
/// and does not contribute to the input gradient.
TORCH_ARG(int64_t, ignore_index) = -100;
/// Specifies the reduction to apply to the output. Default: Mean
TORCH_ARG(reduction_t, reduction) = torch::kMean;
/// Specifies the amount of smoothing when computing the loss. Default: 0.0
TORCH_ARG(double, label_smoothing) = 0.0;
};
namespace functional {
/// Options for `torch::nn::functional::cross_entropy`.
///
/// See the documentation for `torch::nn::CrossEntropyLossOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::cross_entropy(input, target,
/// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean));
/// ```
using CrossEntropyFuncOptions = CrossEntropyLossOptions;
} // namespace functional
// ============================================================================
/// Options for the `BCEWithLogitsLoss` module.
///
/// Example:
/// ```
/// BCEWithLogitsLoss
/// model(BCEWithLogitsLossOptions().reduction(torch::kNone).weight(weight));
/// ```
struct TORCH_API BCEWithLogitsLossOptions {
typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
reduction_t;
/// A manual rescaling weight given to the loss of each batch element.
/// If given, has to be a Tensor of size `nbatch`.
TORCH_ARG(Tensor, weight) = {};
/// Specifies the reduction to apply to the output. Default: Mean
TORCH_ARG(reduction_t, reduction) = torch::kMean;
/// A weight of positive examples.
/// Must be a vector with length equal to the number of classes.
TORCH_ARG(Tensor, pos_weight) = {};
};
namespace functional {
/// Options for `torch::nn::functional::binary_cross_entropy_with_logits`.
///
/// See the documentation for `torch::nn::BCEWithLogitsLossOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::binary_cross_entropy_with_logits(input, target,
/// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum));
/// ```
using BinaryCrossEntropyWithLogitsFuncOptions = BCEWithLogitsLossOptions;
} // namespace functional
} // namespace torch::nn
```
|
=============================================================================================================================================================
SOURCE CODE FILE: normalization.h
LINES: 1
SIZE: 5.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\normalization.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/types.h>
#include <vector>
namespace torch::nn {
/// Options for the `LayerNorm` module.
///
/// Example:
/// ```
/// LayerNorm model(LayerNormOptions({2,
/// 2}).elementwise_affine(false).eps(2e-5));
/// ```
struct TORCH_API LayerNormOptions {
/* implicit */ LayerNormOptions(std::vector<int64_t> normalized_shape);
/// input shape from an expected input.
TORCH_ARG(std::vector<int64_t>, normalized_shape);
/// a value added to the denominator for numerical stability. Default:
/// ``1e-5``.
TORCH_ARG(double, eps) = 1e-5;
/// a boolean value that, when set to ``true``, gives this module learnable
/// per-element affine parameters initialized to ones (for weights) and zeros
/// (for biases). Default: ``true``.
TORCH_ARG(bool, elementwise_affine) = true;
};
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::layer_norm`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5));
/// ```
struct TORCH_API LayerNormFuncOptions {
/* implicit */ LayerNormFuncOptions(std::vector<int64_t> normalized_shape);
/// input shape from an expected input.
TORCH_ARG(std::vector<int64_t>, normalized_shape);
/// The learnable elementwise weight (optional).
TORCH_ARG(Tensor, weight) = {};
/// The learnable elementwise bias (optional).
TORCH_ARG(Tensor, bias) = {};
/// a value added to the denominator for numerical stability. Default:
/// ``1e-5``.
TORCH_ARG(double, eps) = 1e-5;
};
} // namespace functional
// ============================================================================
/// Options for the `LocalResponseNorm` module.
///
/// Example:
/// ```
/// LocalResponseNorm
/// model(LocalResponseNormOptions(2).alpha(0.0002).beta(0.85).k(2.));
/// ```
struct TORCH_API LocalResponseNormOptions {
/* implicit */ LocalResponseNormOptions(int64_t size) : size_(size) {}
/// number of neighbouring channels used for normalization
TORCH_ARG(int64_t, size);
/// multiplicative factor. Default: 1e-4
TORCH_ARG(double, alpha) = 1e-4;
/// exponent. Default: 0.75
TORCH_ARG(double, beta) = 0.75;
/// additive factor. Default: 1
TORCH_ARG(double, k) = 1.;
};
namespace functional {
/// Options for `torch::nn::functional::local_response_norm`.
///
/// See the documentation for `torch::nn::LocalResponseNormOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::local_response_norm(x, F::LocalResponseNormFuncOptions(2));
/// ```
using LocalResponseNormFuncOptions = LocalResponseNormOptions;
} // namespace functional
// ============================================================================
/// Options for the `CrossMapLRN2d` module.
///
/// Example:
/// ```
/// CrossMapLRN2d model(CrossMapLRN2dOptions(3).alpha(1e-5).beta(0.1).k(10));
/// ```
struct TORCH_API CrossMapLRN2dOptions {
CrossMapLRN2dOptions(int64_t size);
TORCH_ARG(int64_t, size);
TORCH_ARG(double, alpha) = 1e-4;
TORCH_ARG(double, beta) = 0.75;
TORCH_ARG(int64_t, k) = 1;
};
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::normalize`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1));
/// ```
struct TORCH_API NormalizeFuncOptions {
/// The exponent value in the norm formulation. Default: 2.0
TORCH_ARG(double, p) = 2.0;
/// The dimension to reduce. Default: 1
TORCH_ARG(int64_t, dim) = 1;
/// Small value to avoid division by zero. Default: 1e-12
TORCH_ARG(double, eps) = 1e-12;
/// the output tensor. If `out` is used, this
/// operation won't be differentiable.
TORCH_ARG(std::optional<Tensor>, out) = std::nullopt;
};
} // namespace functional
// ============================================================================
/// Options for the `GroupNorm` module.
///
/// Example:
/// ```
/// GroupNorm model(GroupNormOptions(2, 2).eps(2e-5).affine(false));
/// ```
struct TORCH_API GroupNormOptions {
/* implicit */ GroupNormOptions(int64_t num_groups, int64_t num_channels);
/// number of groups to separate the channels into
TORCH_ARG(int64_t, num_groups);
/// number of channels expected in input
TORCH_ARG(int64_t, num_channels);
/// a value added to the denominator for numerical stability. Default: 1e-5
TORCH_ARG(double, eps) = 1e-5;
/// a boolean value that, when set to ``true``, gives this module learnable
/// per-channel affine parameters initialized to ones (for weights) and zeros
/// (for biases). Default: ``true``.
TORCH_ARG(bool, affine) = true;
};
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::group_norm`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5));
/// ```
struct TORCH_API GroupNormFuncOptions {
/* implicit */ GroupNormFuncOptions(int64_t num_groups);
/// number of groups to separate the channels into
TORCH_ARG(int64_t, num_groups);
/// The learnable per-channel weight (optional).
TORCH_ARG(Tensor, weight) = {};
/// The learnable per-channel bias (optional).
TORCH_ARG(Tensor, bias) = {};
/// a value added to the denominator for numerical stability. Default: 1e-5
TORCH_ARG(double, eps) = 1e-5;
};
} // namespace functional
} // namespace torch::nn
```
|
=======================================================================================================================================================
SOURCE CODE FILE: padding.h
LINES: 1
SIZE: 6.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\padding.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/expanding_array.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for a `D`-dimensional ReflectionPad module.
template <size_t D>
struct TORCH_API ReflectionPadOptions {
ReflectionPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}
/// The size of the padding.
/// If it is `int`, uses the same padding in all boundaries.
/// If it is a 2-`tuple` (for ReflectionPad1d), uses (padding_left,
/// padding_right). If it is a 4-`tuple` (for ReflectionPad2d), uses
/// (padding_left, padding_right, padding_top, padding_bottom). If it is a
/// 6-`tuple` (for ReflectionPad3d), uses (padding_left, padding_right,
/// padding_top, padding_bottom, padding_front, padding_back).
TORCH_ARG(ExpandingArray<D * 2>, padding);
};
/// `ReflectionPadOptions` specialized for the `ReflectionPad1d` module.
///
/// Example:
/// ```
/// ReflectionPad1d model(ReflectionPad1dOptions({3, 1}));
/// ```
using ReflectionPad1dOptions = ReflectionPadOptions<1>;
/// `ReflectionPadOptions` specialized for the `ReflectionPad2d` module.
///
/// Example:
/// ```
/// ReflectionPad2d model(ReflectionPad2dOptions({1, 1, 2, 0}));
/// ```
using ReflectionPad2dOptions = ReflectionPadOptions<2>;
/// `ReflectionPadOptions` specialized for the `ReflectionPad3d` module.
///
/// Example:
/// ```
/// ReflectionPad3d model(ReflectionPad3dOptions({1, 1, 2, 0, 1, 1}));
/// ```
using ReflectionPad3dOptions = ReflectionPadOptions<3>;
// ============================================================================
/// Options for a `D`-dimensional ReplicationPad module.
template <size_t D>
struct TORCH_API ReplicationPadOptions {
ReplicationPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}
/// The size of the padding.
/// - If it is `int`, uses the same padding in all boundaries.
/// - If it is a 2-`tuple` (for ReplicationPad1d), uses (padding_left,
/// padding_right).
/// - If it is a 4-`tuple` (for ReplicationPad2d), uses (padding_left,
/// padding_right, padding_top, padding_bottom).
/// - If it is a 6-`tuple` (for ReplicationPad3d), uses
/// (padding_left, padding_right, padding_top, padding_bottom,
/// padding_front, padding_back).
TORCH_ARG(ExpandingArray<D * 2>, padding);
};
/// `ReplicationPadOptions` specialized for the `ReplicationPad1d` module.
///
/// Example:
/// ```
/// ReplicationPad1d model(ReplicationPad1dOptions({3, 1}));
/// ```
using ReplicationPad1dOptions = ReplicationPadOptions<1>;
/// `ReplicationPadOptions` specialized for the `ReplicationPad2d` module.
///
/// Example:
/// ```
/// ReplicationPad2d model(ReplicationPad2dOptions({1, 1, 2, 0}));
/// ```
using ReplicationPad2dOptions = ReplicationPadOptions<2>;
/// `ReplicationPadOptions` specialized for the `ReplicationPad3d` module.
///
/// Example:
/// ```
/// ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2}));
/// ```
using ReplicationPad3dOptions = ReplicationPadOptions<3>;
// ============================================================================
/// Options for a `D`-dimensional ZeroPad module.
template <size_t D>
struct TORCH_API ZeroPadOptions {
ZeroPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}
/// The size of the padding.
/// - If it is `int`, uses the same padding in all boundaries.
/// - If it is a 2-`tuple` (for ZeroPad1d), uses (padding_left,
/// padding_right).
/// - If it is a 4-`tuple` (for ZeroPad2d), uses (padding_left, padding_right,
/// padding_top, padding_bottom).
/// - If it is a 6-`tuple` (for ZeroPad3d), uses
/// (padding_left, padding_right, padding_top, padding_bottom,
/// padding_front, padding_back).
TORCH_ARG(ExpandingArray<D * 2>, padding);
};
/// `ZeroPadOptions` specialized for the `ZeroPad1d` module.
///
/// Example:
/// ```
/// ZeroPad1d model(ZeroPad1dOptions({3, 1}));
/// ```
using ZeroPad1dOptions = ZeroPadOptions<1>;
/// `ZeroPadOptions` specialized for the `ZeroPad2d` module.
///
/// Example:
/// ```
/// ZeroPad2d model(ZeroPad2dOptions({1, 1, 2, 0}));
/// ```
using ZeroPad2dOptions = ZeroPadOptions<2>;
/// `ZeroPadOptions` specialized for the `ZeroPad3d` module.
///
/// Example:
/// ```
/// ZeroPad3d model(ZeroPad3dOptions({1, 2, 1, 2, 1, 2}));
/// ```
using ZeroPad3dOptions = ZeroPadOptions<3>;
// ============================================================================
/// Options for a `D`-dimensional ConstantPad module.
template <size_t D>
struct TORCH_API ConstantPadOptions {
ConstantPadOptions(ExpandingArray<D * 2> padding, double value)
: padding_(padding), value_(value) {}
/// The size of the padding.
/// - If it is `int`, uses the same padding in all boundaries.
/// - If it is a 2-`tuple` (for ConstantPad1d), uses (padding_left,
/// padding_right).
/// - If it is a 4-`tuple` (for ConstantPad2d), uses (padding_left,
/// padding_right, padding_top, padding_bottom).
/// - If it is a 6-`tuple` (for ConstantPad3d), uses
/// (padding_left, padding_right, padding_top, padding_bottom,
/// padding_front, padding_back).
TORCH_ARG(ExpandingArray<D * 2>, padding);
/// Fill value for constant padding.
TORCH_ARG(double, value);
};
/// `ConstantPadOptions` specialized for the `ConstantPad1d` module.
///
/// Example:
/// ```
/// ConstantPad1d model(ConstantPad1dOptions({3, 1}, 3.5));
/// ```
using ConstantPad1dOptions = ConstantPadOptions<1>;
/// `ConstantPadOptions` specialized for the `ConstantPad2d` module.
///
/// Example:
/// ```
/// ConstantPad2d model(ConstantPad2dOptions({3, 0, 2, 1}, 3.5));
/// ```
using ConstantPad2dOptions = ConstantPadOptions<2>;
/// `ConstantPadOptions` specialized for the `ConstantPad3d` module.
///
/// Example:
/// ```
/// ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5));
/// ```
using ConstantPad3dOptions = ConstantPadOptions<3>;
// ============================================================================
namespace functional {
/// Options for `torch::nn::functional::pad`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1,
/// 2}).mode(torch::kReplicate));
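/// // Hedged ordering note: pad pairs apply from the last dimension inward,
/// // so {1, 2, 2, 1, 1, 2} pads W by (1, 2), H by (2, 1), and D by (1, 2).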
/// ```
struct TORCH_API PadFuncOptions {
typedef std::variant<
enumtype::kConstant,
enumtype::kReflect,
enumtype::kReplicate,
enumtype::kCircular>
mode_t;
PadFuncOptions(std::vector<int64_t> pad);
/// An m-element tuple, where m/2 <= number of input dimensions and m is even.
TORCH_ARG(std::vector<int64_t>, pad);
/// "constant", "reflect", "replicate" or "circular". Default: "constant"
TORCH_ARG(mode_t, mode) = torch::kConstant;
/// fill value for "constant" padding. Default: 0
TORCH_ARG(double, value) = 0;
};
} // namespace functional
} // namespace torch::nn
```
|
============================================================================================================================================================
SOURCE CODE FILE: pixelshuffle.h
LINES: 1
SIZE: 1.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\pixelshuffle.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for the `PixelShuffle` module.
///
/// Example:
/// ```
/// PixelShuffle model(PixelShuffleOptions(5));
/// ```
struct TORCH_API PixelShuffleOptions {
PixelShuffleOptions(int64_t upscale_factor)
: upscale_factor_(upscale_factor) {}
/// Factor to increase spatial resolution by
TORCH_ARG(int64_t, upscale_factor);
};
/// Options for the `PixelUnshuffle` module.
///
/// Example:
/// ```
/// PixelUnshuffle model(PixelUnshuffleOptions(5));
/// ```
struct TORCH_API PixelUnshuffleOptions {
/* implicit */ PixelUnshuffleOptions(int64_t downscale_factor)
: downscale_factor_(downscale_factor) {}
/// Factor to decrease spatial resolution by
TORCH_ARG(int64_t, downscale_factor);
};
namespace functional {
/// Options for `torch::nn::functional::pixel_shuffle`.
///
/// See the documentation for `torch::nn::PixelShuffleOptions` class to learn
/// what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
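/// // Hedged shape note: with an upscale factor of 2, a (1, 8, 4, 4) input
/// // becomes (1, 2, 8, 8); channels shrink by r^2 while H and W grow by r.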
/// ```
using PixelShuffleFuncOptions = PixelShuffleOptions;
/// Options for `torch::nn::functional::pixel_unshuffle`.
///
/// See the documentation for `torch::nn::PixelUnshuffleOptions` class to learn
/// what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::pixel_unshuffle(x, F::PixelUnshuffleFuncOptions(2));
/// ```
using PixelUnshuffleFuncOptions = PixelUnshuffleOptions;
} // namespace functional
} // namespace torch::nn
```
|
=======================================================================================================================================================
SOURCE CODE FILE: pooling.h
LINES: 1
SIZE: 17.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\pooling.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/expanding_array.h>
#include <torch/types.h>
namespace torch::nn {
/// Options for a `D`-dimensional avgpool module.
template <size_t D>
struct AvgPoolOptions {
AvgPoolOptions(ExpandingArray<D> kernel_size)
: kernel_size_(kernel_size), stride_(kernel_size) {}
/// the size of the window to take an average over
TORCH_ARG(ExpandingArray<D>, kernel_size);
/// the stride of the window. Default value is `kernel_size`
TORCH_ARG(ExpandingArray<D>, stride);
/// implicit zero padding to be added on both sides
TORCH_ARG(ExpandingArray<D>, padding) = 0;
/// when True, will use `ceil` instead of `floor` to compute the output shape
TORCH_ARG(bool, ceil_mode) = false;
/// when True, will include the zero-padding in the averaging calculation
TORCH_ARG(bool, count_include_pad) = true;
/// if specified, it will be used as divisor, otherwise size of the pooling
/// region will be used.
TORCH_ARG(std::optional<int64_t>, divisor_override) = std::nullopt;
};
/// `AvgPoolOptions` specialized for the `AvgPool1d` module.
///
/// Example:
/// ```
/// AvgPool1d model(AvgPool1dOptions(3).stride(2));
/// ```
using AvgPool1dOptions = AvgPoolOptions<1>;
/// `AvgPoolOptions` specialized for the `AvgPool2d` module.
///
/// Example:
/// ```
/// AvgPool2d model(AvgPool2dOptions({3, 2}).stride({2, 2}));
/// ```
using AvgPool2dOptions = AvgPoolOptions<2>;
/// `AvgPoolOptions` specialized for the `AvgPool3d` module.
///
/// Example:
/// ```
/// AvgPool3d model(AvgPool3dOptions(5).stride(2));
/// ```
using AvgPool3dOptions = AvgPoolOptions<3>;
namespace functional {
/// Options for `torch::nn::functional::avg_pool1d`.
///
/// See the documentation for `torch::nn::AvgPool1dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2));
/// ```
using AvgPool1dFuncOptions = AvgPool1dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::avg_pool2d`.
///
/// See the documentation for `torch::nn::AvgPool2dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2));
/// ```
using AvgPool2dFuncOptions = AvgPool2dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::avg_pool3d`.
///
/// See the documentation for `torch::nn::AvgPool3dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2));
/// ```
using AvgPool3dFuncOptions = AvgPool3dOptions;
} // namespace functional
// ============================================================================
/// Options for a `D`-dimensional maxpool module.
template <size_t D>
struct MaxPoolOptions {
MaxPoolOptions(ExpandingArray<D> kernel_size)
: kernel_size_(kernel_size), stride_(kernel_size) {}
/// the size of the window to take a max over
TORCH_ARG(ExpandingArray<D>, kernel_size);
/// the stride of the window. Default value is `kernel_size`
TORCH_ARG(ExpandingArray<D>, stride);
/// implicit zero padding to be added on both sides
TORCH_ARG(ExpandingArray<D>, padding) = 0;
/// a parameter that controls the stride of elements in the window
TORCH_ARG(ExpandingArray<D>, dilation) = 1;
/// when True, will use `ceil` instead of `floor` to compute the output shape
TORCH_ARG(bool, ceil_mode) = false;
};
/// `MaxPoolOptions` specialized for the `MaxPool1d` module.
///
/// Example:
/// ```
/// MaxPool1d model(MaxPool1dOptions(3).stride(2));
/// ```
using MaxPool1dOptions = MaxPoolOptions<1>;
/// `MaxPoolOptions` specialized for the `MaxPool2d` module.
///
/// Example:
/// ```
/// MaxPool2d model(MaxPool2dOptions({3, 2}).stride({2, 2}));
/// ```
using MaxPool2dOptions = MaxPoolOptions<2>;
/// `MaxPoolOptions` specialized for the `MaxPool3d` module.
///
/// Example:
/// ```
/// MaxPool3d model(MaxPool3dOptions(3).stride(2));
/// ```
using MaxPool3dOptions = MaxPoolOptions<3>;
namespace functional {
/// Options for `torch::nn::functional::max_pool1d` and
/// `torch::nn::functional::max_pool1d_with_indices`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2));
/// ```
using MaxPool1dFuncOptions = MaxPool1dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::max_pool2d` and
/// `torch::nn::functional::max_pool2d_with_indices`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2));
/// ```
using MaxPool2dFuncOptions = MaxPool2dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::max_pool3d` and
/// `torch::nn::functional::max_pool3d_with_indices`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2));
/// ```
using MaxPool3dFuncOptions = MaxPool3dOptions;
} // namespace functional
// ============================================================================
/// Options for a `D`-dimensional adaptive maxpool module.
template <typename output_size_t>
struct AdaptiveMaxPoolOptions {
AdaptiveMaxPoolOptions(output_size_t output_size)
: output_size_(output_size) {}
/// the target output size
TORCH_ARG(output_size_t, output_size);
};
/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool1d` module.
///
/// Example:
/// ```
/// AdaptiveMaxPool1d model(AdaptiveMaxPool1dOptions(3));
/// ```
using AdaptiveMaxPool1dOptions = AdaptiveMaxPoolOptions<ExpandingArray<1>>;
/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool2d` module.
///
/// Example:
/// ```
/// AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2}));
/// ```
using AdaptiveMaxPool2dOptions =
AdaptiveMaxPoolOptions<ExpandingArrayWithOptionalElem<2>>;
/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool3d` module.
///
/// Example:
/// ```
/// AdaptiveMaxPool3d model(AdaptiveMaxPool3dOptions(3));
/// ```
using AdaptiveMaxPool3dOptions =
AdaptiveMaxPoolOptions<ExpandingArrayWithOptionalElem<3>>;
namespace functional {
/// Options for `torch::nn::functional::adaptive_max_pool1d` and
/// `torch::nn::functional::adaptive_max_pool1d_with_indices`
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3));
/// ```
using AdaptiveMaxPool1dFuncOptions = AdaptiveMaxPool1dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::adaptive_max_pool2d` and
/// `torch::nn::functional::adaptive_max_pool2d_with_indices`
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3));
/// ```
using AdaptiveMaxPool2dFuncOptions = AdaptiveMaxPool2dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::adaptive_max_pool3d` and
/// `torch::nn::functional::adaptive_max_pool3d_with_indices`
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3));
/// ```
using AdaptiveMaxPool3dFuncOptions = AdaptiveMaxPool3dOptions;
} // namespace functional
// ============================================================================
/// Options for a `D`-dimensional adaptive avgpool module.
template <typename output_size_t>
struct AdaptiveAvgPoolOptions {
AdaptiveAvgPoolOptions(output_size_t output_size)
: output_size_(output_size) {}
/// the target output size
TORCH_ARG(output_size_t, output_size);
};
/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool1d` module.
///
/// Example:
/// ```
/// AdaptiveAvgPool1d model(AdaptiveAvgPool1dOptions(5));
/// ```
using AdaptiveAvgPool1dOptions = AdaptiveAvgPoolOptions<ExpandingArray<1>>;
/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool2d` module.
///
/// Example:
/// ```
/// AdaptiveAvgPool2d model(AdaptiveAvgPool2dOptions({3, 2}));
/// ```
using AdaptiveAvgPool2dOptions =
AdaptiveAvgPoolOptions<ExpandingArrayWithOptionalElem<2>>;
/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool3d` module.
///
/// Example:
/// ```
/// AdaptiveAvgPool3d model(AdaptiveAvgPool3dOptions(3));
/// ```
using AdaptiveAvgPool3dOptions =
AdaptiveAvgPoolOptions<ExpandingArrayWithOptionalElem<3>>;
namespace functional {
/// Options for `torch::nn::functional::adaptive_avg_pool1d`.
///
/// See the documentation for `torch::nn::AdaptiveAvgPool1dOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3));
/// ```
using AdaptiveAvgPool1dFuncOptions = AdaptiveAvgPool1dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::adaptive_avg_pool2d`.
///
/// See the documentation for `torch::nn::AdaptiveAvgPool2dOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3));
/// ```
using AdaptiveAvgPool2dFuncOptions = AdaptiveAvgPool2dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::adaptive_avg_pool3d`.
///
/// See the documentation for `torch::nn::AdaptiveAvgPool3dOptions` class to
/// learn what arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3));
/// ```
using AdaptiveAvgPool3dFuncOptions = AdaptiveAvgPool3dOptions;
} // namespace functional
// ============================================================================
/// Options for a `D`-dimensional maxunpool module.
template <size_t D>
struct MaxUnpoolOptions {
MaxUnpoolOptions(ExpandingArray<D> kernel_size)
: kernel_size_(kernel_size), stride_(kernel_size) {}
/// the size of the window to take a max over
TORCH_ARG(ExpandingArray<D>, kernel_size);
/// the stride of the window. Default value is `kernel_size`
TORCH_ARG(ExpandingArray<D>, stride);
/// implicit zero padding to be added on both sides
TORCH_ARG(ExpandingArray<D>, padding) = 0;
};
/// `MaxUnpoolOptions` specialized for the `MaxUnpool1d` module.
///
/// Example:
/// ```
/// MaxUnpool1d model(MaxUnpool1dOptions(3).stride(2).padding(1));
/// ```
using MaxUnpool1dOptions = MaxUnpoolOptions<1>;
/// `MaxUnpoolOptions` specialized for the `MaxUnpool2d` module.
///
/// Example:
/// ```
/// MaxUnpool2d model(MaxUnpool2dOptions(3).stride(2).padding(1));
/// ```
using MaxUnpool2dOptions = MaxUnpoolOptions<2>;
/// `MaxUnpoolOptions` specialized for the `MaxUnpool3d` module.
///
/// Example:
/// ```
/// MaxUnpool3d model(MaxUnpool3dOptions(3).stride(2).padding(1));
/// ```
using MaxUnpool3dOptions = MaxUnpoolOptions<3>;
// ============================================================================
namespace functional {
/// Options for a `D`-dimensional maxunpool functional.
template <size_t D>
struct MaxUnpoolFuncOptions {
MaxUnpoolFuncOptions(ExpandingArray<D> kernel_size)
: kernel_size_(kernel_size), stride_(kernel_size) {}
/// the size of the window to take a max over
TORCH_ARG(ExpandingArray<D>, kernel_size);
/// the stride of the window. Default value is `kernel_size`
TORCH_ARG(ExpandingArray<D>, stride);
/// implicit zero padding to be added on both sides
TORCH_ARG(ExpandingArray<D>, padding) = 0;
/// the targeted output size
TORCH_ARG(std::optional<std::vector<int64_t>>, output_size) = std::nullopt;
};
/// `MaxUnpoolFuncOptions` specialized for
/// `torch::nn::functional::max_unpool1d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_unpool1d(x, indices,
/// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1));
/// ```
using MaxUnpool1dFuncOptions = MaxUnpoolFuncOptions<1>;
/// `MaxUnpoolFuncOptions` specialized for
/// `torch::nn::functional::max_unpool2d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_unpool2d(x, indices,
/// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1));
/// ```
using MaxUnpool2dFuncOptions = MaxUnpoolFuncOptions<2>;
/// `MaxUnpoolFuncOptions` specialized for
/// `torch::nn::functional::max_unpool3d`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3));
/// ```
using MaxUnpool3dFuncOptions = MaxUnpoolFuncOptions<3>;
} // namespace functional
// ============================================================================
/// Options for a `D`-dimensional fractional maxpool module.
template <size_t D>
struct FractionalMaxPoolOptions {
FractionalMaxPoolOptions(ExpandingArray<D> kernel_size)
: kernel_size_(kernel_size) {}
/// the size of the window to take a max over
TORCH_ARG(ExpandingArray<D>, kernel_size);
/// the target output size of the image
TORCH_ARG(std::optional<ExpandingArray<D>>, output_size) = std::nullopt;
/// If one wants to have an output size as a ratio of the input size, this
/// option can be given. This has to be a number or tuple in the range (0, 1)
using ExpandingArrayDouble = torch::ExpandingArray<D, double>;
TORCH_ARG(std::optional<ExpandingArrayDouble>, output_ratio) = std::nullopt;
/// internal: optional tensor of random samples in (0, 1) used to derive the
/// pooling regions; mainly exposed so tests can be made deterministic
TORCH_ARG(torch::Tensor, _random_samples) = Tensor();
};
/// `FractionalMaxPoolOptions` specialized for the `FractionalMaxPool2d` module.
///
/// Example:
/// ```
/// FractionalMaxPool2d model(FractionalMaxPool2dOptions(5).output_size(1));
/// ```
using FractionalMaxPool2dOptions = FractionalMaxPoolOptions<2>;
/// `FractionalMaxPoolOptions` specialized for the `FractionalMaxPool3d` module.
///
/// Example:
/// ```
/// FractionalMaxPool3d model(FractionalMaxPool3dOptions(5).output_size(1));
/// ```
using FractionalMaxPool3dOptions = FractionalMaxPoolOptions<3>;
namespace functional {
/// Options for `torch::nn::functional::fractional_max_pool2d` and
/// `torch::nn::functional::fractional_max_pool2d_with_indices`
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fractional_max_pool2d(x,
/// F::FractionalMaxPool2dFuncOptions(3).output_size(2));
/// ```
using FractionalMaxPool2dFuncOptions = FractionalMaxPool2dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::fractional_max_pool3d` and
/// `torch::nn::functional::fractional_max_pool3d_with_indices`
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::fractional_max_pool3d(x,
/// F::FractionalMaxPool3dFuncOptions(3).output_size(2));
/// ```
using FractionalMaxPool3dFuncOptions = FractionalMaxPool3dOptions;
} // namespace functional
// ============================================================================
/// Options for a `D`-dimensional lppool module.
template <size_t D>
struct LPPoolOptions {
LPPoolOptions(double norm_type, ExpandingArray<D> kernel_size)
: norm_type_(norm_type),
kernel_size_(kernel_size),
stride_(kernel_size) {}
/// the exponent `p` of the `L_p` norm used when pooling
TORCH_ARG(double, norm_type);
/// the size of the window to take an average over
TORCH_ARG(ExpandingArray<D>, kernel_size);
/// the stride of the window. Default value is `kernel_size`
TORCH_ARG(ExpandingArray<D>, stride);
/// when True, will use `ceil` instead of `floor` to compute the output shape
TORCH_ARG(bool, ceil_mode) = false;
};
/// `LPPoolOptions` specialized for the `LPPool1d` module.
///
/// Example:
/// ```
/// LPPool1d model(LPPool1dOptions(1, 2).stride(5).ceil_mode(true));
/// ```
using LPPool1dOptions = LPPoolOptions<1>;
/// `LPPoolOptions` specialized for the `LPPool2d` module.
///
/// Example:
/// ```
/// LPPool2d model(LPPool2dOptions(1, std::vector<int64_t>({3, 4})).stride({5,
/// 6}).ceil_mode(true));
/// ```
using LPPool2dOptions = LPPoolOptions<2>;
/// `LPPoolOptions` specialized for the `LPPool3d` module.
///
/// Example:
/// ```
/// LPPool3d model(LPPool3dOptions(1, std::vector<int64_t>({3, 4, 5})).stride(
/// {5, 6, 7}).ceil_mode(true));
/// ```
using LPPool3dOptions = LPPoolOptions<3>;
namespace functional {
/// Options for `torch::nn::functional::lp_pool1d`.
///
/// See the documentation for `torch::nn::LPPool1dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2));
/// ```
using LPPool1dFuncOptions = LPPool1dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::lp_pool2d`.
///
/// See the documentation for `torch::nn::LPPool2dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2));
/// ```
using LPPool2dFuncOptions = LPPool2dOptions;
} // namespace functional
namespace functional {
/// Options for `torch::nn::functional::lp_pool3d`.
///
/// See the documentation for `torch::nn::LPPool3dOptions` class to learn what
/// arguments are supported.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::lp_pool3d(x, F::LPPool3dFuncOptions(2, {2, 3, 4}).stride(2));
/// ```
using LPPool3dFuncOptions = LPPool3dOptions;
} // namespace functional
} // namespace torch::nn
```
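The max-pool and max-unpool options above interact through the returned indices; a minimal sketch (illustrative, assuming `torch/torch.h` and C++17 structured bindings):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({1, 1, 4, 4});
  // The *_with_indices variant also returns the flat index of each maximum,
  // which max_unpool2d needs to scatter the values back.
  auto [pooled, indices] =
      F::max_pool2d_with_indices(x, F::MaxPool2dFuncOptions(2).stride(2));
  auto restored =
      F::max_unpool2d(pooled, indices, F::MaxUnpool2dFuncOptions(2).stride(2));
  std::cout << restored.sizes() << '\n';  // [1, 1, 4, 4]
  // Adaptive pooling takes a target output size instead of a kernel size.
  auto pooled_to_1x1 =
      F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(1));
  std::cout << pooled_to_1x1.sizes() << '\n';  // [1, 1, 1, 1]
}
```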
|
===================================================================================================================================================
SOURCE CODE FILE: rnn.h
LINES: 1
SIZE: 8.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\rnn.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>
namespace torch::nn {
namespace detail {
/// Common options for RNN, LSTM and GRU modules.
struct TORCH_API RNNOptionsBase {
typedef std::variant<
enumtype::kLSTM,
enumtype::kGRU,
enumtype::kRNN_TANH,
enumtype::kRNN_RELU>
rnn_options_base_mode_t;
RNNOptionsBase(
rnn_options_base_mode_t mode,
int64_t input_size,
int64_t hidden_size);
TORCH_ARG(rnn_options_base_mode_t, mode);
/// The number of features of a single sample in the input sequence `x`.
TORCH_ARG(int64_t, input_size);
/// The number of features in the hidden state `h`.
TORCH_ARG(int64_t, hidden_size);
/// The number of recurrent layers (cells) to use.
TORCH_ARG(int64_t, num_layers) = 1;
/// Whether a bias term should be added to all linear operations.
TORCH_ARG(bool, bias) = true;
/// If true, the input sequence should be provided as `(batch, sequence,
/// features)`. If false (default), the expected layout is `(sequence, batch,
/// features)`.
TORCH_ARG(bool, batch_first) = false;
/// If non-zero, adds dropout with the given probability to the output of each
/// RNN layer, except the final layer.
TORCH_ARG(double, dropout) = 0.0;
/// Whether to make the RNN bidirectional.
TORCH_ARG(bool, bidirectional) = false;
/// Cell projection dimension. If 0, projections are not added. Can only be
/// used for LSTMs.
TORCH_ARG(int64_t, proj_size) = 0;
};
} // namespace detail
/// Options for the `RNN` module.
///
/// Example:
/// ```
/// RNN model(RNNOptions(128,
/// 64).num_layers(3).dropout(0.2).nonlinearity(torch::kTanh));
/// ```
struct TORCH_API RNNOptions {
typedef std::variant<enumtype::kTanh, enumtype::kReLU> nonlinearity_t;
RNNOptions(int64_t input_size, int64_t hidden_size);
/// The number of expected features in the input `x`
TORCH_ARG(int64_t, input_size);
/// The number of features in the hidden state `h`
TORCH_ARG(int64_t, hidden_size);
/// Number of recurrent layers. E.g., setting ``num_layers=2``
/// would mean stacking two RNNs together to form a `stacked RNN`,
/// with the second RNN taking in outputs of the first RNN and
/// computing the final results. Default: 1
TORCH_ARG(int64_t, num_layers) = 1;
/// The non-linearity to use. Can be either ``torch::kTanh`` or
/// ``torch::kReLU``. Default: ``torch::kTanh``
TORCH_ARG(nonlinearity_t, nonlinearity) = torch::kTanh;
/// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
/// Default: ``true``
TORCH_ARG(bool, bias) = true;
/// If ``true``, then the input and output tensors are provided
/// as `(batch, seq, feature)`. Default: ``false``
TORCH_ARG(bool, batch_first) = false;
/// If non-zero, introduces a `Dropout` layer on the outputs of each
/// RNN layer except the last layer, with dropout probability equal to
/// `dropout`. Default: 0
TORCH_ARG(double, dropout) = 0.0;
/// If ``true``, becomes a bidirectional RNN. Default: ``false``
TORCH_ARG(bool, bidirectional) = false;
};
/// Options for the `LSTM` module.
///
/// Example:
/// ```
/// LSTM model(LSTMOptions(2,
/// 4).num_layers(3).batch_first(false).bidirectional(true));
/// ```
struct TORCH_API LSTMOptions {
LSTMOptions(int64_t input_size, int64_t hidden_size);
/// The number of expected features in the input `x`
TORCH_ARG(int64_t, input_size);
/// The number of features in the hidden state `h`
TORCH_ARG(int64_t, hidden_size);
/// Number of recurrent layers. E.g., setting ``num_layers=2``
/// would mean stacking two LSTMs together to form a `stacked LSTM`,
/// with the second LSTM taking in outputs of the first LSTM and
/// computing the final results. Default: 1
TORCH_ARG(int64_t, num_layers) = 1;
/// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
/// Default: ``true``
TORCH_ARG(bool, bias) = true;
/// If ``true``, then the input and output tensors are provided
/// as (batch, seq, feature). Default: ``false``
TORCH_ARG(bool, batch_first) = false;
/// If non-zero, introduces a `Dropout` layer on the outputs of each
/// LSTM layer except the last layer, with dropout probability equal to
/// `dropout`. Default: 0
TORCH_ARG(double, dropout) = 0.0;
/// If ``true``, becomes a bidirectional LSTM. Default: ``false``
TORCH_ARG(bool, bidirectional) = false;
/// Cell projection dimension. If 0, projections are not added
TORCH_ARG(int64_t, proj_size) = 0;
};
/// Options for the `GRU` module.
///
/// Example:
/// ```
/// GRU model(GRUOptions(2,
/// 4).num_layers(3).batch_first(false).bidirectional(true));
/// ```
struct TORCH_API GRUOptions {
GRUOptions(int64_t input_size, int64_t hidden_size);
/// The number of expected features in the input `x`
TORCH_ARG(int64_t, input_size);
/// The number of features in the hidden state `h`
TORCH_ARG(int64_t, hidden_size);
/// Number of recurrent layers. E.g., setting ``num_layers=2``
/// would mean stacking two GRUs together to form a `stacked GRU`,
/// with the second GRU taking in outputs of the first GRU and
/// computing the final results. Default: 1
TORCH_ARG(int64_t, num_layers) = 1;
/// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
/// Default: ``true``
TORCH_ARG(bool, bias) = true;
/// If ``true``, then the input and output tensors are provided
/// as (batch, seq, feature). Default: ``false``
TORCH_ARG(bool, batch_first) = false;
/// If non-zero, introduces a `Dropout` layer on the outputs of each
/// GRU layer except the last layer, with dropout probability equal to
/// `dropout`. Default: 0
TORCH_ARG(double, dropout) = 0.0;
/// If ``true``, becomes a bidirectional GRU. Default: ``false``
TORCH_ARG(bool, bidirectional) = false;
};
namespace detail {
/// Common options for RNNCell, LSTMCell and GRUCell modules
struct TORCH_API RNNCellOptionsBase {
RNNCellOptionsBase(
int64_t input_size,
int64_t hidden_size,
bool bias,
int64_t num_chunks);
TORCH_ARG(int64_t, input_size);
TORCH_ARG(int64_t, hidden_size);
TORCH_ARG(bool, bias);
TORCH_ARG(int64_t, num_chunks);
};
} // namespace detail
/// Options for the `RNNCell` module.
///
/// Example:
/// ```
/// RNNCell model(RNNCellOptions(20,
/// 10).bias(false).nonlinearity(torch::kReLU));
/// ```
struct TORCH_API RNNCellOptions {
typedef std::variant<enumtype::kTanh, enumtype::kReLU> nonlinearity_t;
RNNCellOptions(int64_t input_size, int64_t hidden_size);
/// The number of expected features in the input `x`
TORCH_ARG(int64_t, input_size);
/// The number of features in the hidden state `h`
TORCH_ARG(int64_t, hidden_size);
/// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
/// Default: ``true``
TORCH_ARG(bool, bias) = true;
/// The non-linearity to use. Can be either ``torch::kTanh`` or
/// ``torch::kReLU``. Default: ``torch::kTanh``
TORCH_ARG(nonlinearity_t, nonlinearity) = torch::kTanh;
};
/// Options for the `LSTMCell` module.
///
/// Example:
/// ```
/// LSTMCell model(LSTMCellOptions(20, 10).bias(false));
/// ```
struct TORCH_API LSTMCellOptions {
LSTMCellOptions(int64_t input_size, int64_t hidden_size);
/// The number of expected features in the input `x`
TORCH_ARG(int64_t, input_size);
/// The number of features in the hidden state `h`
TORCH_ARG(int64_t, hidden_size);
/// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
/// Default: ``true``
TORCH_ARG(bool, bias) = true;
};
/// Options for the `GRUCell` module.
///
/// Example:
/// ```
/// GRUCell model(GRUCellOptions(20, 10).bias(false));
/// ```
struct TORCH_API GRUCellOptions {
GRUCellOptions(int64_t input_size, int64_t hidden_size);
/// The number of expected features in the input `x`
TORCH_ARG(int64_t, input_size);
/// The number of features in the hidden state `h`
TORCH_ARG(int64_t, hidden_size);
/// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`.
/// Default: ``true``
TORCH_ARG(bool, bias) = true;
};
} // namespace torch::nn
```
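A brief, hedged sketch of how these options shape an `LSTM`; its forward pass returns the output sequence together with the final `(h, c)` state:

```cpp
#include <torch/torch.h>
#include <iostream>
#include <tuple>

int main() {
  // 2-layer bidirectional LSTM over 10 input features with hidden size 20.
  torch::nn::LSTM lstm(
      torch::nn::LSTMOptions(10, 20).num_layers(2).bidirectional(true));
  // batch_first is false by default, so the layout is (sequence, batch, features).
  auto input = torch::randn({5, 3, 10});
  auto [output, state] = lstm->forward(input);
  // Bidirectionality doubles the output feature dimension.
  std::cout << output.sizes() << '\n';  // [5, 3, 40]
  // h_n is (num_layers * num_directions, batch, hidden_size).
  std::cout << std::get<0>(state).sizes() << '\n';  // [4, 3, 20]
}
```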
|
===========================================================================================================================================================
SOURCE CODE FILE: transformer.h
LINES: 1
SIZE: 1.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\transformer.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>
#include <torch/nn/modules/container/any.h>
#include <torch/nn/options/transformerlayer.h>
namespace torch::nn {
/// Options for the `Transformer` module
///
/// Example:
/// ```
/// TransformerOptions options;
/// TransformerOptions options(16, 4);
/// auto options = TransformerOptions().d_model(4).nhead(2).dropout(0.0);
/// ```
struct TORCH_API TransformerOptions {
// The following constructors are commonly used
// Please don't add more unless it is proved as a common usage
TransformerOptions() = default;
TransformerOptions(int64_t d_model, int64_t nhead);
TransformerOptions(
int64_t d_model,
int64_t nhead,
int64_t num_encoder_layers,
int64_t num_decoder_layers);
/// the number of expected features in the encoder/decoder inputs
/// (default=512)
TORCH_ARG(int64_t, d_model) = 512;
/// the number of heads in the multiheadattention models (default=8)
TORCH_ARG(int64_t, nhead) = 8;
/// the number of sub-encoder-layers in the encoder (default=6)
TORCH_ARG(int64_t, num_encoder_layers) = 6;
/// the number of sub-decoder-layers in the decoder (default=6)
TORCH_ARG(int64_t, num_decoder_layers) = 6;
/// the dimension of the feedforward network model (default=2048)
TORCH_ARG(int64_t, dim_feedforward) = 2048;
/// the dropout value (default=0.1)
TORCH_ARG(double, dropout) = 0.1;
/// the activation function of encoder/decoder intermediate layer
/// (default=``torch::kReLU``)
TORCH_ARG(activation_t, activation) = torch::kReLU;
/// custom encoder (default=None)
TORCH_ARG(AnyModule, custom_encoder);
/// custom decoder (default=None)
TORCH_ARG(AnyModule, custom_decoder);
};
} // namespace torch::nn
```
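A hedged end-to-end sketch with a deliberately small configuration (illustrative, not canonical):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // d_model=32, 4 heads, 2 encoder layers, 2 decoder layers.
  torch::nn::Transformer transformer(
      torch::nn::TransformerOptions(32, 4, 2, 2).dim_feedforward(64));
  // Inputs are (sequence, batch, d_model); source and target lengths may differ.
  auto src = torch::randn({10, 2, 32});
  auto tgt = torch::randn({7, 2, 32});
  auto out = transformer->forward(src, tgt);
  std::cout << out.sizes() << '\n';  // [7, 2, 32]
}
```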
|
================================================================================================================================================================
SOURCE CODE FILE: transformercoder.h
LINES: 1
SIZE: 2.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\transformercoder.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>
#include <torch/nn/modules/container/any.h>
#include <torch/nn/modules/transformerlayer.h>
namespace torch::nn {
/// Options for the `TransformerEncoder` module.
///
/// Example:
/// ```
/// TransformerEncoderLayer encoderLayer(
///     TransformerEncoderLayerOptions(512, 8).dropout(0.1));
/// auto options = TransformerEncoderOptions(
///     encoderLayer, 6).norm(LayerNorm(LayerNormOptions({2})));
/// ```
struct TORCH_API TransformerEncoderOptions {
// This constructor keeps a shallow copy (shared ownership) of encoder_layer,
// so all the data in encoder_layer is preserved.
TransformerEncoderOptions(
TransformerEncoderLayer encoder_layer,
int64_t num_layers);
// This constructor creates a new TransformerEncoderLayer object from the
// passed-in encoder_layer_options.
TransformerEncoderOptions(
const TransformerEncoderLayerOptions& encoder_layer_options,
int64_t num_layers);
/// encoder layer to be cloned
TORCH_ARG(TransformerEncoderLayer, encoder_layer) = nullptr;
/// number of encoder layers
TORCH_ARG(int64_t, num_layers);
/// normalization module
TORCH_ARG(AnyModule, norm);
};
/// Options for the `TransformerDecoder` module.
///
/// Example:
/// ```
/// TransformerDecoderLayer decoder_layer(
///     TransformerDecoderLayerOptions(512, 8).dropout(0.1));
/// auto options = TransformerDecoderOptions(
///     decoder_layer, 6).norm(LayerNorm(LayerNormOptions({2})));
/// TransformerDecoder transformer_decoder(options);
/// ```
struct TORCH_API TransformerDecoderOptions {
// This constructor keeps a shallow copy (shared ownership) of the passed-in
// decoder_layer, so all the data in decoder_layer is preserved.
TransformerDecoderOptions(
TransformerDecoderLayer decoder_layer,
int64_t num_layers);
// This constructor creates a new TransformerDecoderLayer object from the
// passed-in decoder_layer_options.
TransformerDecoderOptions(
const TransformerDecoderLayerOptions& decoder_layer_options,
int64_t num_layers);
/// decoder layer to be cloned
TORCH_ARG(TransformerDecoderLayer, decoder_layer) = nullptr;
/// number of decoder layers
TORCH_ARG(int64_t, num_layers);
/// normalization module
TORCH_ARG(AnyModule, norm);
};
} // namespace torch::nn
```
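The second constructor form above (building fresh layers from a layer options spec) can be exercised as follows; a hedged sketch assuming the usual LibTorch setup:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Build a 3-layer encoder directly from a TransformerEncoderLayerOptions.
  torch::nn::TransformerEncoder encoder(torch::nn::TransformerEncoderOptions(
      torch::nn::TransformerEncoderLayerOptions(64, 8).dropout(0.1), 3));
  auto src = torch::randn({10, 2, 64});  // (sequence, batch, d_model)
  std::cout << encoder->forward(src).sizes() << '\n';  // [10, 2, 64]
}
```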
|
================================================================================================================================================================
SOURCE CODE FILE: transformerlayer.h
LINES: 1
SIZE: 2.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\transformerlayer.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>
namespace torch::nn {
using activation_t = std::variant<
enumtype::kReLU,
enumtype::kGELU,
std::function<Tensor(const Tensor&)>>;
/// Options for the `TransformerEncoderLayer` module.
///
/// Example:
/// ```
/// auto options = TransformerEncoderLayerOptions(512, 8).dropout(0.2);
/// ```
struct TORCH_API TransformerEncoderLayerOptions {
/* implicit */ TransformerEncoderLayerOptions(int64_t d_model, int64_t nhead);
/// the number of expected features in the input
TORCH_ARG(int64_t, d_model);
/// the number of heads in the multiheadattention models
TORCH_ARG(int64_t, nhead);
/// the dimension of the feedforward network model, default is 2048
TORCH_ARG(int64_t, dim_feedforward) = 2048;
/// the dropout value, default is 0.1
TORCH_ARG(double, dropout) = 0.1;
/// the activation function of intermediate layer, can be ``torch::kReLU``,
/// ``torch::kGELU``, or a unary callable. Default: ``torch::kReLU``
TORCH_ARG(activation_t, activation) = torch::kReLU;
};
// ============================================================================
/// Options for the `TransformerDecoderLayer` module.
///
/// Example:
/// ```
/// TransformerDecoderLayer model(TransformerDecoderLayerOptions(512,
/// 8).dropout(0.2));
/// ```
struct TORCH_API TransformerDecoderLayerOptions {
TransformerDecoderLayerOptions(int64_t d_model, int64_t nhead);
/// number of expected features in the input
TORCH_ARG(int64_t, d_model);
/// number of heads in the multiheadattention models
TORCH_ARG(int64_t, nhead);
/// dimension of the feedforward network model. Default: 2048
TORCH_ARG(int64_t, dim_feedforward) = 2048;
/// dropout value. Default: 0.1
TORCH_ARG(double, dropout) = 0.1;
/// activation function of intermediate layer, can be ``torch::kGELU``,
/// ``torch::kReLU``, or a unary callable. Default: ``torch::kReLU``
TORCH_ARG(activation_t, activation) = torch::kReLU;
};
} // namespace torch::nn
```
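Since `activation_t` admits a unary callable, a custom activation can be plugged in directly; a hedged sketch (the SiLU lambda here is purely illustrative):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // A custom activation passed as std::function<Tensor(const Tensor&)>.
  auto silu = [](const torch::Tensor& t) { return t * torch::sigmoid(t); };
  torch::nn::TransformerEncoderLayer layer(
      torch::nn::TransformerEncoderLayerOptions(32, 4).activation(silu));
  auto src = torch::randn({6, 2, 32});  // (sequence, batch, d_model)
  std::cout << layer->forward(src).sizes() << '\n';  // [6, 2, 32]
}
```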
|
==========================================================================================================================================================
SOURCE CODE FILE: upsampling.h
LINES: 1
SIZE: 4.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\upsampling.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/expanding_array.h>
#include <torch/types.h>
#include <vector>
namespace torch::nn {
/// Options for the `Upsample` module.
///
/// Example:
/// ```
/// Upsample
/// model(UpsampleOptions().scale_factor(std::vector<double>({3})).mode(torch::kLinear).align_corners(false));
/// ```
struct TORCH_API UpsampleOptions {
/// output spatial sizes.
TORCH_ARG(std::optional<std::vector<int64_t>>, size) = std::nullopt;
/// multiplier for spatial size.
TORCH_ARG(std::optional<std::vector<double>>, scale_factor) = std::nullopt;
/// the upsampling algorithm: one of "nearest", "linear", "bilinear",
/// "bicubic" and "trilinear". Default: "nearest"
typedef std::variant<
enumtype::kNearest,
enumtype::kLinear,
enumtype::kBilinear,
enumtype::kBicubic,
enumtype::kTrilinear>
mode_t;
TORCH_ARG(mode_t, mode) = torch::kNearest;
/// if "True", the corner pixels of the input and output tensors are
/// aligned, and thus preserving the values at those pixels. This only has
/// effect when :attr:`mode` is "linear", "bilinear", "bicubic", or
/// "trilinear". Default: "False"
TORCH_ARG(std::optional<bool>, align_corners) = std::nullopt;
};
namespace functional {
/// Options for `torch::nn::functional::interpolate`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::interpolate(input,
/// F::InterpolateFuncOptions().size(std::vector<int64_t>({4})).mode(torch::kNearest));
/// ```
struct TORCH_API InterpolateFuncOptions {
typedef std::variant<
enumtype::kNearest,
enumtype::kLinear,
enumtype::kBilinear,
enumtype::kBicubic,
enumtype::kTrilinear,
enumtype::kArea,
enumtype::kNearestExact>
mode_t;
/// output spatial sizes.
TORCH_ARG(std::optional<std::vector<int64_t>>, size) = std::nullopt;
/// multiplier for spatial size.
TORCH_ARG(std::optional<std::vector<double>>, scale_factor) = std::nullopt;
/// the upsampling algorithm: one of "nearest", "linear", "bilinear",
/// "bicubic", "trilinear", "area", "nearest-exact". Default: "nearest"
TORCH_ARG(mode_t, mode) = torch::kNearest;
/// Geometrically, we consider the pixels of the input and output as squares
/// rather than points. If set to "True", the input and output tensors are
/// aligned by the center points of their corner pixels, preserving the values
/// at the corner pixels. If set to "False", the input and output tensors
/// are aligned by the corner points of their corner pixels, and the
/// interpolation uses edge value padding for out-of-boundary values, making
/// this operation *independent* of input size when `scale_factor` is
/// kept the same. It is *required* when interpolating mode is "linear",
/// "bilinear", "bicubic" or "trilinear". Default: "False"
TORCH_ARG(std::optional<bool>, align_corners) = std::nullopt;
/// recompute the scale_factor for use in the
/// interpolation calculation. When `scale_factor` is passed as a parameter,
/// it is used to compute the `output_size`. If `recompute_scale_factor` is
/// `true` or not specified, a new `scale_factor` will be computed based on
/// the output and input sizes for use in the interpolation computation (i.e.
/// the computation will be identical to if the computed `output_size` were
/// passed-in explicitly). Otherwise, the passed-in `scale_factor` will be
/// used in the interpolation computation. Note that when `scale_factor` is
/// floating-point, the recomputed scale_factor may differ from the one passed
/// in due to rounding and precision issues.
TORCH_ARG(std::optional<bool>, recompute_scale_factor) = std::nullopt;
/// flag to apply anti-aliasing. Using anti-alias
/// option together with :attr:`align_corners` equals "False", interpolation
/// result would match Pillow result for downsampling operation. Supported
/// modes: "bilinear". Default: "False".
TORCH_ARG(bool, antialias) = false;
};
} // namespace functional
} // namespace torch::nn
```
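A short, hedged sketch of `F::interpolate` with these options (one scale factor per spatial dimension of a 4-D input):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({1, 3, 8, 8});
  // Double both spatial dimensions with bilinear interpolation.
  auto y = F::interpolate(
      x,
      F::InterpolateFuncOptions()
          .scale_factor(std::vector<double>({2.0, 2.0}))
          .mode(torch::kBilinear)
          .align_corners(false));
  std::cout << y.sizes() << '\n';  // [1, 3, 16, 16]
}
```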
|
======================================================================================================================================================
SOURCE CODE FILE: vision.h
LINES: 1
SIZE: 1.06 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\options\vision.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <torch/enum.h>
#include <torch/types.h>
namespace torch::nn::functional {
/// Options for `torch::nn::functional::grid_sample`.
///
/// Example:
/// ```
/// namespace F = torch::nn::functional;
/// F::grid_sample(input, grid,
/// F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true));
/// ```
struct TORCH_API GridSampleFuncOptions {
typedef std::variant<enumtype::kBilinear, enumtype::kNearest> mode_t;
typedef std::
variant<enumtype::kZeros, enumtype::kBorder, enumtype::kReflection>
padding_mode_t;
/// interpolation mode to calculate output values. Default: Bilinear
TORCH_ARG(mode_t, mode) = torch::kBilinear;
/// padding mode for outside grid values. Default: Zeros
TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros;
/// if true, the extrema (-1 and 1) of the normalized grid coordinates refer
/// to the center points of the input's corner pixels rather than to their
/// corner points. Default: false
TORCH_ARG(std::optional<bool>, align_corners) = std::nullopt;
};
} // namespace torch::nn::functional
```
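A hedged identity-warp sketch: an identity affine grid from `F::affine_grid` should reproduce the input under bilinear `grid_sample` when `align_corners` matches on both calls:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto input = torch::arange(16, torch::kFloat).reshape({1, 1, 4, 4});
  // Identity 2x3 affine matrix, batched to (N, 2, 3).
  auto theta =
      torch::tensor({{1.f, 0.f, 0.f}, {0.f, 1.f, 0.f}}).unsqueeze(0);
  auto grid = F::affine_grid(theta, {1, 1, 4, 4}, /*align_corners=*/true);
  auto out = F::grid_sample(
      input, grid,
      F::GridSampleFuncOptions().mode(torch::kBilinear).align_corners(true));
  std::cout << out.allclose(input) << '\n';  // 1
}
```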
|
==============================================================================================================================================================
SOURCE CODE FILE: data_parallel.h
LINES: 1
SIZE: 11.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\parallel\data_parallel.h
ENCODING: utf-8
```h
#pragma once
#include <torch/cuda.h>
#include <torch/nn/module.h>
#include <torch/nn/pimpl.h>
#include <torch/types.h>
#include <ATen/core/functional.h>
#include <torch/csrc/autograd/functions/comm.h>
#include <torch/csrc/autograd/functions/utils.h>
#include <ATen/Device.h>
#include <ATen/Parallel.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>
#include <exception>
#include <memory>
#include <mutex>
#include <vector>
namespace torch::nn {
namespace {
// Note [Replicating Modules]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Module replication is implemented in the following two steps:
// 1) create a module replica on each destination device using Module.clone().
// 2) manually add a gradient edge pointing from every parameter X in every
// module replica to the same parameter X in the original module, using
// ReduceAdd as the grad_fn.
//
// ReduceAdd can ONLY be used during the backward pass of data parallel. Forward
// pass cannot use this function as it does not setup gradient function and
// history at all. Do NOT try to use ReduceAdd for any other purposes.
//
// NB: An alternative is to add Broadcast and ReduceAddCoalesce to
// torch/csrc/autograd/functions/comm.cpp as normal autograd functions,
// implement a Replicatable (like cloneable) class and add it as a friend class
// in Module.h. In the forward pass, the Replicatable could use the Broadcast
// function to replicate every module parameter and set gradient functions using
// ReduceAddCoalesce (like how it is implemented in Python). However, unlike in
// Python, where changes to Linear._parameters["weight"] would also apply to
// Linear.weight (using Linear as an example), Linear.weight and
// Linear.parameters_["weight"] are two tensor objects pointing to the same
// TensorImpl. Assigning a new tensor to Linear.parameters_["weight"] will not
// change Linear.weight. To make this work, we will have to:
// 1) force every module to also inherit from Replicatable
// 2) force every module to implement an additional function, e.g.,
// Replicatable::load_params(), to pick up changes from parameters_ to their
// own member fields.
// This will be an overkill as Replicatable will only be used in data_parallel,
// not even ddp.
// Autograd function for the replicate step in data parallel. This is only used
// in data parallel, and should not be exposed as a user API.
struct ReduceAdd : public autograd::Node {
explicit ReduceAdd(const at::Device& destination_device)
: destination_device_(destination_device) {}
~ReduceAdd() override = default;
// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
autograd::variable_list apply(autograd::variable_list&& inputs) override {
TORCH_CHECK(
!torch::autograd::compute_requires_grad(inputs),
"ReduceAdd can only be used during the backward pass of data parallel.");
Tensor output = torch::zeros_like(inputs[0], {destination_device_});
for (auto& input : inputs) {
TORCH_CHECK(
input.sizes() == inputs[0].sizes(),
"All inputs of ReduceAdd must have the same size, but got ",
input.sizes(),
" and ",
inputs[0].sizes());
TORCH_CHECK(
input.dtype() == inputs[0].dtype(),
"All inputs of ReduceAdd must have the same dtype, but got ",
input.dtype(),
" and ",
inputs[0].dtype());
// TODO: use nccl reduce
output.add_(input.to(destination_device_));
}
return {output};
}
private:
at::Device destination_device_;
};
} // namespace
// A friend function to Module, it recursively sets gradient edges pointing from
// every parameter X in every module replica to the same parameter X in the
// original module. See [Replicating Modules]
template <typename ModuleType>
void replicate_grad_edges(
const std::shared_ptr<Module>& module,
const std::vector<std::shared_ptr<ModuleType>>& replicas,
const std::vector<Device>& devices) {
for (auto& parameter : module->named_parameters(/*recurse=*/false)) {
auto grad_fn = std::make_shared<ReduceAdd>((*parameter).device());
grad_fn->set_next_edges(autograd::collect_next_edges(*parameter));
for (const auto i : c10::irange(devices.size())) {
autograd::set_history(replicas[i]->parameters_[parameter.key()], grad_fn);
}
}
for (auto& buffer : module->named_buffers(/*recurse=*/false)) {
if (buffer.value().requires_grad()) {
auto grad_fn = std::make_shared<ReduceAdd>((*buffer).device());
grad_fn->set_next_edges(autograd::collect_next_edges(*buffer));
for (const auto i : c10::irange(devices.size())) {
autograd::set_history(replicas[i]->buffers_[buffer.key()], grad_fn);
}
}
}
for (auto& child : module->children_) {
std::vector<std::shared_ptr<Module>> child_replicas;
child_replicas.reserve(devices.size());
for (auto& replica : replicas) {
child_replicas.push_back(replica->children_[child.key()]);
}
// recursively set gradient edges for all children
replicate_grad_edges(*child, child_replicas, devices);
}
}
namespace parallel {
/// Replicates a module on the given list of devices.
/// A replica is created by calling `clone()` on the module. For this, the
/// module must inherit from `nn::Cloneable`, or define its own `clone()`
/// method, which is expected to perform a deep copy of the module.
template <typename ModuleType>
std::vector<std::shared_ptr<ModuleType>> replicate(
const std::shared_ptr<ModuleType>& module,
const std::vector<Device>& devices) {
std::vector<std::shared_ptr<ModuleType>> replicas;
replicas.reserve(devices.size());
for (const auto& device : devices) {
replicas.push_back(
std::dynamic_pointer_cast<ModuleType>(module->clone(device)));
}
// Configure gradient edges to point from replica parameters to original
// module parameters. See [Replicating Modules]
replicate_grad_edges(module, replicas, devices);
return replicas;
}
/// Replicates a module holder on the given list of devices.
/// This method allows calling `replicate()` with a module holder, such as
/// `Linear`.
template <typename ModuleType>
std::vector<ModuleHolder<ModuleType>> replicate(
const ModuleHolder<ModuleType>& module,
const std::vector<Device>& devices) {
auto ptrs = replicate(module.ptr(), devices);
return std::vector<ModuleHolder<ModuleType>>(ptrs.begin(), ptrs.end());
}
/// Applies the given inputs to the given modules in a parallel fashion.
/// Conceptually, a thread is spawned for each `(module, input)` pair, in which
/// `forward()` is called on the module with its corresponding input. The
/// outputs of the individual calls are stored in a vector and returned.
///
/// The first exception caught by any thread is stashed and rethrown after all
/// threads have completed their operation.
///
/// Further remarks:
/// 1. The length of the module container must match the length of the inputs.
/// 2. If a list of devices is supplied, it must match the list of modules in
/// length. Each device will be set to the current default device during the
/// invocation of the respective module. This means any tensors allocated on the
/// default device inside the module will be constructed on this device.
template <typename ModuleType>
std::vector<Tensor> parallel_apply(
std::vector<ModuleType>& modules,
const std::vector<Tensor>& inputs,
const std::optional<std::vector<Device>>& devices = std::nullopt) {
TORCH_CHECK(
modules.size() == inputs.size(), "Must have as many inputs as modules");
if (devices) {
TORCH_CHECK(
modules.size() == devices->size(),
"Must have as many devices as modules");
}
std::vector<Tensor> outputs(modules.size());
std::mutex mutex;
// std::exception_ptr can be passed between threads:
// > An instance of std::exception_ptr may be passed to another function,
// > possibly on another thread, where the exception may be rethrown [...].
// https://en.cppreference.com/w/cpp/error/exception_ptr
std::exception_ptr exception;
at::parallel_for(
/*begin=*/0,
/*end=*/modules.size(),
/*grain_size=*/1,
[&modules, &inputs, &devices, &outputs, &mutex, &exception](
int64_t index, int64_t stop) {
for (; index < stop; ++index) {
try {
auto output = modules[index]->forward(inputs[index]);
output =
output.to(devices ? (*devices)[index] : inputs[index].device());
std::lock_guard<std::mutex> lock(mutex);
outputs[index] = output;
} catch (...) {
std::lock_guard<std::mutex> lock(mutex);
if (!exception) {
exception = std::current_exception();
}
}
}
});
if (exception) {
std::rethrow_exception(exception);
}
return outputs;
}
/// Evaluates `module(input)` in parallel across the given `devices`. If
/// `devices` is not supplied, the invocation is parallelized across all
/// available CUDA devices. If `output_device` is supplied, the final, combined
/// tensor will be placed on this device. If not, it defaults to the first
/// device in `devices`.
///
/// In detail, this method performs the following four distinct steps:
/// 1. *Scatter* the input to the given devices,
/// 2. *Replicate* (deep clone) the model on each device,
/// 3. *Evaluate* each module with its input on its device,
/// 4. *Gather* the outputs of each replica into a single output tensor, located
/// on the `output_device`.
template <typename ModuleType>
Tensor data_parallel(
ModuleType module,
Tensor input,
std::optional<std::vector<Device>> devices = std::nullopt,
std::optional<Device> output_device = std::nullopt,
int64_t dim = 0) {
if (!devices) {
const auto device_count = torch::cuda::device_count();
TORCH_CHECK(
device_count > 0, "Expected at least one CUDA device to be available");
devices = std::vector<Device>();
devices->reserve(device_count);
for (const auto index : c10::irange(device_count)) {
devices->emplace_back(kCUDA, static_cast<torch::DeviceIndex>(index));
}
}
if (!output_device) {
output_device = devices->front();
}
if (devices->size() == 1) {
module->to(devices->front());
input = input.to(devices->front());
return module->forward(std::move(input)).to(*output_device);
}
autograd::Scatter scatter(*devices, /*chunk_sizes=*/std::nullopt, dim);
auto scattered_inputs = fmap<Tensor>(scatter.apply({std::move(input)}));
// Input tensor might not be big enough to scale across all available devices
if (scattered_inputs.size() < devices->size()) {
devices->resize(
scattered_inputs.size(),
Device(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES));
}
auto replicas = replicate(module, *devices);
auto outputs = parallel_apply(replicas, scattered_inputs, *devices);
return autograd::Gather(*output_device, dim)
.apply(fmap<autograd::Variable>(std::move(outputs)))
.front();
}
} // namespace parallel
} // namespace torch::nn
```
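A hedged sketch of the scatter/replicate/apply/gather flow described above; it exits early when no CUDA device is present, since the call itself requires at least one:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  if (!torch::cuda::is_available()) {
    std::cout << "no CUDA device; skipping\n";
    return 0;
  }
  torch::nn::Linear linear(8, 4);
  auto input = torch::randn({16, 8});
  // Scatters the batch along dim 0 across all visible CUDA devices,
  // replicates `linear`, runs forward on each shard, then gathers.
  auto output = torch::nn::parallel::data_parallel(linear, input);
  std::cout << output.sizes() << '\n';  // [16, 4] on the first device
}
```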
|
=================================================================================================================================================
SOURCE CODE FILE: pimpl-inl.h
LINES: 1
SIZE: 3.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\pimpl-inl.h
ENCODING: utf-8
```h
// This class exists only to do SFINAE on abstract types `T` that are really
// `ModuleHolder<ModuleType>`, because there's no good way to say that `T` is a
// `ModuleHolder` over some unknown type `ModuleType`. With this, you can do
// `enable_if_t<is_base_of_v<ModuleHolderIndicator, T>>`.
struct ModuleHolderIndicator {};
// A type trait that is true for types that are `ModuleHolder`s.
template <typename T>
using is_module_holder =
std::is_base_of<ModuleHolderIndicator, std::decay_t<T>>;
template <typename T>
using disable_if_module_holder_t =
std::enable_if_t<!is_module_holder<T>::value>;
// A collection of templates that answer the question whether a type `T` is a
// `ModuleHolder`, and if so whether its contained type is of type `C`. This is
// tricky because it is hard to short circuit in template metaprogramming. A
// naive and incorrect solution to this problem would be something like
// `disable_if<is_module_holder<T>::value && typename T::ContainedType == C>`.
// This would disable all types that are not `ModuleHolder`s, because even
// though the `is_module_holder<T>::value` may be `false` for such types the
// `T::ContainedType` access would be ill-formed and thus fail the whole
// expression by the rules of SFINAE. Instead we have to use template
// specialization to statically branch on the first condition
// (`is_module_holder<T>`) and are only then allowed to query
// `T::ContainedType` in the branch for which the condition was true.
// Base template.
template <bool is_module_holder_value, typename T, typename C>
struct is_module_holder_of_impl;
// False branch. `T` is not a `ModuleHolder` and thus not a `ModuleHolder` with
// contained type `C`.
template <typename T, typename C>
struct is_module_holder_of_impl<false, T, C> : std::false_type {};
// True branch. `T` is a `ModuleHolder` and thus we can legit access its
// `ContainedType` and compare it against `C`.
template <typename T, typename C>
struct is_module_holder_of_impl<true, T, C>
: std::is_same<typename T::ContainedType, C> {};
// Helper template.
template <typename T, typename C>
struct is_module_holder_of : is_module_holder_of_impl<
is_module_holder<T>::value,
std::decay_t<T>,
std::decay_t<C>> {};
// A collection of templates that allow deducing the return type of the
// `forward()` method, but only if a module actually has a `forward()` method,
// and otherwise deduces to the type `void`.
template <bool has_forward_value, typename C, typename... Args>
struct return_type_of_forward_impl;
template <typename C, typename... Args>
struct return_type_of_forward_impl<true, C, Args...> {
using type = decltype(::std::declval<C>().forward(::std::declval<Args>()...));
};
template <typename C, typename... Args>
struct return_type_of_forward_impl<false, C, Args...> {
using type = void;
};
template <typename C, typename... Args>
using return_type_of_forward = return_type_of_forward_impl<
torch::detail::has_forward<C>::value,
C,
Args...>;
template <typename C, typename... Args>
using return_type_of_forward_t =
typename return_type_of_forward<C, Args...>::type;
```
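These traits end up in `torch::detail` (the file is included inside that namespace by pimpl.h); a hedged compile-time sketch of what they report for `Linear`:

```cpp
#include <torch/torch.h>
#include <type_traits>

// ModuleHolder subclasses are detected; the raw Impl types are not.
static_assert(torch::detail::is_module_holder<torch::nn::Linear>::value);
static_assert(!torch::detail::is_module_holder<torch::nn::LinearImpl>::value);
// A Linear holder contains a LinearImpl.
static_assert(torch::detail::is_module_holder_of<
              torch::nn::Linear,
              torch::nn::LinearImpl>::value);
// LinearImpl::forward(const Tensor&) returns a Tensor.
static_assert(std::is_same_v<
              torch::detail::return_type_of_forward_t<
                  torch::nn::LinearImpl,
                  torch::Tensor>,
              torch::Tensor>);

int main() {}
```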
|
=============================================================================================================================================
SOURCE CODE FILE: pimpl.h
LINES: 1
SIZE: 6.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\pimpl.h
ENCODING: utf-8
```h
#pragma once
#include <torch/arg.h>
#include <torch/detail/static.h>
#include <torch/serialize/archive.h>
#include <torch/types.h>
#include <torch/csrc/utils/variadic.h>
#include <memory>
#include <type_traits>
#include <utility>
namespace torch {
namespace detail {
// Dump all the template metaprogramming in this file.
#include <torch/csrc/api/include/torch/nn/pimpl-inl.h>
} // namespace detail
namespace nn {
/// A `ModuleHolder` is essentially a wrapper around `std::shared_ptr<M>` where
/// `M` is an `nn::Module` subclass, with convenient constructors defined for
/// the kind of constructions we want to allow for our modules.
template <typename Contained>
class ModuleHolder : torch::detail::ModuleHolderIndicator {
protected:
/// The module pointer this class wraps.
/// NOTE: Must be placed at the top of the class so that we can use it with
/// trailing return types below.
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::shared_ptr<Contained> impl_;
public:
using ContainedType = Contained;
/// Default constructs the contained module if it has a default constructor,
/// else produces a static error.
///
/// NOTE: This relies on the rule that member functions of class templates
/// (constructors included) are only instantiated when they are actually
/// used.
ModuleHolder() : impl_(default_construct()) {
static_assert(
std::is_default_constructible_v<Contained>,
"You are trying to default construct a module which has "
"no default constructor. Use = nullptr to give it the empty state "
"(e.g. `Linear linear = nullptr;` instead of `Linear linear;`).");
}
/// Constructs the `ModuleHolder` with an empty contained value. Access to
/// the underlying module is not permitted and will throw an exception, until
/// a value is assigned.
/* implicit */ ModuleHolder(std::nullptr_t) : impl_(nullptr) {}
/// Constructs the `ModuleHolder` with a contained module, forwarding all
/// arguments to its constructor.
template <
typename Head,
typename... Tail,
typename = std::enable_if_t<
!(torch::detail::is_module_holder_of<Head, ContainedType>::value &&
(sizeof...(Tail) == 0))>>
explicit ModuleHolder(Head&& head, Tail&&... tail)
: impl_(new Contained(
std::forward<Head>(head),
std::forward<Tail>(tail)...)) {}
/// Constructs the `ModuleHolder` from a pointer to the contained type.
/// Example: `Linear(std::make_shared<LinearImpl>(...))`.
/* implicit */ ModuleHolder(std::shared_ptr<Contained> module)
: impl_(std::move(module)) {}
/// Returns true if the `ModuleHolder` contains a module, or false if it is
/// `nullptr`.
explicit operator bool() const noexcept {
return !is_empty();
}
/// Forwards to the contained module.
Contained* operator->() {
return get();
}
/// Forwards to the contained module.
const Contained* operator->() const {
return get();
}
/// Returns a reference to the contained module.
Contained& operator*() {
return *get();
}
/// Returns a const reference to the contained module.
const Contained& operator*() const {
return *get();
}
/// Returns a shared pointer to the underlying module.
const std::shared_ptr<Contained>& ptr() const {
TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder");
return impl_;
}
/// Returns a pointer to the underlying module.
Contained* get() {
TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder");
return impl_.get();
}
/// Returns a const pointer to the underlying module.
const Contained* get() const {
TORCH_CHECK(!is_empty(), "Accessing empty ModuleHolder");
return impl_.get();
}
/// Calls the `forward()` method of the contained module.
template <typename... Args>
auto operator()(Args&&... args)
-> torch::detail::return_type_of_forward_t<Contained, Args...> {
// This will not compile if the module does not have a `forward()` method
// (as expected).
// NOTE: `std::forward` is qualified to prevent VS2017 emitting
// error C2872: 'std': ambiguous symbol
return impl_->forward(::std::forward<Args>(args)...);
}
/// Forwards to the subscript operator of the contained module.
/// NOTE: std::forward is qualified to prevent VS2017 emitting
/// error C2872: 'std': ambiguous symbol
template <typename Arg>
decltype(auto) operator[](Arg&& arg) {
return (*impl_)[::std::forward<Arg>(arg)];
}
/// Returns true if the `ModuleHolder` does not contain a module.
bool is_empty() const noexcept {
return impl_ == nullptr;
}
private:
template <typename T = Contained>
std::shared_ptr<Contained> default_construct() {
if constexpr (std::is_default_constructible_v<T>) {
return std::make_shared<Contained>();
} else {
return nullptr;
}
}
};
/// Pretty prints the given `Module` into the `ostream`.
template <typename ModuleType>
std::ostream& operator<<(
std::ostream& stream,
const nn::ModuleHolder<ModuleType>& module) {
return stream << *module;
}
/// Serializes a `ModuleHolder` into an `OutputArchive`.
template <typename ModuleType>
serialize::OutputArchive& operator<<(
serialize::OutputArchive& archive,
const nn::ModuleHolder<ModuleType>& module) {
return archive << module.ptr();
}
/// Deserializes a `ModuleHolder` from an `InputArchive`.
template <typename ModuleType>
serialize::InputArchive& operator>>(
serialize::InputArchive& archive,
nn::ModuleHolder<ModuleType>& module) {
return archive >> module.ptr();
}
} // namespace nn
} // namespace torch
// Workaround for CUDA 10.2 and below not allowing attribute unused on
// using declarations.
#ifdef __CUDACC__
#define TORCH_UNUSED_EXCEPT_CUDA
#else
#define TORCH_UNUSED_EXCEPT_CUDA [[maybe_unused]]
#endif
/// Defines a class `Name` which inherits from `nn::ModuleHolder` to provide a
/// wrapper over a `std::shared_ptr<ImplType>`.
/// `Impl` is a type alias for `ImplType` which provides a way to call static
/// methods of `ImplType`.
#define TORCH_MODULE_IMPL(Name, ImplType) \
class Name : public torch::nn::ModuleHolder<ImplType> { /* NOLINT */ \
public: \
using torch::nn::ModuleHolder<ImplType>::ModuleHolder; \
using Impl TORCH_UNUSED_EXCEPT_CUDA = ImplType; \
}
/// Like `TORCH_MODULE_IMPL`, but defaults the `ImplType` name to `<Name>Impl`.
#define TORCH_MODULE(Name) TORCH_MODULE_IMPL(Name, Name##Impl)
```
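Usage sketch (not part of the header above): how `TORCH_MODULE` and the holder API are typically used. A minimal sketch assuming a linked LibTorch build; `MyNetImpl`/`MyNet` are illustrative names, not library types.
```cpp
#include <torch/torch.h>

#include <iostream>

// The Impl class carries the parameters and the forward() logic.
struct MyNetImpl : torch::nn::Module {
  MyNetImpl(int64_t in, int64_t out) {
    fc = register_module("fc", torch::nn::Linear(in, out));
  }
  torch::Tensor forward(const torch::Tensor& x) {
    return fc(x);
  }
  torch::nn::Linear fc{nullptr};
};
// Defines `MyNet`, a ModuleHolder wrapping std::shared_ptr<MyNetImpl>.
TORCH_MODULE(MyNet);

int main() {
  MyNet net(4, 2);        // arguments are forwarded to MyNetImpl's ctor
  MyNet empty = nullptr;  // empty holder; dereferencing it would throw
  torch::Tensor y = net(torch::randn({8, 4}));  // operator() calls forward()
  std::cout << net << '\n';  // pretty-prints via the operator<< above
  return 0;
}
```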
|
=============================================================================================================================================
SOURCE CODE FILE: utils.h
LINES: 1
SIZE: 0.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\utils.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/utils/clip_grad.h>
#include <torch/nn/utils/convert_parameters.h>
#include <torch/nn/utils/rnn.h>
```
|
=======================================================================================================================================================
SOURCE CODE FILE: clip_grad.h
LINES: 1
SIZE: 4.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\utils\clip_grad.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/types.h>
#include <utility>
#include <vector>
namespace torch::nn::utils {
// Clips gradient norm of a vector of Tensors.
// See
// https://pytorch.org/docs/stable/nn.html?highlight=clip_grad_norm#torch.nn.utils.clip_grad_norm_
// for more details about this module.
//
// Difference from the Python version: even when skipping the finiteness
// checks (error_if_nonfinite = false), this function will introduce a
// device <=> CPU synchronization (for devices where that makes sense!) in
// order to return a CPU-side `double`. This C++ version therefore cannot be
// run fully asynchronously w.r.t. the device of the gradients.
inline double clip_grad_norm_(
const std::vector<Tensor>& parameters,
double max_norm,
double norm_type = 2.0,
bool error_if_nonfinite = false) {
std::vector<Tensor> params_with_grad;
for (const auto& param : parameters) {
auto& grad = param.grad();
if (grad.defined()) {
params_with_grad.push_back(param);
}
}
if (params_with_grad.empty()) {
return 0.0;
}
Tensor total_norm_tensor;
if (norm_type == std::numeric_limits<double>::infinity()) {
std::vector<Tensor> norms;
norms.reserve(params_with_grad.size());
for (const auto& param : params_with_grad) {
norms.emplace_back(param.grad().data().abs().max());
}
total_norm_tensor =
(norms.size() == 1) ? norms[0] : torch::max(torch::stack(norms));
} else if (norm_type == 0) {
total_norm_tensor =
torch::full({}, static_cast<double>(params_with_grad.size()));
} else {
std::vector<Tensor> norms;
norms.reserve(params_with_grad.size());
for (const auto& param : params_with_grad) {
norms.emplace_back(param.grad().data().norm(norm_type));
}
total_norm_tensor =
(norms.size() == 1) ? norms[0] : torch::stack(norms).norm(norm_type);
}
// When possible (i.e. when skipping the finiteness check), we avoid
// synchronizing the CPU and the gradients' device until the very end to
// preserve async execution on the device. When checking for finiteness, this
// optional ensures we only sync once.
std::optional<double> total_norm = std::nullopt;
if (error_if_nonfinite) {
total_norm = total_norm_tensor.item().toDouble();
TORCH_CHECK(
std::isfinite(*total_norm),
"The total norm of order ",
norm_type,
" for gradients from `parameters` ",
"is non-finite, so it cannot be clipped. To disable this error and scale ",
"the gradients with the non-finite norm anyway, set ",
"`error_if_nonfinite=false`");
}
auto clip_coef = max_norm / (total_norm_tensor + 1e-6);
auto clip_coef_clamped =
torch::clamp(clip_coef, std::nullopt /* min */, 1.0 /* max */);
for (auto& param : params_with_grad) {
param.grad().data().mul_(clip_coef_clamped);
}
if (!total_norm.has_value()) {
total_norm = total_norm_tensor.item().toDouble();
}
return *total_norm;
}
// A wrapper around clip_grad_norm_ that allows us to call the function with a
// braced-init-list of Tensors.
inline double clip_grad_norm_(
std::initializer_list<Tensor> parameters,
double max_norm,
double norm_type = 2.0,
bool error_if_nonfinite = false) {
return clip_grad_norm_(
std::vector<Tensor>(parameters), max_norm, norm_type, error_if_nonfinite);
}
// A wrapper around clip_grad_norm_ that allows us to call the function with a
// single Tensor.
inline double clip_grad_norm_(
Tensor parameter,
double max_norm,
double norm_type = 2.0,
bool error_if_nonfinite = false) {
std::vector<Tensor> params = {std::move(parameter)};
return clip_grad_norm_(params, max_norm, norm_type, error_if_nonfinite);
}
// Clips gradient of an iterable of parameters at specified value.
// Gradients are modified in-place.
// See https://pytorch.org/docs/stable/nn.html#clip-grad-value
// for more details about this module.
inline void clip_grad_value_(
const std::vector<Tensor>& parameters,
double clip_value) {
for (const auto& param : parameters) {
if (param.grad().defined()) {
param.grad().data().clamp_(-clip_value, clip_value);
}
}
}
// A wrapper around clip_grad_value_ that allows us to call the function with a
// braced-init-list of Tensors.
inline void clip_grad_value_(
std::initializer_list<Tensor> parameters,
double clip_value) {
clip_grad_value_(std::vector<Tensor>(parameters), clip_value);
}
// A wrapper around clip_grad_value_ that allows us to call the function with a
// single Tensor.
inline void clip_grad_value_(Tensor parameter, double clip_value) {
std::vector<Tensor> params = {std::move(parameter)};
clip_grad_value_(params, clip_value);
}
} // namespace torch::nn::utils
```
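Usage sketch (not part of the header above): where the clipping helpers sit in a training step. A minimal sketch assuming a linked LibTorch build; the model, data, and optimizer are placeholders.
```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear model(10, 1);
  torch::optim::SGD optimizer(
      model->parameters(), torch::optim::SGDOptions(/*lr=*/0.1));
  auto x = torch::randn({16, 10});
  auto y = torch::randn({16, 1});

  optimizer.zero_grad();
  torch::mse_loss(model(x), y).backward();
  // Clip the global L2 norm of all gradients to 1.0 before the update.
  double total_norm = torch::nn::utils::clip_grad_norm_(
      model->parameters(), /*max_norm=*/1.0);
  // Alternatively, clamp each gradient element into [-0.5, 0.5]:
  // torch::nn::utils::clip_grad_value_(model->parameters(), 0.5);
  optimizer.step();
  (void)total_norm;  // the pre-clipping norm, e.g. for logging
  return 0;
}
```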
|
================================================================================================================================================================
SOURCE CODE FILE: convert_parameters.h
LINES: 1
SIZE: 2.40 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\utils\convert_parameters.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/types.h>
namespace torch::nn::utils {
// This helper function checks whether the parameters are located on the
// same device. Currently, the conversion between model parameters and a
// single vector form is not supported for multiple allocations,
// e.g. parameters on different GPUs, or a mixture of CPU/GPU.
inline std::optional<int64_t> _check_param_device(
const torch::Tensor& param,
std::optional<int64_t> old_param_device) {
// Meet the first parameter
if (old_param_device == std::nullopt) {
old_param_device = param.is_cuda() ? param.get_device() : -1;
} else {
bool warn = false;
if (param.is_cuda()) { // Check if in same GPU
warn = (param.get_device() != old_param_device);
} else { // Check if in CPU
warn = (old_param_device != -1);
}
if (warn) {
TORCH_CHECK(
false,
"Found two parameters on different devices, ",
"this is currently not supported.");
}
}
return old_param_device;
}
// Convert parameters to one vector
inline torch::Tensor parameters_to_vector(
const std::vector<torch::Tensor>& parameters) {
std::optional<int64_t> param_device;
std::vector<torch::Tensor> vec;
vec.reserve(parameters.size());
for (const torch::Tensor& param : parameters) {
// Ensure the parameters are located in the same device
param_device = _check_param_device(param, param_device);
vec.push_back(param.view(-1));
}
return torch::cat(vec);
}
// Convert one vector to the parameters
inline void vector_to_parameters(
const torch::Tensor& vec,
const std::vector<torch::Tensor>& parameters) {
// Flag for the device where the parameter is located
std::optional<int64_t> param_device;
// Pointer for slicing the vector for each parameter
int64_t pointer = 0;
for (const torch::Tensor& param : parameters) {
// Ensure the parameters are located in the same device
param_device = _check_param_device(param, param_device);
// The length of the parameter
auto num_param = param.numel();
// Slice the vector, reshape it, and replace the old data of the parameter
param.set_data(
vec.slice(0, pointer, pointer + num_param).view_as(param).data());
// Increment the pointer
pointer += num_param;
}
}
} // namespace torch::nn::utils
```
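Usage sketch (not part of the header above): a round trip through the flat-vector form, useful for algorithms that operate on a single flattened parameter view. A minimal sketch assuming a linked LibTorch build.
```cpp
#include <torch/torch.h>

int main() {
  torch::NoGradGuard no_grad;  // parameter surgery, not a differentiable op
  torch::nn::Linear lin(3, 2);
  // Flatten all parameters into one 1-D tensor; they must share a device
  // (see _check_param_device above).
  torch::Tensor flat =
      torch::nn::utils::parameters_to_vector(lin->parameters());
  // flat.numel() == 3 * 2 + 2 (weight plus bias).
  flat += 0.01;  // e.g. a perturbation computed on the flat view
  // Slice the vector back into the individual parameter tensors.
  torch::nn::utils::vector_to_parameters(flat, lin->parameters());
  return 0;
}
```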
|
=================================================================================================================================================
SOURCE CODE FILE: rnn.h
LINES: 1
SIZE: 12.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\nn\utils\rnn.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/irange.h>
#include <torch/types.h>
#include <utility>
namespace torch::nn::utils::rnn {
inline Tensor invert_permutation(const Tensor& permutation) {
if (!permutation.defined()) {
return torch::Tensor();
}
Tensor output =
torch::empty_like(permutation, torch::MemoryFormat::Contiguous);
output.scatter_(
0,
permutation,
torch::arange(0, permutation.numel(), permutation.device()));
return output;
}
/// Holds the data and list of `batch_sizes` of a packed sequence.
///
/// All RNN modules accept packed sequences as inputs.
///
/// Note:
/// Instances of this class should never be created manually. They are meant
/// to be instantiated by functions like `pack_padded_sequence`.
///
/// Batch sizes represent the number of elements at each sequence step in
/// the batch, not the varying sequence lengths passed to
/// `pack_padded_sequence`. For instance, given data ``abc`` and ``x``
/// the :class:`PackedSequence` would contain data ``axbc`` with
/// ``batch_sizes=[2,1,1]``.
///
/// Attributes:
/// data (Tensor): Tensor containing packed sequence
/// batch_sizes (Tensor): Tensor of integers holding
/// information about the batch size at each sequence step
/// sorted_indices (Tensor, optional): Tensor of integers holding how this
/// :class:`PackedSequence` is constructed from sequences.
///     unsorted_indices (Tensor, optional): Tensor of integers holding how
///         to recover the original sequences in the correct order.
///
/// .. note::
///    `data` can be on an arbitrary device and of an arbitrary dtype.
/// `sorted_indices` and `unsorted_indices` must be ``torch::kInt64``
/// tensors on the same device as `data`.
///
/// However, `batch_sizes` should always be a CPU ``torch::kInt64`` tensor.
///
///    This invariant is maintained throughout the `PackedSequence` class and
///    by all functions that construct a `PackedSequence` in libtorch
///    (i.e., they only pass in tensors conforming to this constraint).
class PackedSequence {
public:
explicit PackedSequence(
Tensor data,
Tensor batch_sizes,
Tensor sorted_indices = {},
Tensor unsorted_indices = {}) {
// NB: if unsorted_indices is provided, it should be the inverse permutation
// to sorted_indices. Don't assert it here because the PackedSequence ctor
// should only be used internally.
if (!unsorted_indices.defined()) {
unsorted_indices = invert_permutation(sorted_indices);
}
TORCH_CHECK(
batch_sizes.device().type() == kCPU,
"batch_sizes should always be on CPU. "
"Instances of PackedSequence should never be created manually. "
"They should be instantiated by functions like pack_sequence "
"and pack_padded_sequences in nn::utils::rnn. "
"https://pytorch.org/docs/stable/nn.html#torch.nn.utils.rnn.pack_sequence");
data_ = std::move(data);
batch_sizes_ = std::move(batch_sizes);
sorted_indices_ = std::move(sorted_indices);
unsorted_indices_ = std::move(unsorted_indices);
}
const Tensor& data() const {
return data_;
}
const Tensor& batch_sizes() const {
return batch_sizes_;
}
const Tensor& sorted_indices() const {
return sorted_indices_;
}
const Tensor& unsorted_indices() const {
return unsorted_indices_;
}
PackedSequence pin_memory() const {
// Why not convert `batch_sizes`?
// See NOTE [ device and dtype of a PackedSequence ]
return PackedSequence(
data_.pin_memory(),
batch_sizes_,
sorted_indices_.defined() ? sorted_indices_.pin_memory() : Tensor(),
unsorted_indices_.defined() ? unsorted_indices_.pin_memory()
: Tensor());
}
PackedSequence to(TensorOptions options) const {
// Performs dtype and/or device conversion on `data_`.
//
// If the ``data_`` Tensor already has the correct `torch::Dtype`
// and `torch::Device`, then ``self`` is returned.
// Otherwise, returns a copy with the desired configuration.
// Why not convert `batch_sizes`?
// See NOTE [ device and dtype of a PackedSequence ]
Tensor data = data_.to(options);
if (data.is_same(data_)) {
return *this;
} else {
// Does not forward device or dtype args, device is set from data.device()
Tensor sorted_indices = sorted_indices_.defined()
? sorted_indices_.to(
options.device(data.device()).dtype(sorted_indices_.dtype()))
: Tensor();
Tensor unsorted_indices = unsorted_indices_.defined()
? unsorted_indices_.to(
options.device(data.device()).dtype(unsorted_indices_.dtype()))
: Tensor();
return PackedSequence(
std::move(data),
batch_sizes_,
std::move(sorted_indices),
std::move(unsorted_indices));
}
}
PackedSequence cuda() const {
return to(kCUDA);
}
PackedSequence cpu() const {
return to(kCPU);
}
/// Returns true if `data_` is stored on a GPU.
bool is_cuda() const {
return data_.is_cuda();
}
/// Returns true if `data_` is stored in pinned memory.
bool is_pinned() const {
return data_.is_pinned();
}
private:
Tensor data_;
Tensor batch_sizes_;
Tensor sorted_indices_;
Tensor unsorted_indices_;
};
/// Packs a Tensor containing padded sequences of variable length.
///
/// `input` can be of size ``T x B x *`` where `T` is the length of the
/// longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and
/// ``*`` is any number of dimensions (including 0). If ``batch_first`` is
/// ``true``, ``B x T x *`` `input` is expected.
///
/// For unsorted sequences, use `enforce_sorted = false`. If `enforce_sorted` is
/// ``true``, the sequences should be sorted by length in a decreasing order,
/// i.e.
/// ``input[:,0]`` should be the longest sequence, and ``input[:,B-1]`` the
/// shortest one.
///
/// Note:
/// This function accepts any input that has at least two dimensions. You
/// can apply it to pack the labels, and use the output of the RNN with
/// them to compute the loss directly. A Tensor can be retrieved from
/// a `PackedSequence` object by calling its ``.data()`` function.
///
/// Arguments:
/// input (Tensor): padded batch of variable length sequences.
///   lengths (Tensor): list of sequence lengths of each batch element.
///   batch_first (bool, optional): if ``true``, the input is expected in
///       ``B x T x *`` format. Default: ``false``.
/// enforce_sorted (bool, optional): if ``true``, the input is expected to
/// contain sequences sorted by length in a decreasing order. If
/// ``false``, this condition is not checked. Default: ``true``.
///
/// Returns:
/// a `PackedSequence` object
inline PackedSequence pack_padded_sequence(
Tensor input,
Tensor lengths,
bool batch_first = false,
bool enforce_sorted = true) {
lengths = lengths.to(kInt64);
Tensor sorted_indices;
if (enforce_sorted) {
sorted_indices = Tensor();
} else {
std::tie(lengths, sorted_indices) =
torch::sort(lengths, /*dim=*/-1, /*descending=*/true);
sorted_indices = sorted_indices.to(input.device());
int64_t batch_dim = batch_first ? 0 : 1;
input = input.index_select(batch_dim, sorted_indices);
}
auto [data, batch_sizes] =
torch::_pack_padded_sequence(input, lengths, batch_first);
return PackedSequence(
std::move(data), std::move(batch_sizes), std::move(sorted_indices), {});
}
/// Pads a packed batch of variable length sequences.
///
/// It is an inverse operation to `pack_padded_sequence`.
///
/// The returned Tensor's data will be of size ``T x B x *``, where `T` is the
/// length of the longest sequence and `B` is the batch size. If ``batch_first``
/// is true, the data will be transposed into ``B x T x *`` format.
///
/// Batch elements will be ordered decreasingly by their length.
///
/// Arguments:
/// sequence (PackedSequence): batch to pad
///   batch_first (bool, optional): if ``true``, the output will be in
///       ``B x T x *`` format.
/// padding_value (double, optional): values for padded elements.
///   total_length (int64_t, optional): if specified, the output will be
///       padded to have length `total_length`. This method will throw an
///       error if `total_length` is less than the max sequence length in
///       `sequence`.
///
/// Returns:
/// Tuple of Tensor containing the padded sequence, and a Tensor
/// containing the list of lengths of each sequence in the batch.
inline std::tuple<Tensor, Tensor> pad_packed_sequence(
const PackedSequence& sequence,
bool batch_first = false,
double padding_value = 0.0,
std::optional<int64_t> total_length = std::nullopt) {
int64_t max_seq_length = sequence.batch_sizes().size(0);
if (total_length.has_value()) {
int64_t total_length_val = total_length.value();
TORCH_CHECK(
total_length_val >= max_seq_length,
"Expected total_length to be at least the length "
"of the longest sequence in input, but got "
"total_length=",
total_length_val,
" and max sequence length being ",
max_seq_length);
max_seq_length = total_length_val;
}
auto [padded_output, lengths] = torch::_pad_packed_sequence(
sequence.data(),
sequence.batch_sizes(),
batch_first,
padding_value,
max_seq_length);
const Tensor& unsorted_indices = sequence.unsorted_indices();
if (unsorted_indices.defined()) {
int64_t batch_dim = batch_first ? 0 : 1;
return std::make_tuple(
padded_output.index_select(batch_dim, unsorted_indices),
lengths.index({unsorted_indices.cpu()}));
}
return std::make_tuple(padded_output, lengths);
}
/// Pads a list of variable length Tensors with ``padding_value``.
///
/// ``pad_sequence`` stacks a list of Tensors along a new dimension,
/// and pads them to equal length. For example, if the input is a list of
/// sequences with size ``L x *``, the output is of size ``T x B x *`` if
/// batch_first is false, and ``B x T x *`` otherwise.
///
/// `B` is the batch size, equal to the number of elements in ``sequences``.
/// `T` is the length of the longest sequence.
/// `L` is the length of a given sequence.
/// `*` is any number of trailing dimensions, including none.
///
/// Note:
/// This function returns a Tensor of size ``T x B x *`` or ``B x T x *``
/// where `T` is the length of the longest sequence. This function assumes
/// trailing dimensions and type of all the Tensors in sequences are the same.
///
/// Arguments:
/// sequences (torch::ArrayRef<Tensor>): list of variable length sequences.
///   batch_first (bool, optional): output will be in ``B x T x *`` if true,
///       or in ``T x B x *`` otherwise.
/// padding_value (double, optional): value for padded elements. Default: 0.
/// padding_side (str, optional): the side to pad the sequences on. Default:
/// "right".
///
/// Returns:
/// Tensor of size ``T x B x *`` if `batch_first` is ``false``.
/// Tensor of size ``B x T x *`` otherwise
inline Tensor pad_sequence(
ArrayRef<Tensor> sequences,
bool batch_first = false,
double padding_value = 0,
std::string_view padding_side = "right") {
return at::pad_sequence(sequences, batch_first, padding_value, padding_side);
}
/// Packs a list of variable length Tensors
///
/// ``sequences`` should be a list of Tensors of size ``L x *``, where `L` is
/// the length of a sequence and `*` is any number of trailing dimensions,
/// including zero.
///
/// For unsorted sequences, use `enforce_sorted = false`. If ``enforce_sorted``
/// is ``true``, the sequences should be sorted in the order of decreasing
/// length.
///
///
/// Arguments:
///   sequences (torch::ArrayRef<Tensor>): A list of sequences of decreasing
///       length.
///   enforce_sorted (bool, optional): if ``true``, checks that the input
///       contains sequences sorted by length in a decreasing order. If
///       ``false``, this condition is not checked. Default: ``true``.
///
/// Returns:
/// a `PackedSequence` object
inline PackedSequence pack_sequence(
ArrayRef<Tensor> sequences,
bool enforce_sorted = true) {
Tensor lengths = torch::empty({(int64_t)sequences.size()}, kInt64);
for (const auto i : c10::irange(sequences.size())) {
lengths[static_cast<int64_t>(i)] = sequences[i].size(0);
}
return pack_padded_sequence(
at::pad_sequence(sequences),
std::move(lengths),
/*batch_first=*/false,
/*enforce_sorted=*/enforce_sorted);
}
} // namespace torch::nn::utils::rnn
```
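Usage sketch (not part of the header above): pack three variable-length sequences, run them through a GRU, and unpack. A minimal sketch assuming a linked LibTorch build; `forward_with_packed_input` is the RNN-family overload that consumes a `PackedSequence`.
```cpp
#include <torch/torch.h>

int main() {
  using namespace torch::nn::utils::rnn;
  // Three sequences of lengths 5, 3 and 2; each step is a 4-dim feature.
  // They are already sorted by decreasing length, so the default
  // enforce_sorted=true is fine.
  std::vector<torch::Tensor> seqs = {
      torch::randn({5, 4}), torch::randn({3, 4}), torch::randn({2, 4})};
  PackedSequence packed = pack_sequence(seqs);

  torch::nn::GRU gru(
      torch::nn::GRUOptions(/*input_size=*/4, /*hidden_size=*/8));
  auto [packed_out, h_n] = gru->forward_with_packed_input(packed);

  // Back to a padded (T x B x 8) tensor plus the per-sequence lengths.
  auto [padded, lengths] = pad_packed_sequence(packed_out);
  (void)h_n;
  (void)padded;
  (void)lengths;
  return 0;
}
```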
|
==========================================================================================================================================
SOURCE CODE FILE: optim.h
LINES: 1
SIZE: 0.40 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim.h
ENCODING: utf-8
```h
#pragma once
#include <torch/optim/adagrad.h>
#include <torch/optim/adam.h>
#include <torch/optim/adamw.h>
#include <torch/optim/lbfgs.h>
#include <torch/optim/optimizer.h>
#include <torch/optim/rmsprop.h>
#include <torch/optim/sgd.h>
#include <torch/optim/schedulers/lr_scheduler.h>
#include <torch/optim/schedulers/reduce_on_plateau_scheduler.h>
#include <torch/optim/schedulers/step_lr.h>
```
|
==================================================================================================================================================
SOURCE CODE FILE: adagrad.h
LINES: 1
SIZE: 3.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\adagrad.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/pimpl.h>
#include <torch/optim/optimizer.h>
#include <torch/optim/serialize.h>
#include <torch/serialize/archive.h>
#include <torch/types.h>
#include <utility>
#include <vector>
namespace torch::serialize {
class OutputArchive;
class InputArchive;
} // namespace torch::serialize
namespace torch::optim {
struct TORCH_API AdagradOptions
: public OptimizerCloneableOptions<AdagradOptions> {
AdagradOptions(double lr = 1e-2);
TORCH_ARG(double, lr) = 1e-2;
TORCH_ARG(double, lr_decay) = 0;
TORCH_ARG(double, weight_decay) = 0;
TORCH_ARG(double, initial_accumulator_value) = 0;
TORCH_ARG(double, eps) = 1e-10;
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const AdagradOptions& lhs,
const AdagradOptions& rhs);
double get_lr() const override;
void set_lr(const double lr) override;
};
struct TORCH_API AdagradParamState
: public OptimizerCloneableParamState<AdagradParamState> {
TORCH_ARG(torch::Tensor, sum);
TORCH_ARG(int64_t, step) = 0;
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const AdagradParamState& lhs,
const AdagradParamState& rhs);
};
class TORCH_API Adagrad : public Optimizer {
public:
explicit Adagrad(
const std::vector<OptimizerParamGroup>& param_groups,
AdagradOptions defaults = {})
: Optimizer(param_groups, std::make_unique<AdagradOptions>(defaults)) {
TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
TORCH_CHECK(
defaults.lr_decay() >= 0,
"Invalid lr_decay value: ",
defaults.lr_decay());
TORCH_CHECK(
defaults.weight_decay() >= 0,
"Invalid weight_decay value: ",
defaults.weight_decay());
TORCH_CHECK(
defaults.initial_accumulator_value() >= 0,
"Invalid initial_accumulator_value value: ",
defaults.initial_accumulator_value());
TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
for (const auto& group : param_groups_) {
for (const auto& p : group.params()) {
auto state = std::make_unique<AdagradParamState>();
state->step(0);
state->sum(torch::full_like(
p.data(),
defaults.initial_accumulator_value(),
at::MemoryFormat::Preserve));
state_[p.unsafeGetTensorImpl()] = std::move(state);
}
}
}
explicit Adagrad(std::vector<Tensor> params, AdagradOptions defaults = {})
: Adagrad({OptimizerParamGroup(std::move(params))}, std::move(defaults)) {
}
torch::Tensor step(LossClosure closure = nullptr) override;
void save(serialize::OutputArchive& archive) const override;
void load(serialize::InputArchive& archive) override;
private:
template <typename Self, typename Archive>
static void serialize(Self& self, Archive& archive) {
_TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(Adagrad);
}
};
} // namespace torch::optim
```
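Usage sketch (not part of the header above): constructing Adagrad with non-default options; the chainable setters are generated by TORCH_ARG. A minimal sketch assuming a linked LibTorch build.
```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear lin(10, 1);
  torch::optim::Adagrad opt(
      lin->parameters(),
      torch::optim::AdagradOptions(/*lr=*/0.05)
          .lr_decay(1e-4)
          .initial_accumulator_value(0.1));
  opt.zero_grad();
  lin(torch::randn({16, 10})).pow(2).mean().backward();
  opt.step();
  return 0;
}
```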
|
===============================================================================================================================================
SOURCE CODE FILE: adam.h
LINES: 1
SIZE: 2.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\adam.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/module.h>
#include <torch/optim/optimizer.h>
#include <torch/optim/serialize.h>
#include <utility>
#include <vector>
namespace torch::serialize {
class OutputArchive;
class InputArchive;
} // namespace torch::serialize
namespace torch::optim {
struct TORCH_API AdamOptions : public OptimizerCloneableOptions<AdamOptions> {
AdamOptions(double lr = 1e-3);
TORCH_ARG(double, lr) = 1e-3;
typedef std::tuple<double, double> betas_t;
TORCH_ARG(betas_t, betas) = std::make_tuple(0.9, 0.999);
TORCH_ARG(double, eps) = 1e-8;
TORCH_ARG(double, weight_decay) = 0;
TORCH_ARG(bool, amsgrad) = false;
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const AdamOptions& lhs,
const AdamOptions& rhs);
double get_lr() const override;
void set_lr(const double lr) override;
};
struct TORCH_API AdamParamState
: public OptimizerCloneableParamState<AdamParamState> {
TORCH_ARG(int64_t, step) = 0;
TORCH_ARG(torch::Tensor, exp_avg);
TORCH_ARG(torch::Tensor, exp_avg_sq);
TORCH_ARG(torch::Tensor, max_exp_avg_sq) = {};
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const AdamParamState& lhs,
const AdamParamState& rhs);
};
class TORCH_API Adam : public Optimizer {
public:
explicit Adam(
const std::vector<OptimizerParamGroup>& param_groups,
AdamOptions defaults = {})
: Optimizer(param_groups, std::make_unique<AdamOptions>(defaults)) {
TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
auto betas = defaults.betas();
TORCH_CHECK(
0 <= std::get<0>(betas) && std::get<0>(betas) < 1.0,
"Invalid beta parameter at index 0: ",
std::get<0>(betas));
TORCH_CHECK(
0 <= std::get<1>(betas) && std::get<1>(betas) < 1.0,
"Invalid beta parameter at index 1: ",
std::get<1>(betas));
TORCH_CHECK(
defaults.weight_decay() >= 0,
"Invalid weight_decay value: ",
defaults.weight_decay());
}
explicit Adam(std::vector<Tensor> params, AdamOptions defaults = {})
: Adam({OptimizerParamGroup(std::move(params))}, std::move(defaults)) {}
torch::Tensor step(LossClosure closure = nullptr) override;
void save(serialize::OutputArchive& archive) const override;
void load(serialize::InputArchive& archive) override;
private:
template <typename Self, typename Archive>
static void serialize(Self& self, Archive& archive) {
_TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(Adam);
}
};
} // namespace torch::optim
```
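Usage sketch (not part of the header above): `betas` is passed as a tuple and validated to lie in [0, 1). A minimal sketch assuming a linked LibTorch build.
```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear lin(10, 1);
  torch::optim::Adam opt(
      lin->parameters(),
      torch::optim::AdamOptions(/*lr=*/3e-4)
          .betas(std::make_tuple(0.9, 0.98))
          .eps(1e-9)
          .amsgrad(true));
  opt.zero_grad();
  lin(torch::randn({16, 10})).pow(2).mean().backward();
  opt.step();
  return 0;
}
```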
|
================================================================================================================================================
SOURCE CODE FILE: adamw.h
LINES: 1
SIZE: 2.92 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\adamw.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/module.h>
#include <torch/optim/optimizer.h>
#include <torch/optim/serialize.h>
#include <utility>
#include <vector>
namespace torch::serialize {
class OutputArchive;
class InputArchive;
} // namespace torch::serialize
namespace torch::optim {
struct TORCH_API AdamWOptions : public OptimizerCloneableOptions<AdamWOptions> {
AdamWOptions(double lr = 1e-3);
TORCH_ARG(double, lr) = 1e-3;
typedef std::tuple<double, double> betas_t;
TORCH_ARG(betas_t, betas) = std::make_tuple(0.9, 0.999);
TORCH_ARG(double, eps) = 1e-8;
TORCH_ARG(double, weight_decay) = 1e-2;
TORCH_ARG(bool, amsgrad) = false;
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const AdamWOptions& lhs,
const AdamWOptions& rhs);
double get_lr() const override;
void set_lr(const double lr) override;
};
struct TORCH_API AdamWParamState
: public OptimizerCloneableParamState<AdamWParamState> {
TORCH_ARG(int64_t, step) = 0;
TORCH_ARG(torch::Tensor, exp_avg);
TORCH_ARG(torch::Tensor, exp_avg_sq);
TORCH_ARG(torch::Tensor, max_exp_avg_sq) = {};
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const AdamWParamState& lhs,
const AdamWParamState& rhs);
};
class TORCH_API AdamW : public Optimizer {
public:
explicit AdamW(
const std::vector<OptimizerParamGroup>& param_groups,
AdamWOptions defaults = {})
: Optimizer(param_groups, std::make_unique<AdamWOptions>(defaults)) {
TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
auto betas = defaults.betas();
TORCH_CHECK(
0 <= std::get<0>(betas) && std::get<0>(betas) < 1.0,
"Invalid beta parameter at index 0: ",
std::get<0>(betas));
TORCH_CHECK(
0 <= std::get<1>(betas) && std::get<1>(betas) < 1.0,
"Invalid beta parameter at index 1: ",
std::get<1>(betas));
TORCH_CHECK(
defaults.weight_decay() >= 0,
"Invalid weight_decay value: ",
defaults.weight_decay());
}
explicit AdamW(std::vector<Tensor> params, AdamWOptions defaults = {})
: AdamW({OptimizerParamGroup(std::move(params))}, std::move(defaults)) {}
torch::Tensor step(LossClosure closure = nullptr) override;
void save(serialize::OutputArchive& archive) const override;
void load(serialize::InputArchive& archive) override;
private:
template <typename Self, typename Archive>
static void serialize(Self& self, Archive& archive) {
_TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(AdamW);
}
};
} // namespace torch::optim
```
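Usage sketch (not part of the header above): AdamW mirrors Adam but applies decoupled weight decay, which defaults to 1e-2 here rather than 0. A minimal sketch assuming a linked LibTorch build.
```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear lin(10, 1);
  torch::optim::AdamW opt(
      lin->parameters(),
      torch::optim::AdamWOptions(/*lr=*/1e-3).weight_decay(0.05));
  opt.zero_grad();
  lin(torch::randn({16, 10})).pow(2).mean().backward();
  opt.step();
  return 0;
}
```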
|
================================================================================================================================================
SOURCE CODE FILE: lbfgs.h
LINES: 1
SIZE: 3.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\lbfgs.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/module.h>
#include <torch/optim/optimizer.h>
#include <torch/optim/serialize.h>
#include <torch/serialize/archive.h>
#include <deque>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
namespace torch::optim {
struct TORCH_API LBFGSOptions : public OptimizerCloneableOptions<LBFGSOptions> {
LBFGSOptions(double lr = 1);
TORCH_ARG(double, lr) = 1;
TORCH_ARG(int64_t, max_iter) = 20;
TORCH_ARG(std::optional<int64_t>, max_eval) = std::nullopt;
TORCH_ARG(double, tolerance_grad) = 1e-7;
TORCH_ARG(double, tolerance_change) = 1e-9;
TORCH_ARG(int64_t, history_size) = 100;
TORCH_ARG(std::optional<std::string>, line_search_fn) = std::nullopt;
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const LBFGSOptions& lhs,
const LBFGSOptions& rhs);
double get_lr() const override;
void set_lr(const double lr) override;
};
struct TORCH_API LBFGSParamState
: public OptimizerCloneableParamState<LBFGSParamState> {
TORCH_ARG(int64_t, func_evals) = 0;
TORCH_ARG(int64_t, n_iter) = 0;
TORCH_ARG(double, t) = 0;
TORCH_ARG(double, prev_loss) = 0;
TORCH_ARG(Tensor, d) = {};
TORCH_ARG(Tensor, H_diag) = {};
TORCH_ARG(Tensor, prev_flat_grad) = {};
TORCH_ARG(std::deque<Tensor>, old_dirs);
TORCH_ARG(std::deque<Tensor>, old_stps);
TORCH_ARG(std::deque<Tensor>, ro);
TORCH_ARG(std::optional<std::vector<Tensor>>, al) = std::nullopt;
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const LBFGSParamState& lhs,
const LBFGSParamState& rhs);
};
class TORCH_API LBFGS : public Optimizer {
public:
explicit LBFGS(
const std::vector<OptimizerParamGroup>& param_groups,
LBFGSOptions defaults = {})
: Optimizer(param_groups, std::make_unique<LBFGSOptions>(defaults)) {
TORCH_CHECK(
param_groups_.size() == 1,
"LBFGS doesn't support per-parameter options (parameter groups)");
if (defaults.max_eval() == std::nullopt) {
auto max_eval_val = (defaults.max_iter() * 5) / 4;
static_cast<LBFGSOptions&>(param_groups_[0].options())
.max_eval(max_eval_val);
static_cast<LBFGSOptions&>(*defaults_).max_eval(max_eval_val);
}
_numel_cache = std::nullopt;
}
explicit LBFGS(std::vector<Tensor> params, LBFGSOptions defaults = {})
: LBFGS({OptimizerParamGroup(std::move(params))}, std::move(defaults)) {}
Tensor step(LossClosure closure) override;
void save(serialize::OutputArchive& archive) const override;
void load(serialize::InputArchive& archive) override;
private:
std::optional<int64_t> _numel_cache;
int64_t _numel();
Tensor _gather_flat_grad();
void _add_grad(const double step_size, const Tensor& update);
std::tuple<double, Tensor> _directional_evaluate(
const LossClosure& closure,
const std::vector<Tensor>& x,
double t,
const Tensor& d);
void _set_param(const std::vector<Tensor>& params_data);
std::vector<Tensor> _clone_param();
template <typename Self, typename Archive>
static void serialize(Self& self, Archive& archive) {
_TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(LBFGS);
}
};
} // namespace torch::optim
```
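Usage sketch (not part of the header above): unlike the other optimizers, `step()` takes a mandatory closure, since L-BFGS re-evaluates the objective several times per step. A minimal sketch assuming a linked LibTorch build.
```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear lin(2, 1);
  torch::optim::LBFGS opt(
      lin->parameters(),
      torch::optim::LBFGSOptions(/*lr=*/0.8)
          .max_iter(10)
          .line_search_fn("strong_wolfe"));
  auto x = torch::randn({32, 2});
  auto y = torch::randn({32, 1});
  for (int i = 0; i < 5; ++i) {
    // The closure must zero the gradients, compute the loss, backprop,
    // and return the loss tensor.
    torch::Tensor loss = opt.step([&]() -> torch::Tensor {
      opt.zero_grad();
      torch::Tensor l = torch::mse_loss(lin(x), y);
      l.backward();
      return l;
    });
    (void)loss;
  }
  return 0;
}
```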
|
====================================================================================================================================================
SOURCE CODE FILE: optimizer.h
LINES: 1
SIZE: 8.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\optimizer.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Tensor.h>
#include <c10/util/Exception.h>
#include <c10/util/flat_hash_map.h>
#include <torch/arg.h>
#include <torch/csrc/Export.h>
#include <algorithm>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <vector>
// Forward declarations confuse Doxygen
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace at {
class Tensor;
} // namespace at
namespace torch {
using at::Tensor;
namespace serialize {
class OutputArchive;
class InputArchive;
} // namespace serialize
} // namespace torch
#endif // DOXYGEN_SHOULD_SKIP_THIS
namespace torch::optim {
class TORCH_API OptimizerParamState {
public:
OptimizerParamState() = default;
OptimizerParamState(const OptimizerParamState&) = default;
OptimizerParamState& operator=(const OptimizerParamState&) = default;
OptimizerParamState(OptimizerParamState&&) noexcept = default;
OptimizerParamState& operator=(OptimizerParamState&&) noexcept = default;
virtual std::unique_ptr<OptimizerParamState> clone() const;
virtual void serialize(torch::serialize::InputArchive& archive);
virtual void serialize(torch::serialize::OutputArchive& archive) const;
virtual ~OptimizerParamState() = default;
};
template <typename Derived>
class OptimizerCloneableParamState : public OptimizerParamState {
std::unique_ptr<OptimizerParamState> clone() const override {
return std::make_unique<Derived>(static_cast<const Derived&>(*this));
}
};
class TORCH_API OptimizerOptions {
public:
OptimizerOptions() = default;
OptimizerOptions(const OptimizerOptions&) = default;
OptimizerOptions& operator=(const OptimizerOptions&) = default;
OptimizerOptions(OptimizerOptions&&) noexcept = default;
OptimizerOptions& operator=(OptimizerOptions&&) noexcept = default;
virtual std::unique_ptr<OptimizerOptions> clone() const;
virtual void serialize(torch::serialize::InputArchive& archive);
virtual void serialize(torch::serialize::OutputArchive& archive) const;
virtual ~OptimizerOptions() = default;
virtual double get_lr() const;
virtual void set_lr(const double lr);
};
template <typename Derived>
class OptimizerCloneableOptions : public OptimizerOptions {
private:
std::unique_ptr<OptimizerOptions> clone() const override {
return std::make_unique<Derived>(static_cast<const Derived&>(*this));
}
};
/// Stores the parameters of a param_group, together with a pointer to that
/// group's OptimizerOptions.
class TORCH_API OptimizerParamGroup {
public:
// NOTE: In order to store `OptimizerParamGroup` in a `std::vector`, it has to
// be copy-constructible.
OptimizerParamGroup(const OptimizerParamGroup& param_group)
: params_(param_group.params()),
options_(
param_group.has_options() ? param_group.options().clone()
: nullptr) {}
OptimizerParamGroup(OptimizerParamGroup&& param_group) = default;
OptimizerParamGroup(std::vector<Tensor> params)
: params_(std::move(params)) {}
OptimizerParamGroup(
std::vector<Tensor> params,
std::unique_ptr<OptimizerOptions> options)
: params_(std::move(params)), options_(std::move(options)) {}
OptimizerParamGroup& operator=(const OptimizerParamGroup& param_group) =
delete;
OptimizerParamGroup& operator=(OptimizerParamGroup&& param_group) noexcept =
default;
~OptimizerParamGroup() = default;
bool has_options() const;
OptimizerOptions& options();
const OptimizerOptions& options() const;
void set_options(std::unique_ptr<OptimizerOptions> options);
std::vector<Tensor>& params();
const std::vector<Tensor>& params() const;
protected:
std::vector<Tensor> params_;
std::unique_ptr<OptimizerOptions> options_;
};
class TORCH_API Optimizer {
public:
// The copy constructor is deleted, because the user should use the
// `state_dict` / `load_state_dict` API to copy an optimizer instead.
Optimizer(const Optimizer& optimizer) = delete;
Optimizer(Optimizer&& optimizer) = default;
Optimizer& operator=(const Optimizer& optimizer) = delete;
Optimizer& operator=(Optimizer&& optimizer) = default;
explicit Optimizer(
const std::vector<OptimizerParamGroup>& param_groups,
std::unique_ptr<OptimizerOptions> defaults)
: defaults_(std::move(defaults)) {
for (const auto& param_group : param_groups) {
add_param_group(param_group);
}
}
/// Constructs the `Optimizer` from a vector of parameters.
explicit Optimizer(
std::vector<Tensor> parameters,
std::unique_ptr<OptimizerOptions> defaults)
: Optimizer(
{OptimizerParamGroup(std::move(parameters))},
std::move(defaults)) {}
/// Adds the given param_group to the optimizer's param_group list.
void add_param_group(const OptimizerParamGroup& param_group);
virtual ~Optimizer() = default;
using LossClosure = std::function<Tensor()>;
/// A loss function closure, which is expected to return the loss value.
virtual Tensor step(LossClosure closure = nullptr) = 0;
/// Adds the given vector of parameters to the optimizer's parameter list.
void add_parameters(const std::vector<Tensor>& parameters);
/// Zeros out the gradients of all parameters.
void zero_grad(bool set_to_none = true);
/// Provides a const reference to the parameters in the first param_group this
/// optimizer holds.
const std::vector<Tensor>& parameters() const noexcept;
/// Provides a reference to the parameters in the first param_group this
/// optimizer holds.
std::vector<Tensor>& parameters() noexcept;
/// Returns the number of parameters referenced by the optimizer.
size_t size() const noexcept;
OptimizerOptions& defaults() noexcept;
const OptimizerOptions& defaults() const noexcept;
/// Provides a reference to the param_groups this optimizer holds.
std::vector<OptimizerParamGroup>& param_groups() noexcept;
/// Provides a const reference to the param_groups this optimizer holds.
const std::vector<OptimizerParamGroup>& param_groups() const noexcept;
/// Provides a reference to the state this optimizer holds
ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>&
state() noexcept;
/// Provides a const reference to the state this optimizer holds
const ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>& state()
const noexcept;
/// Serializes the optimizer state into the given `archive`.
virtual void save(serialize::OutputArchive& archive) const;
/// Deserializes the optimizer state from the given `archive`.
virtual void load(serialize::InputArchive& archive);
protected:
std::vector<OptimizerParamGroup> param_groups_;
ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>> state_;
std::unique_ptr<OptimizerOptions> defaults_;
};
/* How do we decide whether to serialize undefined tensors or std::nullopt
values into the output archive?
Answer: we strictly follow the behavior of the Python API. To be more specific:
For optimizer options:
a) For an undefined tensor: currently no tensor is used as an options argument
in the Python API, so we don't need to worry about it now.
b) For a std::nullopt value: we serialize std::nullopt values into the output
archive, to follow the exact same behavior as the Python API.
For optimizer param state:
a) For an undefined tensor: in param state, an undefined tensor in the C++
impl is equivalent to a missing key in the Python impl. Since we don't
serialize missing keys in the Python API, we skip undefined tensors when
serializing the param state.
b) For a std::nullopt value: in param state, a std::nullopt value in the C++
impl is equivalent to a missing key in the Python impl. Since we don't
serialize missing keys in the Python API, we skip std::nullopt values when
serializing the param state. */
/// Serializes an `Optimizer` into an `OutputArchive`.
TORCH_API serialize::OutputArchive& operator<<(
serialize::OutputArchive& archive,
const Optimizer& optimizer);
/// Deserializes an `Optimizer` from an `InputArchive`.
TORCH_API serialize::InputArchive& operator>>(
serialize::InputArchive& archive,
Optimizer& optimizer);
} // namespace torch::optim
```
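Usage sketch (not part of the header above): per-group options via `OptimizerParamGroup`. A minimal sketch assuming a linked LibTorch build; `SGD`/`SGDOptions` come from the sibling optimizer headers.
```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear backbone(10, 10);
  torch::nn::Linear head(10, 2);

  std::vector<torch::optim::OptimizerParamGroup> groups;
  groups.emplace_back(backbone->parameters());  // falls back to the defaults
  groups.emplace_back(
      head->parameters(),
      std::make_unique<torch::optim::SGDOptions>(/*lr=*/1e-1));
  torch::optim::SGD opt(groups, torch::optim::SGDOptions(/*lr=*/1e-3));

  // Groups remain inspectable and tunable after construction:
  for (auto& group : opt.param_groups()) {
    group.options().set_lr(group.options().get_lr() * 0.5);
  }
  return 0;
}
```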
|
==================================================================================================================================================
SOURCE CODE FILE: rmsprop.h
LINES: 1
SIZE: 2.92 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\rmsprop.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/module.h>
#include <torch/optim/optimizer.h>
#include <torch/optim/serialize.h>
#include <torch/serialize/archive.h>
#include <torch/types.h>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
namespace torch::serialize {
class OutputArchive;
class InputArchive;
} // namespace torch::serialize
namespace torch::optim {
struct TORCH_API RMSpropOptions
: public OptimizerCloneableOptions<RMSpropOptions> {
RMSpropOptions(double lr = 1e-2);
TORCH_ARG(double, lr) = 1e-2;
TORCH_ARG(double, alpha) = 0.99;
TORCH_ARG(double, eps) = 1e-8;
TORCH_ARG(double, weight_decay) = 0;
TORCH_ARG(double, momentum) = 0;
TORCH_ARG(bool, centered) = false;
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const RMSpropOptions& lhs,
const RMSpropOptions& rhs);
double get_lr() const override;
void set_lr(const double lr) override;
};
struct TORCH_API RMSpropParamState
: public OptimizerCloneableParamState<RMSpropParamState> {
TORCH_ARG(int64_t, step) = 0;
TORCH_ARG(torch::Tensor, square_avg);
TORCH_ARG(torch::Tensor, momentum_buffer) = {};
TORCH_ARG(torch::Tensor, grad_avg) = {};
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const RMSpropParamState& lhs,
const RMSpropParamState& rhs);
};
class TORCH_API RMSprop : public Optimizer {
public:
explicit RMSprop(
const std::vector<OptimizerParamGroup>& param_groups,
RMSpropOptions defaults = {})
: Optimizer(param_groups, std::make_unique<RMSpropOptions>(defaults)) {
TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
TORCH_CHECK(
defaults.momentum() >= 0,
"Invalid momentum value: ",
defaults.momentum());
TORCH_CHECK(
defaults.weight_decay() >= 0,
"Invalid weight_decay value: ",
defaults.weight_decay());
TORCH_CHECK(
defaults.alpha() >= 0, "Invalid alpha value: ", defaults.alpha());
}
explicit RMSprop(std::vector<Tensor> params, RMSpropOptions defaults = {})
: RMSprop({OptimizerParamGroup(std::move(params))}, std::move(defaults)) {
}
torch::Tensor step(LossClosure closure = nullptr) override;
void save(serialize::OutputArchive& archive) const override;
void load(serialize::InputArchive& archive) override;
private:
template <typename Self, typename Archive>
static void serialize(Self& self, Archive& archive) {
_TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(RMSprop);
}
};
} // namespace torch::optim
```
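Usage sketch (not part of the header above): the centered variant with momentum. A minimal sketch assuming a linked LibTorch build.
```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear lin(10, 1);
  torch::optim::RMSprop opt(
      lin->parameters(),
      torch::optim::RMSpropOptions(/*lr=*/1e-2).momentum(0.9).centered(true));
  opt.zero_grad();
  lin(torch::randn({16, 10})).pow(2).mean().backward();
  opt.step();
  return 0;
}
```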
|
==================================================================================================================================================================
SOURCE CODE FILE: lr_scheduler.h
LINES: 1
SIZE: 1.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\schedulers\lr_scheduler.h
ENCODING: utf-8
```h
#pragma once
#include <torch/optim/optimizer.h>
#include <torch/csrc/Export.h>
namespace torch::optim {
class TORCH_API LRScheduler {
public:
// This class needs to take a reference to an optimizer from outside so that
// it can modify the optimizer's learning rates; because of this, the
// optimizer must outlive the scheduler.
LRScheduler(torch::optim::Optimizer& optimizer);
virtual ~LRScheduler() = default;
void step();
protected:
// The specific subclass calculates and returns a vector of learning rates,
// one element per param group, although the normal use case would be to
// return a vector of identical elements.
virtual std::vector<double> get_lrs() = 0;
// Get current learning rates from the optimizer
std::vector<double> get_current_lrs() const;
unsigned step_count_{};
private:
void set_optimizer_lrs(const std::vector<double>& learning_rates);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
torch::optim::Optimizer& optimizer_;
};
} // namespace torch::optim
```
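Usage sketch (not part of the header above): a subclass only has to implement `get_lrs()`. A minimal sketch assuming a linked LibTorch build; `ExponentialLR` is an illustrative name, not a libtorch class.
```cpp
#include <torch/torch.h>

// Multiplies every param group's learning rate by `gamma` on each step().
class ExponentialLR : public torch::optim::LRScheduler {
 public:
  ExponentialLR(torch::optim::Optimizer& optimizer, double gamma)
      : LRScheduler(optimizer), gamma_(gamma) {}

 private:
  std::vector<double> get_lrs() override {
    std::vector<double> lrs = get_current_lrs();  // one entry per param group
    for (double& lr : lrs) {
      lr *= gamma_;
    }
    return lrs;
  }
  double gamma_;
};
```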
|
=================================================================================================================================================================================
SOURCE CODE FILE: reduce_on_plateau_scheduler.h
LINES: 1
SIZE: 1.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\schedulers\reduce_on_plateau_scheduler.h
ENCODING: utf-8
```h
#pragma once
#include <torch/optim/optimizer.h>
#include <torch/optim/schedulers/lr_scheduler.h>
#include <torch/csrc/Export.h>
#include <cmath>
namespace torch::optim {
class TORCH_API ReduceLROnPlateauScheduler {
public:
enum SchedulerMode { min, max };
enum ThresholdMode { rel, abs };
ReduceLROnPlateauScheduler(
Optimizer& optimizer,
SchedulerMode mode = min,
float factor = 0.1,
int patience = 10,
double threshold = 1e-4,
ThresholdMode threshold_mode = rel,
int cooldown = 0,
const std::vector<float>& min_lr = std::vector<float>(),
double eps = 1e-8,
bool verbose = false);
virtual ~ReduceLROnPlateauScheduler() = default;
void step(float metric);
private:
void reset();
void reduce_lr(int epoch);
bool in_cooldown() const;
bool is_better(float a);
void init_is_better(
SchedulerMode mode,
double threshold,
ThresholdMode threshold_mode);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
Optimizer& optimizer;
SchedulerMode mode{};
float mode_worse{};
float factor;
int patience;
double threshold{};
ThresholdMode threshold_mode{};
int cooldown{};
int cooldown_counter{};
std::vector<float> min_lrs;
double eps;
float best{};
bool verbose;
int last_epoch{};
int num_bad_epochs{};
};
} // namespace torch::optim
```
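Usage sketch (not part of the header above): unlike `LRScheduler::step()`, this scheduler's `step()` takes the monitored metric. A minimal sketch assuming a linked LibTorch build; the validation loss is a placeholder.
```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear lin(10, 1);
  torch::optim::SGD opt(lin->parameters(), torch::optim::SGDOptions(0.1));
  // Halve the LR after 5 epochs without improvement in the metric.
  torch::optim::ReduceLROnPlateauScheduler sched(
      opt,
      torch::optim::ReduceLROnPlateauScheduler::min,
      /*factor=*/0.5,
      /*patience=*/5);
  for (int epoch = 0; epoch < 20; ++epoch) {
    float val_loss = 1.0f;  // placeholder for a real validation pass
    sched.step(val_loss);
  }
  return 0;
}
```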
|
=============================================================================================================================================================
SOURCE CODE FILE: step_lr.h
LINES: 1
SIZE: 0.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\schedulers\step_lr.h
ENCODING: utf-8
```h
#pragma once
#include <torch/optim/schedulers/lr_scheduler.h>
namespace torch::optim {
class TORCH_API StepLR : public LRScheduler {
public:
StepLR(
torch::optim::Optimizer& optimizer,
const unsigned step_size,
const double gamma = 0.1);
private:
std::vector<double> get_lrs() override;
const unsigned step_size_;
const double gamma_;
};
} // namespace torch::optim
```
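Usage sketch (not part of the header above): decay the learning rate by `gamma` every `step_size` calls to `step()`. A minimal sketch assuming a linked LibTorch build.
```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear lin(10, 1);
  torch::optim::SGD opt(lin->parameters(), torch::optim::SGDOptions(0.1));
  torch::optim::StepLR sched(opt, /*step_size=*/30, /*gamma=*/0.1);
  for (int epoch = 0; epoch < 90; ++epoch) {
    // ... one training epoch with `opt` ...
    sched.step();  // lr: 0.1 -> 0.01 after 30 steps -> 0.001 after 60
  }
  return 0;
}
```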
|
====================================================================================================================================================
SOURCE CODE FILE: serialize.h
LINES: 1
SIZE: 12.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\serialize.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/irange.h>
#include <torch/optim/optimizer.h>
#include <torch/serialize/archive.h>
#include <torch/types.h>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <string>
#include <vector>
namespace torch::optim {
namespace detail {
// Utility function to save state
template <typename DerivedOptimizerParamState>
void serialize(
serialize::OutputArchive& archive,
const ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>&
state) {
for (const auto& item : state) {
serialize::OutputArchive param_state_archive(archive.compilation_unit());
std::string tensorimpl_key =
std::to_string(reinterpret_cast<size_t>(item.first));
const DerivedOptimizerParamState& curr_state =
static_cast<const DerivedOptimizerParamState&>(*(item.second));
curr_state.serialize(param_state_archive);
archive.write(tensorimpl_key, param_state_archive);
}
}
// Utility function to load state
template <typename DerivedOptimizerParamState>
void serialize(
serialize::InputArchive& archive,
ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>& state) {
std::vector<std::string> tensorimpl_keys = archive.keys();
for (const std::string& tensorimpl_key : tensorimpl_keys) {
serialize::InputArchive param_state_archive;
archive.read(tensorimpl_key, param_state_archive);
DerivedOptimizerParamState param_state;
param_state.serialize(param_state_archive);
// NOLINTNEXTLINE(performance-no-int-to-ptr)
state[reinterpret_cast<void*>(std::stoull(tensorimpl_key))] =
std::make_unique<DerivedOptimizerParamState>(param_state);
}
}
// Utility function to save param_groups
template <typename DerivedOptimizerParamOptions>
void serialize(
serialize::OutputArchive& archive,
const std::vector<OptimizerParamGroup>& param_groups) {
archive.write(
"param_groups/size",
torch::tensor(static_cast<int64_t>(param_groups.size())));
for (const auto i : c10::irange(param_groups.size())) {
serialize::OutputArchive param_group_archive(archive.compilation_unit());
std::vector<Tensor> params = param_groups[i].params();
param_group_archive.write(
"params/size", torch::tensor(static_cast<int64_t>(params.size())));
for (const auto index : c10::irange(params.size())) {
param_group_archive.write(
"params/" + std::to_string(index),
IValue(std::to_string(
reinterpret_cast<size_t>(params[index].unsafeGetTensorImpl()))));
}
const DerivedOptimizerParamOptions& param_group_options =
static_cast<const DerivedOptimizerParamOptions&>(
param_groups[i].options());
serialize::OutputArchive param_group_options_archive(
param_group_archive.compilation_unit());
param_group_options.serialize(param_group_options_archive);
param_group_archive.write("options", param_group_options_archive);
archive.write("param_groups/" + std::to_string(i), param_group_archive);
}
}
// Utility function to load param_groups
// We take as input a vector of (tensor impl key strings, unique_ptr to
// optimizer options) pairs so that we can retain the state for each param by
// using the old tensor impl keys (saved during serialization) and map the new
// tensor impl keys to the correct state for each param.
template <typename DerivedOptimizerParamOptions>
void serialize(
serialize::InputArchive& archive,
std::vector<
std::pair<std::vector<std::string>, std::unique_ptr<OptimizerOptions>>>&
param_groups) {
torch::Tensor param_groups_size_tensor;
archive.read("param_groups/size", param_groups_size_tensor);
const int64_t param_groups_size = param_groups_size_tensor.item<int64_t>();
for (const auto i : c10::irange(param_groups_size)) {
serialize::InputArchive param_group_archive;
archive.read("param_groups/" + std::to_string(i), param_group_archive);
torch::Tensor size_tensor;
param_group_archive.read("params/size", size_tensor);
const int64_t size = size_tensor.item<int64_t>();
std::vector<std::string> params;
for (const auto index : c10::irange(size)) {
IValue ivalue;
param_group_archive.read("params/" + std::to_string(index), ivalue);
std::string element = ivalue.toStringRef();
params.emplace_back(element);
}
serialize::InputArchive param_group_options_archive;
param_group_archive.read("options", param_group_options_archive);
DerivedOptimizerParamOptions param_group_options(0);
param_group_options.serialize(param_group_options_archive);
param_groups.emplace_back(std::make_pair(
params,
std::make_unique<DerivedOptimizerParamOptions>(param_group_options)));
}
}
} // namespace detail
// Note: These functions are all called `serialize()` so they can be called
// inside a template where the archive type is a template type and can thus be
// passed such that the appropriate overload is selected.
/// Utility function to save a value of `int64_t` type.
void serialize(
serialize::OutputArchive& archive,
const std::string& key,
const int64_t& value);
/// Utility function to load a value of `int64_t` type.
void serialize(
serialize::InputArchive& archive,
const std::string& key,
int64_t& value);
/// Utility function to save a vector of step buffers.
void serialize(
serialize::OutputArchive& archive,
const std::string& key,
const std::vector<int64_t>& steps);
/// Utility function to load a vector of step buffers.
void serialize(
serialize::InputArchive& archive,
const std::string& key,
std::vector<int64_t>& steps);
// Utility function to save state and param_groups
template <
typename DerivedOptimizerParamState,
typename DerivedOptimizerParamOptions>
void serialize(serialize::OutputArchive& archive, const Optimizer& optimizer) {
archive.write("pytorch_version", IValue("1.5.0"));
serialize::OutputArchive state_archive(archive.compilation_unit());
detail::serialize<DerivedOptimizerParamState>(
state_archive, optimizer.state());
archive.write("state", state_archive);
serialize::OutputArchive param_groups_archive(archive.compilation_unit());
detail::serialize<DerivedOptimizerParamOptions>(
param_groups_archive, optimizer.param_groups());
archive.write("param_groups", param_groups_archive);
}
// Utility function to load state and param_groups and update state
template <
typename DerivedOptimizerParamState,
typename DerivedOptimizerParamOptions>
void serialize(serialize::InputArchive& archive, Optimizer& optimizer) {
IValue pytorch_version;
archive.read("pytorch_version", pytorch_version);
TORCH_INTERNAL_ASSERT(pytorch_version.toStringRef() == "1.5.0");
serialize::InputArchive state_archive;
archive.read("state", state_archive);
ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>> saved_state;
detail::serialize<DerivedOptimizerParamState>(state_archive, saved_state);
serialize::InputArchive param_groups_archive;
archive.read("param_groups", param_groups_archive);
std::vector<
std::pair<std::vector<std::string>, std::unique_ptr<OptimizerOptions>>>
saved_param_groups;
detail::serialize<DerivedOptimizerParamOptions>(
param_groups_archive, saved_param_groups);
// update state and optimizer options
TORCH_CHECK(
saved_param_groups.size() == optimizer.param_groups().size(),
"loaded state dict has a different number of parameter groups");
for (const auto i : c10::irange(saved_param_groups.size())) {
std::vector<std::string> param_group_old_keys = saved_param_groups[i].first;
std::vector<Tensor> params = optimizer.param_groups()[i].params();
TORCH_CHECK(
param_group_old_keys.size() == params.size(),
"loaded state dict contains a parameter group that has a different size than the optimizer's parameter group");
for (const auto idx : c10::irange(params.size())) {
auto param_group_old_key =
// NOLINTNEXTLINE(performance-no-int-to-ptr)
reinterpret_cast<void*>(std::stoull(param_group_old_keys[idx]));
if (saved_state.find(param_group_old_key) != saved_state.end()) {
optimizer.state()[params[idx].unsafeGetTensorImpl()] =
std::move(saved_state[param_group_old_key]);
}
}
auto& saved_options = reinterpret_cast<DerivedOptimizerParamOptions&>(
*saved_param_groups[i].second);
auto& current_options = reinterpret_cast<DerivedOptimizerParamOptions&>(
optimizer.param_groups()[i].options());
current_options = saved_options;
}
}
/// Utility function to save a vector of buffers.
template <typename BufferContainer>
void serialize(
serialize::OutputArchive& archive,
const std::string& key,
const BufferContainer& buffers) {
archive.write(
key + "/size", torch::tensor(static_cast<int64_t>(buffers.size())));
for (const auto index : c10::irange(buffers.size())) {
archive.write(
key + "/" + std::to_string(index), buffers[index], /*is_buffer=*/true);
}
}
/// Utility function to load a vector of buffers.
template <typename BufferContainer>
void serialize(
serialize::InputArchive& archive,
const std::string& key,
BufferContainer& buffers) {
buffers.clear();
torch::Tensor size_tensor;
archive.read(key + "/size", size_tensor);
const size_t size = size_tensor.item<int64_t>();
for (const auto index : c10::irange(size)) {
buffers.emplace_back();
archive.read(
key + "/" + std::to_string(index), buffers.back(), /*is_buffer=*/true);
}
}
template <typename T>
c10::List<T> deque_to_list(const std::deque<T>& dq) {
c10::List<T> list;
list.reserve(dq.size());
for (const auto& e : dq) {
list.emplace_back(e);
}
return list;
}
template <typename T>
std::deque<T> list_to_deque(const c10::List<T>& list) {
std::deque<T> dq;
for (const auto& e : list) {
dq.emplace_back(e);
}
return dq;
}
#define _TORCH_OPTIM_SERIALIZE(name) \
torch::optim::serialize(archive, #name, self.name)
#define _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(OptimizerName) \
torch::optim::serialize<OptimizerName##ParamState, OptimizerName##Options>( \
archive, self)
#define _TORCH_OPTIM_SERIALIZE_TORCH_ARG(name) \
{ \
auto ivalue = torch::IValue(name()); \
/* do not serialize if name is an undefined tensor*/ \
if (!(ivalue.isTensor() && \
ivalue.unsafeToTensorImpl() == \
at::UndefinedTensorImpl::singleton())) { \
archive.write(#name, ivalue); \
} \
}
#define _TORCH_OPTIM_SERIALIZE_TORCH_ARG_DEQUE(name) \
{ \
c10::IValue ivalue = torch::IValue(deque_to_list(name())); \
archive.write(#name, ivalue); \
}
#define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(T, name) \
{ \
c10::IValue ivalue; \
bool exists = archive.try_read(#name, ivalue); \
if (exists) { \
name(ivalue.to<T>()); \
} else { \
constexpr bool is_tensor_type = std::is_base_of_v<torch::Tensor, T>; \
TORCH_INTERNAL_ASSERT(is_tensor_type); \
} \
}
#define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG_OPTIONAL(T, name) \
{ \
c10::IValue ivalue; \
bool exists = archive.try_read(#name, ivalue); \
if (exists) { \
name(ivalue.toOptional<T>()); \
} \
}
#define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG_DEQUE(T, name) \
{ \
c10::IValue ivalue; \
archive.read(#name, ivalue); \
auto list = ivalue.to<c10::List<T::value_type>>(); \
name(list_to_deque(list)); \
}
} // namespace torch::optim
```
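The `_TORCH_OPTIM_*SERIALIZE*` macros above are meant to be expanded inside the `serialize()` methods of an optimizer's options/state structs, where `archive` is in scope. A minimal sketch of that pattern, using a hypothetical `MyOptions` struct (the bundled optimizers such as SGD follow the same shape):

```cpp
#include <torch/optim/optimizer.h>
#include <torch/optim/serialize.h>

// Hypothetical options struct. TORCH_ARG generates the lr()/momentum()
// accessors that the serialize macros call by name.
struct MyOptions : torch::optim::OptimizerCloneableOptions<MyOptions> {
  explicit MyOptions(double lr = 1e-3) : lr_(lr) {}
  TORCH_ARG(double, lr);
  TORCH_ARG(double, momentum) = 0;

 public:
  void serialize(torch::serialize::OutputArchive& archive) const override {
    _TORCH_OPTIM_SERIALIZE_TORCH_ARG(lr);        // writes under key "lr"
    _TORCH_OPTIM_SERIALIZE_TORCH_ARG(momentum);  // writes under key "momentum"
  }
  void serialize(torch::serialize::InputArchive& archive) override {
    _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(double, lr);
    _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(double, momentum);
  }
};
```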
|
==============================================================================================================================================
SOURCE CODE FILE: sgd.h
LINES: 1
SIZE: 2.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\optim\sgd.h
ENCODING: utf-8
```h
#pragma once
#include <torch/nn/module.h>
#include <torch/optim/optimizer.h>
#include <torch/optim/serialize.h>
#include <torch/serialize/archive.h>
#include <torch/types.h>
#include <cstddef>
#include <utility>
#include <vector>
namespace torch::serialize {
class OutputArchive;
class InputArchive;
} // namespace torch::serialize
namespace torch::optim {
struct TORCH_API SGDOptions : public OptimizerCloneableOptions<SGDOptions> {
SGDOptions(double lr);
TORCH_ARG(double, lr);
TORCH_ARG(double, momentum) = 0;
TORCH_ARG(double, dampening) = 0;
TORCH_ARG(double, weight_decay) = 0;
TORCH_ARG(bool, nesterov) = false;
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const SGDOptions& lhs,
const SGDOptions& rhs);
double get_lr() const override;
void set_lr(const double lr) override;
};
struct TORCH_API SGDParamState
: public OptimizerCloneableParamState<SGDParamState> {
TORCH_ARG(torch::Tensor, momentum_buffer);
public:
void serialize(torch::serialize::InputArchive& archive) override;
void serialize(torch::serialize::OutputArchive& archive) const override;
TORCH_API friend bool operator==(
const SGDParamState& lhs,
const SGDParamState& rhs);
};
class TORCH_API SGD : public Optimizer {
public:
explicit SGD(
const std::vector<OptimizerParamGroup>& param_groups,
SGDOptions defaults)
: Optimizer(param_groups, std::make_unique<SGDOptions>(defaults)) {
TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
TORCH_CHECK(
defaults.momentum() >= 0,
"Invalid momentum value: ",
defaults.momentum());
TORCH_CHECK(
defaults.weight_decay() >= 0,
"Invalid weight_decay value: ",
defaults.weight_decay());
TORCH_CHECK(
!defaults.nesterov() ||
(defaults.momentum() > 0 && defaults.dampening() == 0),
"Nesterov momentum requires a momentum and zero dampening");
}
explicit SGD(std::vector<Tensor> params, SGDOptions defaults)
: SGD({OptimizerParamGroup(std::move(params))}, std::move(defaults)) {}
torch::Tensor step(LossClosure closure = nullptr) override;
void save(serialize::OutputArchive& archive) const override;
void load(serialize::InputArchive& archive) override;
private:
template <typename Self, typename Archive>
static void serialize(Self& self, Archive& archive) {
_TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(SGD);
}
};
} // namespace torch::optim
```
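A typical end-to-end use of `SGD`, exercising the option checks enforced by the constructor and a save/load round trip (a minimal sketch; the model, data, and file name are placeholders):

```cpp
#include <torch/torch.h>

int main() {
  torch::nn::Linear model(4, 1);

  // Nesterov requires momentum > 0 and dampening == 0, per the TORCH_CHECKs
  // in the constructor above.
  torch::optim::SGD optimizer(
      model->parameters(),
      torch::optim::SGDOptions(0.1).momentum(0.9).nesterov(true));

  optimizer.zero_grad();
  auto loss = model(torch::randn({8, 4})).sum();
  loss.backward();
  optimizer.step();

  torch::save(optimizer, "sgd.pt");  // dispatches to SGD::save()
  torch::load(optimizer, "sgd.pt");  // dispatches to SGD::load()
}
```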
|
=================================================================================================================================================
SOURCE CODE FILE: ordered_dict.h
LINES: 1
SIZE: 16.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\ordered_dict.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <initializer_list>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
namespace torch {
/// An ordered dictionary implementation, akin to Python's `OrderedDict`.
template <typename Key, typename Value>
class OrderedDict {
public:
/// A (key, value) pair.
class Item;
// The lifetime of an iterator is bound to the lifetime of the `OrderedDict`.
// Further, any `insert()` operation may invalidate all iterators
// pointing into the vector.
using Iterator = typename std::vector<Item>::iterator;
using ConstIterator = typename std::vector<Item>::const_iterator;
/// Constructs the `OrderedDict` with a short description of the kinds of keys
/// stored in the `OrderedDict`. This description is used in error messages
/// thrown by the `OrderedDict`.
explicit OrderedDict(std::string key_description = "Key");
/// Copy constructs this `OrderedDict` from `other`.
OrderedDict(const OrderedDict& other);
/// Assigns items from `other` to this `OrderedDict`.
OrderedDict& operator=(const OrderedDict& other);
// NB: Move works by default, because you can move-construct vectors of const
// values. I tried to make this noexcept (conditional on the move constructors
// of index_ and items_ being noexcept) but the obvious spelling didn't
// compile on Windows.
OrderedDict(OrderedDict&& other) noexcept = default;
OrderedDict& operator=(OrderedDict&& other) noexcept = default;
~OrderedDict() = default;
/// Constructs a new `OrderedDict` and pre-populates it with the given
/// `Item`s.
/*implicit */ OrderedDict(std::initializer_list<Item> initializer_list);
/// Returns the key description string the `OrderedDict` was constructed with.
const std::string& key_description() const noexcept;
// Element Access
/// Returns the very first item in the `OrderedDict` and throws an exception
/// if it is empty.
Item& front();
/// Returns the very first item in the `OrderedDict` and throws an exception
/// if it is empty.
const Item& front() const;
/// Returns the very last item in the `OrderedDict` and throws an exception
/// if it is empty.
Item& back();
/// Returns the very last item in the `OrderedDict` and throws an exception
/// if it is empty.
const Item& back() const;
/// Returns the item at the `index`-th position in the `OrderedDict`. Throws
/// an exception if the index is out of bounds.
Item& operator[](size_t index);
/// Returns the item at the `index`-th position in the `OrderedDict`. Throws
/// an exception if the index is out of bounds.
const Item& operator[](size_t index) const;
/// Returns the value associated with the given `key`. Throws an exception if
/// no such key is stored in the `OrderedDict`. Use `find()` for a
/// non-throwing way of accessing a value if it is present.
Value& operator[](const Key& key);
/// Returns the value associated with the given `key`. Throws an exception if
/// no such key is stored in the `OrderedDict`. Use `find()` for a
/// non-throwing way of accessing a value if it is present.
const Value& operator[](const Key& key) const;
// Lookup
/// Returns a pointer to the value associated with the given key, or a
/// `nullptr` if no such key is stored in the `OrderedDict`.
Value* find(const Key& key) noexcept;
/// Returns a pointer to the value associated with the given key, or a
/// `nullptr` if no such key is stored in the `OrderedDict`.
const Value* find(const Key& key) const noexcept;
/// Returns true if the key is present in the `OrderedDict`.
bool contains(const Key& key) const noexcept;
// Iterators
/// Returns an iterator to the first item in the `OrderedDict`. Iteration is
/// ordered.
Iterator begin();
/// Returns an iterator to the first item in the `OrderedDict`. Iteration is
/// ordered.
ConstIterator begin() const;
/// Returns an iterator one past the last item in the `OrderedDict`.
Iterator end();
/// Returns an iterator one past the last item in the `OrderedDict`.
ConstIterator end() const;
// Capacity
/// Returns the number of items currently stored in the `OrderedDict`.
size_t size() const noexcept;
/// Returns true if the `OrderedDict` contains no elements.
bool is_empty() const noexcept;
/// Resizes internal storage to fit at least `requested_capacity` items
/// without requiring reallocation.
void reserve(size_t requested_capacity);
// Modifiers
/// Inserts a new `(key, value)` pair into the `OrderedDict`. Throws an
/// exception if the key is already present. If insertion is successful,
/// immediately returns a reference to the inserted value.
template <typename K, typename V>
Value& insert(K&& key, V&& value);
/// Inserts a new `(key, value)` pair into the `OrderedDict`. Throws an
/// exception if the key is already present. If insertion is successful,
/// immediately returns a reference to the inserted value.
Value& insert(Key key, Value&& value);
/// Inserts all items from `other` into this `OrderedDict`. If any key from
/// `other` is already present in this `OrderedDict`, an exception is thrown.
void update(OrderedDict&& other);
/// Inserts all items from `other` into this `OrderedDict`. If any key from
/// `other` is already present in this `OrderedDict`, an exception is thrown.
void update(const OrderedDict& other);
/// Removes the item with the given `key` from this `OrderedDict` if it
/// exists; otherwise an exception is thrown.
void erase(const Key& key);
/// Removes all items from this `OrderedDict`.
void clear();
// Observers
/// Returns the items stored in the `OrderedDict`.
const std::vector<Item>& items() const noexcept;
/// Returns a newly allocated vector and copies all keys from this
/// `OrderedDict` into the vector.
::std::vector<Key> keys() const;
/// Returns a newly allocated vector and copies all values from this
/// `OrderedDict` into the vector.
::std::vector<Value> values() const;
/// Returns a newly allocated vector and copies all keys and values from this
/// `OrderedDict` into a vector of `std::pair<Key, Value>`.
::std::vector<std::pair<Key, Value>> pairs() const;
/// Returns true if both dicts contain the same keys and values, in the same
/// order.
template <typename K, typename V>
friend bool operator==(
const OrderedDict<K, V>& a,
const OrderedDict<K, V>& b);
private:
/// A mapping from a key to an index into the `items_` vector.
::std::unordered_map<Key, size_t> index_;
/// The items stored in the `OrderedDict`.
::std::vector<Item> items_;
/// A description of the keys stored in the `OrderedDict`.
::std::string key_description_{"Key"};
};
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict::Item ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename Key, typename Value>
class OrderedDict<Key, Value>::Item {
public:
/// Constructs a new item.
Item(Key key, Value value) : pair_(std::move(key), std::move(value)) {}
/// Returns a reference to the value.
Value& operator*() {
return value();
}
/// Returns a reference to the value.
const Value& operator*() const {
return value();
}
/// Allows access to the value using the arrow operator.
Value* operator->() {
return &value();
}
/// Allows access to the value using the arrow operator.
const Value* operator->() const {
return &value();
}
/// Returns a reference to the key.
const Key& key() const noexcept {
return pair_.first;
}
/// Returns a reference to the value.
Value& value() noexcept {
return pair_.second;
}
/// Returns a reference to the value.
const Value& value() const noexcept {
return pair_.second;
}
/// Returns a `(key, value)` pair.
const std::pair<Key, Value>& pair() const noexcept {
return pair_;
}
private:
/// This is stored as an std::pair because it makes Python binding a lot
/// easier.
::std::pair<Key, Value> pair_;
};
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OrderedDict ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename Key, typename Value>
OrderedDict<Key, Value>::OrderedDict(std::string key_description)
: key_description_(std::move(key_description)) {}
template <typename Key, typename Value>
OrderedDict<Key, Value>::OrderedDict(const OrderedDict& other)
: index_(other.index_), key_description_(other.key_description_) {
// Copy we have to do ourselves, because items' keys are const, so we have to
// re-insert the items.
for (const auto& item : other.items_) {
items_.push_back(item);
}
}
template <typename Key, typename Value>
OrderedDict<Key, Value>& OrderedDict<Key, Value>::operator=(
const OrderedDict& other) {
index_ = other.index_;
items_.clear();
for (auto& item : other.items_) {
items_.push_back(item);
}
key_description_ = other.key_description_;
return *this;
}
template <typename Key, typename Value>
OrderedDict<Key, Value>::OrderedDict(
std::initializer_list<Item> initializer_list)
: OrderedDict("Key") {
items_.reserve(initializer_list.size());
for (auto& item : initializer_list) {
// Copy the key here and move it into the index.
items_.emplace_back(item.key(), std::move(item.value()));
index_.emplace(std::move(item.key()), size() - 1);
}
}
template <typename Key, typename Value>
typename OrderedDict<Key, Value>::Iterator OrderedDict<Key, Value>::begin() {
return items_.begin();
}
template <typename Key, typename Value>
typename OrderedDict<Key, Value>::ConstIterator OrderedDict<Key, Value>::begin()
const {
return items_.begin();
}
template <typename Key, typename Value>
typename OrderedDict<Key, Value>::Iterator OrderedDict<Key, Value>::end() {
return items_.end();
}
template <typename Key, typename Value>
typename OrderedDict<Key, Value>::ConstIterator OrderedDict<Key, Value>::end()
const {
return items_.end();
}
template <typename Key, typename Value>
typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::front() {
TORCH_CHECK(!items_.empty(), "Called front() on an empty OrderedDict");
return items_.front();
}
template <typename Key, typename Value>
const typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::front()
const {
TORCH_CHECK(!items_.empty(), "Called front() on an empty OrderedDict");
return items_.front();
}
template <typename Key, typename Value>
typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::back() {
TORCH_CHECK(!items_.empty(), "Called back() on an empty OrderedDict");
return items_.back();
}
template <typename Key, typename Value>
const typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::back()
const {
TORCH_CHECK(!items_.empty(), "Called back() on an empty OrderedDict");
return items_.back();
}
template <typename Key, typename Value>
typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::operator[](
size_t index) {
TORCH_CHECK(index < items_.size(), "Index ", index, " is out of bounds");
return items_[index];
}
template <typename Key, typename Value>
const typename OrderedDict<Key, Value>::Item& OrderedDict<Key, Value>::
operator[](size_t index) const {
TORCH_CHECK(index < items_.size(), "Index ", index, " is out of bounds");
return items_[index];
}
template <typename Key, typename Value>
Value& OrderedDict<Key, Value>::operator[](const Key& key) {
if (auto* value = find(key)) {
return *value;
}
TORCH_CHECK(false, key_description_, " '", key, "' is not defined");
}
template <typename Key, typename Value>
const Value& OrderedDict<Key, Value>::operator[](const Key& key) const {
if (auto* value = find(key)) {
return *value;
}
TORCH_CHECK(false, key_description_, " '", key, "' is not defined");
}
template <typename Key, typename Value>
template <typename K, typename V>
Value& OrderedDict<Key, Value>::insert(K&& key, V&& value) {
TORCH_CHECK(
index_.count(key) == 0, key_description_, " '", key, "' already defined");
// Copy `key` here and move it into the index.
items_.emplace_back(key, std::forward<V>(value));
index_.emplace(std::forward<K>(key), size() - 1);
return items_.back().value();
}
template <typename Key, typename Value>
Value& OrderedDict<Key, Value>::insert(Key key, Value&& value) {
return insert<Key, Value>(std::move(key), std::move(value));
}
template <typename Key, typename Value>
void OrderedDict<Key, Value>::update(OrderedDict&& other) {
reserve(size() + other.size());
for (auto&& item : std::move(other)) {
// We want to call `insert()` to prevent duplicate keys.
insert(std::move(item.key()), std::move(item.value()));
}
}
template <typename Key, typename Value>
void OrderedDict<Key, Value>::update(const OrderedDict& other) {
reserve(size() + other.size());
for (auto& item : other) {
// We want to call `insert()` to prevent duplicate keys.
insert(item.key(), item.value());
}
}
template <typename Key, typename Value>
Value* OrderedDict<Key, Value>::find(const Key& key) noexcept {
auto iterator = index_.find(key);
if (iterator == index_.end()) {
return nullptr;
}
return &items_[iterator->second].value();
}
template <typename Key, typename Value>
const Value* OrderedDict<Key, Value>::find(const Key& key) const noexcept {
auto iterator = index_.find(key);
if (iterator == index_.end()) {
return nullptr;
}
return &items_[iterator->second].value();
}
template <typename Key, typename Value>
void OrderedDict<Key, Value>::erase(const Key& key) {
auto it = index_.find(key);
TORCH_CHECK(it != index_.end(), "Key '", key, "' doesn't exist");
auto index = it->second;
index_.erase(it);
items_.erase(items_.begin() + index);
for (auto& pair : index_)
if (pair.second > index)
--pair.second;
}
template <typename Key, typename Value>
bool OrderedDict<Key, Value>::contains(const Key& key) const noexcept {
return find(key) != nullptr;
}
template <typename Key, typename Value>
void OrderedDict<Key, Value>::clear() {
index_.clear();
items_.clear();
}
template <typename Key, typename Value>
size_t OrderedDict<Key, Value>::size() const noexcept {
return items_.size();
}
template <typename Key, typename Value>
bool OrderedDict<Key, Value>::is_empty() const noexcept {
return items_.empty();
}
template <typename Key, typename Value>
const std::string& OrderedDict<Key, Value>::key_description() const noexcept {
return key_description_;
}
template <typename Key, typename Value>
const std::vector<typename OrderedDict<Key, Value>::Item>& OrderedDict<
Key,
Value>::items() const noexcept {
return items_;
}
template <typename Key, typename Value>
::std::vector<Key> OrderedDict<Key, Value>::keys() const {
std::vector<Key> keys;
keys.reserve(size());
for (const auto& item : items_) {
keys.push_back(item.key());
}
return keys;
}
template <typename Key, typename Value>
::std::vector<Value> OrderedDict<Key, Value>::values() const {
std::vector<Value> values;
values.reserve(size());
for (const auto& item : items_) {
values.push_back(item.value());
}
return values;
}
template <typename Key, typename Value>
::std::vector<std::pair<Key, Value>> OrderedDict<Key, Value>::pairs() const {
std::vector<std::pair<Key, Value>> values;
values.reserve(size());
for (const auto& item : items_) {
values.push_back(item.pair());
}
return values;
}
template <typename Key, typename Value>
void OrderedDict<Key, Value>::reserve(size_t requested_capacity) {
index_.reserve(requested_capacity);
items_.reserve(requested_capacity);
}
template <typename K, typename V>
bool operator==(
const torch::OrderedDict<K, V>& a,
const torch::OrderedDict<K, V>& b) {
using Item = typename torch::OrderedDict<K, V>::Item;
if (a.index_ != b.index_)
return false;
if (a.items_.size() != b.items_.size())
return false;
// NOTE: There's no point in comparing keys for items_, as we already know
// that index_ is equal.
return std::equal(
a.items_.begin(),
a.items_.end(),
b.items_.begin(),
[](const Item& a, const Item& b) { return a.value() == b.value(); });
}
} // namespace torch
```
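A short usage sketch of `OrderedDict`, exercising throwing and non-throwing lookup, ordered iteration, and erasure (the key description only affects error messages):

```cpp
#include <torch/ordered_dict.h>

#include <iostream>
#include <string>

int main() {
  torch::OrderedDict<std::string, int> dict("Parameter");
  dict.insert("alpha", 1);
  dict.insert("beta", 2);

  dict["alpha"] = 10;  // throwing lookup; a missing key would raise
                       // "Parameter 'alpha' is not defined"
  if (dict.find("gamma") == nullptr) {
    // find() is the non-throwing alternative
  }

  // Iteration preserves insertion order.
  for (const auto& item : dict) {
    std::cout << item.key() << " = " << item.value() << '\n';
  }

  dict.erase("alpha");
  std::cout << dict.size() << '\n';  // prints 1
}
```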
|
===========================================================================================================================================
SOURCE CODE FILE: python.h
LINES: 1
SIZE: 9.92 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\python.h
ENCODING: utf-8
```h
#pragma once
#include <torch/detail/static.h>
#include <torch/nn/module.h>
#include <torch/ordered_dict.h>
#include <torch/types.h>
#include <torch/csrc/Device.h>
#include <torch/csrc/Dtype.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/python_tuples.h>
#include <iterator>
#include <string>
#include <utility>
namespace torch::python {
namespace detail {
inline Device py_object_to_device(py::object object) {
PyObject* obj = object.ptr();
if (THPDevice_Check(obj)) {
return reinterpret_cast<THPDevice*>(obj)->device;
}
throw TypeError("Expected device");
}
inline Dtype py_object_to_dtype(py::object object) {
PyObject* obj = object.ptr();
if (THPDtype_Check(obj)) {
return reinterpret_cast<THPDtype*>(obj)->scalar_type;
}
throw TypeError("Expected dtype");
}
template <typename ModuleType>
using PyModuleClass =
py::class_<ModuleType, torch::nn::Module, std::shared_ptr<ModuleType>>;
/// Dynamically creates a subclass of `torch.nn.cpp.ModuleWrapper` that is also
/// a subclass of `torch.nn.Module`, and passes it the user-provided C++ module
/// to which it delegates all calls.
template <typename ModuleType>
void bind_cpp_module_wrapper(
const py::module& module,
PyModuleClass<ModuleType> cpp_class,
const char* name) {
// Grab the `torch.nn.cpp.ModuleWrapper` class, which we'll subclass
// with a dynamically created class below.
py::object cpp_module =
py::module::import("torch.nn.cpp").attr("ModuleWrapper");
// Grab the `type` class which we'll use as a metaclass to create a new class
// dynamically.
py::object type_metaclass =
py::reinterpret_borrow<py::object>((PyObject*)&PyType_Type);
// The `ModuleWrapper` constructor copies all functions to its own `__dict__`,
// but we do need to give our dynamic class a constructor of its own.
// Inside, we construct an instance of the original C++ module we're binding
// (the `torch::nn::Module` subclass), and then forward it to the
// `ModuleWrapper` constructor.
py::dict attributes;
// `type()` always needs a `str`, but pybind11's `str()` method always creates
// a `unicode` object.
py::object name_str = py::str(name);
// Dynamically create the subclass of `ModuleWrapper`, which is a subclass of
// `torch.nn.Module`, and will delegate all calls to the C++ module we're
// binding.
py::object wrapper_class =
type_metaclass(name_str, py::make_tuple(cpp_module), attributes);
// The constructor of the dynamic class calls `ModuleWrapper.__init__()`,
// which replaces its methods with those of the C++ module.
wrapper_class.attr("__init__") = py::cpp_function(
[cpp_module, cpp_class](
const py::object& self,
const py::args& args,
const py::kwargs& kwargs) {
cpp_module.attr("__init__")(self, cpp_class(*args, **kwargs));
},
py::is_method(wrapper_class));
// Accessing `my_module.my_class` now yields `my_class`, a subclass of
// `ModuleWrapper` whose methods call into the C++ module we're binding.
module.attr(name) = wrapper_class;
}
} // namespace detail
/// Adds method bindings for a pybind11 `class_` that binds an `nn::Module`
/// subclass.
///
/// Say you have a pybind11 class object created with `py::class_<Net>(m,
/// "Net")`. This function will add all the necessary `.def()` calls to bind the
/// `nn::Module` base class' methods, such as `train()`, `eval()` etc. into
/// Python.
///
/// Users should prefer to use `bind_module` if possible.
template <typename ModuleType, typename... Extra>
py::class_<ModuleType, Extra...> add_module_bindings(
py::class_<ModuleType, Extra...> module) {
// clang-format off
return module
.def("train",
[](ModuleType& module, bool mode) { module.train(mode); },
py::arg("mode") = true)
.def("eval", [](ModuleType& module) { module.eval(); })
.def("clone", [](ModuleType& module) { return module.clone(); })
.def_property_readonly(
"training", [](ModuleType& module) { return module.is_training(); })
.def("zero_grad", [](ModuleType& module) { module.zero_grad(); })
.def_property_readonly( "_parameters", [](ModuleType& module) {
return module.named_parameters(/*recurse=*/false);
})
.def("parameters", [](ModuleType& module, bool recurse) {
return module.parameters(recurse);
},
py::arg("recurse") = true)
.def("named_parameters", [](ModuleType& module, bool recurse) {
return module.named_parameters(recurse);
},
py::arg("recurse") = true)
.def_property_readonly("_buffers", [](ModuleType& module) {
return module.named_buffers(/*recurse=*/false);
})
.def("buffers", [](ModuleType& module, bool recurse) {
return module.buffers(recurse); },
py::arg("recurse") = true)
.def("named_buffers", [](ModuleType& module, bool recurse) {
return module.named_buffers(recurse);
},
py::arg("recurse") = true)
.def_property_readonly(
"_modules", [](ModuleType& module) { return module.named_children(); })
.def("modules", [](ModuleType& module) { return module.modules(); })
.def("named_modules",
[](ModuleType& module, const py::object& /* unused */, std::string prefix, bool remove_duplicate /* unused */) {
return module.named_modules(std::move(prefix));
},
py::arg("memo") = py::none(),
py::arg("prefix") = std::string(),
py::arg("remove_duplicate") = true)
.def("children", [](ModuleType& module) { return module.children(); })
.def("named_children",
[](ModuleType& module) { return module.named_children(); })
.def("to", [](ModuleType& module, py::object object, bool non_blocking) {
if (THPDevice_Check(object.ptr())) {
module.to(
reinterpret_cast<THPDevice*>(object.ptr())->device,
non_blocking);
} else {
module.to(detail::py_object_to_dtype(object), non_blocking);
}
},
py::arg("dtype_or_device"),
py::arg("non_blocking") = false)
.def("to",
[](ModuleType& module,
const py::object& device,
const py::object& dtype,
bool non_blocking) {
if (device.is_none()) {
module.to(detail::py_object_to_dtype(dtype), non_blocking);
} else if (dtype.is_none()) {
module.to(detail::py_object_to_device(device), non_blocking);
} else {
module.to(
detail::py_object_to_device(device),
detail::py_object_to_dtype(dtype),
non_blocking);
}
},
py::arg("device"),
py::arg("dtype"),
py::arg("non_blocking") = false)
.def("cuda", [](ModuleType& module) { module.to(kCUDA); })
.def("cpu", [](ModuleType& module) { module.to(kCPU); })
.def("float", [](ModuleType& module) { module.to(kFloat32); })
.def("double", [](ModuleType& module) { module.to(kFloat64); })
.def("half", [](ModuleType& module) { module.to(kFloat16); })
.def("__str__", [](ModuleType& module) { return module.name(); })
.def("__repr__", [](ModuleType& module) { return module.name(); });
// clang-format on
}
/// Creates a pybind11 class object for an `nn::Module` subclass type and adds
/// default bindings.
///
/// After adding the default bindings, the class object is returned, such that
/// you can add more bindings.
///
/// Example usage:
/// \rst
/// .. code-block:: cpp
///
/// struct Net : torch::nn::Module {
/// Net(int in, int out) { }
/// torch::Tensor forward(torch::Tensor x) { return x; }
/// };
///
/// PYBIND11_MODULE(my_module, m) {
/// torch::python::bind_module<Net>(m, "Net")
/// .def(py::init<int, int>())
/// .def("forward", &Net::forward);
/// }
/// \endrst
template <typename ModuleType, bool force_enable = false>
std::enable_if_t<
!torch::detail::has_forward<ModuleType>::value || force_enable,
detail::PyModuleClass<ModuleType>>
bind_module(py::module module, const char* name) {
py::module cpp = module.def_submodule("cpp");
auto cpp_class =
add_module_bindings(detail::PyModuleClass<ModuleType>(cpp, name));
detail::bind_cpp_module_wrapper(module, cpp_class, name);
return cpp_class;
}
/// Creates a pybind11 class object for an `nn::Module` subclass type and adds
/// default bindings.
///
/// After adding the default bindings, the class object is returned, such that
/// you can add more bindings.
///
/// If the class has a `forward()` method, it is automatically exposed as
/// `forward()` and `__call__` in Python.
///
/// Example usage:
/// \rst
/// .. code-block:: cpp
///
/// struct Net : torch::nn::Module {
/// Net(int in, int out) { }
/// torch::Tensor forward(torch::Tensor x) { return x; }
/// };
///
/// PYBIND11_MODULE(my_module, m) {
/// torch::python::bind_module<Net>(m, "Net")
/// .def(py::init<int, int>())
/// .def("forward", &Net::forward);
/// }
/// \endrst
template <
typename ModuleType,
typename = std::enable_if_t<torch::detail::has_forward<ModuleType>::value>>
detail::PyModuleClass<ModuleType> bind_module(
py::module module,
const char* name) {
return bind_module<ModuleType, /*force_enable=*/true>(module, name)
.def("forward", &ModuleType::forward)
.def("__call__", &ModuleType::forward);
}
} // namespace torch::python
```
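For cases where the `bind_module` wrapper machinery is not wanted, `add_module_bindings` can be applied to a manually created `py::class_`, as the documentation above describes. A minimal sketch with a hypothetical `Net` (note the `shared_ptr` holder, matching `PyModuleClass`):

```cpp
#include <torch/python.h>

struct Net : torch::nn::Module {
  Net() { weight = register_parameter("weight", torch::randn({4, 4})); }
  torch::Tensor weight;
};

PYBIND11_MODULE(my_module, m) {
  // Adds train()/eval()/parameters()/to() etc. to the raw binding; unlike
  // bind_module(), no torch.nn.Module-compatible wrapper class is created.
  auto cls = torch::python::add_module_bindings(
      py::class_<Net, torch::nn::Module, std::shared_ptr<Net>>(m, "Net"));
  cls.def(py::init<>());
}
```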
|
==============================================================================================================================================
SOURCE CODE FILE: serialize.h
LINES: 1
SIZE: 5.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\serialize.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/irange.h>
#include <torch/csrc/Export.h>
#include <torch/serialize/archive.h>
#include <torch/serialize/tensor.h>
#include <utility>
namespace torch {
/// Serializes the given `value`.
/// There must be an overload of `operator<<` between `serialize::OutputArchive`
/// and `Value` for this method to be well-formed. Currently, such an overload
/// is provided for (subclasses of):
///
/// - `torch::nn::Module`,
/// - `torch::optim::Optimizer`
/// - `torch::Tensor`
///
/// To perform the serialization, a `serialize::OutputArchive` is constructed,
/// and all arguments after the `value` are forwarded to its `save_to` method.
/// For example, you can pass a filename, or an `ostream`.
///
/// \rst
/// .. code-block:: cpp
///
/// torch::nn::Linear model(3, 4);
/// torch::save(model, "model.pt");
///
/// torch::optim::SGD sgd(model->parameters(), 0.9); // 0.9 is learning rate
/// std::ostringstream stream;
/// // Note that the same stream cannot be used in multiple torch::save(...)
/// // invocations, otherwise the header will be corrupted.
/// torch::save(sgd, stream);
///
/// auto tensor = torch::ones({3, 4});
/// torch::save(tensor, "my_tensor.pt");
/// \endrst
template <typename Value, typename... SaveToArgs>
void save(const Value& value, SaveToArgs&&... args) {
serialize::OutputArchive archive(std::make_shared<jit::CompilationUnit>());
archive << value;
archive.save_to(std::forward<SaveToArgs>(args)...);
}
/// Serializes the given `tensor_vec` of type `std::vector<torch::Tensor>`.
///
/// To perform the serialization, a `serialize::OutputArchive` is constructed,
/// and all arguments after the `tensor_vec` are forwarded to its `save_to`
/// method. For example, you can pass a filename, or an `ostream`.
///
/// \rst
/// .. code-block:: cpp
///
/// std::vector<torch::Tensor> tensor_vec = {
///     torch::randn({1, 2}), torch::randn({3, 4})};
/// torch::save(tensor_vec, "my_tensor_vec.pt");
///
/// std::vector<torch::Tensor> tensor_vec = {
///     torch::randn({5, 6}), torch::randn({7, 8})};
/// std::ostringstream stream;
/// // Note that the same stream cannot be used in multiple torch::save(...)
/// // invocations, otherwise the header will be corrupted.
/// torch::save(tensor_vec, stream);
/// \endrst
template <typename... SaveToArgs>
void save(const std::vector<torch::Tensor>& tensor_vec, SaveToArgs&&... args) {
serialize::OutputArchive archive(std::make_shared<jit::CompilationUnit>());
for (const auto i : c10::irange(tensor_vec.size())) {
auto& value = tensor_vec[i];
archive.write(std::to_string(i), value);
}
archive.save_to(std::forward<SaveToArgs>(args)...);
}
TORCH_API std::vector<char> pickle_save(const torch::IValue& ivalue);
TORCH_API torch::IValue pickle_load(const std::vector<char>& data);
/// Deserializes the given `value`.
/// There must be an overload of `operator>>` between `serialize::InputArchive`
/// and `Value` for this method to be well-formed. Currently, such an overload
/// is provided for (subclasses of):
///
/// - `torch::nn::Module`,
/// - `torch::optim::Optimizer`
/// - `torch::Tensor`
///
/// To perform the deserialization, a `serialize::InputArchive` is constructed,
/// and all arguments after the `value` are forwarded to its `load_from` method.
/// For example, you can pass a filename, or an `istream`.
///
/// \rst
/// .. code-block:: cpp
///
/// torch::nn::Linear model(3, 4);
/// torch::load(model, "model.pt");
///
/// torch::optim::SGD sgd(model->parameters(), 0.9); // 0.9 is learning rate
/// std::istringstream stream("...");
/// torch::load(sgd, stream);
///
/// auto tensor = torch::ones({3, 4});
/// torch::load(tensor, "my_tensor.pt");
/// \endrst
template <typename Value, typename... LoadFromArgs>
void load(Value& value, LoadFromArgs&&... args) {
serialize::InputArchive archive;
archive.load_from(std::forward<LoadFromArgs>(args)...);
archive >> value;
}
/// Deserializes the given `tensor_vec` of type `std::vector<torch::Tensor>`.
///
/// To perform the deserialization, a `serialize::InputArchive` is constructed,
/// and all arguments after the `tensor_vec` are forwarded to its `load_from`
/// method.
/// For example, you can pass a filename, or an `istream`.
///
/// \rst
/// .. code-block:: cpp
///
/// std::vector<torch::Tensor> tensor_vec;
/// torch::load(tensor_vec, "my_tensor_vec.pt");
///
/// std::vector<torch::Tensor> tensor_vec;
/// std::istringstream stream("...");
/// torch::load(tensor_vec, stream);
/// \endrst
template <typename... LoadFromArgs>
void load(std::vector<torch::Tensor>& tensor_vec, LoadFromArgs&&... args) {
serialize::InputArchive archive;
archive.load_from(std::forward<LoadFromArgs>(args)...);
// NOTE: The number of elements in the serialized `std::vector<torch::Tensor>`
// is not known ahead of time, so we need a while-loop to increment the index,
// and use `archive.try_read(...)` to check whether we have reached the end of
// the serialized `std::vector<torch::Tensor>`.
size_t index = 0;
torch::Tensor value;
while (archive.try_read(std::to_string(index), value)) {
tensor_vec.push_back(std::move(value));
value = torch::Tensor();
index++;
}
}
} // namespace torch
```
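`pickle_save` and `pickle_load` are the only entry points above without an inline example; they round-trip an `IValue` through raw pickle bytes rather than through an archive (a minimal sketch):

```cpp
#include <torch/torch.h>

int main() {
  torch::IValue value(torch::ones({2, 2}));
  std::vector<char> bytes = torch::pickle_save(value);
  torch::IValue restored = torch::pickle_load(bytes);
  auto tensor = restored.toTensor();  // equal to the original ones({2, 2})
}
```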
|
======================================================================================================================================================
SOURCE CODE FILE: archive.h
LINES: 1
SIZE: 0.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\serialize\archive.h
ENCODING: utf-8
```h
#pragma once
#include <torch/serialize/input-archive.h>
#include <torch/serialize/output-archive.h>
```
|
============================================================================================================================================================
SOURCE CODE FILE: input-archive.h
LINES: 1
SIZE: 3.98 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\serialize\input-archive.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/Device.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/api/module.h>
#include <torch/types.h>
#include <optional>
#include <iosfwd>
#include <memory>
#include <string>
#include <utility>
namespace at {
class Tensor;
} // namespace at
namespace torch {
using at::Tensor;
namespace jit {
struct Module;
} // namespace jit
} // namespace torch
namespace torch::serialize {
/// A recursive representation of tensors that can be deserialized from a file
/// or stream. In most cases, users should not have to interact with this class,
/// and should instead use `torch::load`.
class TORCH_API InputArchive final {
public:
/// Default-constructs the `InputArchive`.
InputArchive();
// Move is allowed.
InputArchive(InputArchive&&) = default;
InputArchive& operator=(InputArchive&&) = default;
// Copy is disallowed.
InputArchive(InputArchive&) = delete;
InputArchive& operator=(InputArchive&) = delete;
~InputArchive() = default;
/// Reads an `IValue` associated with a given `key`.
void read(const std::string& key, c10::IValue& ivalue);
/// Reads an `IValue` associated with a given `key`. If there is no `IValue`
/// associated with the `key`, this returns false, otherwise it returns true.
bool try_read(const std::string& key, c10::IValue& ivalue);
/// Reads a `tensor` associated with a given `key`. If there is no `tensor`
/// associated with the `key`, this returns false, otherwise it returns true.
/// If the tensor is expected to be a buffer (not differentiable), `is_buffer`
/// must be `true`.
bool try_read(const std::string& key, Tensor& tensor, bool is_buffer = false);
/// Reads a `tensor` associated with a given `key`.
/// If the tensor is expected to be a buffer (not differentiable), `is_buffer`
/// must be `true`.
void read(const std::string& key, Tensor& tensor, bool is_buffer = false);
/// Reads an `InputArchive` associated with a given `key`. If there is no
/// `InputArchive` associated with the `key`, this returns false, otherwise
/// it returns true.
bool try_read(const std::string& key, InputArchive& archive);
/// Reads an `InputArchive` associated with a given `key`.
/// The archive can thereafter be used for further deserialization of the
/// nested data.
void read(const std::string& key, InputArchive& archive);
/// Loads the `InputArchive` from a serialized representation stored in the
/// file at `filename`. Storages are remapped using the `device` option. If
/// no device is specified, the module is loaded to its original device.
void load_from(
const std::string& filename,
std::optional<torch::Device> device = std::nullopt);
/// Loads the `InputArchive` from a serialized representation stored in the
/// given `stream`. Storages are remapped using the `device` option. If
/// no device is specified, the module is loaded to its original device.
void load_from(
std::istream& stream,
std::optional<torch::Device> device = std::nullopt);
// Loads given the specified flat array.
void load_from(
const char* data,
size_t size,
std::optional<torch::Device> device = std::nullopt);
// Loads given the specified read and size functions.
void load_from(
const std::function<size_t(uint64_t pos, void* buf, size_t nbytes)>&
read_func,
const std::function<size_t(void)>& size_func,
std::optional<torch::Device> device = std::nullopt);
// Returns the vector of keys in the input archive.
std::vector<std::string> keys();
/// Forwards all arguments to `read()`.
/// Useful for generic code that can be re-used for both `InputArchive` and
/// `OutputArchive` (where `operator()` forwards to `write()`).
template <typename... Ts>
void operator()(Ts&&... ts) {
read(std::forward<Ts>(ts)...);
}
private:
jit::Module module_;
std::string hierarchy_prefix_;
};
} // namespace torch::serialize
```
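Putting the `read`/`try_read` overloads together: a minimal sketch that loads an archive from a file and probes for optional entries (the file and key names are hypothetical):

```cpp
#include <torch/torch.h>

int main() {
  torch::serialize::InputArchive archive;
  archive.load_from("checkpoint.pt", torch::Device(torch::kCPU));

  torch::Tensor weight;
  archive.read("weight", weight);  // throws if "weight" is missing

  torch::Tensor running_mean;
  if (archive.try_read("running_mean", running_mean, /*is_buffer=*/true)) {
    // present: loaded as a non-differentiable buffer
  }

  torch::serialize::InputArchive nested;
  if (archive.try_read("optimizer", nested)) {
    // continue deserializing hierarchical data from `nested`
  }
}
```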
|
=============================================================================================================================================================
SOURCE CODE FILE: output-archive.h
LINES: 1
SIZE: 2.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\serialize\output-archive.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/api/module.h>
#include <iosfwd>
#include <memory>
#include <string>
#include <utility>
namespace at {
class Tensor;
} // namespace at
namespace torch {
using at::Tensor;
namespace jit {
struct Module;
} // namespace jit
} // namespace torch
namespace torch::serialize {
class TORCH_API OutputArchive final {
public:
explicit OutputArchive(std::shared_ptr<jit::CompilationUnit> cu);
explicit OutputArchive()
: cu_(std::make_shared<jit::CompilationUnit>()),
module_("__torch__.Module", cu_) {}
// Move is allowed.
OutputArchive(OutputArchive&&) = default;
OutputArchive& operator=(OutputArchive&&) = default;
// Copy is disallowed.
OutputArchive(OutputArchive&) = delete;
OutputArchive& operator=(OutputArchive&) = delete;
std::shared_ptr<jit::CompilationUnit> compilation_unit() const {
return cu_;
}
/// Writes an `IValue` to the `OutputArchive`.
void write(const std::string& key, const c10::IValue& ivalue);
/// Writes a `(key, tensor)` pair to the `OutputArchive`, and marks it as
/// being or not being a buffer (non-differentiable tensor).
void write(
const std::string& key,
const Tensor& tensor,
bool is_buffer = false);
/// Writes a nested `OutputArchive` under the given `key` to this
/// `OutputArchive`.
void write(const std::string& key, OutputArchive& nested_archive);
/// Saves the `OutputArchive` into a serialized representation in a file at
/// `filename`.
void save_to(const std::string& filename);
/// Saves the `OutputArchive` into a serialized representation into the given
/// `stream`.
void save_to(std::ostream& stream);
/// Saves the `OutputArchive` into a serialized representation using the
/// given writer function.
void save_to(const std::function<size_t(const void*, size_t)>& func);
/// Forwards all arguments to `write()`.
/// Useful for generic code that can be re-used for both `OutputArchive` and
/// `InputArchive` (where `operator()` forwards to `read()`).
template <typename... Ts>
void operator()(Ts&&... ts) {
write(std::forward<Ts>(ts)...);
}
private:
std::shared_ptr<jit::CompilationUnit> cu_;
jit::Module module_;
};
} // namespace torch::serialize
```
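The writing side mirrors `InputArchive`: IValues, tensors (optionally marked as buffers), and nested archives are written under string keys and flushed with `save_to` (a minimal sketch; the key names are arbitrary):

```cpp
#include <torch/torch.h>

int main() {
  torch::serialize::OutputArchive archive;  // default CompilationUnit
  archive.write("step", torch::IValue(static_cast<int64_t>(100)));
  archive.write("weight", torch::randn({4, 4}));
  archive.write("running_mean", torch::zeros({4}), /*is_buffer=*/true);

  // Nested archives can share the parent's compilation unit.
  torch::serialize::OutputArchive nested(archive.compilation_unit());
  nested.write("lr", torch::IValue(0.01));
  archive.write("optimizer", nested);

  archive.save_to("checkpoint.pt");
}
```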
|
=====================================================================================================================================================
SOURCE CODE FILE: tensor.h
LINES: 1
SIZE: 0.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\serialize\tensor.h
ENCODING: utf-8
```h
#pragma once
#include <torch/serialize/archive.h>
#include <torch/types.h>
namespace torch {
inline serialize::OutputArchive& operator<<(
serialize::OutputArchive& archive,
const Tensor& tensor) {
archive.write("0", tensor);
return archive;
}
inline serialize::InputArchive& operator>>(
serialize::InputArchive& archive,
Tensor& tensor) {
archive.read("0", tensor);
return archive;
}
} // namespace torch
```
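Because `operator<<` stores the tensor under the fixed key `"0"`, a tensor written with `torch::save` can also be recovered manually from an `InputArchive` (a minimal sketch):

```cpp
#include <torch/torch.h>

int main() {
  auto t = torch::rand({3});
  torch::save(t, "t.pt");  // uses the operator<< overload above

  torch::serialize::InputArchive archive;
  archive.load_from("t.pt");
  torch::Tensor u;
  archive.read("0", u);  // the same key operator>> reads
}
```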
|
===========================================================================================================================================
SOURCE CODE FILE: sparse.h
LINES: 1
SIZE: 0.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\sparse.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
```
|
============================================================================================================================================
SOURCE CODE FILE: special.h
LINES: 1
SIZE: 38.76 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\special.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <torch/types.h>
namespace torch::special {
/// Computes the natural logarithm of the absolute value of the gamma function
/// See https://pytorch.org/docs/main/special.html#torch.special.gammaln.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::gammaln(t);
/// ```
inline Tensor gammaln(const Tensor& self) {
return torch::special_gammaln(self);
}
inline Tensor& gammaln_out(Tensor& result, const Tensor& self) {
return torch::special_gammaln_out(result, self);
}
/// Computes the regularized lower incomplete gamma function
/// See https://pytorch.org/docs/main/special.html#torch.special.gammainc.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// auto s = torch::randn(128, torch::kDouble);
/// torch::special::gammainc(s, t);
/// ```
inline Tensor gammainc(const Tensor& self, const Tensor& other) {
return torch::special_gammainc(self, other);
}
inline Tensor& gammainc_out(
Tensor& result,
const Tensor& self,
const Tensor& other) {
return torch::special_gammainc_out(result, self, other);
}
/// Computes the regularized upper incomplete gamma function
/// See https://pytorch.org/docs/main/special.html#torch.special.gammaincc.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// auto s = torch::randn(128, torch::kDouble);
/// torch::special::gammaincc(s, t);
/// ```
inline Tensor gammaincc(const Tensor& self, const Tensor& other) {
return torch::special_gammaincc(self, other);
}
inline Tensor& gammaincc_out(
Tensor& result,
const Tensor& self,
const Tensor& other) {
return torch::special_gammaincc_out(result, self, other);
}
/// Computes the multivariate log-gamma function with dimension `p`, elementwise
/// See https://pytorch.org/docs/main/special.html#torch.special.multigammaln.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::multigammaln(t, 1);
/// ```
inline Tensor multigammaln(const Tensor& self, int64_t p) {
return torch::special_multigammaln(self, p);
}
inline Tensor& multigammaln_out(Tensor& result, const Tensor& self, int64_t p) {
return torch::special_multigammaln_out(result, self, p);
}
/// Computes the nth derivative of the digamma function on the input.
/// See https://pytorch.org/docs/main/special.html#torch.special.polygamma.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::polygamma(2, t);
/// ```
inline Tensor polygamma(int64_t n, const Tensor& self) {
return torch::special_polygamma(n, self);
}
inline Tensor& polygamma_out(Tensor& result, int64_t n, const Tensor& self) {
return torch::special_polygamma_out(result, n, self);
}
/// Computes the logarithmic derivative of the gamma function on input
/// See https://pytorch.org/docs/main/special.html#torch.special.psi
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::psi(t);
/// ```
inline Tensor psi(const Tensor& self) {
return torch::special_psi(self);
}
inline Tensor& psi_out(Tensor& result, const Tensor& self) {
return torch::special_psi_out(result, self);
}
/// Computes the logarithmic derivative of the gamma function on input
/// See https://pytorch.org/docs/main/special.html#torch.special.digamma
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::digamma(t);
/// ```
inline Tensor digamma(const Tensor& self) {
return torch::special_digamma(self);
}
inline Tensor& digamma_out(Tensor& result, const Tensor& self) {
return torch::special_digamma_out(result, self);
}
/// Computes entropy of input, elementwise
/// See https://pytorch.org/docs/main/special.html#torch.special.entr.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::entr(t);
/// ```
inline Tensor entr(const Tensor& self) {
return torch::special_entr(self);
}
inline Tensor& entr_out(Tensor& result, const Tensor& self) {
return torch::special_entr_out(result, self);
}
/// Computes the error function
/// See https://pytorch.org/docs/main/special.html#torch.special.erf.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::erf(t);
/// ```
inline Tensor erf(const Tensor& self) {
return torch::special_erf(self);
}
inline Tensor& erf_out(Tensor& result, const Tensor& self) {
return torch::special_erf_out(result, self);
}
/// Computes the complementary error function
/// See https://pytorch.org/docs/main/special.html#torch.special.erfc.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::erfc(t);
/// ```
inline Tensor erfc(const Tensor& self) {
return torch::special_erfc(self);
}
inline Tensor& erfc_out(Tensor& result, const Tensor& self) {
return torch::special_erfc_out(result, self);
}
/// Computes the scaled complementary error function
/// See https://pytorch.org/docs/main/special.html#torch.special.erfcx.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::erfcx(t);
/// ```
inline Tensor erfcx(const Tensor& self) {
return torch::special_erfcx(self);
}
inline Tensor& erfcx_out(Tensor& result, const Tensor& self) {
return torch::special_erfcx_out(result, self);
}
/// Computes the inverse error function
/// See https://pytorch.org/docs/main/special.html#torch.special.erfinv.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::erfinv(t);
/// ```
inline Tensor erfinv(const Tensor& self) {
return torch::special_erfinv(self);
}
inline Tensor& erfinv_out(Tensor& result, const Tensor& self) {
return torch::special_erfinv_out(result, self);
}
/// Computes the log of summed exponentials of each row of the input in the
/// given dimension `dim`. See
/// https://pytorch.org/docs/main/special.html#torch.special.logsumexp.
///
/// Example:
/// ```
/// auto t = torch::randn({3, 3});
/// torch::special::logsumexp(t, 1);
/// ```
inline Tensor logsumexp(const Tensor& self, IntArrayRef dims, bool keepdim) {
return torch::special_logsumexp(self, dims, keepdim);
}
inline Tensor& logsumexp_out(
Tensor& result,
const Tensor& self,
IntArrayRef dims,
bool keepdim) {
return torch::special_logsumexp_out(result, self, dims, keepdim);
}
/// Computes the argument, x, for which the area under the Gaussian probability
/// density function (integrated from minus infinity to x) is equal to input,
/// elementwise. See
/// https://pytorch.org/docs/main/special.html#torch.special.ndtri
///
/// Example:
/// ```
/// auto t = torch::rand(128, torch::kDouble);
/// torch::special::ndtri(t);
/// ```
inline Tensor ndtri(const Tensor& self) {
return torch::special_ndtri(self);
}
inline Tensor& ndtri_out(Tensor& result, const Tensor& self) {
return torch::special_ndtri_out(result, self);
}
/// Computes the log of the area under the standard Gaussian probability
/// density function, integrated from minus infinity to :attr:`input`,
/// elementwise. See
/// https://pytorch.org/docs/main/special.html#torch.special.log_ndtr
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::log_ndtr(t);
/// ```
inline Tensor log_ndtr(const Tensor& self) {
return torch::special_log_ndtr(self);
}
inline Tensor& log_ndtr_out(Tensor& result, const Tensor& self) {
return torch::special_log_ndtr_out(result, self);
}
/// Computes the logit of input, elementwise.
/// See https://pytorch.org/docs/main/special.html#torch.special.logit.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::logit(t);
/// ```
inline Tensor logit(const Tensor& self) {
return torch::special_logit(self);
}
inline Tensor& logit_out(Tensor& result, const Tensor& self) {
return torch::special_logit_out(result, self);
}
/// Computes the expit (also known as the logistic sigmoid function) of input,
/// elementwise. See
/// https://pytorch.org/docs/main/special.html#torch.special.expit.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::expit(t);
/// ```
inline Tensor expit(const Tensor& self) {
return torch::special_expit(self);
}
inline Tensor& expit_out(Tensor& result, const Tensor& self) {
return torch::special_expit_out(result, self);
}
/// Computes the base two exponential function of :attr:`input`, elementwise
/// See https://pytorch.org/docs/main/special.html#torch.special.exp2.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::exp2(t);
/// ```
inline Tensor exp2(const Tensor& self) {
return torch::special_exp2(self);
}
inline Tensor& exp2_out(Tensor& result, const Tensor& self) {
return torch::special_exp2_out(result, self);
}
/// Computes the exponential of the elements minus 1, elementwise
/// See https://pytorch.org/docs/main/special.html#torch.special.expm1.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::expm1(t);
/// ```
inline Tensor expm1(const Tensor& self) {
return torch::special_expm1(self);
}
inline Tensor& expm1_out(Tensor& result, const Tensor& self) {
return torch::special_expm1_out(result, self);
}
/// Computes x * log(y) for inputs, elementwise
/// See https://pytorch.org/docs/main/special.html#torch.special.xlogy.
///
/// Example:
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto y = torch::randn(128, torch::kDouble);
/// torch::special::xlogy(x, y);
/// ```
inline Tensor xlogy(const Tensor& self, const Tensor& other) {
return torch::special_xlogy(self, other);
}
inline Tensor xlogy(const Scalar& self, const Tensor& other) {
return torch::special_xlogy(self, other);
}
inline Tensor xlogy(const Tensor& self, const Scalar& other) {
return torch::special_xlogy(self, other);
}
inline Tensor& xlogy_out(
Tensor& result,
const Tensor& self,
const Tensor& other) {
return torch::special_xlogy_out(result, self, other);
}
inline Tensor& xlogy_out(
Tensor& result,
const Scalar& self,
const Tensor& other) {
return torch::special_xlogy_out(result, self, other);
}
inline Tensor& xlogy_out(
Tensor& result,
const Tensor& self,
const Scalar& other) {
return torch::special_xlogy_out(result, self, other);
}
/// Computes x * log1p(y) for inputs, elementwise
/// See https://pytorch.org/docs/main/special.html#torch.special.xlog1py.
///
/// Example:
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto y = torch::randn(128, torch::kDouble);
/// torch::special::xlog1py(x, y);
/// ```
inline Tensor xlog1py(const Tensor& self, const Tensor& other) {
return torch::special_xlog1py(self, other);
}
inline Tensor xlog1py(const Scalar& self, const Tensor& other) {
return torch::special_xlog1py(self, other);
}
inline Tensor xlog1py(const Tensor& self, const Scalar& other) {
return torch::special_xlog1py(self, other);
}
inline Tensor& xlog1py_out(
Tensor& result,
const Tensor& self,
const Tensor& other) {
return torch::special_xlog1py_out(result, self, other);
}
inline Tensor& xlog1py_out(
Tensor& result,
const Scalar& self,
const Tensor& other) {
return torch::special_xlog1py_out(result, self, other);
}
inline Tensor& xlog1py_out(
Tensor& result,
const Tensor& self,
const Scalar& other) {
return torch::special_xlog1py_out(result, self, other);
}
/// Computes Hurwitz Zeta function for inputs, elementwise
/// See https://pytorch.org/docs/main/special.html#torch.special.zeta.
///
/// Example:
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto y = torch::randn(128, torch::kDouble);
/// torch::special::zeta(x, y);
/// ```
inline Tensor zeta(const Tensor& self, const Tensor& other) {
return torch::special_zeta(self, other);
}
inline Tensor zeta(const Scalar& self, const Tensor& other) {
return torch::special_zeta(self, other);
}
inline Tensor zeta(const Tensor& self, const Scalar& other) {
return torch::special_zeta(self, other);
}
inline Tensor& zeta_out(
Tensor& result,
const Tensor& self,
const Tensor& other) {
return torch::special_zeta_out(result, self, other);
}
inline Tensor& zeta_out(
Tensor& result,
const Scalar& self,
const Tensor& other) {
return torch::special_zeta_out(result, self, other);
}
inline Tensor& zeta_out(
Tensor& result,
const Tensor& self,
const Scalar& other) {
return torch::special_zeta_out(result, self, other);
}
/// Computes the zeroth order modified Bessel function of the first kind of
/// :attr:`input`, elementwise. See
/// https://pytorch.org/docs/main/special.html#torch.special.i0.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::i0(t);
/// ```
inline Tensor i0(const Tensor& self) {
return torch::special_i0(self);
}
inline Tensor& i0_out(Tensor& result, const Tensor& self) {
return torch::special_i0_out(result, self);
}
/// Computes the area under the standard Gaussian probability density function,
/// integrated from minus infinity to :attr:`input`, elementwise.
/// See https://pytorch.org/docs/main/special.html#torch.special.ndtr.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::ndtr(t);
/// ```
inline Tensor ndtr(const Tensor& self) {
return torch::special_ndtr(self);
}
inline Tensor& ndtr_out(Tensor& result, const Tensor& self) {
return torch::special_ndtr_out(result, self);
}
/// Computes the exponentially scaled zeroth order modified Bessel function of
/// the first kind. See
/// https://pytorch.org/docs/main/special.html#torch.special.i0e.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::i0e(t);
/// ```
inline Tensor i0e(const Tensor& self) {
return torch::special_i0e(self);
}
inline Tensor& i0e_out(Tensor& result, const Tensor& self) {
return torch::special_i0e_out(result, self);
}
/// Computes the first order modified Bessel function of the first kind.
/// See https://pytorch.org/docs/main/special.html#torch.special.i1.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::i1(t);
/// ```
inline Tensor i1(const Tensor& self) {
return torch::special_i1(self);
}
inline Tensor& i1_out(Tensor& result, const Tensor& self) {
return torch::special_i1_out(result, self);
}
/// Computes the exponentially scaled first order modified Bessel function of
/// the first kind. See
/// https://pytorch.org/docs/main/special.html#torch.special.i1e.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::i1e(t);
/// ```
inline Tensor i1e(const Tensor& self) {
return torch::special_i1e(self);
}
inline Tensor& i1e_out(Tensor& result, const Tensor& self) {
return torch::special_i1e_out(result, self);
}
/// Computes the sinc of :attr:`input`, elementwise.
/// See https://pytorch.org/docs/main/special.html#torch.special.sinc.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::sinc(t);
/// ```
inline Tensor sinc(const Tensor& self) {
return torch::special_sinc(self);
}
inline Tensor& sinc_out(Tensor& result, const Tensor& self) {
return torch::special_sinc_out(result, self);
}
/// Rounds the elements of the input.
/// See https://pytorch.org/docs/main/special.html#torch.special.round.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::round(t);
/// ```
inline Tensor round(const Tensor& self) {
return torch::special_round(self);
}
inline Tensor& round_out(Tensor& result, const Tensor& self) {
return torch::special_round_out(result, self);
}
/// Computes log(1 + x) of the input, elementwise.
/// See https://pytorch.org/docs/main/special.html#torch.special.log1p.
///
/// Example:
/// ```
/// auto t = torch::randn(128, torch::kDouble);
/// torch::special::log1p(t);
/// ```
inline Tensor log1p(const Tensor& self) {
return torch::special_log1p(self);
}
inline Tensor& log1p_out(Tensor& result, const Tensor& self) {
return torch::special_log1p_out(result, self);
}
/// Computes the softmax of the input followed by a logarithm, i.e.
/// log(softmax(x)).
/// See https://pytorch.org/docs/main/special.html#torch.special.log_softmax.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, torch::kDouble);
/// torch::special::log_softmax(t, /*dim=*/0, /*dtype=*/std::nullopt);
/// ```
inline Tensor log_softmax(
const Tensor& self,
int64_t dim,
std::optional<ScalarType> dtype) {
return torch::special_log_softmax(self, dim, dtype);
}
/// Computes the softmax of the input along a given dimension.
/// See https://pytorch.org/docs/main/special.html#torch.special.softmax.
///
/// Example:
/// ```
/// auto t = torch::randn({128, 128}, torch::kDouble);
/// torch::special::softmax(t, /*dim=*/0, /*dtype=*/std::nullopt);
/// ```
inline Tensor softmax(
const Tensor& self,
int64_t dim,
std::optional<ScalarType> dtype) {
return torch::special_softmax(self, dim, dtype);
}
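// Sketch of the optional `dtype` argument: it upcasts the computation, so a
// reduced-precision input can be accumulated in double, e.g.
//
//   auto t = torch::randn({128, 128});
//   auto s = torch::special::softmax(t, /*dim=*/0, torch::kFloat64);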
/// Airy function Ai.
///
/// See https://pytorch.org/docs/main/special.html#torch.special.airy_ai.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::airy_ai(x);
/// ```
inline Tensor airy_ai(const Tensor& x) {
return torch::special_airy_ai(x);
}
inline Tensor& airy_ai_out(Tensor& y, const Tensor& x) {
return torch::special_airy_ai_out(y, x);
}
/// Bessel function of the first kind of order 0.
///
/// See https://pytorch.org/docs/main/special.html#torch.special.bessel_j0.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::bessel_j0(x);
/// ```
inline Tensor bessel_j0(const Tensor& self) {
return torch::special_bessel_j0(self);
}
inline Tensor& bessel_j0_out(Tensor& result, const Tensor& self) {
return torch::special_bessel_j0_out(result, self);
}
/// Bessel function of the first kind of order 1.
///
/// See https://pytorch.org/docs/main/special.html#torch.special.bessel_j1.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::bessel_j1(x);
/// ```
inline Tensor bessel_j1(const Tensor& self) {
return torch::special_bessel_j1(self);
}
inline Tensor& bessel_j1_out(Tensor& result, const Tensor& self) {
return torch::special_bessel_j1_out(result, self);
}
/// Bessel function of the second kind of order 0.
///
/// See https://pytorch.org/docs/main/special.html#torch.special.bessel_y0.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::bessel_y0(x);
/// ```
inline Tensor bessel_y0(const Tensor& self) {
return torch::special_bessel_y0(self);
}
inline Tensor& bessel_y0_out(Tensor& result, const Tensor& self) {
return torch::special_bessel_y0_out(result, self);
}
/// Bessel function of the second kind of order 1.
///
/// See https://pytorch.org/docs/main/special.html#torch.special.bessel_y1.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::bessel_y1(x);
/// ```
inline Tensor bessel_y1(const Tensor& self) {
return torch::special_bessel_y1(self);
}
inline Tensor& bessel_y1_out(Tensor& result, const Tensor& self) {
return torch::special_bessel_y1_out(result, self);
}
/// Chebyshev polynomial of the first kind.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.chebyshev_polynomial_t.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::chebyshev_polynomial_t(x, n);
/// ```
inline Tensor chebyshev_polynomial_t(const Tensor& x, const Tensor& n) {
return torch::special_chebyshev_polynomial_t(x, n);
}
inline Tensor chebyshev_polynomial_t(const Scalar& x, const Tensor& n) {
return torch::special_chebyshev_polynomial_t(x, n);
}
inline Tensor chebyshev_polynomial_t(const Tensor& x, const Scalar& n) {
return torch::special_chebyshev_polynomial_t(x, n);
}
inline Tensor& chebyshev_polynomial_t_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_chebyshev_polynomial_t_out(output, x, n);
}
inline Tensor& chebyshev_polynomial_t_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_chebyshev_polynomial_t_out(output, x, n);
}
inline Tensor& chebyshev_polynomial_t_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_chebyshev_polynomial_t_out(output, x, n);
}
/// Chebyshev polynomial of the second kind.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.chebyshev_polynomial_u.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::chebyshev_polynomial_u(x, n);
/// ```
inline Tensor chebyshev_polynomial_u(const Tensor& x, const Tensor& n) {
return torch::special_chebyshev_polynomial_u(x, n);
}
inline Tensor chebyshev_polynomial_u(const Scalar& x, const Tensor& n) {
return torch::special_chebyshev_polynomial_u(x, n);
}
inline Tensor chebyshev_polynomial_u(const Tensor& x, const Scalar& n) {
return torch::special_chebyshev_polynomial_u(x, n);
}
inline Tensor& chebyshev_polynomial_u_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_chebyshev_polynomial_u_out(output, x, n);
}
inline Tensor& chebyshev_polynomial_u_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_chebyshev_polynomial_u_out(output, x, n);
}
inline Tensor& chebyshev_polynomial_u_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_chebyshev_polynomial_u_out(output, x, n);
}
/// Chebyshev polynomial of the third kind.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.chebyshev_polynomial_v.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::chebyshev_polynomial_v(x, n);
/// ```
inline Tensor chebyshev_polynomial_v(const Tensor& x, const Tensor& n) {
return torch::special_chebyshev_polynomial_v(x, n);
}
inline Tensor chebyshev_polynomial_v(const Scalar& x, const Tensor& n) {
return torch::special_chebyshev_polynomial_v(x, n);
}
inline Tensor chebyshev_polynomial_v(const Tensor& x, const Scalar& n) {
return torch::special_chebyshev_polynomial_v(x, n);
}
inline Tensor& chebyshev_polynomial_v_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_chebyshev_polynomial_v_out(output, x, n);
}
inline Tensor& chebyshev_polynomial_v_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_chebyshev_polynomial_v_out(output, x, n);
}
inline Tensor& chebyshev_polynomial_v_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_chebyshev_polynomial_v_out(output, x, n);
}
/// Chebyshev polynomial of the fourth kind.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.chebyshev_polynomial_w.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::chebyshev_polynomial_w(x, n);
/// ```
inline Tensor chebyshev_polynomial_w(const Tensor& x, const Tensor& n) {
return torch::special_chebyshev_polynomial_w(x, n);
}
inline Tensor chebyshev_polynomial_w(const Scalar& x, const Tensor& n) {
return torch::special_chebyshev_polynomial_w(x, n);
}
inline Tensor chebyshev_polynomial_w(const Tensor& x, const Scalar& n) {
return torch::special_chebyshev_polynomial_w(x, n);
}
inline Tensor& chebyshev_polynomial_w_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_chebyshev_polynomial_w_out(output, x, n);
}
inline Tensor& chebyshev_polynomial_w_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_chebyshev_polynomial_w_out(output, x, n);
}
inline Tensor& chebyshev_polynomial_w_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_chebyshev_polynomial_w_out(output, x, n);
}
/// Physicist’s Hermite polynomial.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.hermite_polynomial_h.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::hermite_polynomial_h(x, n);
/// ```
inline Tensor hermite_polynomial_h(const Tensor& x, const Tensor& n) {
return torch::special_hermite_polynomial_h(x, n);
}
inline Tensor hermite_polynomial_h(const Scalar& x, const Tensor& n) {
return torch::special_hermite_polynomial_h(x, n);
}
inline Tensor hermite_polynomial_h(const Tensor& x, const Scalar& n) {
return torch::special_hermite_polynomial_h(x, n);
}
inline Tensor& hermite_polynomial_h_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_hermite_polynomial_h_out(output, x, n);
}
inline Tensor& hermite_polynomial_h_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_hermite_polynomial_h_out(output, x, n);
}
inline Tensor& hermite_polynomial_h_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_hermite_polynomial_h_out(output, x, n);
}
/// Probabilist’s Hermite polynomial.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.hermite_polynomial_he.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::hermite_polynomial_he(x, n);
/// ```
inline Tensor hermite_polynomial_he(const Tensor& x, const Tensor& n) {
return torch::special_hermite_polynomial_he(x, n);
}
inline Tensor hermite_polynomial_he(const Scalar& x, const Tensor& n) {
return torch::special_hermite_polynomial_he(x, n);
}
inline Tensor hermite_polynomial_he(const Tensor& x, const Scalar& n) {
return torch::special_hermite_polynomial_he(x, n);
}
inline Tensor& hermite_polynomial_he_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_hermite_polynomial_he_out(output, x, n);
}
inline Tensor& hermite_polynomial_he_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_hermite_polynomial_he_out(output, x, n);
}
inline Tensor& hermite_polynomial_he_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_hermite_polynomial_he_out(output, x, n);
}
/// Laguerre polynomial.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.laguerre_polynomial_l.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::laguerre_polynomial_l(x, n);
/// ```
inline Tensor laguerre_polynomial_l(const Tensor& x, const Tensor& n) {
return torch::special_laguerre_polynomial_l(x, n);
}
inline Tensor laguerre_polynomial_l(const Scalar& x, const Tensor& n) {
return torch::special_laguerre_polynomial_l(x, n);
}
inline Tensor laguerre_polynomial_l(const Tensor& x, const Scalar& n) {
return torch::special_laguerre_polynomial_l(x, n);
}
inline Tensor& laguerre_polynomial_l_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_laguerre_polynomial_l_out(output, x, n);
}
inline Tensor& laguerre_polynomial_l_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_laguerre_polynomial_l_out(output, x, n);
}
inline Tensor& laguerre_polynomial_l_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_laguerre_polynomial_l_out(output, x, n);
}
/// Legendre polynomial.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.legendre_polynomial_p.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::legendre_polynomial_p(x, n);
/// ```
inline Tensor legendre_polynomial_p(const Tensor& x, const Tensor& n) {
return torch::special_legendre_polynomial_p(x, n);
}
inline Tensor legendre_polynomial_p(const Scalar& x, const Tensor& n) {
return torch::special_legendre_polynomial_p(x, n);
}
inline Tensor legendre_polynomial_p(const Tensor& x, const Scalar& n) {
return torch::special_legendre_polynomial_p(x, n);
}
inline Tensor& legendre_polynomial_p_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_legendre_polynomial_p_out(output, x, n);
}
inline Tensor& legendre_polynomial_p_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_legendre_polynomial_p_out(output, x, n);
}
inline Tensor& legendre_polynomial_p_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_legendre_polynomial_p_out(output, x, n);
}
/// Modified Bessel function of the first kind of order 0.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.modified_bessel_i0.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::modified_bessel_i0(x);
/// ```
inline Tensor modified_bessel_i0(const Tensor& self) {
return torch::special_modified_bessel_i0(self);
}
inline Tensor& modified_bessel_i0_out(Tensor& result, const Tensor& self) {
return torch::special_modified_bessel_i0_out(result, self);
}
/// Modified Bessel function of the first kind of order 1.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.modified_bessel_i1.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::modified_bessel_i1(x);
/// ```
inline Tensor modified_bessel_i1(const Tensor& self) {
return torch::special_modified_bessel_i1(self);
}
inline Tensor& modified_bessel_i1_out(Tensor& result, const Tensor& self) {
return torch::special_modified_bessel_i1_out(result, self);
}
/// Modified Bessel function of the second kind of order 0.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.modified_bessel_k0.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::modified_bessel_k0(x);
/// ```
inline Tensor modified_bessel_k0(const Tensor& self) {
return torch::special_modified_bessel_k0(self);
}
inline Tensor& modified_bessel_k0_out(Tensor& result, const Tensor& self) {
return torch::special_modified_bessel_k0_out(result, self);
}
/// Modified Bessel function of the second kind of order 1.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.modified_bessel_k1.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::modified_bessel_k1(x);
/// ```
inline Tensor modified_bessel_k1(const Tensor& self) {
return torch::special_modified_bessel_k1(self);
}
inline Tensor& modified_bessel_k1_out(Tensor& result, const Tensor& self) {
return torch::special_modified_bessel_k1_out(result, self);
}
/// Scaled modified Bessel function of the second kind of order 0.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.scaled_modified_bessel_k0.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::scaled_modified_bessel_k0(x);
/// ```
inline Tensor scaled_modified_bessel_k0(const Tensor& x) {
return torch::special_scaled_modified_bessel_k0(x);
}
inline Tensor& scaled_modified_bessel_k0_out(Tensor& y, const Tensor& x) {
return torch::special_scaled_modified_bessel_k0_out(y, x);
}
/// Scaled modified Bessel function of the second kind of order 1.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.scaled_modified_bessel_k1.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::scaled_modified_bessel_k1(x);
/// ```
inline Tensor scaled_modified_bessel_k1(const Tensor& x) {
return torch::special_scaled_modified_bessel_k1(x);
}
inline Tensor& scaled_modified_bessel_k1_out(Tensor& y, const Tensor& x) {
return torch::special_scaled_modified_bessel_k1_out(y, x);
}
/// Shifted Chebyshev polynomial of the first kind.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.shifted_chebyshev_polynomial_t.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::shifted_chebyshev_polynomial_t(x, n);
/// ```
inline Tensor shifted_chebyshev_polynomial_t(const Tensor& x, const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_t(x, n);
}
inline Tensor shifted_chebyshev_polynomial_t(const Scalar& x, const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_t(x, n);
}
inline Tensor shifted_chebyshev_polynomial_t(const Tensor& x, const Scalar& n) {
return torch::special_shifted_chebyshev_polynomial_t(x, n);
}
inline Tensor& shifted_chebyshev_polynomial_t_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_t_out(output, x, n);
}
inline Tensor& shifted_chebyshev_polynomial_t_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_t_out(output, x, n);
}
inline Tensor& shifted_chebyshev_polynomial_t_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_shifted_chebyshev_polynomial_t_out(output, x, n);
}
/// Shifted Chebyshev polynomial of the second kind.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.shifted_chebyshev_polynomial_u.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::shifted_chebyshev_polynomial_u(x, n);
/// ```
inline Tensor shifted_chebyshev_polynomial_u(const Tensor& x, const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_u(x, n);
}
inline Tensor shifted_chebyshev_polynomial_u(const Scalar& x, const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_u(x, n);
}
inline Tensor shifted_chebyshev_polynomial_u(const Tensor& x, const Scalar& n) {
return torch::special_shifted_chebyshev_polynomial_u(x, n);
}
inline Tensor& shifted_chebyshev_polynomial_u_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_u_out(output, x, n);
}
inline Tensor& shifted_chebyshev_polynomial_u_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_u_out(output, x, n);
}
inline Tensor& shifted_chebyshev_polynomial_u_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_shifted_chebyshev_polynomial_u_out(output, x, n);
}
/// Shifted Chebyshev polynomial of the third kind.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.shifted_chebyshev_polynomial_v.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::shifted_chebyshev_polynomial_v(x, n);
/// ```
inline Tensor shifted_chebyshev_polynomial_v(const Tensor& x, const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_v(x, n);
}
inline Tensor shifted_chebyshev_polynomial_v(const Scalar& x, const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_v(x, n);
}
inline Tensor shifted_chebyshev_polynomial_v(const Tensor& x, const Scalar& n) {
return torch::special_shifted_chebyshev_polynomial_v(x, n);
}
inline Tensor& shifted_chebyshev_polynomial_v_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_v_out(output, x, n);
}
inline Tensor& shifted_chebyshev_polynomial_v_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_v_out(output, x, n);
}
inline Tensor& shifted_chebyshev_polynomial_v_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_shifted_chebyshev_polynomial_v_out(output, x, n);
}
/// Shifted Chebyshev polynomial of the fourth kind.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.shifted_chebyshev_polynomial_w.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
/// auto n = torch::randn(128, torch::kDouble);
///
/// torch::special::shifted_chebyshev_polynomial_w(x, n);
/// ```
inline Tensor shifted_chebyshev_polynomial_w(const Tensor& x, const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_w(x, n);
}
inline Tensor shifted_chebyshev_polynomial_w(const Scalar& x, const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_w(x, n);
}
inline Tensor shifted_chebyshev_polynomial_w(const Tensor& x, const Scalar& n) {
return torch::special_shifted_chebyshev_polynomial_w(x, n);
}
inline Tensor& shifted_chebyshev_polynomial_w_out(
Tensor& output,
const Tensor& x,
const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_w_out(output, x, n);
}
inline Tensor& shifted_chebyshev_polynomial_w_out(
Tensor& output,
const Scalar& x,
const Tensor& n) {
return torch::special_shifted_chebyshev_polynomial_w_out(output, x, n);
}
inline Tensor& shifted_chebyshev_polynomial_w_out(
Tensor& output,
const Tensor& x,
const Scalar& n) {
return torch::special_shifted_chebyshev_polynomial_w_out(output, x, n);
}
/// Spherical Bessel function of the first kind of order 0.
///
/// See
/// https://pytorch.org/docs/main/special.html#torch.special.spherical_bessel_j0.
///
/// Example:
///
/// ```
/// auto x = torch::randn(128, torch::kDouble);
///
/// torch::special::spherical_bessel_j0(x);
/// ```
inline Tensor spherical_bessel_j0(const Tensor& x) {
return torch::special_spherical_bessel_j0(x);
}
inline Tensor& spherical_bessel_j0_out(Tensor& y, const Tensor& x) {
return torch::special_spherical_bessel_j0_out(y, x);
}
} // namespace torch::special
```
|
==========================================================================================================================================
SOURCE CODE FILE: torch.h
LINES: 1
SIZE: 0.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\torch.h
ENCODING: utf-8
```h
#pragma once
#include <torch/all.h>
#ifdef TORCH_API_INCLUDE_EXTENSION_H
#include <torch/extension.h>
#endif // defined(TORCH_API_INCLUDE_EXTENSION_H)
```
|
==========================================================================================================================================
SOURCE CODE FILE: types.h
LINES: 1
SIZE: 2.59 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\types.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <optional>
#include <torch/csrc/autograd/generated/variable_factories.h>
#include <torch/csrc/autograd/variable.h>
// TODO: These don't really belong here but torchvision builds in CI need them
// Remove once the torchvision version being compiled in CI is updated
#include <ATen/core/dispatch/Dispatcher.h>
#include <torch/library.h>
namespace torch {
// NOTE [ Exposing declarations in `at::` to `torch::` ]
//
// The following line `using namespace at;` is responsible for exposing all
// declarations in `at::` namespace to `torch::` namespace.
//
// According to the rules laid out in
// https://en.cppreference.com/w/cpp/language/qualified_lookup, section
// "Namespace members":
// ```
// Qualified lookup within the scope of a namespace N first considers all
// declarations that are located in N and all declarations that are located in
// the inline namespace members of N (and, transitively, in their inline
// namespace members). If there are no declarations in that set then it
// considers declarations in all namespaces named by using-directives found in N
// and in all transitive inline namespace members of N.
// ```
//
// This means that if both `at::` and `torch::` namespaces have a function with
// the same signature (e.g. both `at::func()` and `torch::func()` exist), after
// `namespace torch { using namespace at; }`, when we call `torch::func()`, the
// `func()` function defined in `torch::` namespace will always be called, and
// the `func()` function defined in `at::` namespace is always hidden.
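//
// Illustration with hypothetical names: if both at::foo() and torch::foo()
// are declared, qualified lookup finds the declaration in `torch::` first:
//
//   torch::foo(); // calls torch::foo(); the at::foo() pulled in below is hidden
//   at::foo();    // the ATen version remains reachable when fully qualified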
using namespace at; // NOLINT
#if !defined(FBCODE_CAFFE2) && !defined(C10_NODEPRECATED)
using std::nullopt; // NOLINT
using std::optional; // NOLINT
#endif
using Dtype = at::ScalarType;
/// Fixed width dtypes.
constexpr auto kUInt8 = at::kByte;
constexpr auto kInt8 = at::kChar;
constexpr auto kInt16 = at::kShort;
constexpr auto kInt32 = at::kInt;
constexpr auto kInt64 = at::kLong;
constexpr auto kUInt16 = at::kUInt16;
constexpr auto kUInt32 = at::kUInt32;
constexpr auto kUInt64 = at::kUInt64;
constexpr auto kFloat16 = at::kHalf;
constexpr auto kFloat32 = at::kFloat;
constexpr auto kFloat64 = at::kDouble;
/// Rust-style short dtypes.
constexpr auto kU8 = kUInt8;
constexpr auto kU16 = kUInt16;
constexpr auto kU32 = kUInt32;
constexpr auto kU64 = kUInt64;
constexpr auto kI8 = kInt8;
constexpr auto kI16 = kInt16;
constexpr auto kI32 = kInt32;
constexpr auto kI64 = kInt64;
constexpr auto kF16 = kFloat16;
constexpr auto kF32 = kFloat32;
constexpr auto kF64 = kFloat64;
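// Usage sketch: the aliases are constexpr copies of the ATen enumerators, so
// they can appear anywhere a ScalarType (or TensorOptions) is expected:
//
//   static_assert(torch::kF32 == at::kFloat);
//   auto t = torch::zeros({2, 3}, torch::kF64);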
} // namespace torch
```
|
==========================================================================================================================================
SOURCE CODE FILE: utils.h
LINES: 1
SIZE: 3.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\utils.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Parallel.h>
#include <ATen/record_function.h>
#include <torch/csrc/api/include/torch/types.h>
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/autograd/profiler.h>
// NOLINTBEGIN(misc-unused-using-decls)
namespace torch {
/// A RAII, thread-local guard that disables gradient calculation.
///
/// Disabling gradient calculation is useful for inference, when you are sure
/// that you will not call `at::Tensor::backward`. It will reduce memory
/// consumption for computations that would otherwise have `requires_grad() ==
/// true`.
///
/// In this mode, the result of every computation will have
/// `requires_grad() == false`, even when the inputs have `requires_grad() ==
/// true`.
///
/// This context manager is thread-local; it will not affect computation
/// in other threads.
///
/// Example:
/// @code
/// auto x = torch::tensor({1.}, torch::requires_grad());
/// {
/// torch::NoGradGuard no_grad;
/// auto y = x * 2;
/// std::cout << y.requires_grad() << std::endl; // prints `false`
/// }
/// {
/// auto doubler = [](torch::Tensor x) {
/// torch::NoGradGuard no_grad;
/// return x * 2;
/// };
/// auto z = doubler(x);
/// std::cout << z.requires_grad() << std::endl; // prints `false`
/// }
/// @endcode
using NoGradGuard = at::NoGradGuard;
/// A RAII, thread-local guard that sets gradient calculation to on or off.
///
/// ``AutoGradMode`` will enable or disable grads based on its argument
/// `enabled`.
///
/// This context manager is thread-local; it will not affect computation
/// in other threads.
///
/// \param enabled: Flag whether to enable grad (``true``), or disable
/// (``false``). This can be used to conditionally enable
/// gradients.
///
/// Example:
/// @code
/// auto x = torch::tensor({1.}, torch::requires_grad());
/// {
/// torch::AutoGradMode enable_grad(true);
/// auto y = x * 2;
/// std::cout << y.requires_grad() << std::endl; // prints `true`
/// }
/// {
/// torch::AutoGradMode enable_grad(false);
/// auto y = x * 2;
/// std::cout << y.requires_grad() << std::endl; // prints `false`
/// }
/// @endcode
using AutoGradMode = at::AutoGradMode;
/// Sets the global random seed for all newly created CPU and CUDA tensors.
using at::manual_seed;
// Called during new thread initialization
using at::init_num_threads;
// Returns the number of threads used in the parallel region.
using at::get_num_threads;
// Sets the number of threads to be used in the parallel region.
using at::set_num_threads;
// Returns the number of threads used for inter-op parallelism.
using at::get_num_interop_threads;
// Sets the number of threads to be used for inter-op parallelism.
using at::set_num_interop_threads;
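// Usage sketch: configure parallelism once near program start; the inter-op
// pool size must be set before any inter-op work runs:
//
//   torch::init_num_threads();
//   torch::set_num_threads(4);          // intra-op pool
//   torch::set_num_interop_threads(2);  // inter-op pool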
// Returns true if both t1 and t2 are undefined, or both are defined and equal
inline bool equal_if_defined(const Tensor& t1, const Tensor& t2) {
return (
(!t1.defined() && !t2.defined()) ||
(t1.defined() && t2.defined() && torch::equal(t1, t2)));
}
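// Sketch: undefined tensors compare equal to each other, so optional state
// (e.g. unset buffers) can be compared uniformly:
//
//   torch::Tensor a, b;                    // both undefined
//   equal_if_defined(a, b);                // true
//   equal_if_defined(a, torch::ones(1));   // false: only one side is defined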
// RecordFunction API
using at::addGlobalCallback;
using at::addThreadLocalCallback;
using at::CallbackHandle;
using at::clearCallbacks;
using at::clearGlobalCallbacks;
using at::clearThreadLocalCallbacks;
using at::DisableRecordFunctionGuard;
using at::enableRecordFunction;
using at::hasCallbacks;
using at::hasGlobalCallbacks;
using at::hasThreadLocalCallbacks;
using at::isRecordFunctionEnabled;
using at::RecordFunction;
using at::RecordFunctionCallback;
using at::RecordFunctionGuard;
using at::removeCallback;
} // namespace torch
// NOLINTEND(misc-unused-using-decls)
```
|
============================================================================================================================================
SOURCE CODE FILE: version.h
LINES: 1
SIZE: 0.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\version.h
ENCODING: utf-8
```h
#pragma once
/// Indicates the major version of LibTorch.
#define TORCH_VERSION_MAJOR 2
/// Indicates the minor version of LibTorch.
#define TORCH_VERSION_MINOR 7
/// Indicates the patch version of LibTorch.
#define TORCH_VERSION_PATCH 0
/// Indicates the ABI version tag of LibTorch.
#define TORCH_VERSION_ABI_TAG 0
/// Indicates the version of LibTorch as a string literal.
#define TORCH_VERSION \
"2.7.0"
/// Indicates the ABI version of LibTorch as a single uint64.
/// [ byte ][ byte ][ byte ][ byte ][ byte ][ byte ][ byte ][ byte ]
/// [ MAJ ][ MIN ][ PATCH][ ABI TAG ]
#define TORCH_ABI_VERSION \
(uint64_t)TORCH_VERSION_MAJOR << 56 | \
(uint64_t)TORCH_VERSION_MINOR << 48 | \
(uint64_t)TORCH_VERSION_PATCH << 40 | \
TORCH_VERSION_ABI_TAG << 0
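// Worked example for the values above (2.7.0, ABI tag 0):
//   (2ULL << 56) | (7ULL << 48) | (0ULL << 40) | 0 == 0x0207000000000000,
// so e.g. the major version can be recovered as TORCH_ABI_VERSION >> 56.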
```
|
========================================================================================================================================
SOURCE CODE FILE: xpu.h
LINES: 1
SIZE: 0.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\api\include\torch\xpu.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <cstddef>
#include <cstdint>
namespace torch::xpu {
/// Returns the number of XPU devices available.
size_t TORCH_API device_count();
/// Returns true if at least one XPU device is available.
bool TORCH_API is_available();
/// Sets the seed for the current GPU.
void TORCH_API manual_seed(uint64_t seed);
/// Sets the seed for all available GPUs.
void TORCH_API manual_seed_all(uint64_t seed);
/// Waits for all kernels in all streams on an XPU device to complete.
void TORCH_API synchronize(int64_t device_index);
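// Usage sketch: guard XPU calls on device availability, e.g.
//
//   if (torch::xpu::is_available()) {
//     torch::xpu::manual_seed_all(42);
//     torch::xpu::synchronize(/*device_index=*/0);
//   }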
} // namespace torch::xpu
```
|
===========================================================================================================================================
SOURCE CODE FILE: FunctionsManual.h
LINES: 1
SIZE: 32.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\FunctionsManual.h
ENCODING: utf-8
```h
#pragma once
// NB: Must be at the top of file to avoid including the deprecated "math.h".
// https://stackoverflow.com/questions/6563810/m-pi-works-with-math-h-but-not-with-cmath-in-visual-studio
#ifdef _MSC_VER
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <cmath>
#endif
#include <ATen/ATen.h>
#include <torch/csrc/autograd/generated/Functions.h>
namespace torch::autograd::generated::details {
extern const char* kCudnnDoubleBackwardMsg;
// A simple way to imperatively compute index ranges for slots
// that have been flattened
struct TORCH_API IndexRangeGenerator {
IndexRange range(size_t range_size) {
i += range_size;
return {i - range_size, i};
}
size_t size() {
return i;
}
private:
size_t i = 0;
};
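// Usage sketch: ranges are handed out consecutively starting at zero, e.g.
//
//   IndexRangeGenerator gen;
//   auto r0 = gen.range(3); // {0, 3}
//   auto r1 = gen.range(2); // {3, 5}
//   gen.size();             // 5 slots total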
TORCH_API Tensor toNonOptFwGrad(const std::optional<Tensor>& t);
TORCH_API Tensor toNonOptPrimal(const std::optional<Tensor>& t);
TORCH_API Tensor toNonOptTensor(const std::optional<Tensor>& t);
TORCH_API inline std::optional<Tensor> wrap_opt_if(
const Tensor& t,
const bool cond) {
using OptTensor = std::optional<Tensor>;
return cond ? OptTensor(t) : static_cast<OptTensor>(std::nullopt);
}
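// Sketch: materialize the optional only when the gradient is actually needed,
// e.g. (with a hypothetical mask) wrap_opt_if(self, grad_input_mask[0]) yields
// std::nullopt whenever grad_input_mask[0] is false.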
TORCH_API Tensor
apply_loss_reduction(const Tensor& unreduced, int64_t reduction);
TORCH_API bool any_variable_defined(const variable_list& variables);
TORCH_API void copy_range(
variable_list& out,
IndexRange range,
const at::Tensor& t);
TORCH_API void copy_range(
variable_list& out,
IndexRange range,
at::ArrayRef<at::Tensor> t);
TORCH_API at::Tensor copysign_tensor_self_backward(
const Tensor& grad,
const Tensor& self,
const Tensor& result);
TORCH_API at::Tensor not_implemented(const char* name, const char* reason = "");
TORCH_API std::vector<Tensor> not_implemented_list(
const char* name,
const char* reason = "");
at::Tensor handle_r_to_c(ScalarType self_st, Tensor gradient_result);
at::Tensor maybe_multiply(const at::Tensor& t, const at::Scalar& s);
int64_t _safe_size(IntArrayRef sizes, IntArrayRef dim);
Tensor restore_reduced_dims(
const Tensor& output,
IntArrayRef dims,
bool keepdim);
Tensor scale_grad_by_count(
const Tensor& grad,
const Tensor& mask,
IntArrayRef dims);
at::Tensor norm_backward(
const at::Tensor& grad,
const at::Tensor& self,
const std::optional<at::Scalar>& p_,
const at::Tensor& norm);
at::Tensor norm_backward(
at::Tensor grad,
const at::Tensor& self,
const std::optional<at::Scalar>& p_,
at::Tensor norm,
at::IntArrayRef dim,
bool keepdim);
Tensor norm_jvp(
const Tensor& self_p,
const Tensor& self_t,
const std::optional<Scalar>& p_,
Tensor norm,
IntArrayRef dim,
bool keepdim);
Tensor norm_jvp(
const Tensor& grad,
const Tensor& self,
const std::optional<Scalar>& p_,
Tensor norm);
Tensor _nested_from_padded_backward(
const Tensor& grad,
const Tensor& input,
const bool do_transform_0213);
std::tuple<Tensor, Tensor, Tensor> linear_double_backward(
const variable_list& grads,
const Tensor& self,
const Tensor& grad_output,
const Tensor& weight);
Tensor linalg_vector_norm_jvp(
const Tensor& self_p,
const Tensor& self_t,
const Scalar& scalar_ord,
Tensor norm,
const at::OptionalIntArrayRef& opt_dim,
bool keepdim);
at::Tensor linalg_vector_norm_backward(
at::Tensor grad,
const at::Tensor& self,
const at::Scalar& ord,
at::Tensor norm,
const at::OptionalIntArrayRef& opt_dim,
bool keepdim);
at::Tensor pow_backward(
at::Tensor grad,
const at::Tensor& self,
const at::Scalar& exponent_);
at::Tensor pow_backward_self(
const at::Tensor& grad,
const at::Tensor& self,
const at::Tensor& exponent);
at::Tensor pow_backward_exponent(
const at::Tensor& grad,
const at::Tensor& self,
const at::Tensor& exponent,
const at::Tensor& result);
at::Tensor pow_backward_exponent(
const at::Tensor& grad,
const at::Scalar& base,
const at::Tensor& exponent,
const at::Tensor& result);
at::Tensor angle_backward(const at::Tensor& grad, const at::Tensor& self);
template <typename T>
at::Tensor mul_tensor_backward(const Tensor& grad, T other, ScalarType self_st);
template <typename T>
at::Tensor div_tensor_self_backward(
const Tensor& grad,
T other,
ScalarType self_st,
const std::optional<std::string_view>& rounding_mode = std::nullopt);
at::Tensor div_tensor_other_backward(
const Tensor& grad,
const Tensor& self,
const Tensor& other,
const std::optional<std::string_view>& rounding_mode = std::nullopt);
at::Tensor mvlgamma_backward(
const at::Tensor& grad,
const at::Tensor& self,
int64_t p);
at::Tensor permute_backwards(const at::Tensor& grad, at::IntArrayRef fwd_dims);
at::Tensor rad2deg_backward(const at::Tensor& grad);
at::Tensor deg2rad_backward(const at::Tensor& grad);
at::Tensor unsqueeze_multiple(
const at::Tensor& t,
at::OptionalIntArrayRef opt_dim,
size_t n_dims);
at::Tensor sum_backward(
const at::Tensor& grad,
at::SymIntArrayRef sizes,
at::OptionalIntArrayRef opt_dims,
bool keepdim);
at::Tensor sum_backward(
const at::Tensor& grad,
c10::SymIntArrayRef sizes,
c10::IntArrayRef dims,
bool keepdim);
at::Tensor nansum_backward(
const at::Tensor& grad,
const at::Tensor& self,
at::OptionalIntArrayRef dims,
bool keepdim);
std::vector<int64_t> reverse_list(const at::IntArrayRef list);
std::vector<c10::SymInt> reverse_list_symint(const c10::SymIntArrayRef list);
at::Tensor reverse_dim(const at::Tensor& t, int64_t dim);
at::Tensor prod_safe_zeros_backward(
const at::Tensor& grad,
const at::Tensor& inp,
int64_t dim);
at::Tensor prod_backward(
const at::Tensor& grad,
const at::Tensor& input,
const at::Tensor& result);
at::Tensor prod_backward(
at::Tensor grad,
const at::Tensor& input,
at::Tensor result,
int64_t dim,
bool keepdim);
at::Tensor solve_jvp(
const Tensor& X,
const Tensor& A,
const Tensor& dA,
const Tensor& dB);
at::Tensor solve_backward_self(
const at::Tensor& grad,
const at::Tensor& self,
const at::Tensor& A);
at::Tensor solve_backward_A(
const at::Tensor& grad,
const at::Tensor& self,
const at::Tensor& A,
const at::Tensor& solution);
at::Tensor cumsum_backward(const at::Tensor& grad, int64_t dim);
at::Tensor logsumexp_backward(
at::Tensor grad,
const at::Tensor& self,
at::Tensor result,
at::IntArrayRef dim,
bool keepdim);
at::Tensor logsumexp_jvp(
const at::Tensor& self_p,
const at::Tensor& self_t,
IntArrayRef dim,
bool keepdim);
at::Tensor safe_logsumexp_jvp(
const at::Tensor& self_p,
const at::Tensor& self_t,
IntArrayRef dim,
bool keepdim);
at::Tensor logcumsumexp_backward(
at::Tensor grad,
const at::Tensor& self,
const at::Tensor& result,
int64_t dim);
at::Tensor logcumsumexp_jvp(
const at::Tensor& self_p,
const at::Tensor& self_t,
int64_t dim);
at::Tensor unbind_backward(const variable_list& grads, int64_t dim);
at::Tensor unbind_backward_nested(
const variable_list& grads,
const Tensor& nt_sizes,
int64_t dim,
const at::TensorOptions& options);
at::Tensor unbind_backward_nested_jagged(
const variable_list& grads,
const Tensor& self,
int64_t dim);
at::Tensor unsqueeze_to(const at::Tensor& self, c10::SymIntArrayRef sym_sizes);
at::Tensor unsqueeze_to(
const at::Tensor& self,
int64_t dim,
c10::SymIntArrayRef sym_sizes);
at::Tensor unsqueeze_to(
const at::Tensor& self,
IntArrayRef dim,
c10::SymIntArrayRef sym_sizes);
std::vector<at::Tensor> cat_tensors_backward(
const at::Tensor& grad,
const std::vector<std::vector<c10::SymInt>>& sizes,
const std::vector<ScalarType>& dtypes,
int64_t dim);
std::vector<at::Tensor> stack_tensors_backward(
const at::Tensor& grad,
int64_t dim,
const std::vector<ScalarType>& dtypes);
std::vector<at::Tensor> block_diag_backward(
const at::Tensor& grad,
const std::vector<std::vector<int64_t>>& sizes,
const std::vector<ScalarType>& dtypes);
at::Tensor clamp_backward(
const at::Tensor& grad,
const at::Tensor& self,
const std::optional<at::Scalar>& min,
const std::optional<at::Scalar>& max);
at::Tensor clamp_backward(
const at::Tensor& grad,
const at::Tensor& self,
const at::Tensor& min,
const at::Tensor& max);
std::tuple<at::Tensor, at::Tensor> clamp_backward_min_max(
const at::Tensor& grad,
const at::Tensor& self,
const at::Tensor& min,
const at::Tensor& max,
const std::array<bool, 2>&);
at::Tensor clamp_jvp(
const Tensor& self_p,
const Tensor& self_t,
const Tensor& min_p,
const Tensor& min_t,
const Tensor& max_p,
const Tensor& max_t);
at::SymIntArrayRef strides_or_error(
const Tensor& input,
std::string_view const& input_name);
at::Tensor mm_mat1_backward(
const Tensor& grad,
const Tensor& mat2,
at::SymIntArrayRef mat1_sizes,
at::SymIntArrayRef mat1_strides,
c10::Layout mat1_layout,
const Scalar& alpha);
at::Tensor mm_mat2_backward(
const at::Tensor& grad,
const at::Tensor& mat1,
at::SymIntArrayRef sizes,
at::SymIntArrayRef strides,
c10::Layout layout,
const at::Scalar& alpha);
at::Tensor mm_mat1_sparse_backward(
const at::Tensor& grad,
const at::Tensor& mat1,
const at::Tensor& mat2,
const at::Scalar& alpha);
std::tuple<Tensor, Tensor, Tensor> sparse_sampled_addmm_backward(
const Tensor& grad,
const Tensor& self,
const std::optional<Tensor>& mat1,
const std::optional<Tensor>& mat2,
const Scalar& alpha,
const Scalar& beta,
const std::array<bool, 3>& grad_input_mask);
at::Tensor sparse_mask_backward(
const at::Tensor& grad,
const at::Tensor& mask,
c10::Layout self_layout);
at::Tensor sparse_sparse_matmul_backward(
const at::Tensor& grad,
const at::Tensor& mat1,
const at::Tensor& mat2,
int64_t grad_order);
at::Tensor renorm_backward(
const at::Tensor& grad,
const at::Tensor& self,
const at::Scalar& p,
int64_t dim,
const at::Scalar& maxnorm);
at::Tensor renorm_jvp(
const at::Tensor& self_p,
const at::Tensor& self_t,
const at::Scalar& p,
int64_t dim,
const at::Scalar& maxnorm);
at::Tensor repeat_backward(
at::Tensor grad,
at::SymIntArrayRef repeats,
at::SymIntArrayRef input_shape);
at::Tensor _fused_dropout_backward(
const at::Tensor& grad,
const at::Tensor& mask,
double p1m);
at::Tensor infinitely_differentiable_native_dropout_backward(
const at::Tensor& grad,
const at::Tensor& mask,
double scale);
at::Tensor native_dropout_double_backward(
const at::Tensor& ggI,
const at::Tensor& grad,
const at::Tensor& mask,
double scale);
at::Tensor evenly_distribute_backward(
const at::Tensor& grad,
const at::Tensor& input,
const at::Tensor& value);
Tensor sgn_backward(const Tensor& x, const Tensor& gx, const Tensor& sgn);
Tensor masked_fill_backward(const Tensor& grad, const Tensor& mask);
at::Tensor var_backward(
at::Tensor grad,
const at::Tensor& self,
at::OptionalIntArrayRef dim,
const std::optional<c10::Scalar>& correction,
bool keepdim);
at::Tensor var_jvp(
const at::Tensor& self_t,
const at::Tensor& self_p,
const at::Tensor& result,
at::OptionalIntArrayRef dim_opt,
const std::optional<c10::Scalar>& correction,
bool keepdim);
at::Tensor std_backward(
const at::Tensor& result,
const at::Tensor& grad,
const at::Tensor& self,
at::OptionalIntArrayRef dim,
const std::optional<c10::Scalar>& correction,
bool keepdim);
Tensor mean_backward(
const Tensor& grad,
c10::SymIntArrayRef shape,
at::OptionalIntArrayRef opt_dim,
c10::SymInt numel,
bool keepdim);
Tensor var_mean_backward(
const Tensor& gvar,
const Tensor& gmean,
const Tensor& self,
at::OptionalIntArrayRef dim_opt,
const std::optional<c10::Scalar>& correction,
bool keepdim);
Tensor std_mean_backward(
const Tensor& gstd,
const Tensor& gmean,
const Tensor& self,
const Tensor& std,
at::OptionalIntArrayRef dim_opt,
const std::optional<c10::Scalar>& correction,
bool keepdim);
at::Tensor cholesky_backward(
const at::Tensor& grad,
bool upper,
const at::Tensor& L);
at::Tensor cholesky_jvp(
const at::Tensor& input_tangent,
const at::Tensor& L,
bool upper);
at::Tensor cholesky_inverse_backward(
const at::Tensor& grad,
const at::Tensor& L,
bool upper,
const at::Tensor& inverse);
at::Tensor cholesky_inverse_jvp(
const at::Tensor& F,
const at::Tensor& dF,
const at::Tensor& X,
bool upper);
Tensor pinv_jvp(const Tensor& A, const Tensor& pinvA, const Tensor& dA);
Tensor pinv_backward(const Tensor& grad, const Tensor& pinvA, const Tensor& A);
Tensor chunk_backward_nested(
const std::vector<torch::autograd::Variable>& grads,
const Tensor& self,
int64_t chunks,
int64_t dim);
at::Tensor split_with_sizes_backward(
const std::vector<torch::autograd::Variable>& grads,
c10::SymIntArrayRef split_sizes,
int64_t dim,
c10::SymIntArrayRef sizes,
const at::TensorOptions& options);
at::Tensor _nested_split_with_sizes_backward(
const std::vector<torch::autograd::Variable>& grads,
c10::SymIntArrayRef split_sizes,
int64_t dim,
const Tensor& nt_sizes,
const at::TensorOptions& options);
at::Tensor split_backward(
const std::vector<torch::autograd::Variable>& grads,
const c10::SymInt& split_size,
int64_t dim,
c10::SymIntArrayRef sizes,
const at::TensorOptions& options);
at::Tensor max_pool_double_backward(
const at::Tensor& grad,
const at::Tensor& indices,
int dim);
at::Tensor error_for_max_pool2d_double_backward();
at::Tensor glu_double_backward(
const at::Tensor& grad,
const at::Tensor& grad_output,
const at::Tensor& input,
int64_t dim);
at::Tensor glu_double_backward_grad_output(
const at::Tensor& grad,
const at::Tensor& input,
int64_t dim);
at::Tensor infinitely_differentiable_silu_backward(
const at::Tensor& grad_output,
const at::Tensor& input);
at::Tensor infinitely_differentiable_mish_backward(
const at::Tensor& grad_output,
const at::Tensor& input);
Tensor infinitely_differentiable_logit_backward(
const Tensor& grad,
const Tensor& self,
std::optional<double> eps);
Tensor binary_cross_entropy_target_backward(
const Tensor& grad,
const Tensor& self,
const Tensor& target,
const std::optional<Tensor>& weight,
int64_t reduction);
Tensor binary_cross_entropy_double_backward_target(
const Tensor& grad,
const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
const std::optional<Tensor>& weight,
int64_t reduction);
Tensor binary_cross_entropy_with_logits_backward(
const Tensor& grad,
const Tensor& input,
const Tensor& target,
const std::optional<Tensor>& weight_opt,
const std::optional<Tensor>& pos_weight_opt,
int64_t reduction);
at::Tensor binary_cross_entropy_with_logits_target_backward(
const at::Tensor& grad_output,
const at::Tensor& self,
const at::Tensor& target,
const std::optional<at::Tensor>& weight,
const std::optional<at::Tensor>& pos_weight,
int64_t reduction);
at::Tensor log_sigmoid_double_backward(
const at::Tensor& grad,
const at::Tensor& input);
at::Tensor softmax_double_backward(
const at::Tensor& grad,
const at::Tensor& grad_output,
int dim,
const at::Tensor& output);
at::Tensor binary_cross_entropy_double_backward(
const at::Tensor& grad_output,
const at::Tensor& grad,
const at::Tensor& input,
const at::Tensor& target,
const std::optional<at::Tensor>& weight,
int64_t reduction);
at::Tensor binary_cross_entropy_double_backward_grad_output(
const at::Tensor& grad,
const at::Tensor& input,
const at::Tensor& target,
const std::optional<at::Tensor>& weight,
int64_t reduction);
at::Tensor smooth_l1_loss_double_backward(
const at::Tensor& grad,
const at::Tensor& input,
const at::Tensor& target,
int64_t reduction,
double beta);
at::Tensor huber_loss_double_backward(
const at::Tensor& grad,
const at::Tensor& input,
const at::Tensor& target,
int64_t reduction,
double delta);
at::Tensor huber_loss_double_backward_grad_output(
const at::Tensor& grad,
const at::Tensor& grad_output,
const at::Tensor& input,
const at::Tensor& target,
int64_t reduction,
double delta);
at::Tensor mse_loss_double_backward(
const at::Tensor& grad,
const at::Tensor& input,
int64_t reduction);
at::Tensor soft_margin_loss_double_backward(
const at::Tensor& grad,
const at::Tensor& input,
const at::Tensor& target,
int64_t reduction);
at::Tensor soft_margin_loss_double_backward_grad_output(
const at::Tensor& grad,
const at::Tensor& grad_output,
const at::Tensor& input,
const at::Tensor& target,
int64_t reduction);
at::Tensor softplus_double_backward(
const at::Tensor& grad,
const at::Tensor& input,
const at::Scalar& beta,
const at::Scalar& threshold);
std::tuple<at::Tensor, at::Tensor> slogdet_jvp(
const at::Tensor& LU,
const at::Tensor& pivots,
const at::Tensor& dA,
const at::Tensor& sign,
const bool use_A_T);
at::Tensor slogdet_backward(
const at::Tensor& grad_sign,
const at::Tensor& grad_logabsdet,
const at::Tensor& A,
const at::Tensor& signdet,
const at::Tensor& LU,
const at::Tensor& pivots);
at::Tensor log1p_backward(const at::Tensor& grad, const at::Tensor& self);
at::Tensor sinc_backward(const at::Tensor& grad, const at::Tensor& self);
at::Tensor sparse_constructor_values_backward(
const at::Tensor& sparse_grad_out,
const at::Tensor& indices);
at::Tensor embedding_dense_double_backward_symint(
const at::Tensor& grad,
const at::Tensor& indices,
const c10::SymInt& padding_idx);
at::Tensor index_backward(
at::Tensor zeros_like_self,
const torch::List<std::optional<Tensor>>& indices,
const at::Tensor& grad);
at::Tensor _cudnn_ctc_loss_backward(
const at::Tensor& grad_out,
const at::Tensor& loss,
const at::Tensor& raw_grad,
bool zero_infinity);
at::Tensor elu_double_backward(
const Tensor& grad,
const Tensor& grad_output,
const Scalar& alpha,
const Scalar& scale,
const Scalar& input_scale,
bool is_result,
const Tensor& self_or_result);
Tensor svd_backward(
const Tensor& gU,
const Tensor& gS,
const Tensor& gVh,
const Tensor& U,
const Tensor& S,
const Tensor& Vh);
std::tuple<Tensor, Tensor, Tensor> linalg_svd_jvp(
const Tensor& dA,
const Tensor& U,
const Tensor& S,
const Tensor& Vh,
const bool full_matrices);
Tensor slice_backward_wrapper(
const at::Tensor& grad,
const c10::SymIntArrayRef& input_sizes,
int64_t dim,
std::optional<c10::SymInt> start,
std::optional<c10::SymInt> end,
c10::SymInt step);
std::tuple<Tensor, Tensor> linalg_eig_jvp(
const Tensor& dA,
const Tensor& L,
const Tensor& V,
const bool is_hermitian);
Tensor linalg_eig_backward(
const Tensor& gL,
const Tensor& gV,
const Tensor& L,
const Tensor& V,
const bool is_hermitian,
const bool symeig_eigenvectors = true);
Tensor linalg_lstsq_solution_jvp(
const Tensor& A,
const Tensor& B_,
const Tensor& dA,
const Tensor& dB_);
Tensor linalg_lstsq_residuals_jvp(
const Tensor& A,
const Tensor& B_,
const Tensor& dA,
const Tensor& dB_,
const Tensor& X_,
const Tensor& L);
std::tuple<Tensor, Tensor> triangular_solve_backward(
const Tensor& grad_x,
const Tensor& grad_m,
const Tensor& b,
const Tensor& a,
const Tensor& x,
const bool upper,
const bool transpose,
const bool unitriangular,
std::array<bool, 2> output_mask);
Tensor triangular_solve_jvp(
const Tensor& X,
const Tensor& A,
const Tensor& dA,
const Tensor& dB,
const bool upper,
const bool transpose,
const bool unitriangular);
Tensor linalg_solve_triangular_forward_AD(
const Tensor& A_t,
const Tensor& B_t,
const Tensor& A,
const Tensor& X,
const bool upper,
const bool left,
const bool unitriangular);
std::tuple<Tensor, Tensor> linalg_solve_triangular_backward(
const Tensor& grad,
const Tensor& A,
const Tensor& X,
const bool upper,
const bool left,
const bool unitriangular,
std::array<bool, 2> output_mask);
std::tuple<Tensor, Tensor, Tensor> _trilinear_backward(
const Tensor& grad_out,
const std::optional<Tensor>& i1,
const std::optional<Tensor>& i2,
const std::optional<Tensor>& i3,
IntArrayRef expand1,
IntArrayRef expand2,
IntArrayRef expand3,
IntArrayRef sumdim,
std::array<bool, 3> grad_mask);
std::tuple<Tensor, Tensor> linalg_qr_jvp(
const Tensor& dA,
const Tensor& Q,
const Tensor& R,
const std::string_view mode);
Tensor linalg_qr_backward(
const Tensor& gQ,
const Tensor& gR,
const Tensor& Q,
const Tensor& R,
const std::string_view mode);
Tensor linalg_matrix_exp_differential(
const Tensor& self,
const Tensor& grad,
bool adjoint);
std::tuple<Tensor, Tensor, Tensor> batchnorm_double_backward(
const Tensor& input,
const std::optional<Tensor>& gamma,
const Tensor& ggI,
const Tensor& ggG,
const Tensor& ggB,
const Tensor& gO,
const std::optional<Tensor>& running_mean,
const std::optional<Tensor>& running_var,
bool training,
double eps,
const std::optional<Tensor>& save_mean,
const std::optional<Tensor>& save_invstd,
std::array<bool, 3> output_mask);
std::tuple<Tensor, Tensor> _euclidean_dist_backward(
const Tensor& grad,
const Tensor& x1,
const Tensor& x2,
const Tensor& res);
Tensor fft_backward(
const Tensor& self,
const Tensor& grad,
int64_t signal_ndim,
bool complex_input,
bool complex_output,
bool inverse,
IntArrayRef checked_signal_sizes,
int64_t normalization,
bool onesided,
IntArrayRef output_sizes);
Tensor fft_r2c_backward(
const Tensor& grad,
at::IntArrayRef dim,
int64_t normalization,
bool onesided,
const c10::SymInt& last_dim_size);
Tensor fft_c2r_backward(
const Tensor& grad,
IntArrayRef dim,
int64_t normalization);
Tensor constant_pad_nd_backward(const Tensor& grad, c10::SymIntArrayRef pad);
std::tuple<Tensor, Tensor> cholesky_solve_backward(
const Tensor& grad_x,
const Tensor& self,
const Tensor& input2,
const Tensor& result,
const bool upper,
std::array<bool, 2> output_mask);
Tensor cholesky_solve_jvp(
const Tensor& X,
const Tensor& U,
const Tensor& dU,
const Tensor& dB,
const bool upper);
std::tuple<Tensor, Tensor, Tensor>
infinitely_differentiable_native_group_norm_backward(
const Tensor& dY,
const Tensor& dmean,
const Tensor& drstd,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const std::optional<Tensor>& gamma,
c10::SymInt N,
const c10::SymInt& C,
c10::SymInt HxW,
int64_t group,
double eps,
std::array<bool, 3> grad_input_mask);
Tensor gelu_double_backward(
const Tensor& ggI,
const Tensor& gO,
const Tensor& input,
std::string_view approximate);
Tensor as_strided_backward(
Tensor grad,
const TensorGeometry& input_geometry,
c10::SymIntArrayRef sizes,
c10::SymIntArrayRef strides,
const std::optional<c10::SymInt>& storage_offset_);
Tensor as_strided_scatter_backward(
const Tensor& grad,
const TensorGeometry& input_geometry,
const TensorGeometry& src_geometry,
c10::SymIntArrayRef sizes,
c10::SymIntArrayRef strides,
std::optional<c10::SymInt> storage_offset);
std::tuple<Tensor, Tensor> atan2_backward(
const Tensor& grad,
const Tensor& self,
const Tensor& other,
std::array<bool, 2> output_mask);
Tensor amaxamin_jvp(
const Tensor& x,
const Tensor& dx,
const Tensor& result,
IntArrayRef dim,
bool keepdim);
std::tuple<Tensor, Tensor, Tensor> layer_norm_double_backward(
const Tensor& input,
const std::optional<Tensor>& gamma,
const Tensor& ggI,
const Tensor& ggG,
const Tensor& ggB,
const Tensor& gO,
const Tensor& save_mean,
const Tensor& save_invstd,
c10::SymIntArrayRef normalized_shape,
std::array<bool, 3> output_mask);
std::tuple<Tensor, Tensor> householder_product_backward(
const Tensor& grad,
const Tensor& result,
const Tensor& input,
const Tensor& tau,
const bool flip_order = false);
Tensor householder_product_jvp(
const Tensor& dV,
const Tensor& dtau,
const Tensor& prod,
const Tensor& V,
const Tensor& tau);
std::tuple<Tensor, Tensor, Tensor> ormqr_backward(
const Tensor& grad,
const Tensor& result,
const Tensor& self,
const Tensor& tau,
const Tensor& other,
bool left,
bool transpose,
std::array<bool, 3> grad_output_mask);
std::tuple<Tensor, Tensor> polar_backward(
const Tensor& grad,
const Tensor& result);
Tensor i1_backward(
const Tensor& grad,
const Tensor& self,
const Tensor& result);
Tensor i1e_backward(
const Tensor& grad,
const Tensor& self,
const Tensor& result);
Tensor linalg_lu_solve_LU(
const Tensor& grad,
const Tensor& LU,
const Tensor& pivots,
const Tensor& X,
const bool left,
const bool adjoint);
Tensor linalg_lu_solve_jvp(
const Tensor& X,
const Tensor& LU,
const Tensor& pivots,
const Tensor& dLU,
const Tensor& dB,
const bool left,
const bool adjoint);
std::tuple<Tensor, Tensor> linalg_solve_backward(
const Tensor& gX,
const Tensor& X,
const Tensor& A,
const Tensor& LU,
const Tensor& pivots,
const bool left,
const bool B_requires_grad);
Tensor linalg_solve_jvp(
const Tensor& dA,
const Tensor& dB,
const Tensor& X,
const Tensor& LU,
const Tensor& pivots,
const bool left,
const bool use_A_T);
Tensor lu_unpack_backward(
const Tensor& L_grad,
const Tensor& U_grad,
const c10::SymInt& m,
const c10::SymInt& n);
Tensor linalg_det_backward(
const Tensor& grad,
const Tensor& det,
const Tensor& A,
const Tensor& LU,
const Tensor& pivots);
Tensor linalg_det_jvp(
const Tensor& dA,
const Tensor& det,
const Tensor& LU,
const Tensor& pivots,
const bool use_A_T);
std::tuple<Tensor, Tensor> linalg_lstsq_backward(
const Tensor& gX_,
const Tensor& gL,
const Tensor& A,
const Tensor& B_,
const Tensor& X_,
const std::array<bool, 2>& grad_input_mask);
Tensor linalg_lu_backward(
const Tensor& L_grad,
const Tensor& U_grad,
const Tensor& P,
const Tensor& L,
const Tensor& U,
const bool pivot);
std::tuple<Tensor, Tensor> linalg_lu_jvp(
const Tensor& dA,
const Tensor& P,
const Tensor& L,
const Tensor& U,
const bool pivot);
Tensor lu_factor_ex_backward(
const Tensor& grad,
const Tensor& LU,
const Tensor& pivs,
const bool pivot);
Tensor lu_factor_ex_jvp(
const Tensor& dX,
const Tensor& LU,
const Tensor& pivs,
const bool pivot);
Tensor batch_norm_jvp(
const Tensor& input_p,
const Tensor& input_t,
const Tensor& weight_p,
const Tensor& weight_t,
const Tensor& bias_p,
const Tensor& bias_t,
const std::optional<Tensor>& running_mean,
const std::optional<Tensor>& running_var,
const Tensor& saved_mean,
const Tensor& saved_invstd,
bool train,
double eps);
Tensor layer_norm_jvp(
const Tensor& input_p,
const Tensor& input_t,
const Tensor& weight_p,
const Tensor& weight_t,
const Tensor& bias_p,
const Tensor& bias_t,
const Tensor& saved_mean,
const Tensor& saved_invstd,
c10::SymIntArrayRef normalized_shape);
Tensor group_norm_jvp(
const Tensor& input_p,
const Tensor& input_t,
const Tensor& weight_p,
const Tensor& weight_t,
const Tensor& bias_p,
const Tensor& bias_t,
const Tensor& saved_mean,
const Tensor& saved_invstd,
int64_t groups);
Tensor group_norm_mean_jvp(
const Tensor& input_t,
const Tensor& mean_p,
int64_t groups);
Tensor group_norm_invstd_jvp(
const Tensor& input_p,
const Tensor& input_t,
const Tensor& mean_p,
const Tensor& invstd_p,
int64_t groups);
Tensor convolution_jvp(
const Tensor& input_p,
const Tensor& input_t,
const Tensor& weight_p,
const Tensor& weight_t,
const Tensor& bias_p,
const Tensor& bias_t,
at::SymIntArrayRef stride,
at::SymIntArrayRef padding,
at::SymIntArrayRef dilation,
bool transposed,
at::SymIntArrayRef output_padding,
const c10::SymInt& groups);
Tensor _convolution_jvp(
const Tensor& input_p,
const Tensor& input_t,
const Tensor& weight_p,
const Tensor& weight_t,
const Tensor& bias_p,
const Tensor& bias_t,
at::SymIntArrayRef stride,
at::SymIntArrayRef padding,
at::SymIntArrayRef dilation,
bool transposed,
at::SymIntArrayRef output_padding,
const c10::SymInt& groups,
bool benchmark,
bool deterministic,
bool cudnn_enabled,
bool allow_tf32);
Tensor convolution_backward_jvp_grad_bias(
const Tensor& grad_out_t,
const Tensor& grad_bias);
Tensor cat_jvp(const at::ITensorListRef& tensors, int64_t dim);
Tensor block_diag_jvp(at::TensorList tensors);
Tensor stack_jvp(at::TensorList tensors, int64_t dim);
Tensor cumprod_jvp(
const Tensor& self_t,
const Tensor& self_p,
const Tensor& result,
int dim);
Tensor gather_with_keepdimed_indices(
const Tensor& input,
int64_t dim,
const Tensor& indices,
bool keepdim);
Tensor evenly_read_jvp(
const Tensor& fw_grad,
const Tensor& input,
const Tensor& value);
Tensor warn_backwards(const Tensor& grad_output);
std::tuple<Tensor, Tensor> _cudnn_convolution_backward(
const at::Tensor& self,
const at::Tensor& grad_output,
const at::Tensor& weight,
at::SymIntArrayRef padding,
at::SymIntArrayRef output_padding,
at::SymIntArrayRef stride,
at::SymIntArrayRef dilation,
bool transposed,
c10::SymInt groups,
::std::array<bool, 2> output_mask);
Tensor scatter_reduce_jvp(
const Tensor& self_p,
const Tensor& self_t,
int dim,
const Tensor& index,
const Tensor& src_p,
const Tensor& src_t,
std::string_view reduce,
bool include_self,
const Tensor& result);
std::tuple<Tensor, Tensor> scatter_reduce_backward(
const Tensor& grad,
const Tensor& self,
int dim,
const Tensor& index,
const Tensor& src,
std::string_view reduce,
bool include_self,
const Tensor& result);
Tensor _to_copy_backward(
const Tensor& grad,
const c10::TensorOptions& self_options);
std::tuple<Tensor, Tensor> index_reduce_backward(
const Tensor& grad,
const Tensor& self,
int dim,
const Tensor& index,
const Tensor& source,
std::string_view reduce,
bool include_self,
const Tensor& result);
Tensor take_backward(
const Tensor& grad,
const Tensor& self,
const Tensor& indices);
Tensor to_sparse_backward(
const Tensor& grad,
const c10::Layout self_layout,
const c10::OptionalArrayRef<c10::SymInt>& self_blocksize);
std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor>
mkldnn_rnn_layer_differentiable_backward(
const Tensor& input,
const Tensor& weight0,
const Tensor& weight1,
const Tensor& weight2,
const Tensor& weight3,
const Tensor& hx_,
const Tensor& cx_tmp,
const Tensor& output,
const Tensor& hy_,
const Tensor& cy_,
const std::optional<Tensor>& grad_output_r_opt,
const std::optional<Tensor>& grad_hy_r_opt,
const std::optional<Tensor>& grad_cy_r_opt,
bool reverse,
int64_t mode,
int64_t hidden_size,
int64_t num_layers,
bool has_biases,
bool train,
bool bidirectional,
at::IntArrayRef batch_sizes,
bool batch_first,
const at::Tensor& workspace);
Tensor values_backward(const Tensor& grad, const Tensor& self);
} // namespace torch::autograd::generated::details
```
|
=========================================================================================================================================
SOURCE CODE FILE: InferenceMode.h
LINES: 1
SIZE: 0.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\InferenceMode.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/InferenceMode.h>
#include <torch/csrc/Export.h>
namespace torch::autograd {
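// Illustrative usage sketch (the tensor `x` is an assumption, not part of
// this header); the guard is RAII-scoped:
//   {
//     torch::autograd::InferenceMode guard;
//     auto y = x + 1; // y is an inference tensor; no autograd tracking
//   }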
using InferenceMode = c10::InferenceMode;
}
```
|
=============================================================================================================================================
SOURCE CODE FILE: VariableTypeUtils.h
LINES: 2
SIZE: 14.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\VariableTypeUtils.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/irange.h>
#include <ATen/core/boxing/KernelFunction.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <torch/csrc/autograd/edge.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/functions/basic_ops.h>
#include <torch/csrc/autograd/functions/tensor.h>
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/autograd/saved_variable.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/autograd/functions/utils.h>
#include <torch/csrc/autograd/jit_decomp_interface.h>
#include <torch/csrc/utils/variadic.h>
#include <cstddef>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#ifdef _MSC_VER
#ifdef Type
#undef Type
#endif
#endif
namespace torch::autograd {
enum class can_mutate_inplace_result {
success,
non_default_backward_view,
view_of_leaf,
is_leaf,
};
// The requires_grad argument is used to know if the inplace operation needs
// gradient to be setup for it.
// In particular, we can have tensor.requires_grad() != requires_grad when
// writing a Tensor that requires gradients inplace into a Tensor that does not
// require gradients:
//   a = torch.rand(2)
//   b = torch.rand(2, requires_grad=True)
//   a.copy_(b)
inline can_mutate_inplace_result can_mutate_inplace(
const at::Tensor& tensor,
bool requires_grad) {
if (!requires_grad || !GradMode::is_enabled()) {
return can_mutate_inplace_result::success;
}
auto diff_view_meta = impl::get_view_autograd_meta(tensor);
if (diff_view_meta && diff_view_meta->has_bw_view()) {
if (diff_view_meta->get_creation_meta() != CreationMeta::DEFAULT) {
return can_mutate_inplace_result::non_default_backward_view;
}
if (tensor.requires_grad() && tensor._base().is_leaf()) {
return can_mutate_inplace_result::view_of_leaf;
}
}
if (tensor.requires_grad() && tensor.is_leaf()) {
return can_mutate_inplace_result::is_leaf;
}
return can_mutate_inplace_result::success;
}
inline void check_inplace(const at::Tensor& tensor, bool requires_grad) {
switch (can_mutate_inplace(tensor, requires_grad)) {
case can_mutate_inplace_result::success:
return;
case can_mutate_inplace_result::non_default_backward_view: {
return handle_view_on_rebase(impl::get_view_autograd_meta(tensor));
}
case can_mutate_inplace_result::view_of_leaf:
TORCH_CHECK(
false,
"a view of a leaf Variable that requires grad is being used in an in-place operation.");
break;
case can_mutate_inplace_result::is_leaf:
TORCH_CHECK(
false,
"a leaf Variable that requires grad is being used in an in-place operation.");
break;
}
TORCH_INTERNAL_ASSERT(false);
}
inline void check_inplace(at::ITensorListRef tensors, bool requires_grad) {
for (const auto& tensor : tensors) {
check_inplace(tensor, requires_grad);
}
}
inline void throw_error_out_requires_grad(const char* name) {
TORCH_CHECK(
false,
name,
"(): functions with out=... arguments don't support automatic differentiation, "
"but one of the arguments requires grad.");
}
inline void throw_error_for_complex_autograd(
const at::Tensor& tensor,
const char* name) {
if (tensor.requires_grad()) {
TORCH_CHECK(
!tensor.is_complex(),
name,
" does not support automatic differentiation for outputs with complex dtype.");
}
}
inline void throw_error_if_base_and_tensor_are_same(
const at::Tensor& base,
const at::Tensor& tensor) {
TORCH_CHECK(
base.unsafeGetTensorImpl() != tensor.unsafeGetTensorImpl(),
"View operation returned a tensor that is the same as the input base tensor. This "
"is no longer allowed; you must explicitly create a new tensor (e.g., using .detach()). "
"As a user, you could have made a mistake implementing __torch_dispatch__ or a Python "
"operator decomposition or meta registration; if that's not the case, please "
"report a bug to PyTorch or the backend you are using.");
}
inline void throw_error_for_complex_autograd(
at::ITensorListRef tensorlist,
const char* name) {
for (const auto& tensor : tensorlist) {
throw_error_for_complex_autograd(tensor, name);
}
}
// TODO: Blegh, bare references
inline void rebase_history(const Variable& var, std::shared_ptr<Node> grad_fn) {
if (grad_fn && var.defined()) {
grad_fn->add_input_metadata(var);
impl::rebase_history(var, {std::move(grad_fn), 0});
}
}
inline void rebase_history(
const std::vector<Variable>& vars,
const std::shared_ptr<Node>& grad_fn) {
if (grad_fn) {
for (auto& var : vars) {
if (var.defined()) {
auto output_nr = grad_fn->add_input_metadata(var);
impl::rebase_history(var, {grad_fn, output_nr});
} else {
grad_fn->add_input_metadata(Node::undefined_input());
}
}
}
}
inline void increment_version(const at::Tensor& t) {
impl::bump_version(t);
}
struct Flatten : IterArgs<Flatten> {
Flatten(variable_list& out) : out(out) {}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
variable_list& out;
void operator()(const at::Tensor& x) {
out.emplace_back(x);
}
void operator()(const std::optional<at::Tensor>& x) {
if (x.has_value())
out.emplace_back(x.value());
}
void operator()(at::ArrayRef<at::Tensor> xs) {
out.insert(out.end(), xs.begin(), xs.end());
}
};
template <typename... Args>
inline variable_list flatten_tensor_args(Args&&... args) {
variable_list out;
out.reserve(count_tensors(std::forward<Args>(args)...));
Flatten(out).apply(std::forward<Args>(args)...);
return out; // RVO
}
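// Illustrative sketch (the arguments `self` and `weight` are assumptions):
// flatten_tensor_args collects every tensor-like argument, in order, into one
// variable_list; optionals without a value contribute nothing.
//   at::Tensor self = at::ones({2});
//   std::optional<at::Tensor> weight; // empty: skipped by Flatten
//   auto vars = flatten_tensor_args(self, weight); // vars == {self}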
// See NOTE [ Autograd View Variables ] for details.
inline at::Tensor as_view(
const at::Tensor& base,
const at::Tensor& tensor,
bool is_bw_differentiable,
bool is_fw_differentiable,
std::unique_ptr<ViewFunc> view_func = nullptr,
std::function<at::Tensor(const at::Tensor&)> rev_view_func = nullptr,
CreationMeta creation_meta = CreationMeta::DEFAULT,
bool allow_tensor_metadata_change = true) {
// Note [View of inference tensor]
// For inference tensor this code can only be hit outside InferenceMode
// since ADInplaceOrView is in the default_included_set.
// If Inplace and View were separate dispatch keys, we could just put Inplace
// in the default_included_set, so that view ops on inference tensors wouldn't
// have to go through as_view even outside InferenceMode.
if (base.is_inference())
return tensor;
auto diff_view_meta = torch::autograd::impl::get_view_autograd_meta(base);
// To speed up the most common case, we specially handle when both the forward
// and backward view infos are the same, and so a single shared ViewInfo can
// be used for both of them.
if ((!diff_view_meta || diff_view_meta->shared_view_info()) &&
is_bw_differentiable && is_fw_differentiable) {
throw_error_if_base_and_tensor_are_same(base, tensor);
if (diff_view_meta) {
creation_meta = propagate_creation_meta(
diff_view_meta->get_creation_meta(), creation_meta);
return make_variable_differentiable_view(
tensor,
diff_view_meta->get_backward_view().chain(
base, tensor, std::move(view_func), std::move(rev_view_func)),
std::nullopt,
/*shared_view_info*/ true,
creation_meta,
allow_tensor_metadata_change);
} else {
return make_variable_differentiable_view(
tensor,
ViewInfo(base, std::move(view_func), std::move(rev_view_func)),
std::nullopt,
/*shared_view_info*/ true,
creation_meta,
allow_tensor_metadata_change);
}
}
// If they cannot be shared, create the required view infos
std::optional<ViewInfo> new_bw_info;
std::optional<ViewInfo> new_fw_info;
if (is_bw_differentiable) {
auto bw_view_func = view_func ? view_func->clone_and_set() : nullptr;
if (diff_view_meta && diff_view_meta->has_bw_view()) {
const auto& base_bw_info = diff_view_meta->get_backward_view();
new_bw_info = base_bw_info.chain(
base, tensor, std::move(bw_view_func), rev_view_func);
} else {
new_bw_info = ViewInfo(base, std::move(bw_view_func), rev_view_func);
}
} else {
TORCH_CHECK(
creation_meta == CreationMeta::DEFAULT,
"Non-backward differentiable views must have creation_meta=CreationMeta::DEFAULT");
}
if (is_fw_differentiable) {
// Check if base is a forward differentiable view
if (diff_view_meta && diff_view_meta->has_fw_view()) {
const auto& base_fw_info = diff_view_meta->get_forward_view();
new_fw_info = base_fw_info.chain(
base, tensor, std::move(view_func), std::move(rev_view_func));
} else {
new_fw_info =
ViewInfo(base, std::move(view_func), std::move(rev_view_func));
}
}
if (is_fw_differentiable || is_bw_differentiable) {
if (diff_view_meta && diff_view_meta->has_bw_view()) {
creation_meta = propagate_creation_meta(
diff_view_meta->get_creation_meta(), creation_meta);
}
throw_error_if_base_and_tensor_are_same(base, tensor);
return make_variable_differentiable_view(
tensor,
std::move(new_bw_info),
std::move(new_fw_info),
/*shared_view_info*/ false,
creation_meta,
allow_tensor_metadata_change);
} else {
return make_variable_non_differentiable_view(
base, tensor, allow_tensor_metadata_change);
}
}
inline void check_no_requires_grad(
const at::Tensor& tensor,
const char* name,
const char* fn_name = "",
bool check_grad_mode = true) {
TORCH_CHECK(
!(tensor.defined() && tensor.requires_grad()) ||
!(check_grad_mode && GradMode::is_enabled()),
"The function '",
fn_name,
"' is not differentiable with respect to argument '",
name,
"'. This input cannot have requires_grad True.");
}
inline void check_no_requires_grad(
const std::optional<at::Tensor>& tensor,
const char* name,
const char* fn_name = "") {
if (tensor.has_value()) {
check_no_requires_grad(*tensor, name, fn_name);
}
}
inline void check_no_requires_grad(
at::ITensorListRef tensors,
const char* name,
const char* fn_name = "") {
// GradMode check is expensive, so check it only once for TensorLists
if (!GradMode::is_enabled()) {
return;
}
for (auto& tensor : tensors) {
check_no_requires_grad(tensor, name, fn_name, /*check_grad_mode*/ false);
}
}
inline void check_no_requires_grad(
const c10::List<std::optional<at::Tensor>>& tensors,
const char* name,
const char* fn_name = "") {
// GradMode check is expensive, so check it only once for TensorLists
if (!GradMode::is_enabled()) {
return;
}
for (std::optional<at::Tensor> tensor : tensors) {
if (tensor.has_value()) {
check_no_requires_grad(*tensor, name, fn_name, /*check_grad_mode*/ false);
}
}
}
// It is assumed that saved tensor lists are never in-place outputs
inline std::vector<SavedVariable> make_saved_variable_list(
at::ITensorListRef tensors,
const bool is_output = false) {
return fmap(tensors, [&is_output](const at::Tensor& tensor) -> SavedVariable {
return SavedVariable{tensor, is_output /* is output */};
});
}
// It is assumed that saved tensor lists are never in-place outputs
inline std::vector<SavedVariable> make_saved_variable_list(
const c10::List<std::optional<at::Tensor>>& tensors,
const bool is_output = false) {
return fmap(
tensors,
[&is_output](const std::optional<at::Tensor>& tensor) -> SavedVariable {
if (tensor.has_value()) {
return SavedVariable{*tensor, is_output /* is output */};
} else {
return SavedVariable{at::Tensor(), is_output /* is output */};
}
});
}
inline std::vector<std::vector<int64_t>> to_args_sizes(
at::ITensorListRef tensors) {
std::vector<std::vector<int64_t>> args_sizes(tensors.size());
size_t i = 0;
for (const auto& t : tensors) {
args_sizes[i++] = t.sizes().vec();
}
return args_sizes;
}
inline std::vector<std::vector<c10::SymInt>> to_args_sizes_symint(
at::ITensorListRef tensors) {
std::vector<std::vector<c10::SymInt>> args_sizes(tensors.size());
size_t i = 0;
for (const auto& t : tensors) {
args_sizes[i++] = t.sym_sizes().vec();
}
return args_sizes;
}
inline std::vector<c10::ScalarType> to_args_scalartypes(
at::ITensorListRef tensors) {
std::vector<c10::ScalarType> args_scalartypes(tensors.size());
size_t i = 0;
for (const auto& t : tensors) {
args_scalartypes[i++] = t.scalar_type();
}
return args_scalartypes;
}
namespace impl {
namespace {
// If run_jit_decomposition were not a member function, we would be able
// to pass this as a template parameter to c10::BoxedKernel::makeFromFunction.
// However, member functions cannot be passed this way - instead we wrap our
// call in this functor so it can be passed to c10::BoxedKernel::makeFromFunctor.
class WrapperFunctor final : public c10::OperatorKernel {
public:
WrapperFunctor(JitDecompInterface* impl) : impl_(impl) {}
void operator()(
const c10::OperatorHandle& op,
c10::DispatchKeySet ks,
torch::jit::Stack* stack) {
impl_->run_jit_decomposition(op, stack);
}
JitDecompInterface* impl_;
};
} // namespace
template <class Return, class... Args>
Return run_jit_decomposition_with_args_for_jvp(
std::string_view name,
const c10::OperatorHandle& opHandle,
c10::DispatchKeySet dispatchKeySet,
Args&&... args) {
// see NOTE: [Jit Decomposition Interface]
JitDecompInterface* impl = getJitDecompImpl();
TORCH_CHECK_NOT_IMPLEMENTED(
impl && impl->has_jit_decomposition(opHandle.schema()),
"Trying to use forward AD with ",
name,
" that does not support it because it has not been implemented yet.\nPlease file an issue "
"to PyTorch at https://github.com/pytorch/pytorch/issues/new?template=feature-request.yml "
"so that we can prioritize its implementation or submit a PR adding the implementation to "
"derivatives.yaml");
return c10::KernelFunction::makeFromBoxedKernel(
c10::BoxedKernel::makeFromFunctor(
std::make_unique<WrapperFunctor>(impl)))
.call<Return, Args...>(
opHandle, dispatchKeySet, std::forward<Args>(args)...);
}
} // namespace impl
} // namespace torch::autograd
```
|
========================================================================================================================================
SOURCE CODE FILE: anomaly_mode.h
LINES: 1
SIZE: 1.75 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\anomaly_mode.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <memory>
#include <string>
namespace torch::autograd {
// forward declaration of Node from function.h
struct Node;
struct TORCH_API AnomalyMode {
static bool is_enabled() {
return _enabled;
}
static bool should_check_nan() {
return _check_nan;
}
static void set_enabled(bool enabled, bool check_nan = true) {
_enabled = enabled;
_check_nan = check_nan;
}
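// Illustrative sketch: enable anomaly detection globally without NaN
// checking, then turn it off again once done debugging.
//   AnomalyMode::set_enabled(/*enabled=*/true, /*check_nan=*/false);
//   // ... run the backward pass under inspection ...
//   AnomalyMode::set_enabled(false);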
private:
static bool _enabled;
static bool _check_nan;
};
/// A RAII guard that enables Anomaly Detection Mode.
///
/// Anomaly detection mode is useful for debugging problems happening
/// in the backward, such as unexpectedly modified tensors or NaNs
/// occurring in the backward.
///
/// The enabling of anomaly mode is global - as soon as there is one
/// such guard, it is enabled for all computation and threads. It also
/// comes with a significant performance penalty.
///
/// Example:
/// @code
/// auto x = torch::tensor({1.}, torch::requires_grad());
/// {
///   torch::autograd::DetectAnomalyGuard detect_anomaly;
///   auto x = torch::tensor({5.0}, torch::requires_grad());
///   auto y = x * x;
///   auto z = y * y;
///   y += 1;
///   z.backward();
/// }
/// @endcode
class TORCH_API DetectAnomalyGuard {
public:
DetectAnomalyGuard(bool check_nan = true);
~DetectAnomalyGuard();
private:
bool prev_check_nan_;
};
struct TORCH_API AnomalyMetadata {
virtual ~AnomalyMetadata();
virtual void store_stack();
virtual void print_stack(const std::string& current_node_name);
virtual void assign_parent(const std::shared_ptr<Node>& parent_node);
private:
std::string traceback_;
std::shared_ptr<Node> parent_;
};
} // namespace torch::autograd
```
|
====================================================================================================================================
SOURCE CODE FILE: autograd.h
LINES: 1
SIZE: 5.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\autograd.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/variable.h>
namespace torch::autograd {
/// Computes the sum of gradients of given tensors with respect to graph leaves.
///
/// The graph is differentiated using the chain rule. If any of ``tensors``
/// are non-scalar (i.e. their data has more than one element) and require
/// gradient, then the Jacobian-vector product is computed; in this case the
/// function additionally requires specifying `grad_tensors`. It should be a
/// sequence of matching length that contains the "vector" in the
/// Jacobian-vector product, usually the gradient of the differentiated function
/// w.r.t. corresponding tensors
/// (`torch::Tensor()` is an acceptable value for all tensors that don't need
/// gradient tensors).
///
/// This function accumulates gradients in the leaves - you might need to zero
/// them before calling it.
///
/// \param tensors Tensors of which the derivative will be computed.
/// \param grad_tensors The "vector" in the Jacobian-vector product, usually
/// gradients
/// w.r.t. each element of corresponding tensors. `torch::Tensor()` values
/// can be specified for scalar Tensors or ones that don't require grad. If
/// a `torch::Tensor()` value would be acceptable for all grad_tensors, then
/// this argument is optional.
/// \param retain_graph If `false`, the graph used to compute the grad will be
/// freed.
/// Note that in nearly all cases setting this option to `true` is not
/// needed and often can be worked around in a much more efficient way.
/// Defaults to the value of `create_graph`.
/// \param create_graph If `true`, graph of the derivative will be constructed,
/// allowing
/// to compute higher order derivative products. Defaults to `false`.
/// \param inputs Inputs w.r.t. which the gradient will be accumulated into
/// `at::Tensor::grad`. All other Tensors will be ignored. If not provided,
/// the gradient is accumulated into all the leaf Tensors that were used to
/// compute param `tensors`.
// When inputs are provided and a given input is not a leaf,
// the current implementation will call its grad_fn (even though it is not
// strictly needed to get these gradients). It is an implementation detail
// on which the user should not rely. See
// https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for
// more details.
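/// A minimal usage sketch (the tensors here are illustrative, not part of
/// this header):
/// ```
/// auto x = torch::ones({2, 2}, torch::requires_grad());
/// auto y = (x * x).sum();
/// torch::autograd::backward({y});
/// // x.grad() now holds dy/dx = 2 * x.
/// ```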
TORCH_API void backward(
const variable_list& tensors,
const variable_list& grad_tensors = {},
std::optional<bool> retain_graph = std::nullopt,
bool create_graph = false,
const variable_list& inputs = {});
/// Computes and returns the sum of gradients of outputs with respect to the
/// inputs.
///
/// ``grad_outputs`` should be a sequence of length matching ``outputs``,
/// containing the "vector" in the Jacobian-vector product, usually the
/// pre-computed gradients w.r.t. each of the outputs. If an output doesn't
/// require grad, then the gradient can be ``torch::Tensor()``.
///
/// \param outputs outputs of the differentiated function.
/// \param inputs Inputs w.r.t. which the gradient will be
/// returned (and not accumulated into ``at::Tensor::grad``).
/// \param grad_outputs The "vector" in the Jacobian-vector product.
/// Usually gradients w.r.t. each output. `torch::Tensor()` values can be
/// specified for scalar Tensors or ones that don't require grad. If a
/// `torch::Tensor()` value would be acceptable for all grad_outputs, then
/// this argument is optional. Default: `{}`.
/// \param retain_graph If ``false``, the graph used to compute the grad
/// will be freed. Note that in nearly all cases setting this option to
/// ``true`` is not needed and often can be worked around in a much more
/// efficient way. Defaults to the value of ``create_graph``.
/// \param create_graph If ``true``, graph of the derivative will
/// be constructed, allowing to compute higher order derivative products.
/// Default: ``false``.
/// \param allow_unused If ``false``, specifying inputs that were not
/// used when computing outputs (and therefore their grad is always zero)
/// is an error. Defaults to ``false``.
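/// A minimal usage sketch (tensors are illustrative):
/// ```
/// auto x = torch::ones({2, 2}, torch::requires_grad());
/// auto y = (x * x).sum();
/// auto dydx = torch::autograd::grad({y}, {x});
/// // dydx[0] == 2 * x; unlike backward(), x.grad() is not modified.
/// ```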
TORCH_API variable_list grad(
const variable_list& outputs,
const variable_list& inputs,
const variable_list& grad_outputs = {},
std::optional<bool> retain_graph = std::nullopt,
bool create_graph = false,
bool allow_unused = false);
namespace forward_ad {
/// Creates a new dual level and returns its index. This level index should then
/// be used to call into the other functions below. This API supports entering a
/// new level before the previous one is exited. We call them nested forward AD
/// levels. These can be used to compute higher order derivatives.
TORCH_API uint64_t enter_dual_level();
/// Exits the given level. This will clear up all the gradients from this level
/// and all dual Tensors that had gradients for this level will become regular
/// Tensors again. This function can only be used to exit the innermost nesting
/// level and so exiting must happen in reverse order compared to the entering
/// that was done with the function above.
TORCH_API void exit_dual_level(uint64_t level);
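// Illustrative sketch of nesting (exiting must mirror entering, innermost
// level first):
//   auto outer = torch::autograd::forward_ad::enter_dual_level();
//   auto inner = torch::autograd::forward_ad::enter_dual_level();
//   // ... compute with dual tensors on both levels ...
//   torch::autograd::forward_ad::exit_dual_level(inner);
//   torch::autograd::forward_ad::exit_dual_level(outer);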
} // namespace forward_ad
} // namespace torch::autograd
```
|
=============================================================================================================================================================
SOURCE CODE FILE: autograd_not_implemented_fallback.h
LINES: 1
SIZE: 1.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\autograd_not_implemented_fallback.h
ENCODING: utf-8
```h
#pragma once
#include <torch/library.h>
namespace torch::autograd {
// Default DispatchKey::Autograd fallback for built-in operators.
// Can be registered for custom operators.
TORCH_API torch::CppFunction autogradNotImplementedFallback();
// Default DispatchKey::ADInplaceOrView fallback for built-in operators.
// Can be registered for custom operators.
TORCH_API torch::CppFunction autogradNotImplementedInplaceOrViewFallback();
// Default DispatchKey::Autograd fallback for all other operators (i.e. custom
// operators)
TORCH_API torch::CppFunction basicAutogradNotImplementedFallback();
enum class AutogradFallbackMode {
Nothing, // Fallback is a redispatch
Warn, // Fallback raises a warning if backward is called
Error, // Fallback raises an error if backward is called
};
// Change the behavior of "basicAutogradNotImplementedFallback"
// In Python this is:
// - torch._C._set_autograd_fallback_mode(str) -> None
// - torch._C._get_autograd_fallback_mode() -> str
TORCH_API void setAutogradFallbackMode(AutogradFallbackMode mode);
TORCH_API AutogradFallbackMode getAutogradFallbackMode();
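// Illustrative registration sketch (the `myops` namespace and `myop` operator
// are assumptions, not part of this header):
//   TORCH_LIBRARY_IMPL(myops, Autograd, m) {
//     m.impl("myop", torch::autograd::autogradNotImplementedFallback());
//   }
//   // And, for basicAutogradNotImplementedFallback, warn instead of
//   // erroring when backward is called:
//   torch::autograd::setAutogradFallbackMode(
//       torch::autograd::AutogradFallbackMode::Warn);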
} // namespace torch::autograd
```
|
====================================================================================================================================
SOURCE CODE FILE: cpp_hook.h
LINES: 1
SIZE: 0.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\cpp_hook.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/function_hook.h>
#include <functional>
#include <memory>
namespace torch::autograd {
using hooks_list =
std::vector<std::function<at::TensorBase(const at::TensorBase&)>>;
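// Illustrative sketch: each hook maps a gradient to its (possibly modified)
// replacement; an identity hook leaves the gradient untouched.
//   auto hooks = std::make_shared<hooks_list>();
//   hooks->emplace_back(
//       [](const at::TensorBase& grad) -> at::TensorBase { return grad; });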
struct CppFunctionTensorPreHook : public FunctionPreHook {
CppFunctionTensorPreHook(std::shared_ptr<hooks_list> hooks, size_t value_idx);
variable_list operator()(const variable_list& values) override;
std::shared_ptr<hooks_list> hooks_;
size_t value_idx_;
};
struct CppFunctionSingleTensorPreHook : public FunctionPreHook {
CppFunctionSingleTensorPreHook(
std::function<at::TensorBase(const at::TensorBase&)> hook,
size_t value_idx);
variable_list operator()(const variable_list& values) override;
std::function<at::TensorBase(const at::TensorBase&)> hook_;
size_t value_idx_;
};
} // namespace torch::autograd
```
|
===========================================================================================================================================
SOURCE CODE FILE: custom_function.h
LINES: 1
SIZE: 21.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\custom_function.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/ivalue.h>
#include <c10/core/SymInt.h>
#include <c10/util/flat_hash_map.h>
#include <c10/util/irange.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/autograd/variable_info.h>
#include <torch/csrc/dynamo/compiled_autograd.h>
#include <vector>
namespace torch::autograd {
using optional_variable_list = std::vector<std::optional<Variable>>;
using _jvp_fn_t = std::function<variable_list(variable_list, variable_list)>;
using _view_as_self_fn_t = std::function<at::Tensor(at::Tensor)>;
TORCH_API std::vector<std::optional<Variable>> _wrap_outputs(
const variable_list& input_vars,
const std::unordered_set<at::TensorImpl*>& non_differentiable,
const std::unordered_set<at::TensorImpl*>& dirty_inputs,
const at::ArrayRef<std::optional<Variable>> raw_outputs,
const std::shared_ptr<Node>& cdata,
const _jvp_fn_t& jvp_user_function,
const std::unordered_set<at::TensorImpl*>& to_save_if_setup_context,
const _view_as_self_fn_t& view_as_self_fn);
TORCH_API void check_variable_result(
const at::TensorBase& original,
const at::TensorBase& result,
const std::string& hook_name);
// Get the return type of the forward function of the custom Function class X
template <typename X, typename... Args>
using forward_t = decltype(X::forward(nullptr, std::declval<Args>()...));
/// To use custom autograd operations, implement a Function subclass with
/// static forward and backward functions:
///
/// `forward` can take as many arguments as you want and should return either a
/// variable list or a Variable. Any direct Variable arguments will be
/// registered in the graph, but no vectors/sets or any other data structures
/// will be traversed. You can use std::optional<Tensor> as one of the arguments
/// and it will be registered as a variable in the graph if the argument has a
/// value. It should take a pointer to `torch::autograd::AutogradContext` as the
/// first argument. Variables can be saved in the `ctx` using
/// `ctx->save_for_backward`
/// (see `torch::autograd::AutogradContext::save_for_backward`) and other data
/// can be saved in the `ctx->saved_data` map
/// (see `torch::autograd::AutogradContext::saved_data`)
/// in the form of `<std::string, at::IValue>` pairs.
///
/// `backward` should take a pointer to `torch::autograd::AutogradContext`
/// and a variable list containing as many Variables as there were outputs from
/// `forward` as arguments. It should return as many Variables as there were
/// inputs with each of them containing the gradient w.r.t. its corresponding
/// input. Variables saved in `forward` can be accessed with
/// `ctx->get_saved_variables` (see
/// `torch::autograd::AutogradContext::get_saved_variables`) and other saved
/// data can be accessed from `ctx->saved_data`.
/// To enable compiled autograd support (torch.compile for backward) for your
/// custom autograd operation, you can set MyFunction::is_traceable
/// (see the Function::is_traceable notes below).
///
/// For example:
/// ```
/// class MyFunction : public Function<MyFunction> {
///  public:
///   static constexpr bool is_traceable = true;
///
///   static variable_list forward(AutogradContext *ctx, int n, Variable var) {
///     // Save data for backward in context
///     ctx->saved_data["n"] = n;
///     var.mul_(n);
///     // Mark var as modified by inplace operation
///     ctx->mark_dirty({var});
///     return {var};
///   }
///
///   static variable_list backward(AutogradContext *ctx, variable_list
///       grad_output) {
///     // Use data saved in forward
///     auto n = ctx->saved_data["n"].toInt();
///     return {grad_output[0]*n};
///   }
/// };
/// ```
///
/// To use `MyFunction`:
/// ```
/// Variable x;
/// auto y = MyFunction::apply(6, x);
/// // Example backward call
/// y[0].sum().backward();
/// ```
template <class T>
struct TORCH_API Function {
// We need to use a different template parameter than T here because T will
// inherit from Function, and when Function<T> is instantiated, T::forward
// is not declared yet.
// The enable_if check is to ensure that the user doesn't explicitly provide
// the parameter X.
template <typename X = T, typename... Args>
static auto apply(Args&&... args)
-> std::enable_if_t<std::is_same_v<X, T>, forward_t<X, Args...>>;
// This flag is for an experimental feature: compiled autograd. Not all
// built-in APIs are supported at the moment, e.g. mark_dirty and
// mark_non_differentiable. Before setting this flag to enable tracing for
// your custom function <T>, you need to ensure that the backward function is
// traceable i.e. any variables accessed in the backward other than the input
// arguments must be handled in a similar manner to built-ins in
// CppNode::compiled_args and CppNode::apply_with_saved.
static constexpr bool is_traceable = false;
};
/// Context to save information during `forward` that can be accessed in
/// `backward` in custom autograd operations (see `torch::autograd::Function`
/// for details).
struct TORCH_API AutogradContext {
AutogradContext() = default;
AutogradContext(const AutogradContext& other) = delete;
AutogradContext& operator=(const AutogradContext& other) = delete;
AutogradContext(AutogradContext&& other) = delete;
AutogradContext& operator=(AutogradContext&& other) = delete;
~AutogradContext() = default;
AutogradContext(PackedArgs& packed_args);
/// Can be used to save non-variable data for `backward`.
ska::flat_hash_map<std::string, at::IValue> saved_data;
/// Saves the list of variables for a future call to `backward`. This
/// should be called at most once from inside of `forward`.
void save_for_backward(variable_list to_save);
/// Marks variables in the list as modified in an in-place operation. This
/// should be called at most once from inside of `forward` and all arguments
/// should be inputs.
void mark_dirty(const variable_list& inputs);
/// Marks outputs in the list as not requiring gradients. This should be
/// called at most once from inside of `forward` and all arguments should be
/// outputs.
void mark_non_differentiable(const variable_list& outputs);
// Sets whether undefined output grad tensors should be expanded to tensors
// full of zeros before calling the backward function. Default value is true.
void set_materialize_grads(bool value);
/// Get the list of variables that were saved in `forward` using
/// `save_for_backward()`. Before returning them to the user, a check is made
/// to ensure that they were not modified by any in-place operations.
variable_list get_saved_variables() const;
const std::unordered_set<at::TensorImpl*>& get_and_bump_dirty() const;
const std::unordered_set<at::TensorImpl*>& get_non_differentiable() const;
/// Expose the Node's `task_should_compute_output` method to the cpp
/// custom autograd Function as `needs_input_grad`.
bool needs_input_grad(size_t output_edge_index) const;
bool needs_input_grad(std::initializer_list<IndexRange> idxs) const;
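// Illustrative sketch (not from the original header): inside a custom
// `backward`, work can be skipped for gradients the caller does not need:
//   if (!ctx->needs_input_grad(0)) {
//     return {Variable()}; // gradient for input 0 is not required
//   }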
private:
std::unordered_set<at::TensorImpl*> non_differentiable_;
std::unordered_set<at::TensorImpl*> dirty_inputs_;
std::vector<torch::autograd::SavedVariable> saved_variables_;
variable_list to_save_;
bool materialize_grads_{true};
// The CppNode in the autograd graph that owns this AutogradContext. We need a
// weak_ptr to avoid a refcycle. Since grad_fn_ owns this AutogradContext, it
// will always be alive when we want to use it.
std::weak_ptr<Node> grad_fn_;
bool has_freed_buffers_{false};
// Compiled autograd overrides saved_variables() and needs_input_grad().
// We store the values we want to return here.
std::optional<variable_list> saved_variables_override_;
std::optional<std::vector<bool>> needs_input_grad_override_;
void save_variables();
template <class T>
friend struct CppNode;
template <class T>
friend variable_list CppNode_apply_functional(
variable_list&& inputs,
AutogradContext& ctx_,
const std::vector<bool>& is_variable_input_,
const std::vector<VariableInfo>& output_info_,
const std::string& name);
};
template <typename T>
inline variable_list CppNode_apply_functional(
// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
variable_list&& inputs,
AutogradContext& ctx_,
const std::vector<bool>& is_variable_input_,
const std::vector<VariableInfo>& output_info_,
const std::string& name) {
at::OptionalDeviceGuard _device_guard;
auto num_inputs = inputs.size();
variable_list backward_inputs;
backward_inputs.reserve(num_inputs);
for (const auto i : c10::irange(num_inputs)) {
if (inputs[i].defined() || !ctx_.materialize_grads_) {
backward_inputs.emplace_back(std::move(inputs[i]));
} else {
backward_inputs.emplace_back(output_info_[i].zeros(_device_guard));
}
}
auto outputs = T::backward(&ctx_, backward_inputs);
const auto num_forward_inputs =
static_cast<int64_t>(is_variable_input_.size());
auto num_outputs = static_cast<int64_t>(outputs.size());
// Returning too many results is ok, but only as long as they're all
// undefined. Truncate the result vector in that case.
if (num_outputs > num_forward_inputs) {
bool all_undef = true;
for (const auto i : c10::irange(num_forward_inputs, num_outputs)) {
all_undef &= (!outputs[i].defined());
}
if (all_undef) {
outputs.resize(num_forward_inputs);
num_outputs = num_forward_inputs;
}
}
if (num_outputs != num_forward_inputs) {
std::string msg("function ");
msg += name + " returned an incorrect number of gradients (expected ";
msg += std::to_string(num_forward_inputs) + ", got ";
msg += std::to_string(num_outputs) + ")";
throw std::runtime_error(msg);
}
variable_list results;
results.reserve(num_outputs);
for (const auto i : c10::irange(num_outputs)) {
if (!is_variable_input_[i]) {
if (outputs[i].defined()) {
std::string msg("function ");
msg += name +
" returned a gradient different that is defined at position ";
msg += std::to_string(i + 1) +
", std the corresponding forward input was not a Variable";
throw std::runtime_error(msg);
}
continue;
}
results.emplace_back(outputs[i]);
}
return results;
}
template <typename T>
inline variable_list CppNode_apply_functional_ivalue(
const variable_list& inputs,
const std::vector<c10::IValue>& args) {
auto packed_args = PackedArgs(args);
auto ctx = AutogradContext(packed_args);
auto output_info = packed_args.unpack<std::vector<VariableInfo>>();
auto is_variable_input = packed_args.unpack<std::vector<bool>>();
auto name = packed_args.unpack<std::string>();
return CppNode_apply_functional<T>(
variable_list(inputs), ctx, is_variable_input, output_info, name);
}
// CppNode<T> is the Node in the autograd graph that represents the user defined
// backward function for Function<T>. Calls to CppNode::apply are forwarded to
// T::backward().
template <class T>
struct CppNode : public Node {
variable_list apply(variable_list&& inputs) override;
AutogradContext ctx_;
std::vector<bool> is_variable_input_;
std::vector<VariableInfo> input_info_;
std::vector<VariableInfo> output_info_;
void release_variables() override;
void set_ctx_grad_fn(const std::shared_ptr<Node>& node);
void save_variables_to_ctx();
void compiled_args(CompiledNodeArgs& args) const override {
// Although neither of the two methods below has uniqueness guarantees,
// it is unlikely for both to collide at the same time.
args.collect(static_cast<uint64_t>(typeid(T).hash_code()));
args.collect(std::string(typeid(T).name()));
args.collect(ctx_.saved_data);
TORCH_INTERNAL_ASSERT(ctx_.non_differentiable_.empty());
TORCH_INTERNAL_ASSERT(ctx_.dirty_inputs_.empty());
args.collect(
ctx_.saved_variables_, true); // always unpacked as output in eager
TORCH_INTERNAL_ASSERT(ctx_.to_save_.empty());
args.collect(ctx_.materialize_grads_);
args.collect(ctx_.has_freed_buffers_);
args.collect(is_variable_input_);
args.collect(input_info_);
args.collect(output_info_);
}
variable_list apply_with_saved(
const variable_list& inputs,
SwapSavedVariables& saved) override {
saved.before(ctx_.saved_data);
TORCH_INTERNAL_ASSERT(ctx_.non_differentiable_.empty());
TORCH_INTERNAL_ASSERT(ctx_.dirty_inputs_.empty());
saved.before(ctx_.saved_variables_);
TORCH_INTERNAL_ASSERT(ctx_.to_save_.empty());
saved.before(ctx_.materialize_grads_);
saved.before(ctx_.has_freed_buffers_);
saved.before(input_info_);
saved.before(output_info_);
PackedArgs packed_args;
packed_args.pack_saved_data(ctx_.saved_data);
variable_list saved_variables = ctx_.get_saved_variables();
packed_args.pack(saved_variables);
packed_args.pack(ctx_.materialize_grads_);
packed_args.pack(ctx_.has_freed_buffers_);
std::vector<bool> needs_input_grad;
{
auto ptr = ctx_.grad_fn_.lock();
TORCH_INTERNAL_ASSERT(ptr);
for (const auto i : c10::irange(ptr->next_edges().size())) {
needs_input_grad.push_back(ptr->task_should_compute_output(i));
}
}
packed_args.pack(needs_input_grad);
packed_args.pack(output_info_);
packed_args.pack(is_variable_input_);
packed_args.pack(name());
auto args = std::move(packed_args).vec();
auto output_metadata = torch::dynamo::autograd::
IValuePacker<std::vector<std::optional<InputMetadata>>>::pack(
torch::dynamo::autograd::get_input_metadata(next_edges()));
const auto& pyinterface = torch::dynamo::autograd::getPyCompilerInterface();
// Each time apply_with_saved is called, we bind a new function to Python.
// This is because the schema might be different on compiled autograd cache
// misses. An alternative is to pass the schema to Python so that it can be
// an input to a function, but the schema can't be put into an FX graph
// right now.
std::vector<at::TypePtr> schema;
schema.reserve(args.size());
for (const auto& ivalue : args) {
if (ivalue.isTensor()) {
schema.emplace_back(at::TensorType::get());
} else {
schema.emplace_back(ivalue.type());
}
}
static_assert(
std::is_same_v<std::remove_cv_t<decltype(T::is_traceable)>, bool>);
auto fn_name = pyinterface->bind_function(
saved.get_py_compiler(),
std::string(typeid(T).name()),
CppNode_apply_functional_ivalue<T>,
schema,
/*is_custom_function*/ true,
/*is_traceable*/ T::is_traceable);
auto results = pyinterface->call_function(
saved.get_py_compiler(),
"apply_functional",
fn_name,
inputs,
args,
output_metadata);
saved.after(ctx_.saved_data);
TORCH_INTERNAL_ASSERT(ctx_.non_differentiable_.empty());
TORCH_INTERNAL_ASSERT(ctx_.dirty_inputs_.empty());
saved.after(ctx_.saved_variables_);
TORCH_INTERNAL_ASSERT(ctx_.to_save_.empty());
saved.after(ctx_.materialize_grads_);
saved.after(ctx_.has_freed_buffers_);
saved.after(input_info_);
saved.after(output_info_);
return results;
}
};
struct ExtractVariables : IterArgs<ExtractVariables> {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
std::vector<bool>& is_var_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
variable_list& list_;
ExtractVariables(std::vector<bool>& is_var, variable_list& list)
: is_var_(is_var), list_(list) {}
void operator()(const std::optional<at::Tensor>& x) {
if (x.has_value() && x.value().defined()) {
is_var_.push_back(true);
list_.emplace_back(x.value());
} else {
is_var_.push_back(false);
}
}
void operator()(const at::Tensor& x) {
is_var_.push_back(true);
list_.emplace_back(x);
}
void operator()(const at::TensorList& list) {
for (const at::Tensor& x : list) {
is_var_.push_back(true);
list_.emplace_back(x);
}
}
template <typename T>
void operator()(const T& x) {
is_var_.push_back(false);
}
};
template <typename... Args>
inline void extract_vars(
std::vector<bool>& is_var,
variable_list& list,
Args&&... args) {
ExtractVariables(is_var, list).apply(std::forward<Args>(args)...);
}
template <typename T>
std::enable_if_t<std::is_same_v<T, variable_list>, T> to_output_type(
std::vector<std::optional<Variable>>& output_list) {
variable_list result;
std::transform(
output_list.begin(),
output_list.end(),
std::back_inserter(result),
[](const std::optional<Variable>& var) { return *var; });
return result;
}
template <typename T>
std::enable_if_t<std::is_same_v<T, Variable>, T> to_output_type(
std::vector<std::optional<Variable>>& output_list) {
return *output_list[0];
}
inline std::vector<std::optional<Variable>> to_optional(Variable& output) {
return std::vector<std::optional<Variable>>{output};
}
inline std::vector<std::optional<Variable>> to_optional(variable_list& output) {
std::vector<std::optional<Variable>> result;
std::transform(
output.begin(),
output.end(),
std::back_inserter(result),
[](const Variable& var) { return var; });
return result;
}
template <class T>
template <typename X, typename... Args>
auto Function<T>::apply(Args&&... args)
-> std::enable_if_t<std::is_same_v<X, T>, forward_t<X, Args...>> {
const auto& functorch_tls = at::functorch::functorchTLSAccessor();
if (functorch_tls) {
// Function support for functorch is handled in Python.
// Here we are dealing with a (C++) Function, which is not supported.
// Let's raise an error instead of being silently incorrect.
functorch_tls->checkSupportsCppAutogradFunction();
}
std::shared_ptr<CppNode<T>> node(new CppNode<T>(), deleteNode);
variable_list input_vars;
const size_t num_inputs = sizeof...(Args);
input_vars.reserve(num_inputs);
node->is_variable_input_.reserve(num_inputs);
// TODO Add tracing here
extract_vars(node->is_variable_input_, input_vars, args...);
bool is_executable =
GradMode::is_enabled() && any_variable_requires_grad(input_vars);
auto next_edges =
(is_executable ? collect_next_edges(input_vars) : edge_list());
node->set_ctx_grad_fn(node);
node->set_next_edges(std::move(next_edges));
node->clear_input_metadata();
node->input_info_.reserve(input_vars.size());
for (auto& var : input_vars) {
node->input_info_.emplace_back(var);
}
using forward_return_t = forward_t<X, Args...>;
forward_return_t outputs;
{
AutoGradMode grad_mode(false);
outputs = T::forward(&node->ctx_, std::forward<Args>(args)...);
}
_jvp_fn_t jvp_fn = [](const variable_list& inputs,
const variable_list& gI) -> variable_list {
TORCH_CHECK(
false,
"jvp is not implemented for the c++ API of custom Function yet.",
"Please open a feature request on GitHub if you need this.");
};
auto view_as_self_fn = [](const at::Tensor& x) -> at::Tensor {
return x.view_as(x);
};
auto wrapped_outputs = _wrap_outputs(
input_vars,
node->ctx_.get_non_differentiable(),
node->ctx_.get_and_bump_dirty(),
to_optional(outputs),
is_executable ? node : nullptr,
jvp_fn,
{},
view_as_self_fn);
node->output_info_.reserve(wrapped_outputs.size());
for (auto& output : wrapped_outputs) {
if (is_executable && output.has_value()) {
node->output_info_.emplace_back(output.value());
} else if (is_executable) {
node->output_info_.emplace_back();
}
}
if (is_executable) {
node->save_variables_to_ctx();
}
// wrapped_outputs will be a variable_list, so convert it to the correct
// return type. Only Variable and variable_list are accepted as return types.
return to_output_type<forward_return_t>(wrapped_outputs);
}
// The logic here is the same as PyNode::apply, so changes to it should be done
// in both places.
template <class T>
// NOLINTNEXTLINE(cppcoreguidelines-rvalue-reference-param-not-moved)
variable_list CppNode<T>::apply(variable_list&& inputs) {
// Acquire the lock here to protect thread safety on the custom C++ autograd
// Node.
// This is needed for the custom Autograd Node since we don't know if the
// user defined Node will write to the shared data during backward.
// see Note [Thread Safety on Autograd Node]
std::lock_guard<std::mutex> lock(mutex_);
return CppNode_apply_functional<T>(
std::move(inputs), ctx_, is_variable_input_, output_info_, name());
}
template <class T>
void CppNode<T>::release_variables() {
// lock to ensure thread safety, see [Thread Safety on Autograd Node]
std::lock_guard<std::mutex> lock(mutex_);
ctx_.saved_variables_.clear();
ctx_.has_freed_buffers_ = true;
}
template <class T>
void CppNode<T>::save_variables_to_ctx() {
ctx_.save_variables();
}
template <class T>
void CppNode<T>::set_ctx_grad_fn(const std::shared_ptr<Node>& node) {
ctx_.grad_fn_ = node;
}
} // namespace torch::autograd
```
|
================================================================================================================================
SOURCE CODE FILE: edge.h
LINES: 1
SIZE: 1.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\edge.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <functional>
#include <memory>
#include <c10/util/hash.h>
namespace torch::autograd {
struct Node;
/// Represents a particular input of a function.
struct Edge {
Edge() noexcept : function(nullptr), input_nr(0) {}
Edge(std::shared_ptr<Node> function_, uint32_t input_nr_) noexcept
: function(std::move(function_)), input_nr(input_nr_) {}
/// Convenience method to test if an edge is valid.
bool is_valid() const noexcept {
return function != nullptr;
}
// Required for use in associative containers.
bool operator==(const Edge& other) const noexcept {
return this->function == other.function && this->input_nr == other.input_nr;
}
bool operator!=(const Edge& other) const noexcept {
return !(*this == other);
}
/// The function this `Edge` points to.
std::shared_ptr<Node> function;
/// The identifier of a particular input to the function.
uint32_t input_nr;
};
} // namespace torch::autograd
// The idiomatic way of enabling use of a custom type as the key of hash
// containers in C++11. This method removes the requirement of having to pass
// a custom hasher to std::unordered_{map, set}.
// See http://en.cppreference.com/w/cpp/utility/hash for more information.
namespace std {
template <>
struct hash<torch::autograd::Edge> {
// These type aliases are required by the standard.
using argument_type = torch::autograd::Edge;
using return_type = size_t;
return_type operator()(const argument_type& edge) const noexcept {
return c10::get_hash(edge.function, edge.input_nr);
}
};
} // namespace std
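// Illustrative sketch: with the specialization above, Edge works directly as
// a key in standard hash containers, no custom hasher required:
//   std::unordered_set<torch::autograd::Edge> seen;
//   seen.insert(torch::autograd::Edge{});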
```
|
==================================================================================================================================
SOURCE CODE FILE: engine.h
LINES: 1
SIZE: 10.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\engine.h
ENCODING: utf-8
```h
#pragma once
// Engine implements backpropagation from output variables and their gradients
// to "root" variables (variables created by the user with requires_grad=True).
#include <ATen/Tensor.h>
#include <ATen/ThreadLocalState.h>
#include <ATen/core/ivalue.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/anomaly_mode.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/functions/basic_ops.h>
#include <torch/csrc/autograd/graph_task.h>
#include <torch/csrc/autograd/input_buffer.h>
#include <torch/csrc/autograd/saved_variable_hooks.h>
#include <torch/csrc/autograd/utils/warnings.h>
#include <exception>
#include <functional>
#include <memory>
#include <queue>
#include <utility>
#include <vector>
namespace torch::autograd {
struct ReadyQueue;
}
namespace torch::autograd {
// Maximum reentrant backward depth before switching to a new thread
// This limit is based on TSAN's deadlock detector, which fails
// if a program holds more than 65 locks in one thread at once.
// As we hold a mutex in every one of our custom C++ autograd Nodes, we would
// like to avoid TSAN complaints about this when doing reentrant backwards.
// For reference, see https://github.com/google/sanitizers/issues/950
static constexpr int MAX_DEPTH = 60;
void set_device(int device);
TORCH_API void validate_outputs(
const edge_list& edges,
variable_list& grads,
const std::function<std::string(const std::string&)>& format_error);
TORCH_API void validate_outputs(
const std::vector<std::optional<InputMetadata>>& input_metadata,
variable_list& grads,
const std::function<std::string(const std::string&)>& format_error);
TORCH_API std::vector<std::optional<InputMetadata>> collect_input_metadata(
const edge_list& edges);
struct NodeTask {
std::weak_ptr<GraphTask> base_;
std::shared_ptr<Node> fn_;
// This buffer serves as an implicit "addition" node for all of the
// gradients flowing here. Once all the dependencies are finished, we
// use the contents of this buffer to run the function.
InputBuffer inputs_;
// When a worker receives a task with isShutdownTask = true, it will immediately
// exit. The engine sends a shutdown task to every queue upon its destruction.
bool isShutdownTask_;
int getReentrantDepth() const;
NodeTask(
std::weak_ptr<GraphTask> base,
std::shared_ptr<Node> fn,
InputBuffer inputs,
bool isShutdownTask = false)
: base_(std::move(base)),
fn_(std::move(fn)),
inputs_(std::move(inputs)),
isShutdownTask_(isShutdownTask) {}
};
// Guard that sets and restores checkpoint_valid
class CheckpointValidGuard {
public:
explicit CheckpointValidGuard(
const std::shared_ptr<const GraphTask>& graph_task);
~CheckpointValidGuard();
private:
bool prev_checkpoint_valid_state;
};
struct ReadyQueue {
private:
// Returns true when t2 should be (weakly) BEFORE t1 in the queue.
// Shutdown tasks come first, and then empty NodeTasks are next.
struct CompareNodeTaskTime {
bool operator()(NodeTask const& t1, NodeTask const& t2) {
// NOLINTNEXTLINE(bugprone-branch-clone)
if (t2.isShutdownTask_) {
return true;
} else if (!t1.fn_ || t1.isShutdownTask_) {
return false;
} else if (!t2.fn_) {
return true;
} else if (t1.getReentrantDepth() == t2.getReentrantDepth()) {
return t1.fn_->sequence_nr() < t2.fn_->sequence_nr();
} else {
return t1.getReentrantDepth() < t2.getReentrantDepth();
}
}
};
// To notify threads waiting on the ReadyQueue of available tasks on the heap_
std::condition_variable not_empty_;
// To protect read and writes to heap_
mutable std::mutex mutex_;
std::priority_queue<NodeTask, std::vector<NodeTask>, CompareNodeTaskTime>
heap_;
public:
// incrementOutstandingTasks indicates whether or not we should increment
// 'outstanding_tasks_' for the associated GraphTask. This should almost
// always be true and is only set to false in certain cases (see docs for
// DistEngine.execute_graph_task_until_ready_queue_empty)
void push(NodeTask item, bool incrementOutstandingTasks = true);
void pushShutdownTask();
NodeTask pop();
bool empty() const;
size_t size() const;
};
// A single instance of this struct should be created through the whole process
// lifetime. The worker thread creation logic and Engine's destructor rely on
// this.
struct TORCH_API Engine {
/// Returns a reference to a static `Engine` instance.
static Engine& get_default_engine();
static Engine& get_base_engine();
// compiled_autograd needs to live in a different .so file so that it
// can have python symbols, so we add a layer of indirection
// see [Note: Compiled Autograd]
typedef variable_list (*compiled_autograd_fn)(
const std::shared_ptr<Node>& graph_root,
const GraphTask& graph_task,
bool accumulate_grad,
const edge_list& outputs);
static void set_compiled_autograd(compiled_autograd_fn fn);
Engine(const Engine&) = delete;
Engine(Engine&&) = delete;
virtual ~Engine();
// Given a list of (Node, input number) pairs computes the value of the graph
// by following next_edge references.
virtual variable_list execute(
const edge_list& roots,
const variable_list& inputs,
bool keep_graph,
bool create_graph,
bool accumulate_grad,
const edge_list& outputs = {});
// Given a pre-populated GraphTask and GraphRoot, computes the backward pass
// for the graph.
//
// NB: This API should only be used by internal autograd specific
// machinery and shouldn't be exposed to users in any way.
virtual c10::intrusive_ptr<at::ivalue::Future> execute_with_graph_task(
const std::shared_ptr<GraphTask>& graph_task,
std::shared_ptr<Node> graph_root,
InputBuffer&& input_buffer);
virtual std::unique_ptr<AnomalyMetadata> make_anomaly_metadata() {
return std::make_unique<AnomalyMetadata>();
}
virtual std::unique_ptr<SavedVariableHooks> get_default_saved_variable_hooks() {
return nullptr;
}
// We pass cpu_ready_queue to evaluate_function, so that it knows
// the correct ready queue to push to after a NodeTask is ready
void evaluate_function(
std::shared_ptr<GraphTask>& graph_task,
Node* func,
InputBuffer& inputs,
const std::shared_ptr<ReadyQueue>& cpu_ready_queue);
void initialize_device_threads_pool();
virtual void thread_on_exception(
const std::shared_ptr<GraphTask>& graph_task,
const std::shared_ptr<Node>& fn,
std::exception& e);
void queue_callback(std::function<void()> callback);
bool is_checkpoint_valid();
// Should be called after fork to notify that worker threads are gone
void release_workers();
// Must be called by subclass before destructing to avoid a data-race-on-vptr.
void stop();
// Initializes a device thread for the autograd engine.
virtual void thread_init(
int device,
const std::shared_ptr<ReadyQueue>& ready_queue,
bool should_increment = true);
protected:
Engine();
void compute_dependencies(Node* root, GraphTask& task, uint64_t min_topo_nr);
// initialize the thread local ready queue with the ready queue that is
// created elsewhere (i.e. thread_init, Engine::execute, etc), or create a new
// ready queue if ready_queue is not provided.
void init_local_ready_queue(
std::shared_ptr<ReadyQueue> ready_queue = nullptr);
std::shared_ptr<ReadyQueue> ready_queue(
std::shared_ptr<ReadyQueue> cpu_ready_queue,
at::Device device);
std::shared_ptr<ReadyQueue> ready_queue_by_index(
std::shared_ptr<ReadyQueue> cpu_ready_queue,
int device_index);
// start device threads (CUDA, XLA, etc.) in Engine,
// note that it does NOT start the CPU thread.
void start_device_threads();
void increment_non_reentrant_thread_count();
void decrement_non_reentrant_thread_count();
virtual void thread_main(const std::shared_ptr<GraphTask>& task);
void reentrant_thread_init();
void add_thread_pool_task(const std::weak_ptr<GraphTask>& graph_task);
// Safe to read device_ready_queues_ without synchronization after
// initialization
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::vector<std::shared_ptr<ReadyQueue>> device_ready_queues_;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::vector<std::function<void()>> final_callbacks_;
// To protect reads and writes to final_callbacks_
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::mutex post_callbacks_lock_;
// How many nested reentrant calls are allowed until a new thread is used
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
int max_recursion_depth_;
struct ThreadPoolShared {
// Data structures used by the threads for executing reentrant backwards
// tasks. See Note [Reentrant backwards]
// Number of available threads for processing new GraphTasks.
unsigned int num_workers_{0};
// The threads will wait on work_ to be notified of GraphTasks
std::condition_variable work_;
// To protect reads and writes to graphtask_queue_ and num_workers_
// and for synchronizing creating new threads when needed
std::mutex mutex_;
// Workers will process the GraphTasks added to this queue. A GraphTask is
// allocated inside Engine::execute and lives for the duration of execute
std::queue<std::weak_ptr<GraphTask>> graphtasks_queue_;
ThreadPoolShared() = default;
};
// Temporary workaround until shutting down threads is implemented.
// We need shared ownership of all these objects because the threads are
// leaked when Engine shuts down, so there may be threads waiting on work_ for
// the graphtasks_queue_ to be nonempty.
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::shared_ptr<ThreadPoolShared> thread_pool_shared_;
private:
// Number of non-reentrant threads
std::atomic<uint32_t> non_reentrant_device_thread_count_;
// Destructor will wait for non-reentrant threads to finish
std::condition_variable non_reentrant_device_thread_condvar_;
std::mutex non_reentrant_device_thread_mutex_;
// stop() must be called before the destruction path goes down to the base
// class, in order to avoid a data-race-on-vptr. Use this boolean to guard
// whether stop() has already been called, so we can call this in every
// destructor of the class hierarchy.
bool stopped_{false};
};
// allow python_engine to override the default engine when it loads
using EngineStub = Engine& (*)();
TORCH_API void set_default_engine_stub(EngineStub stub);
} // namespace torch::autograd
```
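A hedged sketch of how a simple `backward()` call might drive the engine declared above. `impl::gradient_edge` comes from `torch/csrc/autograd/variable.h`; the tensor setup is illustrative and not the exact internal call path.

```cpp
// Minimal sketch, assuming a scalar loss tensor with a grad_fn: seed the
// root edge with a ones-like gradient and let the default engine run the
// backward pass, accumulating into leaf .grad fields.
#include <ATen/ATen.h>
#include <torch/csrc/autograd/engine.h>
#include <torch/csrc/autograd/variable.h>

namespace sketch {
using namespace torch::autograd;

void run_backward(const at::Tensor& loss) {
  // Root: the (grad_fn, input_nr) pair of the output variable.
  edge_list roots{impl::gradient_edge(loss)};
  // Seed gradient: d(loss)/d(loss) = 1.
  variable_list grads{at::ones_like(loss)};
  Engine::get_default_engine().execute(
      roots,
      grads,
      /*keep_graph=*/false,
      /*create_graph=*/false,
      /*accumulate_grad=*/true);
}
} // namespace sketch
```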
|
========================================================================================================================================
SOURCE CODE FILE: forward_grad.h
LINES: 1
SIZE: 8.93 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\forward_grad.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/Tensor.h>
#include <unordered_set>
namespace torch::autograd {
// [ Using ForwardGrad ]
// ForwardGrad needs to be a shared_ptr to satisfy constraints of its inner
// design. But this shared_ptr must be uniquely associated with the object that
// stores it (as of writing, either AutogradMeta or SavedVariable). This object
// is called the "owning object" in the discussions below. This owning object
// must call `ForwardGrad::clear()` when it is destroyed to ensure that the
// ForwardGrad is properly de-allocated.
struct ForwardGrad;
// This file contains two classes that are used to store forward AD gradients
// and ensure that they are scoped properly. Because forward AD runs
// concurrently with the evaluation of the function, we need a mechanism to
// separate different forward AD invocations and be able to compute the right
// gradients. We model such invocations as levels here. The particular scoping
// issue mentioned above has two main drivers:
// - Ensure that we can conveniently use forward AD within a high level API
// without
// leaking the forward AD states outside.
// - Ensure that we can keep the level that we expose to the user API simple
// (an integer
// that represents the nesting depth) while avoiding confusions when the
// level index is re-used.
// The important external APIs from this file are:
// - ForwardADLevel::get_next_idx() that can be used to enter a new level and
// get its index
// - ForwardADLevel::release_idx() that can be used to exit a given level.
// - ForwardGrad() can be used to store a given forward gradient that will
// handle the level
// tracking automatically.
// The basic implementation strategy is as follows:
// Every tensor has a ForwardGrad, maintaining a map from levels to tangents.
// ForwardGrad is responsible for registering itself to the appropriate
// ForwardADLevel when a new tangent is added to it via ForwardGrad::set_value
// and to un-register itself from this same level if that tangent is removed via
// ForwardGrad::reset. The ForwardADLevel is created when a new level is entered
// via ForwardADLevel::get_next_idx. A reference to the new ForwardADLevel is
// stored into a global (process-wide) vector that ensures it can be
// accessed via ForwardADLevel::get_by_idx. This reference is deleted when the
// index is released by the user when calling ForwardADLevel::release_idx. When
// it is destructed, the ForwardADLevel is responsible for clearing all the
// tangents for its level stored in all the ForwardGrad that registered with it.
//
// This process-wide level design, compared to a thread local one, allows us to
// use very simple user facing handle for the level (an int) while enabling
// cross-thread forward AD. The only required synchronization for the user is
// when entering and exiting the levels. Some discussion on alternative design
// is in https://github.com/pytorch/pytorch/pull/49097#discussion_r543716453 and
// can be refined in the future.
// Correctness of concurrency:
// Each class uses its own lock when reading or modifying internal storages.
// This allows in particular to safely remove tangents from ForwardGrad when the
// ForwardADLevel is being exited. We ensure no deadlock by ensuring that a
// method never calls into another class's method while the local class's lock
// is held, except in one single case: calling from ForwardADLevel's destructor
// into ForwardGrad::reset with update_level=false.
// The lifetime of these objects is as follows:
// The ForwardADLevel can be in three states:
// - Initialized: where one of its references is held by the global vector
// and there may be more references held by temporary variables in
// ForwardGrad's methods.
// - About to be destructed: where "release_idx" has been called and the
// only reason for the ForwardADLevel not to be destructed right away is
// that some methods in ForwardGrad have an owning reference to it. This
// is done so that a ForwardADLevel can never be destructed while a
// ForwardGrad is registered with it and in the process of adding
// something to its internal state.
// - Being destructed: here the ForwardADLevel is not referenced anymore
// and can safely reset all of the ForwardGrads. Note that more than one
// reset can be called here (which is ok), but we are guaranteed that
// there is at least one.
// The ForwardGrad is simpler as there is no intermediary state and no special
// destructor. The logic to unregister it from the different ForwardADLevels
// is done when the owning object (AutogradMeta or SavedVariable) is being
// destroyed.
// Other considered design:
// To avoid having the ForwardGrad::clear, we considered storing weak_ptr inside
// the ForwardADLevel. While this would work, it would mean that the set inside
// the ForwardADLevel would only grow unless we do an expensive linear scan to
// remove all the dangling weak pointers. Hence this approach was not used.
// Data structures in this file are optimized for this expected maximum number
// of levels. The number of levels corresponds to the degree of the gradient
// being computed using forward AD, and we don't expect more than second-order
// gradients to be common.
#define EXPECTED_MAX_LEVEL 2
struct TORCH_API ForwardADLevel {
ForwardADLevel(uint64_t idx) : idx_(idx) {}
~ForwardADLevel();
static uint64_t get_next_idx();
static void release_idx(uint64_t idx);
static std::shared_ptr<ForwardADLevel> get_by_idx(uint64_t idx);
static std::shared_ptr<ForwardADLevel> try_get_by_idx(uint64_t idx);
void erase(const std::shared_ptr<ForwardGrad>& grad) {
std::lock_guard<std::mutex> lock(mutex_);
grads_.erase(grad);
}
void insert(const std::shared_ptr<ForwardGrad>& grad) {
std::lock_guard<std::mutex> lock(mutex_);
grads_.insert(grad);
}
private:
std::unordered_set<std::shared_ptr<ForwardGrad>> grads_;
std::mutex mutex_;
uint64_t idx_;
};
struct TORCH_API ForwardGrad : std::enable_shared_from_this<ForwardGrad> {
ForwardGrad() = default;
// This function must only be called when AutogradMeta or SavedVariable is
// being destructed as it ensures that:
// - The only (potential) other references to this ForwardGrad are the
// different level it is registered to
// - No other thread will try to call `set_value` or `value` ever from now
// on
// - Any of the ForwardADLevel that this ForwardGrad is registered with
// might
// call `reset` at any point during this function
void clear() {
c10::SmallVector<uint64_t, EXPECTED_MAX_LEVEL> levels_idx;
{
std::lock_guard<std::mutex> lock(mutex_);
for (auto& c : content_) {
levels_idx.push_back(c.first);
}
}
for (auto l_idx : levels_idx) {
// Use "try" version here as another thread might have deleted this
// level before we got here
// This is an owning reference as we want to keep the level alive
// until we successfully unregister ourselves
auto level = ForwardADLevel::try_get_by_idx(l_idx);
if (level) {
level->erase(shared_from_this());
}
}
}
void set_value(const at::Tensor& value, uint64_t level) {
// Owning reference to ensure the forward_level is not destroyed
// while we are updating our internal state
auto forward_level = ForwardADLevel::get_by_idx(level);
forward_level->insert(shared_from_this());
std::lock_guard<std::mutex> lock(mutex_);
content_.insert({level, value});
}
// This function removes the tangent for a given level from this ForwardGrad
// Use the update_level flag to disable notifying the level about this reset
// This flag is most notably used by the ForwardADLevel destructor.
void reset(uint64_t level, bool update_level = true) {
if (update_level) {
ForwardADLevel::get_by_idx(level)->erase(shared_from_this());
}
std::unique_lock<std::mutex> lock(mutex_);
const auto& it = content_.find(level);
TORCH_INTERNAL_ASSERT(
it != content_.end(), "Resetting a non-existent level.");
// Keep the Tensor alive until we have released the lock
// This is needed as we can be in a case where this function is called by
// the ForwardADLevel destructor.
auto t = (*it).second;
content_.erase(level);
lock.unlock();
}
const at::Tensor& value(uint64_t level) const;
bool contains(uint64_t level) {
std::lock_guard<std::mutex> lock(mutex_);
return content_.count(level) > 0;
}
bool empty() const {
return content_.empty();
}
static const at::Tensor& undef_grad();
private:
// TODO(albanD): replace this with a SmallVector
std::unordered_map<uint64_t, at::Tensor> content_;
mutable std::mutex mutex_;
};
} // namespace torch::autograd
```
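A hedged sketch of the level lifecycle described in the comments above; the tangent tensor is a placeholder and the control flow is illustrative.

```cpp
// Minimal sketch, assuming forward_grad.h as above: enter a level, attach a
// tangent to a ForwardGrad, then tear everything down in the documented order.
#include <torch/csrc/autograd/forward_grad.h>
#include <memory>

namespace sketch {
using namespace torch::autograd;

void level_lifecycle(const at::Tensor& tangent) {
  // Enter a new forward AD level; the index is the user-facing handle.
  const uint64_t idx = ForwardADLevel::get_next_idx();
  {
    // ForwardGrad must be heap-allocated as a shared_ptr (it uses
    // enable_shared_from_this to register with the level).
    auto fgrad = std::make_shared<ForwardGrad>();
    fgrad->set_value(tangent, idx); // registers fgrad with level idx
    // The owning object is responsible for calling clear() on destruction.
    fgrad->clear();
  }
  // Exit the level; its destructor resets any tangents still registered.
  ForwardADLevel::release_idx(idx);
}
} // namespace sketch
```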
|
====================================================================================================================================
SOURCE CODE FILE: function.h
LINES: 1
SIZE: 30.69 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\function.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/anomaly_mode.h>
#include <torch/csrc/autograd/edge.h>
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/autograd/graph_task.h>
#include <torch/csrc/autograd/input_metadata.h>
#include <torch/csrc/autograd/saved_variable.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/utils/python_stub.h>
#include <torch/csrc/utils/variadic.h>
#include <ATen/SequenceNumber.h>
#include <ATen/core/Tensor.h>
#include <ATen/record_function.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>
#include <algorithm>
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
namespace torch::autograd {
struct Edge;
struct FunctionPostHook;
struct FunctionPreHook;
using tensor_list = std::vector<at::Tensor>;
using variable_list = std::vector<Variable>;
using edge_list = std::vector<Edge>;
using saved_variable_list = std::vector<SavedVariable>;
using ivalue_list = std::vector<c10::IValue>;
using functional_apply_t = std::function<
variable_list(const variable_list&, const std::vector<c10::IValue>&)>;
using IndexRange = std::pair<size_t, size_t>;
using torch::dynamo::autograd::CompiledNodeArgs;
using torch::dynamo::autograd::PackedArgs;
using torch::dynamo::autograd::SwapSavedVariables;
// Custom deleter to prevent stack overflows.
TORCH_API void deleteNode(Node* function);
// Guard that sets and restores the evaluating node
class NodeGuard {
public:
explicit NodeGuard(std::shared_ptr<Node> node);
~NodeGuard();
private:
std::shared_ptr<Node> last_evaluating_node_;
};
// Return the Node currently being evaluated (if any)
// This is only set during the backward pass while a Node is being
// executed.
TORCH_API std::shared_ptr<Node> get_current_node();
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Node
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// A `Node` is an abstract class that represents an operation taking zero
// or more input `Variable`s and producing zero or more output `Variable`s. All
// functions in PyTorch's autograd machinery derive from this class and
// override its `apply` method. Instances of such subclasses will then be
// invokable via the call operator.
//
// Nodes in the Autograd Graph
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// When viewing the autograd system as a graph, `Node`s are the vertices or
// nodes, connected to each other via (directed) `Edge`s, which themselves are
// represented via (`Node`, input_nr) pairs. `Variable`s are the outputs to
// and inputs of `Node`s, and travel between these edges during execution
// of the graph. When two or more `Edge`s (from different sources) point at the
// same input to a `Node`, the values produced along all of these edges are
// implicitly summed prior to being forwarded to the target `Node`.
//
// Hierarchy
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Subclasses usually represent differentiable functions as well as their
// gradient operators. Note, however, that due to the very general definition
// of a `Node` taking *zero* or more inputs and producing *zero* or more
// outputs, uses of `Node`s are flexible and extend beyond purely
// mathematical operations. For example, the `AccumulateGrad` function is a
// *sink*: it takes one input, but produces no outputs, instead accumulating
// the input as a side effect. At the other extreme, the `GraphRoot` function
// receives no inputs from other functions, but produces multiple outputs.
//
// Interface
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// The most important method on `Node` is the call operator, which takes in
// a list of variables and produces a list of variables. The precise size of
// these lists can be determined with `num_inputs()` and `num_outputs()`.
// `Node`s are stitched together via their `next_edge` interface, which lets
// you manipulate the set of outgoing edges of a `Node`. You can add an
// edge with `add_next_edge()`, retrieve an edge with `next_edge(index)` and
// iterate over them via the `next_edges()` method. Other methods exist for
// integration with the JIT and other parts of PyTorch. Every `Node` has a
// *sequence number* that increases monotonically in the order of `Node`
// construction. It can be retrieved via the `sequence_nr()` method. Note that
// this sequence number is *thread local*. This means that when `Node`s
// `A`, `B` and `C` are created consecutively in the same thread, their
// sequence numbers will be ordered `A` < `B` < `C`. If, however, `A` and `B`
// are created in one thread and `C` is created in a new thread, there are *no
// guarantees* w.r.t. the ordering of `C` relative to `A` or `B`.
// See NOTE [ Sequence Number] for more details on the usages of sequence
// number.
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
struct TORCH_API Node : std::enable_shared_from_this<Node> {
public:
/// Construct a new `Node` with the given `next_edges`
explicit Node(uint64_t sequence_nr, edge_list&& next_edges = edge_list())
: sequence_nr_(sequence_nr), next_edges_(std::move(next_edges)) {
for (const Edge& edge : next_edges_) {
update_topological_nr(edge);
}
if (AnomalyMode::is_enabled()) {
metadata()->store_stack();
// If anomaly mode is enabled and the graph is constructed, then assign
// the currently evaluating node as the parent of this node.
// A parent is the Node during whose execution this Node was created.
// We track the parents to track multiple backward operations.
assign_parent();
}
// Store the thread_id of the forward operator.
// See NOTE [ Sequence Numbers ]
thread_id_ = at::RecordFunction::currentThreadId();
}
explicit Node(edge_list&& next_edges = edge_list())
: Node(
/*sequence_nr=*/at::sequence_number::get_and_increment(),
std::move(next_edges)) {}
/// Nodes are neither copyable nor moveable.
Node(const Node& other) = delete;
Node(Node&& other) = delete;
Node& operator=(const Node& other) = delete;
Node& operator=(Node&& other) = delete;
virtual ~Node() = default;
std::shared_ptr<Node> getptr() {
return shared_from_this();
}
/// Evaluates the function on the given inputs and returns the result of the
/// function call.
variable_list operator()(variable_list&& inputs) {
// In the first iteration of named tensors, autograd ignores names and
// operates on unnamed tensors. In the long term, autograd should
// probably operate with names.
at::NoNamesGuard no_names_guard;
#ifdef USE_ROCM
// Keep track of backward pass for rocblas.
at::ROCmBackwardPassGuard in_backward;
#endif
auto step_callbacks =
at::getStepCallbacksUnlessEmpty(at::RecordScope::BACKWARD_FUNCTION);
if (C10_UNLIKELY(step_callbacks.has_value())) {
at::RecordFunction guard(std::move(*step_callbacks));
// Using sequence number and thread id to correlate with
// the forward pass function
guard.setForwardThreadId(thread_id_);
if (guard.needsInputs()) {
std::vector<c10::IValue> inputs_vec(inputs.begin(), inputs.end());
guard.before(
name(),
c10::ArrayRef<const c10::IValue>(
inputs_vec.data(), inputs_vec.size()),
static_cast<int64_t>(sequence_nr()));
} else {
guard.before(name(), static_cast<int64_t>(sequence_nr()));
}
return apply(std::move(inputs));
} else {
return apply(std::move(inputs));
}
}
// Graph Connectivity API
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Inputs. NOTE: inputs of the grad_fn correspond to Tensor outputs of the
// forward function.
// Marker for expected undefined input
struct undefined_input {};
/// Adds the type and shape metadata for a new input. Returns the index of
/// the new input.
uint32_t add_input_metadata(
const at::TensorOptions& options,
c10::SymIntArrayRef shape,
bool is_tensor_subclass,
bool is_nested) noexcept {
uint32_t input_nr = input_metadata_.size();
auto meta_shape = MetadataShape{std::in_place_type<SymIntSmallVec>, shape};
input_metadata_.emplace_back(
options, meta_shape, is_tensor_subclass, is_nested);
return input_nr;
}
uint32_t add_input_metadata(const at::Tensor& t) noexcept {
uint32_t input_nr = input_metadata_.size();
input_metadata_.emplace_back(t);
return input_nr;
}
/// Adds a placeholder for an input that will not be used.
uint32_t add_input_metadata(undefined_input u) noexcept {
uint32_t input_nr = input_metadata_.size();
input_metadata_.emplace_back();
return input_nr;
}
uint32_t num_inputs() const noexcept {
return input_metadata_.size();
}
const InputMetadata& input_metadata(size_t index) const {
return input_metadata_[index];
}
// Danger: not thread safe, caller must protect with lock
InputMetadata& mutable_input_metadata(size_t index) {
return input_metadata_[index];
}
/**
* Note: Function Streams
* A function's stream (for a given device type) is the stream of the first
* element of its input buffer on a device of that type.
*
* If all elements are on the same device they MUST share a stream. If
* elements are on different devices (across multiple GPUs, for example)
* they may have different streams.
*/
std::optional<c10::Stream> stream() {
auto opt_device_type = at::getAccelerator();
if (!opt_device_type.has_value()) {
return std::nullopt;
}
for (const auto& metadata : input_metadata_) {
if (metadata.device().type() == opt_device_type.value())
return metadata.stream();
}
return std::nullopt;
}
// Used by the engine to determine what device thread to run on
at::Device device() {
// Since we pick the first non-CPU tensor, this won't work with
// mixed device-type operations (e.g., an op that is both CUDA
// and XLA). This is *incredibly* unlikely, so we don't worry
// about it.
for (const auto& metadata : input_metadata_) {
auto device = metadata.device();
if (device.type() != at::kCPU) {
return device;
}
}
// Only report to the CPU thread if there really were no tensors
// from other devices.
return at::kCPU;
}
void clear_input_metadata() {
input_metadata_.clear();
}
// Outputs ("Next Edges")
void update_topological_nr(const Edge& edge) {
TORCH_INTERNAL_ASSERT(
!has_parent_,
"Cannot update a node's topological_nr after it already has a parent."
" If we allow this, we can no longer guarantee that a parent's"
" topo_nr is always greater than those of all its children")
Node* node = edge.function.get();
if (node) {
auto topo_nr = node->topological_nr();
if (topological_nr_ <= topo_nr) {
topological_nr_ = topo_nr + 1;
}
}
}
void set_next_edge(size_t index, Edge edge) {
update_topological_nr(edge);
next_edges_[index] = std::move(edge);
}
void add_next_edge(Edge edge) {
update_topological_nr(edge);
next_edges_.emplace_back(std::move(edge));
}
void set_next_edges(edge_list&& next_edges) {
next_edges_ = std::move(next_edges);
for (const auto& next_edge : next_edges_) {
update_topological_nr(next_edge);
}
}
const Edge& next_edge(size_t index) const noexcept {
return next_edges_[index];
}
const edge_list& next_edges() const noexcept {
return next_edges_;
}
edge_list& next_edges() noexcept {
return next_edges_;
}
uint32_t num_outputs() const noexcept {
return next_edges_.size();
}
// Miscellaneous Methods
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// NOTE [ Sequence Number]
///
/// The sequence_nr has two main usages in autograd:
///
/// 1) Helps determine the node's execution priority in the engine.
/// All else being equal, nodes with higher priority numbers are executed
/// first. Thus, nodes corresponding to ops executed later are the first to
/// be executed in the backward pass. One caveat is that we prioritize
/// AccumulateGrad nodes by explicitly setting their sequence_nr to be
/// UINT64_MAX.
/// 2) The sequence number of this `Node` is paired with the thread_id it was
/// created in
/// as a unique identifier by the profiler to annotate recorded events.
/// The purpose of this is to help users (and possibly programs)
/// interpreting the profiler's output to correlate backward nodes with
/// their forward ops. We need both sequence_nr and thread_id to identify a
/// node because sequence_nr is thread_local, i.e., it starts counting up
/// from zero in a new thread.
uint64_t sequence_nr() const noexcept {
return sequence_nr_;
}
void set_sequence_nr(uint64_t sequence_nr) {
sequence_nr_ = sequence_nr;
}
// NOTE [ Topological Number ]
//
// topological_nr is used to prune branches in the DAG during autograd
// discovery as maintaining topological_nr helps us check in O(1) if there
// does NOT exist a directed path between two nodes.
//
// The topological order number of this `Node` represents the length of the
// longest possible path from this Node to any leaf node. If this is a leaf
// node, i.e. AccumulateGrad, this will be zero. This value has the property
// that for every pair of nodes X, Y in G, the existence of a directed path
// from X to Y implies topo_nr(X) > topo_nr(Y). The converse is not true,
// however, so we cannot prove the existence of a path from X to Y, only its
// non-existence.
//
// One assumption we make when using topo_nr is that once a node
// has been used, i.e., has a parent node, its own topo_nr does not change;
// we have added some checks with the `has_parent_` field to enforce this.
//
// What NOT to do:
//
// 1) 2 -> 1 -> 0               In this diagram we label nodes with their
//                              topo_nr.
//    2 -> 1 -> 0               We have two simple graphs that can each
//                              arise from `t.exp().exp()`, for example.
// 2) 2 -> 1 -> 0
//            /
//    2 -> 1 -> 0               We add 2 as a next edge to 1 even though 1
//                              already has a parent.
// 3) 2 -> 1 -> 0
//            /
//    2 -> 3 -> 0               2 < 3, yet there exists a path from 2 to 3!
//
uint64_t topological_nr() const noexcept {
has_parent_ = true;
return topological_nr_;
}
// assigning a node as a parent to this node
void assign_parent();
/// Id of the thread that created Node
uint64_t thread_id() const noexcept {
return thread_id_;
}
/// Returns the name of the dynamic type of the function, for debugging.
virtual std::string name() const;
/// The difference between functions `should_compute_output` and
/// `task_should_compute_output`:
/// - `should_compute_output` should only be used during graph construction
/// and takes into account only requires_grad information
/// - `task_should_compute_output` should only be called during the backward
/// pass (unless called directly through grad_fn) and takes into account the
/// current graph task. Specifically, the autograd engine trims unnecessary
/// edges when `inputs` are specified, and during backward untrimmed nodes
/// left on the graph can/should check `task_should_compute_output` to see if
/// any outgoing edges have been trimmed by the engine. If that is the case,
/// gradient computation wrt those edges can be omitted.
///
/// Returns true if the particular output edge is active, and that particular
/// output of this function should be computed.
bool should_compute_output(size_t output_edge_index) const {
TORCH_CHECK(output_edge_index < num_outputs(), "Index out of range");
return next_edges_[output_edge_index].is_valid();
}
/// Returns true if any of the output edges in any of the ranges are active.
bool should_compute_output(std::initializer_list<IndexRange> idxs) const {
return std::any_of(idxs.begin(), idxs.end(), [this](IndexRange range) {
for (const auto i : c10::irange(range.first, range.second)) {
if (should_compute_output(i))
return true;
}
return false;
});
}
/// Same as the above `should_compute_output` function but will also
/// check whether this edge is needed within the current graph task.
bool task_should_compute_output(size_t output_edge_index) const {
TORCH_CHECK(output_edge_index < num_outputs(), "Index out of range");
const auto& next = next_edges_[output_edge_index];
if (next.is_valid()) {
const auto exec_info = get_current_graph_task_exec_info();
if (exec_info && !exec_info->empty()) {
auto it = exec_info->find(next.function.get());
if (it == exec_info->end() || !it->second.should_execute()) {
return false; // this edge is not needed for the current graph_task
}
}
return true;
}
return false;
}
/// Returns true if any of the output edges in any of the ranges are active
/// and should be computed in the current graph task.
bool task_should_compute_output(
std::initializer_list<IndexRange> idxs) const {
return std::any_of(idxs.begin(), idxs.end(), [this](IndexRange range) {
for (const auto i : c10::irange(range.first, range.second)) {
if (task_should_compute_output(i))
return true;
}
return false;
});
}
/// Returns the `PyObject` stored for this `Node` (for Python
/// interaction).
PyObject* pyobj() const noexcept {
return pyobj_;
}
/// Sets the `PyObject` stored for this `Node` (for Python interaction).
void set_pyobj(PyObject* pyobj) noexcept {
pyobj_ = pyobj;
}
/// Returns the anomaly metadata stored for this `Node`.
/// If none exist, creates a new empty one.
AnomalyMetadata* metadata() noexcept;
// Hook API
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
uintptr_t add_post_hook(std::unique_ptr<FunctionPostHook>&& post_hook) {
post_hooks_.emplace_back(std::move(post_hook));
// Use the raw pointer as the unique key to identify this hook. This key
// can then be used in del_post_hook(key) to remove this hook.
return reinterpret_cast<std::uintptr_t>(post_hooks_.back().get());
}
const std::vector<std::unique_ptr<FunctionPostHook>>& post_hooks()
const noexcept {
return post_hooks_;
}
// delete a post hook matching the key
bool del_post_hook(const uintptr_t& key) {
for (auto it = post_hooks_.begin(); it != post_hooks_.end(); ++it) {
if (key == reinterpret_cast<std::uintptr_t>(it->get())) {
post_hooks_.erase(it);
return true;
}
}
return false;
}
std::vector<std::unique_ptr<FunctionPostHook>>& post_hooks() noexcept {
return post_hooks_;
}
void add_pre_hook(std::unique_ptr<FunctionPreHook>&& pre_hook) {
pre_hooks_.emplace_back(std::move(pre_hook));
}
void add_tensor_pre_hook(std::unique_ptr<FunctionPreHook>&& pre_hook) {
tensor_pre_hooks_.emplace_back(std::move(pre_hook));
}
void add_retains_grad_hook(
std::unique_ptr<FunctionPreHook>&& pre_hook,
size_t output_idx) {
retains_grad_hooks_[output_idx] = std::move(pre_hook);
}
std::unique_ptr<FunctionPreHook> pop_retains_grad_hook(size_t output_idx) {
auto ret = std::move(retains_grad_hooks_[output_idx]);
retains_grad_hooks_.erase(output_idx);
return ret;
}
const std::vector<std::unique_ptr<FunctionPreHook>>& pre_hooks()
const noexcept {
return pre_hooks_;
}
std::vector<std::unique_ptr<FunctionPreHook>>& pre_hooks() noexcept {
return pre_hooks_;
}
virtual std::vector<std::unique_ptr<FunctionPreHook>>&
tensor_pre_hooks() noexcept {
return tensor_pre_hooks_;
}
virtual std::unique_ptr<PostAccumulateGradHook>& tensor_post_acc_grad_hooks()
const noexcept {
static std::unique_ptr<PostAccumulateGradHook> empty = nullptr;
return empty;
}
std::unordered_map<size_t, std::unique_ptr<FunctionPreHook>>&
retains_grad_hooks() noexcept {
return retains_grad_hooks_;
}
// Customization Points for Subclasses
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Releases saved variables if the operation won't be reused.
virtual void release_variables() {}
/// Called before an apply if `release_variables()` is going to be called.
/// Allows larger ops like `InterpreterAutogradFunction` to incrementally
/// release variables as they run.
virtual void will_release_variables() {}
/// Returns true if this function is traceable. An op is traceable if all
/// operations happening within `apply()` are performed on autograd
/// `Variables` (i.e. apply mostly instantiates and applies other functions).
virtual bool is_traceable() {
return false;
}
/// A `Node` is said to pass state transparently to backward, if the
/// state consists only of (Saved)Variables and only non-variable objects
/// that parameterize the operation in some way that defines the graph
/// structure AND the backward function is traceable. In particular,
/// parametrization MUST NOT depend on the data of any `Variable`.
/// TODO: it might be possible to handle cases where backward is
/// non-traceable but state passing could be considered transparent. This
/// will probably depend on saved_variable_list being mutable.
/// NOTE: this value matters only if is_traceable() returns false.
virtual bool passes_state_transparently() {
return false;
}
// see [Note: Compiled Autograd]
// Used by compiled autograd to
// 1) Extract tensors/symint args
// 2) Collect node information for specialization and caching
// Implementations in subclasses should call args.collect() with all node
// attrs. These functions are only called during backward.
virtual void compiled_args(CompiledNodeArgs& args) const {
throw std::runtime_error(
std::string("compiled_args not implemented: ") + name());
}
// Used by compiled autograd to call apply() with different saved tensors
// Implementations should call saved.before() on all attrs, then apply(), then
// saved.after() on all attrs in the same order.
virtual variable_list apply_with_saved(
const variable_list& inputs,
SwapSavedVariables& saved) {
throw std::runtime_error(
std::string("apply_with_saved not implemented: ") + name());
}
// If this node is the AOTBackward node produced by torch.compile.
// Compiled Autograd special-cases on this information.
virtual bool is_aot_backward() const {
return false;
}
protected:
/// Performs the `Node`'s actual operation.
virtual variable_list apply(variable_list&& inputs) = 0;
/// Calls `apply()`, but instruments it with tracing machinery.
variable_list traced_apply(variable_list inputs);
// Sequence number used to correlate backward nodes with forward ops in the
// profiler and provide determinism in the engine.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
uint64_t sequence_nr_;
// See NOTE [ Topological Number ]
uint64_t topological_nr_ = 0;
// Tracks whether this node has been added as the next_edge of another node
// via set_next_edge(s), which always calls topological_nr() of all its
// children. See NOTE [ Topological Number ] for why we need this.
mutable bool has_parent_ = false;
// Id of the thread that created the instance
uint64_t thread_id_ = 0;
// Note [Thread Safety on Autograd Node]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// The autograd engine lets the owning thread that calls Engine::execute drive
// the GraphTask execution. There might be cases where parts of the GraphTask
// are shared across different `backward()` or `grad()` calls, i.e., forking
// new threads in the middle of the forward pass and calling `backward()`
// separately from different threads. We need to protect thread safety on
// NodeTask to prevent data races on shared variables.
//
// NB: This is only needed for autograd Nodes that run on CPU; technically
// "CUDA" and "XLA" nodes don't need locking because device threads are
// always single threaded.
//
// Here we add a thread mutex to help protect the Node's thread safety, so
// that different threads cannot race on the shared data when executing the
// same NodeTask from multiple CPU threads. It IS the user/developer's
// responsibility to take advantage of this mutex to protect the thread
// safety of their autograd Node. The general strategy for thread safety on
// an autograd Node:
//
// 1. Users should lock the mutex during Node::release_variables() if the Node
// needs to release the variables on the fly. This ensures that when we
// release saved_variables from one thread, no other threads can release
// the saved variables concurrently.
// 2. Users should lock the mutex during Node::apply(). This ensures that
// writes to shared variables are not racing across threads (e.g.,
// AccumulateGrad and custom C++ autograd Nodes that write to shared
// variables).
// 3. Items 1 and 2 work together so that when we release saved variables
// from one thread, no other threads can call Node::apply(). This ensures
// the variable references from other threads aren't dangling.
// 4. If the Node doesn't release any variables and has no shared data
// reads/writes, i.e., it is purely functional, users don't need to lock
// the mutex.
//
// This way we can protect thread safety on autograd Nodes, but we still
// cannot protect thread safety on the Node's pre/post C++ hooks (Python
// hooks are automatically thread safe). We rely on the user to write
// thread-safe C++ hooks if they want the hook to be correctly applied in
// a multithreaded environment.
std::mutex mutex_;
edge_list next_edges_;
PyObject* pyobj_ = nullptr; // weak reference
std::unique_ptr<AnomalyMetadata> anomaly_metadata_ = nullptr;
// NOTE [Hooks ordering]
// We have 3 separate fields for pre hooks registered to the autograd nodes
// because the conditions under which they execute are different, and we
// want more fine-grained control over the order in which different types
// of hooks are executed.
// - pre_hooks are only executed when the node itself is executed
// - tensor_pre_hooks are executed as long as the engine traverses over the
// node, even if that node won't be executed.
// - retains_grad_hooks are like tensor_pre_hooks except they are always
// ordered after all other tensor pre hooks
std::vector<std::unique_ptr<FunctionPreHook>> pre_hooks_;
std::vector<std::unique_ptr<FunctionPreHook>> tensor_pre_hooks_;
std::unordered_map<size_t, std::unique_ptr<FunctionPreHook>>
retains_grad_hooks_;
std::vector<std::unique_ptr<FunctionPostHook>> post_hooks_;
at::SmallVector<InputMetadata, 2> input_metadata_;
};
/// See Node::is_traceable() for definition.
struct TraceableFunction : public Node {
using Node::Node;
bool is_traceable() final {
return true;
}
};
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Associated Free Nodes
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace detail {
// Implementation of `collect_next_edges` (see below).
struct MakeNextFunctionList : IterArgs<MakeNextFunctionList> {
edge_list next_edges;
using IterArgs<MakeNextFunctionList>::operator();
void operator()(const Variable& variable) {
if (variable.defined()) {
next_edges.emplace_back(impl::gradient_edge(variable));
} else {
next_edges.emplace_back();
}
}
void operator()(const Variable* variable) {
operator()(*variable);
}
void operator()(const std::optional<Variable>& variable) {
if (variable.has_value()) {
operator()(*variable);
} else {
next_edges.emplace_back();
}
}
};
} // namespace detail
/// Create an `Edge` between the given `variable` and the `function`, which is
/// assumed to be the gradient function of this variable (i.e. the function
/// through which this variable is backpropagated during the backward pass).
/// This sets the `grad_fn` property of the `variable`. This function assumes
/// that the `Variable` is a new input to the gradient function and its
/// `input_nr` thus equal to `function->num_inputs()`. Additionally, it
/// increments the `Node`'s number of inputs by one. Approximately
/// equivalent to `variable.set_gradient_edge(function,
/// function->add_input_metadata(variable.dispatch_type(), variable.sizes()))`.
/// If you don't want the `Node`'s `num_inputs` to be incremented, use
/// `set_gradient_edge` directly.
inline void create_gradient_edge(
Variable& variable,
std::shared_ptr<Node> function) {
// Copy before move.
const auto input_nr = function->add_input_metadata(variable);
impl::set_gradient_edge(variable, {std::move(function), input_nr});
}
/// Return true if any of the variables in the list require a gradient.
inline bool any_variable_requires_grad(const variable_list& variables) {
return std::any_of(
variables.begin(), variables.end(), [](const Variable& variable) {
return variable.defined() && variable.requires_grad();
});
}
/// Return the next edges of all the given variables, or tuples of variables.
template <typename... Variables>
edge_list collect_next_edges(Variables&&... variables) {
detail::MakeNextFunctionList make;
make.apply(std::forward<Variables>(variables)...);
return std::move(make.next_edges);
}
struct TypeAndSize {
TypeAndSize() = default;
/* implicit */
TypeAndSize(const at::Tensor& t)
: sym_sizes(t.sym_sizes().vec()), options(t.options()) {}
at::Tensor zeros();
std::vector<c10::SymInt> sym_sizes;
at::TensorOptions options;
};
} // namespace torch::autograd
```
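A hedged sketch of the `Node` interface described above: a minimal backward node that overrides `apply()` and respects edge trimming via `task_should_compute_output`. The arithmetic and the class name are illustrative, not an actual PyTorch node.

```cpp
// Minimal sketch, assuming function.h as above. Backward of y = 2 * x:
// grad_x = 2 * grad_y. A real node would also wire next_edges to route
// grad_x onward (e.g. via collect_next_edges at construction time).
#include <torch/csrc/autograd/function.h>

namespace sketch {
using torch::autograd::Node;
using torch::autograd::variable_list;

struct MulBy2Backward : public Node {
  variable_list apply(variable_list&& grads) override {
    variable_list grad_inputs(1);
    // Skip work if the engine trimmed this output edge for the current task.
    if (task_should_compute_output(0) && grads[0].defined()) {
      grad_inputs[0] = grads[0] * 2;
    }
    return grad_inputs;
  }
  std::string name() const override {
    return "MulBy2Backward";
  }
};
} // namespace sketch
```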
|
=========================================================================================================================================
SOURCE CODE FILE: function_hook.h
LINES: 1
SIZE: 2.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\function_hook.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Tensor.h>
#include <torch/csrc/Export.h>
#include <string>
#include <vector>
namespace torch::dynamo::autograd {
class CompiledNodeArgs;
class SwapSavedVariables;
struct PackedArgs;
} // namespace torch::dynamo::autograd
// A hook that's called on gradients
namespace torch::autograd {
using Variable = at::Tensor;
using variable_list = std::vector<Variable>;
struct TORCH_API FunctionPreHook {
virtual ~FunctionPreHook() = default;
virtual variable_list operator()(const variable_list& grads) = 0;
// only implemented for python hooks, registers hook with compiled autograd
virtual void compiled_args(
torch::dynamo::autograd::CompiledNodeArgs& args) const {
throw std::runtime_error(
std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
typeid(*this).name());
}
};
struct TORCH_API FunctionPostHook {
virtual ~FunctionPostHook() = default;
virtual variable_list operator()(
const variable_list& outputs /* grad_inputs */,
const variable_list& inputs /* grad_outputs */) = 0;
// only implemented for python hooks, registers hook with compiled autograd
virtual void compiled_args(
torch::dynamo::autograd::CompiledNodeArgs& args) const {
throw std::runtime_error(
std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
typeid(*this).name());
}
};
struct TORCH_API PostAccumulateGradHook {
virtual ~PostAccumulateGradHook() = default;
virtual void operator()(const Variable& tensor) = 0;
// only implemented for python hooks on nodes, registers hook with compiled
// autograd
virtual void compiled_args(
torch::dynamo::autograd::CompiledNodeArgs& args) const {
throw std::runtime_error(
std::string("not yet implemented for compiled autograd: ") +
typeid(*this).name());
}
virtual void apply_with_saved(
Variable&,
torch::dynamo::autograd::SwapSavedVariables&) {
throw std::runtime_error(
std::string("not yet implemented for compiled autograd: ") +
typeid(*this).name());
}
};
} // namespace torch::autograd
```
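A hedged sketch of a C++ gradient pre-hook built on the interface above; the scaling factor is illustrative, and registration happens through `Node::add_pre_hook` from function.h.

```cpp
// Minimal sketch, assuming function_hook.h as above: a FunctionPreHook that
// scales every defined incoming gradient before the node runs.
#include <torch/csrc/autograd/function_hook.h>

namespace sketch {
using torch::autograd::FunctionPreHook;
using torch::autograd::variable_list;

struct ScaleGradPreHook : FunctionPreHook {
  explicit ScaleGradPreHook(double scale) : scale_(scale) {}

  variable_list operator()(const variable_list& grads) override {
    variable_list out;
    out.reserve(grads.size());
    for (const auto& g : grads) {
      // Leave undefined (implicit-zero) gradients untouched.
      out.push_back(g.defined() ? g * scale_ : g);
    }
    return out;
  }

  double scale_;
};
// Registration (illustrative):
//   node->add_pre_hook(std::make_unique<ScaleGradPreHook>(0.5));
} // namespace sketch
```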
|
=====================================================================================================================================================
SOURCE CODE FILE: accumulate_grad.h
LINES: 3
SIZE: 13.59 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\functions\accumulate_grad.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/CachedTensorUtils.h>
#include <ATen/LegacyBatchedTensorImpl.h>
#include <ATen/TensorOperators.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/utils/grad_layout_contract.h>
#include <torch/csrc/autograd/variable.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/_sparse_coo_tensor_unsafe.h>
#endif
#include <mutex>
namespace torch::autograd {
#define CHECK_RESULT(RESULT, VAR) \
if (!(RESULT.is_sparse() || VAR.is_sparse() || RESULT.is_sparse_csr() || \
VAR.is_sparse_csr())) { \
if (!utils::obeys_layout_contract(RESULT, VAR)) { \
TORCH_WARN_ONCE( \
"grad and param do not obey the gradient layout contract. " \
"This is not an error, but may impair performance.\n" \
"grad.sizes() = ", \
RESULT.sizes(), \
", strides() = ", \
RESULT.strides(), \
"\n", \
"param.sizes() = ", \
VAR.sizes(), \
", strides() = ", \
VAR.strides()); \
} \
}
struct TORCH_API AccumulateGrad : public Node {
explicit AccumulateGrad(Variable variable_);
variable_list apply(variable_list&& grads) override;
std::vector<std::unique_ptr<FunctionPreHook>>& tensor_pre_hooks() noexcept
override {
// NB: Since the AccumulateGrad Node is only a weak ref from the Tensor,
// it can be destroyed even though the Tensor is still alive (contrary
// to all other Nodes). So we must lazily read the Tensor hooks here.
return impl::hooks(variable);
}
std::unique_ptr<PostAccumulateGradHook>& tensor_post_acc_grad_hooks()
const noexcept override {
// NB: Since the AccumulateGrad Node is only a weak ref from the Tensor,
// it can be destroyed even though the Tensor is still alive (contrary
// to all other Nodes). So we must lazily read the Tensor hooks here.
return impl::post_acc_grad_hooks(variable);
}
// Given a variable with its current grad as variable_grad, accumulates
// new_grad into variable_grad if in place accumulation is possible.
// Otherwise, uses 'update_grad' to update the grad for the variable.
// "Gradient Layout Contract"
//
// AccumulateGrad tries to stash strided (non-sparse) grads with memory layout
// (strides) such that variables and grads interact efficiently in later
// optimizer kernels, and grads interact efficiently with c10d::Reducer.cpp.
//
// Specifically, AccumulateGrad tries to ensure the following
// (cf torch/csrc/autograd/utils/grad_layout_contract.h):
// (1) if variable.is_non_overlapping_and_dense(), the stashed grad's
// strides match variable.
// (2) else, stashed grad is rowmajor contiguous.
// If variable's grad does not exist (!variable_grad.defined())
// AccumulateGrad steals new_grad if it's stealable and obeys the contract
// already, otherwise it deep copies new_grad into an obedient clone.
//
// If variable's grad already exists (variable_grad.defined()), new_grad must
// be added to variable_grad. If we aren't setting up for double backward
// (!GradMode::is_enabled()), AccumulateGrad performs "variable_grad +=
// new_grad" in-place, which keeps variable_grad's layout. We assume (hope)
// variable_grad was created obeying (1) or (2) at some point in the past.
//
// If we are setting up for double backward, AccumulateGrad updates the grad
// out-of-place via "variable_grad + new_grad." TensorIterator operator+
// decides result's layout. Typically TensorIterator matches strides of the
// first arg, so we once again assume (hope) variable_grad was originally
// created obeying (1) or (2).
//
// AccumulateGrad does not enforce the contract with 100% certainty. Examples:
// - If a user manually permutes a param or its grad, then runs a fwd+bwd,
// variable_grad += new_grad keeps variable_grad's layout without
// rechecking the contract.
// - If TensorIterator changes its corner cases about operator+'s result
// (for example, giving more or less priority to channels_last inputs, see
// https://github.com/pytorch/pytorch/pull/37968) the result may not obey.
//
// Fortunately, if a given grad doesn't satisfy (1) or (2), the penalty is
// degraded performance in Reducer.cpp or optimizer kernels, not death by
// assert or silently bad numerics.
// variable: the variable whose grad we're accumulating.
// variable_grad: the current grad for the variable.
// new_grad: new grad we want to accumulate for the variable.
// num_expected_refs: the number of refs we expect to hold internally
// such that it is safe to avoid cloning the grad
// if use_count() of the grad is less than or equal
// to this value (in addition to post_hooks).
// update_grad: Function that is used to update grad for the variable.
// The argument to the function is a Tensor which
// is used to set a new value for the grad.
template <typename T>
static void accumulateGrad(
const Variable& variable,
at::Tensor& variable_grad,
const at::Tensor& new_grad,
size_t num_expected_refs,
const T& update_grad) {
if (!variable_grad.defined()) {
if (!GradMode::is_enabled() && !new_grad.is_sparse() &&
!new_grad.is_sparse_csr() &&
!(variable.is_sparse_csr() && new_grad.layout() == at::kStrided) &&
at::caching::adjusted_use_count(new_grad) <= num_expected_refs &&
(new_grad.is_mkldnn() ||
utils::obeys_layout_contract(new_grad, variable))) {
// we aren't setting up for double-backward
// not sparse
// no other user-visible tensor references new_grad
// new_grad obeys the "Gradient Layout Contract"; there is a special
// case for MKLDNN tensors, which are opaque tensors, where we assume
// they obey the layout contract. Under these conditions, we can steal
// new_grad without a deep copy.
update_grad(new_grad.detach());
} else if (
!GradMode::is_enabled() && new_grad.is_sparse() &&
new_grad._indices().is_contiguous() &&
new_grad._values().is_contiguous() &&
// Use count for indices and values should always be <=1 since the
// SparseTensor should be the only one holding a reference to these.
new_grad._indices().use_count() <= 1 &&
new_grad._values().use_count() <= 1 &&
new_grad.use_count() <= num_expected_refs) {
// Can't detach sparse tensor (since metadata changes are not allowed
// after detach), so just create a new one for the grad which is a
// shallow copy. We need a shallow copy so that modifying the original
// grad tensor doesn't modify the grad we accumulate.
// We only skip clone if indices and values themselves are contiguous
// for backward compatibility reasons. Since without this optimization,
// earlier we would clone the entire SparseTensor which cloned indices
// and values.
// For details see https://github.com/pytorch/pytorch/issues/34375.
// No scenario where we expect this to be true currently
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
!at::caching::is_cached_tensor(new_grad._indices()) &&
!at::caching::is_cached_tensor(new_grad._values()) &&
!at::caching::is_cached_tensor(new_grad));
update_grad(at::_sparse_coo_tensor_unsafe(
new_grad._indices(),
new_grad._values(),
new_grad.sizes(),
new_grad.options()));
} else {
if (new_grad.is_sparse() || new_grad.is_sparse_csr() ||
new_grad.is_nested()) {
update_grad(new_grad.clone());
} else {
if (new_grad.is_mkldnn()) {
update_grad(new_grad.clone());
} else {
// Deep copies new_grad according to the "Gradient Layout Contract."
update_grad(utils::clone_obey_contract(new_grad, variable));
}
}
}
} else if (!GradMode::is_enabled()) {
// This case is not strictly necessary, but it makes the first-order only
// case slightly more efficient.
if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
// If `variable_grad` is sparse and `new_grad` is not sparse, their
// sum is not sparse, and we must change the TensorImpl type of
// `variable_grad` for it to store the result. However, changing the
// TensorImpl type of a tensor requires changing the tensor itself, and
// thus in this case we have to change the grad tensor.
auto result = new_grad + variable_grad;
CHECK_RESULT(result, variable);
update_grad(std::move(result));
} else if (!at::inplaceIsVmapCompatible(variable_grad, new_grad)) {
// Ideally we'd perform an in-place operation to avoid changing
// the grad tensor. However, if that's impossible because the grads
// are vmap-incompatible (See NOTE: [vmap-incompatible in-place
// operations]), then we just add them out-of-place.
auto result = variable_grad + new_grad;
CHECK_RESULT(result, variable);
update_grad(std::move(result));
} else {
// In this case we can avoid changing the grad tensor. There are four
// scenarios when we'll hit this case:
//
// 1. `variable_grad` is sparse, and `new_grad` is sparse.
// 2. `variable_grad` is dense, and `new_grad` is sparse.
// 3. `variable_grad` is dense, and `new_grad` is dense.
// 4. `variable_grad` is mkldnn, and `new_grad` is mkldnn.
//
// In all of these four cases, `variable_grad += new_grad` is a
// valid operation which adds `new_grad` to `variable_grad` in
// place. `variable_grad` is thus still referring to the same tensor
// after the operation.
// Also, the DistributedDataParallel (DDP) package relies on grad being
// mutated in place for saving peak memory usage. DDP will still
// work correctly if it is mutated out of place here, but DDP will
// maintain one extra copy of grad tensors in buffer and thus
// increase peak memory usage.
variable_grad += new_grad;
CHECK_RESULT(variable_grad, variable);
// ^ We could enforce the contract more aggressively here by writing:
// if (variable_grad.is_sparse() || new_grad.is_sparse()) {
// variable_grad += new_grad;
// } else if (obeys_layout_contract(variable_grad, variable)) {
// variable_grad += new_grad;
// } else {
// result = at::empty_strided(variable.sizes(), variable.strides(),
// variable.options().memory_format(std::nullopt));
// update_grad(at::native::add_out(result, variable_grad,
// new_grad, 1.0);
// }
// However, that accumulation is sometimes in place and sometimes not,
// which may break user code.
}
} else {
at::Tensor result;
if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
// CPU backend throws an error on sparse + dense, so prefer dense +
// sparse here.
result = new_grad + variable_grad;
} else {
      // Assumes operator+'s result typically matches the strides of its
      // first arg, and hopes variable_grad was originally created obeying
      // the layout contract.
result = variable_grad + new_grad;
}
CHECK_RESULT(result, variable);
update_grad(std::move(result));
// ^ We could enforce the contract more aggressively here by saying
// if (obeys_layout_contract(new_grad, variable)) {
// update_grad(new_grad + variable_grad);
// } else {
// update_grad(variable_grad + new_grad);
// }
// such that the stashed grad is likely to have the right strides if
// either variable_grad or new_grad already has the right strides.
// We could enforce the contract with certainty by saying
// auto result = variable_grad + new_grad (or vice versa), checking
// result's layout, and copying to an obedient clone if necessary before
// update_grad. The copy would require another gmem pass. We can't create
// empty result with the right layout then add_out into it with a single
// kernel, because GradMode is enabled in this branch, and add_out isn't
// differentiable. Maybe more trouble than it's worth.
}
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(
const variable_list& inputs,
SwapSavedVariables& saved) override;
Variable variable;
};
#undef CHECK_RESULT
} // namespace torch::autograd
```
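A minimal sketch of the accumulation behavior documented above, assuming a standard libtorch build (`torch/torch.h`): with dense grads, the second backward pass takes the in-place `variable_grad += new_grad` branch, so `.grad` keeps the same `TensorImpl`, which is the property DDP relies on.

```cpp
#include <torch/torch.h>
#include <cassert>

int main() {
  auto x = torch::ones({2, 3}, torch::requires_grad());
  (x * 2).sum().backward();
  // First accumulation stashes a grad tensor for x.
  auto* impl = x.grad().unsafeGetTensorImpl();

  (x * 3).sum().backward();
  // Second accumulation is dense += dense, done in place: same TensorImpl,
  // now holding 2 + 3 = 5 in every element.
  assert(x.grad().unsafeGetTensorImpl() == impl);
  assert(x.grad()[0][0].item<float>() == 5.0f);
}
```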
|
===============================================================================================================================================
SOURCE CODE FILE: basic_ops.h
LINES: 1
SIZE: 3.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\functions\basic_ops.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/irange.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>
#include <memory>
#include <string>
#include <vector>
namespace torch::autograd {
struct TORCH_API Error : public Node {
Error(std::string msg, edge_list&& next_edges)
: Node(std::move(next_edges)), msg(std::move(msg)) {}
Error(std::string msg) : msg(std::move(msg)) {}
variable_list apply(variable_list&& inputs) override;
variable_list apply(variable_list&& inputs) const;
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(
const variable_list& inputs,
SwapSavedVariables& saved) override;
std::string msg;
};
// We print grad_fn names in tensor printing. For functions with backward
// NYI, grad_fn=<Error> will be printed if we use Error, which is confusing. So
// we special-case them with a new NotImplemented function here.
struct TORCH_API NotImplemented : public Error {
NotImplemented(const std::string& forward_fn, edge_list&& next_edges)
: Error(
"derivative for " + forward_fn + " is not implemented",
std::move(next_edges)) {}
NotImplemented(const std::string& forward_fn)
: Error("derivative for " + forward_fn + " is not implemented") {}
};
// Identity in forward, Error in backward. Used to implement
// @once_differentiable
struct TORCH_API DelayedError : public Node {
DelayedError(std::string msg, int64_t num_inputs) : msg(std::move(msg)) {
    for ([[maybe_unused]] const auto _ : c10::irange(num_inputs)) {
add_input_metadata(Node::undefined_input());
}
}
variable_list apply(variable_list&& inputs) override;
variable_list apply(variable_list&& inputs) const;
std::string msg;
};
struct TORCH_API UndefinedGrad : public Node {
UndefinedGrad() {
add_input_metadata(Node::undefined_input());
}
variable_list apply(variable_list&& inputs) override;
variable_list apply(variable_list&& inputs) const;
};
struct TORCH_API UndefinedGradBackward : public Node {
UndefinedGradBackward(edge_list&& next_edges) : Node(std::move(next_edges)) {}
UndefinedGradBackward() = default;
variable_list apply(variable_list&& inputs) override;
variable_list apply(variable_list&& inputs) const;
void compiled_args(CompiledNodeArgs& args) const override {}
variable_list apply_with_saved(
const variable_list& inputs,
SwapSavedVariables& saved) override {
return apply(variable_list(inputs));
}
};
struct TORCH_API GraphRoot : public Node {
GraphRoot(edge_list functions, variable_list inputs)
: Node(std::move(functions)), outputs(std::move(inputs)) {
    // Ensures that calls to stream() on a GraphRoot instance reflect the
    // current stream(s) on the devices of the root grad tensors at the time
    // the instance is constructed.
for (const auto& t : outputs) {
add_input_metadata(t);
}
}
variable_list apply(variable_list&& inputs) override {
return outputs;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(
const variable_list& inputs,
SwapSavedVariables& saved) override;
variable_list outputs;
};
struct TORCH_API Identity : public Node {
variable_list apply(variable_list&& inputs) override;
};
} // namespace torch::autograd
```
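A hedged sketch of two of these nodes in isolation (constructed by hand here purely for illustration; the engine normally builds them internally): `GraphRoot::apply` just replays the stashed root gradients, and `Error::apply` raises its stored message.

```cpp
#include <torch/torch.h>
#include <torch/csrc/autograd/functions/basic_ops.h>
#include <iostream>

using namespace torch::autograd;

int main() {
  // GraphRoot stashes the root grads at construction and returns them
  // unchanged from apply().
  auto root = std::make_shared<GraphRoot>(
      edge_list{}, variable_list{torch::ones({2})});
  auto out = root->apply({});
  std::cout << out[0] << '\n';

  // Error throws when the engine reaches it during backward.
  auto err = std::make_shared<Error>("backward is NYI here");
  try {
    err->apply({});
  } catch (const std::exception& e) {
    std::cout << e.what() << '\n';  // "backward is NYI here"
  }
}
```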
|
==========================================================================================================================================
SOURCE CODE FILE: comm.h
LINES: 1
SIZE: 1.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\functions\comm.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>
#include <ATen/ATen.h>
#include <c10/cuda/CUDAStream.h>
#include <optional>
#include <cstddef>
#include <vector>
namespace torch::autograd {
struct TORCH_CUDA_CU_API Scatter : public Node {
explicit Scatter(
std::vector<at::Device> devices,
std::optional<std::vector<int64_t>> chunk_sizes = std::nullopt,
int64_t dim = 0,
std::optional<std::vector<std::optional<at::cuda::CUDAStream>>> streams =
std::nullopt,
bool unsqueeze_scalars = false);
~Scatter() override;
variable_list apply(variable_list&& inputs) override;
std::vector<at::Device> devices_;
std::optional<std::vector<int64_t>> chunk_sizes_;
int64_t dim_;
std::optional<std::vector<std::optional<at::cuda::CUDAStream>>> streams_;
bool unsqueeze_scalars_;
};
struct TORCH_CUDA_CU_API Gather : public Node {
explicit Gather(const at::Device& destination_device, int64_t dim = 0);
~Gather() override;
variable_list apply(variable_list&& inputs) override;
at::Device destination_device_;
int64_t dim_;
};
} // namespace torch::autograd
```
|
============================================================================================================================================
SOURCE CODE FILE: pybind.h
LINES: 1
SIZE: 0.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\functions\pybind.h
ENCODING: utf-8
```h
#pragma once
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/autograd/python_cpp_function.h>
#include <torch/csrc/autograd/python_function.h>
// NOLINTNEXTLINE(misc-unused-alias-decls)
namespace py = pybind11;
namespace pybind11::detail {} // namespace pybind11::detail
```
|
============================================================================================================================================
SOURCE CODE FILE: tensor.h
LINES: 1
SIZE: 7.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\functions\tensor.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>
#include <ATen/TensorGeometry.h>
#include <ATen/core/DeprecatedTypeProperties.h>
#include <optional>
#include <cstdint>
#include <memory>
namespace torch::autograd {
struct TORCH_API CopyBackwards : public Node {
variable_list apply(variable_list&& grads) override;
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(
const variable_list& inputs,
SwapSavedVariables& saved) override;
at::TensorOptions src_options;
};
// Note [View + Inplace update for base tensor]
//
// This note covers a few important topics related to view + inplace handling.
// - It explains what is the CopySlices Node and why we need it.
// - It explains the considerations on what is saved for backward in
// CopySlices.
// - It explains why we need to sometimes change the exec_info of the current
// backward
//
// What is CopySlices?
// ~~~~~~~~~~~~~~~~~~~
//
// We support autograd with inplace mutation; e.g., if you write x.mul_(2)
// the autograd will work as if you now had multiple Tensors under the hood and
// you did
// x = t.clone()
// x0 = x
// x1 = x0 * 2
// x = x1
// As you can see here, after this operation, x.grad_fn now points to x1.grad_fn
// (the MulBackward node) and this node points to x's original grad_fn (which is
// also x0.grad_fn). It is important to keep in mind that after the inplace,
// there is no Tensor object that represents the x0 state anymore. But the graph
// for it is still around in autograd (in case x was used before being modified
// inplace). See Example 1 in
// https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE
// We call this rebasing the history of the Tensor.
//
// Now, a difficult situation is what happens if x is a differentiable view
// of a base b.
// b = t.clone()
// x = b.select(0, 0)
// x *= 2
// With the same approach as above, this will become
// b = t.clone()
// x = b.select(0, 0)
// b0 = b
// x0 = x
// x1 = x0 * 2
// b1 = b0.select_scatter(x1, 0, 0)
// x2 = b1.select(0, 0)
// x = x2
// b = b1
// As you can see here, not only do we need to modify x's grad_fn, we also need
// to modify b's, and we need to ensure that the new grad_fn on x is linked to
// b's new grad_fn. Chaining the select_scatter, multiplication and select is
// what CopySlices does, all wrapped into a single Node.
//
// See Example 1 in
// https://docs.google.com/drawings/d/1-T5DyYfChMX1ONQkY-zU-hj_ayQ2zmA5CBOKDWqvEhE
//
// What do we need to save in CopySlices to run backward?
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// We need to perform grad_view = fn(grad_view), but out-of-place.
// view_fn_ is an optional function saved in DifferentiableViewMeta
// from the forward pass, so that we can recover the view when as_strided is
// not supported. It preserves the invariants:
// view = view_fn_(base)
// grad_view = view_fn_(grad_base)
//
// When as_strided is supported (e.g. strided CPU/CUDA Tensors), view_fn_
// is empty and we save TensorGeometry(view) instead.
// With the TensorGeometry information we can use the `as_strided` call, which
// is a more efficient way to recover views in backward.
//
// For example:
// view_1 = view_op_1(base)
// view_2 = view_op_2(view_1)
// ...
// view_n = view_op_n(view_n-1)
// view_n = inplace_op(view_n)
//
// In the CPU/CUDA case, where we support an efficient as_strided
// implementation, grad_view_n can be calculated in a single step:
//
// grad_view_n = grad_base.as_strided(view_sizes, view_strides, view_offset);
//
// But on the XLA backend, where we don't have full support for as_strided,
// we have to save a chained lambda function view_fn_ to exactly
// replay how the view was constructed in the forward pass.
//
// view_fn_ = view_op_n(...(view_op_2(view_op_1())))
// grad_view_n = view_fn_(grad_base)
//
// This chained view_fn_ works as long as the forward view ops are implemented
// (e.g. XLA simulates views without a real Storage behind the Tensor), but it
// is less efficient than the as_strided path, so we should be careful to use
// it only when necessary.
//
// - For CPU/CUDA we save the TensorGeometry of both the base and view tensors.
//   That's all we need to pass into as_strided:
//   e.g. int[] sizes, int[] strides, and int storage_offset.
// - For XLA we use view_fn_, which captures all forward view op arguments
//   by **value**.
//   E.g. for at::narrow, int dim, int start, int length are saved.
//
// Theoretically we could also save the Tensor `view` in the CopySlices Node,
// but it's far more expensive than what we currently save:
// 1. We cannot afford to keep large tensors alive just to recover views.
// 2. There are in-place checks when Tensors are loaded back to make sure
//    they haven't been changed (including size metadata).
// So saving metadata like the TensorGeometry/view arguments is much better,
// because it is the minimal information needed to recover views, and it
// allows the user to modify the original Tensor without preventing the
// backward pass from running.
//
// Why do we manually change exec_info in the apply?
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Using the same example as before,
// b = t.clone()
// x = b.select(0, 0)
// x *= y
//
// You can see the visualization at
// https://docs.google.com/drawings/d/1Bx-Hcz-zlIv7PabQqnPhUIVIs9F8WWi48svqMsAUMFs
// which contains the wrapped MulBackward Node and shows what it links to.
// Since a backward can happen between any subset of the inputs (t and y) and
// outputs (o, x, b), it is possible to get into a state where CopySlices's 0th
// next function (CloneBackward) needs gradient but MulBackward's 0th next
// function (SelectBackward) does not. This happens if you do autograd.grad
// between x and t, for example.
// In such a case, we do need to mark SelectBackward as requiring gradient so
// that, during the execution of MulBackward, we will actually compute the
// gradient for the 0th input.
//
// All the other next functions are always shared (this is asserted in the apply
// code) and so nothing needs to be done for them.
// See Note [View + Inplace update for view tensor] for what we do to view
// tensor when an in-place operation happens.
struct TORCH_API CopySlices : public Node {
CopySlices(
const Variable& base_var,
at::TensorGeometry view_,
std::unique_ptr<ViewFunc> view_fn_,
std::shared_ptr<Node> fn_);
// common code between apply/apply_with_saved
template <typename T>
variable_list apply_impl(variable_list&& inputs, const T& call_fn);
variable_list apply(variable_list&& inputs) override;
void release_variables() override;
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(
const variable_list& inputs,
SwapSavedVariables& saved) override;
void update_exec_info();
at::TensorGeometry base;
// view and view_fn are redundant and view_fn will be used if available.
// See Note [View + Inplace update for base tensor] for details.
at::TensorGeometry view;
std::unique_ptr<ViewFunc> view_fn;
std::shared_ptr<Node> fn;
};
} // namespace torch::autograd
```
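The rebasing described in this note is easiest to see end to end from the user side. A minimal libtorch sketch (the exact string printed for the node name may vary across builds):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  auto t = torch::ones({3, 4}, torch::requires_grad());
  auto b = t.clone();        // non-leaf base, grad_fn = CloneBackward
  auto x = b.select(0, 0);   // differentiable view of b
  x.mul_(2);                 // in-place update through the view

  // b's history has been rebased: its grad_fn is now the CopySlices node
  // that replays select -> mul -> select_scatter during backward.
  std::cout << b.grad_fn()->name() << '\n';

  b.sum().backward();
  // Row 0 of t gets gradient 2 (through the mul), the other rows get 1.
  std::cout << t.grad() << '\n';
}
```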
|
===========================================================================================================================================
SOURCE CODE FILE: utils.h
LINES: 1
SIZE: 3.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\functions\utils.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/irange.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/InferenceMode.h>
#include <torch/csrc/autograd/autograd.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/utils/variadic.h>
#include <ATen/core/Tensor.h>
#include <functional>
#include <memory>
#include <vector>
namespace torch::autograd {
using function_constructor = std::function<std::shared_ptr<Node>(edge_list&&)>;
/**
 * Wraps the tensor outputs in variables, and creates and sets the grad_fn
 * if necessary.
*/
TORCH_API variable_list wrap_outputs(
const variable_list& inputs,
tensor_list&& outputs,
const function_constructor& ctr);
/// Checks that inputs contains exactly `args` items and that the first
/// `required_args` items are not nullptr. If not specified, `required_args`
/// defaults to `args`.
TORCH_API void check_input_variables(
const char* name,
const variable_list& inputs,
int args,
int required_args = -1,
bool allow_undefined = false);
struct ComputeRequiresGrad : IterArgs<ComputeRequiresGrad> {
bool out = false;
using IterArgs<ComputeRequiresGrad>::operator();
void operator()(const at::Tensor& tensor) {
const auto& var = static_cast<const Variable&>(tensor);
if (var.defined() && var.requires_grad()) {
out = true;
}
}
void operator()(const std::optional<at::Tensor>& tensor) {
if (tensor.has_value()) {
(*this)(*tensor);
}
}
bool short_circuit() {
return out;
}
};
template <typename... Args>
inline bool compute_requires_grad(Args&&... args) {
if (!GradMode::is_enabled()) {
return false;
}
return ComputeRequiresGrad().apply(std::forward<Args>(args)...).out;
}
inline void set_history(
const at::Tensor& variable,
const std::shared_ptr<Node>& grad_fn) {
TORCH_CHECK(grad_fn != nullptr);
if (variable.defined()) {
// If the codegen triggers this, you most likely want to add your newly
// added function to the DONT_REQUIRE_DERIVATIVE list in
// tools/autograd/gen_variable_type.py
TORCH_INTERNAL_ASSERT(isDifferentiableType(variable.scalar_type()));
auto output_nr = grad_fn->add_input_metadata(variable);
impl::set_gradient_edge(variable, {grad_fn, output_nr});
} else {
grad_fn->add_input_metadata(Node::undefined_input());
}
}
inline void set_history(
const std::vector<Variable>& variables,
const std::shared_ptr<Node>& grad_fn) {
for (auto& variable : variables) {
set_history(variable, grad_fn);
}
}
inline bool isFwGradDefined(const std::optional<at::Tensor>& t) {
  return t.has_value() && t->defined() && t->_fw_grad(/*level=*/0).defined();
}
inline bool isFwGradDefinedTensorList(const at::ITensorListRef& variables) {
bool ret = false;
for (auto& variable : variables) {
ret |= isFwGradDefined(variable);
}
return ret;
}
inline bool isFwGradDefinedTensorList(
const c10::List<std::optional<at::Tensor>>& li) {
bool ret = false;
for (auto i : c10::irange(li.size())) {
auto t = li.get(i);
ret |= isFwGradDefined(t);
}
return ret;
}
} // namespace torch::autograd
```
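A hedged sketch of how these helpers compose, mirroring the pattern the autograd codegen emits; `my_scale` is a hypothetical op, and `NotImplemented` (from basic_ops.h) serves here only as a placeholder backward:

```cpp
#include <torch/torch.h>
#include <torch/csrc/autograd/functions/basic_ops.h>
#include <torch/csrc/autograd/functions/utils.h>

using namespace torch::autograd;

at::Tensor my_scale(const at::Tensor& self, double alpha) {
  // Decide up front whether a grad_fn is needed and collect the next edges.
  std::shared_ptr<NotImplemented> grad_fn;
  if (compute_requires_grad(self)) {
    grad_fn = std::make_shared<NotImplemented>(
        "my_scale", collect_next_edges(self));
  }
  at::Tensor result;
  {
    // Run the math below the autograd dispatch key so the kernel itself
    // doesn't record history.
    at::AutoDispatchBelowAutograd guard;
    result = at::mul(self, alpha);
  }
  if (grad_fn) {
    // Wire the result into the graph: adds input metadata to the node and
    // sets the gradient edge on the output.
    set_history(result, grad_fn);
  }
  return result;
}
```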
|
===============================================================================================================================================
SOURCE CODE FILE: Functions.h
LINES: 1
SIZE: 516.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\generated\Functions.h
ENCODING: utf-8
```h
#pragma once
// @generated from ..\tools\autograd\templates/Functions.h
#include <ATen/ATen.h>
#include <ATen/core/functional.h>
#include <ATen/TensorGeometry.h>
#include "torch/csrc/autograd/function.h"
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/autograd/saved_variable.h"
#include <torch/csrc/Export.h>
#include <c10/core/SymIntArrayRef.h>
namespace torch { namespace autograd { namespace generated {
using at::Scalar;
using at::Tensor;
using at::IntArrayRef;
using at::ArrayRef;
using at::Type;
using at::TensorGeometry;
using at::ScalarType;
using std::optional;
using c10::fmap;
inline std::vector<Tensor> unpack_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
// NB: we must explicitly do the conversion in the lambda, otherwise template
// deduction will give a Tensor of Variable which is not convertible
return fmap(xs, [&saved_for](const SavedVariable& x) {
// TODO(crcrpar): Use `std::move(saved_for)` to avoid incrementing refcount, which would need refactoring.
return static_cast<Tensor>(x.unpack(saved_for));
});
}
inline c10::List<std::optional<Tensor>> unpack_opt_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
torch::List<std::optional<Tensor>> result;
result.reserve(xs.size());
for (const SavedVariable& v : xs) {
auto var = v.unpack(saved_for);
result.push_back(var.defined() ? std::optional<Tensor>(var) : ::std::nullopt);
}
return result;
}
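// Editorial illustration (not part of the generated file): the corresponding
// generated apply() bodies in Functions.cpp consume the helpers above roughly
// like this, where `ExampleBackward0` and its `tensors_` member are
// hypothetical stand-ins for a node that saved a list of tensors:
//
//   variable_list ExampleBackward0::apply(variable_list&& grads) {
//     std::lock_guard<std::mutex> lock(mutex_);
//     auto tensors = unpack_list(tensors_, shared_from_this());
//     // ... compute input grads from `grads` and `tensors` ...
//   }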
using torch::autograd::TypeAndSize;
#ifdef _WIN32
struct AbsBackward0 : public TraceableFunction {
TORCH_API AbsBackward0() = default;
#else
struct TORCH_API AbsBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AbsBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct AcosBackward0 : public TraceableFunction {
TORCH_API AcosBackward0() = default;
#else
struct TORCH_API AcosBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AcosBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct AddBackward0 : public TraceableFunction {
TORCH_API AddBackward0() = default;
#else
struct TORCH_API AddBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AddBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::ScalarType other_scalar_type;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct AddBackward1 : public TraceableFunction {
TORCH_API AddBackward1() = default;
#else
struct TORCH_API AddBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AddBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct AddbmmBackward0 : public TraceableFunction {
TORCH_API AddbmmBackward0() = default;
#else
struct TORCH_API AddbmmBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AddbmmBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
batch1_.reset_data();
batch2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
SavedVariable batch1_;
c10::SymInt batch1_sym_argsize_0;
c10::SymInt batch1_sym_argsize_1;
SavedVariable batch2_;
c10::SymInt batch2_sym_argsize_2;
at::Scalar beta;
};
#ifdef _WIN32
struct AddcdivBackward0 : public TraceableFunction {
TORCH_API AddcdivBackward0() = default;
#else
struct TORCH_API AddcdivBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AddcdivBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
tensor1_.reset_data();
tensor2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::ScalarType self_scalar_type;
SavedVariable tensor1_;
at::ScalarType tensor1_scalar_type;
SavedVariable tensor2_;
at::ScalarType tensor2_scalar_type;
at::Scalar value;
};
#ifdef _WIN32
struct AddcmulBackward0 : public TraceableFunction {
TORCH_API AddcmulBackward0() = default;
#else
struct TORCH_API AddcmulBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AddcmulBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
tensor1_.reset_data();
tensor2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::ScalarType self_scalar_type;
SavedVariable tensor1_;
at::ScalarType tensor1_scalar_type;
SavedVariable tensor2_;
at::ScalarType tensor2_scalar_type;
at::Scalar value;
};
#ifdef _WIN32
struct AddmmBackward0 : public TraceableFunction {
TORCH_API AddmmBackward0() = default;
#else
struct TORCH_API AddmmBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AddmmBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mat1_.reset_data();
mat2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::Scalar beta;
SavedVariable mat1_;
at::Layout mat1_layout;
std::vector<c10::SymInt> mat1_sym_sizes;
std::vector<c10::SymInt> mat1_sym_strides;
SavedVariable mat2_;
at::Layout mat2_layout;
std::vector<c10::SymInt> mat2_sym_sizes;
std::vector<c10::SymInt> mat2_sym_strides;
};
#ifdef _WIN32
struct SparseAddmmBackward0 : public TraceableFunction {
TORCH_API SparseAddmmBackward0() = default;
#else
struct TORCH_API SparseAddmmBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SparseAddmmBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mat1_.reset_data();
mat2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::Scalar beta;
SavedVariable mat1_;
SavedVariable mat2_;
at::Layout mat2_layout;
std::vector<c10::SymInt> mat2_sym_sizes;
std::vector<c10::SymInt> mat2_sym_strides;
};
#ifdef _WIN32
struct AddmvBackward0 : public TraceableFunction {
TORCH_API AddmvBackward0() = default;
#else
struct TORCH_API AddmvBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AddmvBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mat_.reset_data();
vec_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::Scalar beta;
SavedVariable mat_;
SavedVariable vec_;
};
#ifdef _WIN32
struct AddrBackward0 : public TraceableFunction {
TORCH_API AddrBackward0() = default;
#else
struct TORCH_API AddrBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AddrBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
vec1_.reset_data();
vec2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::Scalar beta;
SavedVariable vec1_;
SavedVariable vec2_;
};
#ifdef _WIN32
struct AffineGridGeneratorBackward0 : public TraceableFunction {
TORCH_API AffineGridGeneratorBackward0() = default;
#else
struct TORCH_API AffineGridGeneratorBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AffineGridGeneratorBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> size;
};
#ifdef _WIN32
struct AliasBackward0 : public Node {
TORCH_API AliasBackward0() = default;
#else
struct TORCH_API AliasBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AliasBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct AngleBackward0 : public TraceableFunction {
TORCH_API AngleBackward0() = default;
#else
struct TORCH_API AngleBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AngleBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct AcoshBackward0 : public TraceableFunction {
TORCH_API AcoshBackward0() = default;
#else
struct TORCH_API AcoshBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AcoshBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct AcoshBackward1 : public TraceableFunction {
TORCH_API AcoshBackward1() = default;
#else
struct TORCH_API AcoshBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AcoshBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct AsinhBackward0 : public TraceableFunction {
TORCH_API AsinhBackward0() = default;
#else
struct TORCH_API AsinhBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AsinhBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct AsinhBackward1 : public TraceableFunction {
TORCH_API AsinhBackward1() = default;
#else
struct TORCH_API AsinhBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AsinhBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct AtanhBackward0 : public TraceableFunction {
TORCH_API AtanhBackward0() = default;
#else
struct TORCH_API AtanhBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AtanhBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct AtanhBackward1 : public TraceableFunction {
TORCH_API AtanhBackward1() = default;
#else
struct TORCH_API AtanhBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AtanhBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct AsStridedBackward0 : public Node {
TORCH_API AsStridedBackward0() = default;
#else
struct TORCH_API AsStridedBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AsStridedBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::TensorGeometry self_geometry;
std::vector<c10::SymInt> size;
::std::optional<c10::SymInt> storage_offset;
std::vector<c10::SymInt> stride;
};
#ifdef _WIN32
struct AsStridedBackward1 : public TraceableFunction {
TORCH_API AsStridedBackward1() = default;
#else
struct TORCH_API AsStridedBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AsStridedBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::TensorGeometry self_geometry;
std::vector<c10::SymInt> size;
::std::optional<c10::SymInt> storage_offset;
std::vector<c10::SymInt> stride;
};
#ifdef _WIN32
struct AsinBackward0 : public TraceableFunction {
TORCH_API AsinBackward0() = default;
#else
struct TORCH_API AsinBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AsinBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct AtanBackward0 : public TraceableFunction {
TORCH_API AtanBackward0() = default;
#else
struct TORCH_API AtanBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AtanBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct Atan2Backward0 : public TraceableFunction {
TORCH_API Atan2Backward0() = default;
#else
struct TORCH_API Atan2Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Atan2Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct BaddbmmBackward0 : public TraceableFunction {
TORCH_API BaddbmmBackward0() = default;
#else
struct TORCH_API BaddbmmBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BaddbmmBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
batch1_.reset_data();
batch2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
SavedVariable batch1_;
SavedVariable batch2_;
at::Scalar beta;
};
#ifdef _WIN32
struct BernoulliBackward0 : public TraceableFunction {
TORCH_API BernoulliBackward0() = default;
#else
struct TORCH_API BernoulliBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BernoulliBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct BernoulliBackward1 : public TraceableFunction {
TORCH_API BernoulliBackward1() = default;
#else
struct TORCH_API BernoulliBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BernoulliBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize p_info;
};
#ifdef _WIN32
struct BernoulliBackward2 : public TraceableFunction {
TORCH_API BernoulliBackward2() = default;
#else
struct TORCH_API BernoulliBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BernoulliBackward2"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct BmmBackward0 : public TraceableFunction {
TORCH_API BmmBackward0() = default;
#else
struct TORCH_API BmmBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BmmBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mat2_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mat2_;
SavedVariable self_;
};
#ifdef _WIN32
struct MatmulBackward0 : public TraceableFunction {
TORCH_API MatmulBackward0() = default;
#else
struct TORCH_API MatmulBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MatmulBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct CatBackward0 : public TraceableFunction {
TORCH_API CatBackward0() = default;
#else
struct TORCH_API CatBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CatBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
::std::vector<at::ScalarType> tensors_args_scalartypes;
::std::vector<::std::vector<c10::SymInt>> tensors_args_sizes_symint;
size_t tensors_size_;
};
#ifdef _WIN32
struct CauchyBackward0 : public TraceableFunction {
TORCH_API CauchyBackward0() = default;
#else
struct TORCH_API CauchyBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CauchyBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct CeilBackward0 : public TraceableFunction {
TORCH_API CeilBackward0() = default;
#else
struct TORCH_API CeilBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CeilBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct CholeskyBackward0 : public TraceableFunction {
TORCH_API CholeskyBackward0() = default;
#else
struct TORCH_API CholeskyBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CholeskyBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool upper;
SavedVariable result_;
};
#ifdef _WIN32
struct ChunkBackward0 : public TraceableFunction {
TORCH_API ChunkBackward0() = default;
#else
struct TORCH_API ChunkBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ChunkBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ChunkBackwardAutogradNestedTensor0 : public TraceableFunction {
TORCH_API ChunkBackwardAutogradNestedTensor0() = default;
#else
struct TORCH_API ChunkBackwardAutogradNestedTensor0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ChunkBackwardAutogradNestedTensor0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t chunks = 0;
int64_t dim = 0;
SavedVariable self_;
};
#ifdef _WIN32
struct LinalgCholeskyExBackward0 : public TraceableFunction {
TORCH_API LinalgCholeskyExBackward0() = default;
#else
struct TORCH_API LinalgCholeskyExBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgCholeskyExBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
L_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool upper;
SavedVariable L_;
};
#ifdef _WIN32
struct CholeskySolveBackward0 : public TraceableFunction {
TORCH_API CholeskySolveBackward0() = default;
#else
struct TORCH_API CholeskySolveBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CholeskySolveBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input2_.reset_data();
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable input2_;
SavedVariable self_;
bool upper;
SavedVariable result_;
};
#ifdef _WIN32
struct CholeskyInverseBackward0 : public TraceableFunction {
TORCH_API CholeskyInverseBackward0() = default;
#else
struct TORCH_API CholeskyInverseBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CholeskyInverseBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
bool upper;
SavedVariable result_;
};
#ifdef _WIN32
struct ClampBackward0 : public TraceableFunction {
TORCH_API ClampBackward0() = default;
#else
struct TORCH_API ClampBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ClampBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
max_.reset_data();
min_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable max_;
SavedVariable min_;
SavedVariable self_;
};
#ifdef _WIN32
struct ClampBackward1 : public TraceableFunction {
TORCH_API ClampBackward1() = default;
#else
struct TORCH_API ClampBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ClampBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
::std::optional<at::Scalar> max;
::std::optional<at::Scalar> min;
SavedVariable self_;
};
#ifdef _WIN32
struct ClampMinBackward0 : public TraceableFunction {
TORCH_API ClampMinBackward0() = default;
#else
struct TORCH_API ClampMinBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ClampMinBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar min;
SavedVariable self_;
};
#ifdef _WIN32
struct ClampMinBackward1 : public TraceableFunction {
TORCH_API ClampMinBackward1() = default;
#else
struct TORCH_API ClampMinBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ClampMinBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
min_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable min_;
SavedVariable self_;
};
#ifdef _WIN32
struct ClampMaxBackward0 : public TraceableFunction {
TORCH_API ClampMaxBackward0() = default;
#else
struct TORCH_API ClampMaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ClampMaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar max;
SavedVariable self_;
};
#ifdef _WIN32
struct ClampMaxBackward1 : public TraceableFunction {
TORCH_API ClampMaxBackward1() = default;
#else
struct TORCH_API ClampMaxBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ClampMaxBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
max_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable max_;
SavedVariable self_;
};
#ifdef _WIN32
struct CloneBackward0 : public TraceableFunction {
TORCH_API CloneBackward0() = default;
#else
struct TORCH_API CloneBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CloneBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct LazyCloneBackward0 : public TraceableFunction {
TORCH_API LazyCloneBackward0() = default;
#else
struct TORCH_API LazyCloneBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LazyCloneBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ToCopyBackward0 : public TraceableFunction {
TORCH_API ToCopyBackward0() = default;
#else
struct TORCH_API ToCopyBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ToCopyBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::TensorOptions self_options;
};
#ifdef _WIN32
struct CoalesceBackward0 : public TraceableFunction {
TORCH_API CoalesceBackward0() = default;
#else
struct TORCH_API CoalesceBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CoalesceBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ComplexBackward0 : public TraceableFunction {
TORCH_API ComplexBackward0() = default;
#else
struct TORCH_API ComplexBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ComplexBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
imag_.reset_data();
real_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable imag_;
SavedVariable real_;
};
#ifdef _WIN32
struct PolarBackward0 : public TraceableFunction {
TORCH_API PolarBackward0() = default;
#else
struct TORCH_API PolarBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PolarBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
#ifdef _WIN32
struct ConjBackward0 : public Node {
TORCH_API ConjBackward0() = default;
#else
struct TORCH_API ConjBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConjBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct NegViewBackward0 : public Node {
TORCH_API NegViewBackward0() = default;
#else
struct TORCH_API NegViewBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NegViewBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ConjPhysicalBackward0 : public TraceableFunction {
TORCH_API ConjPhysicalBackward0() = default;
#else
struct TORCH_API ConjPhysicalBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConjPhysicalBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ConjPhysicalBackward1 : public TraceableFunction {
TORCH_API ConjPhysicalBackward1() = default;
#else
struct TORCH_API ConjPhysicalBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConjPhysicalBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct CopysignBackward0 : public TraceableFunction {
TORCH_API CopysignBackward0() = default;
#else
struct TORCH_API CopysignBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CopysignBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize other_info;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct CopysignBackward1 : public TraceableFunction {
TORCH_API CopysignBackward1() = default;
#else
struct TORCH_API CopysignBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CopysignBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct CosBackward0 : public TraceableFunction {
TORCH_API CosBackward0() = default;
#else
struct TORCH_API CosBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CosBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct CoshBackward0 : public TraceableFunction {
TORCH_API CoshBackward0() = default;
#else
struct TORCH_API CoshBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CoshBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct LinalgCrossBackward0 : public TraceableFunction {
TORCH_API LinalgCrossBackward0() = default;
#else
struct TORCH_API LinalgCrossBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgCrossBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct LogcumsumexpBackward0 : public TraceableFunction {
TORCH_API LogcumsumexpBackward0() = default;
#else
struct TORCH_API LogcumsumexpBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LogcumsumexpBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct CumprodBackward0 : public TraceableFunction {
TORCH_API CumprodBackward0() = default;
#else
struct TORCH_API CumprodBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CumprodBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
at::ScalarType self_scalar_type;
SavedVariable result_;
};
#ifdef _WIN32
struct CumsumBackward0 : public TraceableFunction {
TORCH_API CumsumBackward0() = default;
#else
struct TORCH_API CumsumBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CumsumBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::ScalarType self_scalar_type;
};
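// Contrast the two cumulative ops above: CumsumBackward0 saves no tensors,
// because the gradient of a cumulative sum is just a reversed cumulative
// sum of the incoming gradient (flip, cumsum, flip along `dim`), whereas
// CumprodBackward0 must keep both the input and the result, since the
// cumulative-product gradient formula needs them (and has to treat zeros
// in the input specially).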
#ifdef _WIN32
struct CummaxBackward0 : public TraceableFunction {
TORCH_API CummaxBackward0() = default;
#else
struct TORCH_API CummaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CummaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
SavedVariable indices_;
};
#ifdef _WIN32
struct CumminBackward0 : public TraceableFunction {
TORCH_API CumminBackward0() = default;
#else
struct TORCH_API CumminBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CumminBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
SavedVariable indices_;
};
#ifdef _WIN32
struct ConvTbcBackward0 : public TraceableFunction {
TORCH_API ConvTbcBackward0() = default;
#else
struct TORCH_API ConvTbcBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConvTbcBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
bias_.reset_data();
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable bias_;
int64_t pad = 0;
SavedVariable self_;
SavedVariable weight_;
};
#ifdef _WIN32
struct CtcLossBackward0 : public TraceableFunction {
TORCH_API CtcLossBackward0() = default;
#else
struct TORCH_API CtcLossBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CtcLossBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
log_probs_.reset_data();
targets_.reset_data();
result0_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t blank = 0;
std::vector<int64_t> input_lengths;
SavedVariable log_probs_;
std::vector<int64_t> target_lengths;
SavedVariable targets_;
bool zero_infinity;
SavedVariable result0_;
SavedVariable result1_;
};
#ifdef _WIN32
struct CtcLossBackward1 : public TraceableFunction {
TORCH_API CtcLossBackward1() = default;
#else
struct TORCH_API CtcLossBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CtcLossBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_lengths_.reset_data();
log_probs_.reset_data();
target_lengths_.reset_data();
targets_.reset_data();
result0_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t blank = 0;
SavedVariable input_lengths_;
SavedVariable log_probs_;
SavedVariable target_lengths_;
SavedVariable targets_;
bool zero_infinity;
SavedVariable result0_;
SavedVariable result1_;
};
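// CtcLossBackward0 and CtcLossBackward1 differ only in how the sequence
// lengths arrive: as std::vector<int64_t> in the int-list overload versus
// as tensors (hence SavedVariable) in the Tensor overload. Both keep the
// forward's two outputs (`result0_`, `result1_` -- the loss and the
// intermediate alphas), which the CTC backward kernel consumes.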
#ifdef _WIN32
struct Deg2RadBackward0 : public TraceableFunction {
TORCH_API Deg2RadBackward0() = default;
#else
struct TORCH_API Deg2RadBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Deg2RadBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct LinalgDetBackward0 : public TraceableFunction {
TORCH_API LinalgDetBackward0() = default;
#else
struct TORCH_API LinalgDetBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgDetBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
A_.reset_data();
LU_.reset_data();
pivots_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable A_;
SavedVariable LU_;
SavedVariable pivots_;
SavedVariable result_;
};
#ifdef _WIN32
struct LinalgSlogdetBackward0 : public TraceableFunction {
TORCH_API LinalgSlogdetBackward0() = default;
#else
struct TORCH_API LinalgSlogdetBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgSlogdetBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
A_.reset_data();
LU_.reset_data();
pivots_.reset_data();
sign_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable A_;
SavedVariable LU_;
SavedVariable pivots_;
SavedVariable sign_;
};
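// Both determinant nodes retain the LU factorization and pivots produced in
// the forward, so the backward can reuse the factorization for its solves
// instead of refactorizing A from scratch.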
#ifdef _WIN32
struct BlockDiagBackward0 : public TraceableFunction {
TORCH_API BlockDiagBackward0() = default;
#else
struct TORCH_API BlockDiagBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BlockDiagBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::ScalarType> tensors_args_scalartypes;
std::vector<std::vector<int64_t>> tensors_args_sizes;
size_t tensors_size_ = 0;
};
#ifdef _WIN32
struct DiagEmbedBackward0 : public TraceableFunction {
TORCH_API DiagEmbedBackward0() = default;
#else
struct TORCH_API DiagEmbedBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DiagEmbedBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim1 = 0;
int64_t dim2 = 0;
int64_t offset = 0;
};
#ifdef _WIN32
struct DiagonalBackward0 : public Node {
TORCH_API DiagonalBackward0() = default;
#else
struct TORCH_API DiagonalBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DiagonalBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim1 = 0;
int64_t dim2 = 0;
int64_t offset = 0;
std::vector<c10::SymInt> self_sym_sizes;
};
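// DiagonalBackward0 derives from Node rather than TraceableFunction; in
// this file that is the shape taken by the backward of view-like ops
// (NegViewBackward0 above and ExpandBackward0 below are the same). Where
// the gradient must be routed back into the base shape, the input sizes
// are stored as c10::SymInt (`self_sym_sizes`) so symbolic shape tracing
// keeps working.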
#ifdef _WIN32
struct DiagonalBackwardBackward0 : public TraceableFunction {
TORCH_API DiagonalBackwardBackward0() = default;
#else
struct TORCH_API DiagonalBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DiagonalBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim1 = 0;
int64_t dim2 = 0;
int64_t offset = 0;
};
#ifdef _WIN32
struct DistBackward0 : public TraceableFunction {
TORCH_API DistBackward0() = default;
#else
struct TORCH_API DistBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DistBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
at::Scalar p;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct DivBackward0 : public TraceableFunction {
TORCH_API DivBackward0() = default;
#else
struct TORCH_API DivBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DivBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct DivBackward1 : public TraceableFunction {
TORCH_API DivBackward1() = default;
#else
struct TORCH_API DivBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DivBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar other;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct DivBackward2 : public TraceableFunction {
TORCH_API DivBackward2() = default;
#else
struct TORCH_API DivBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DivBackward2"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
std::optional<std::string> rounding_mode;
SavedVariable self_;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct DivBackward3 : public TraceableFunction {
TORCH_API DivBackward3() = default;
#else
struct TORCH_API DivBackward3 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DivBackward3"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar other;
std::optional<std::string> rounding_mode;
at::ScalarType self_scalar_type;
};
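// The DivBackward0..3 family maps onto the ATen overloads of div: tensor /
// tensor, tensor / scalar, and the two rounding_mode variants. Scalar
// operands are stored by value (`at::Scalar other`) rather than as a
// SavedVariable, and `self_scalar_type` is kept so the computed gradient
// can be cast back to the dtype of the differentiated input.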
#ifdef _WIN32
struct DotBackward0 : public TraceableFunction {
TORCH_API DotBackward0() = default;
#else
struct TORCH_API DotBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DotBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
tensor_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable tensor_;
};
#ifdef _WIN32
struct VdotBackward0 : public TraceableFunction {
TORCH_API VdotBackward0() = default;
#else
struct TORCH_API VdotBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "VdotBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct FusedDropoutBackward0 : public TraceableFunction {
TORCH_API FusedDropoutBackward0() = default;
#else
struct TORCH_API FusedDropoutBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FusedDropoutBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double p;
SavedVariable result1_;
};
#ifdef _WIN32
struct NativeDropoutBackward0 : public TraceableFunction {
TORCH_API NativeDropoutBackward0() = default;
#else
struct TORCH_API NativeDropoutBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NativeDropoutBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double p;
std::optional<bool> train;
SavedVariable result1_;
};
#ifdef _WIN32
struct NativeDropoutBackwardBackward0 : public TraceableFunction {
TORCH_API NativeDropoutBackwardBackward0() = default;
#else
struct TORCH_API NativeDropoutBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NativeDropoutBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_output_;
SavedVariable mask_;
double scale;
};
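// The dropout backwards only need the Bernoulli mask produced in the
// forward (`result1_`) plus the keep probability: conceptually the gradient
// is grad * mask / (1 - p), and for the double-backward node above the
// precomputed `scale` plays the 1 / (1 - p) role.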
#ifdef _WIN32
struct EqBackward0 : public TraceableFunction {
TORCH_API EqBackward0() = default;
#else
struct TORCH_API EqBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EqBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct EqBackward1 : public TraceableFunction {
TORCH_API EqBackward1() = default;
#else
struct TORCH_API EqBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EqBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize other_info;
torch::autograd::generated::TypeAndSize self_info;
};
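// Eq (and the Ge/Gt/Le nodes later in this file) are comparisons, which are
// not differentiable; their backward nodes therefore save nothing but
// TypeAndSize, letting `apply` hand back zero-filled gradients of the right
// dtype and shape for each input.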
#ifdef _WIN32
struct ErfBackward0 : public TraceableFunction {
TORCH_API ErfBackward0() = default;
#else
struct TORCH_API ErfBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ErfBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct ErfcBackward0 : public TraceableFunction {
TORCH_API ErfcBackward0() = default;
#else
struct TORCH_API ErfcBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ErfcBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct SpecialErfcxBackward0 : public TraceableFunction {
TORCH_API SpecialErfcxBackward0() = default;
#else
struct TORCH_API SpecialErfcxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialErfcxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct ErfinvBackward0 : public TraceableFunction {
TORCH_API ErfinvBackward0() = default;
#else
struct TORCH_API ErfinvBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ErfinvBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct ExpBackward0 : public TraceableFunction {
TORCH_API ExpBackward0() = default;
#else
struct TORCH_API ExpBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ExpBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
#ifdef _WIN32
struct Exp2Backward0 : public TraceableFunction {
TORCH_API Exp2Backward0() = default;
#else
struct TORCH_API Exp2Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Exp2Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
#ifdef _WIN32
struct Expm1Backward0 : public TraceableFunction {
TORCH_API Expm1Backward0() = default;
#else
struct TORCH_API Expm1Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Expm1Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
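// The exp family saves the *result* instead of the input, because each
// derivative can be rewritten in terms of the output: d/dx exp(x) = exp(x),
// d/dx 2^x = 2^x * ln(2), and d/dx expm1(x) = expm1(x) + 1. A minimal
// sketch (not the generated implementation, which also conjugates for
// complex inputs) of what ExpBackward0::apply computes, with grads[0] as
// the incoming gradient:
//
//   variable_list ExpBackward0::apply(variable_list&& grads) {
//     return {grads[0] * result_.unpack(shared_from_this())};
//   }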
#ifdef _WIN32
struct ExpandBackward0 : public Node {
TORCH_API ExpandBackward0() = default;
#else
struct TORCH_API ExpandBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ExpandBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct ExponentialBackward0 : public TraceableFunction {
TORCH_API ExponentialBackward0() = default;
#else
struct TORCH_API ExponentialBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ExponentialBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct FakeQuantizePerTensorAffineCachemaskBackward0 : public TraceableFunction {
TORCH_API FakeQuantizePerTensorAffineCachemaskBackward0() = default;
#else
struct TORCH_API FakeQuantizePerTensorAffineCachemaskBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FakeQuantizePerTensorAffineCachemaskBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mask_;
};
#ifdef _WIN32
struct FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0 : public TraceableFunction {
TORCH_API FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0() = default;
#else
struct TORCH_API FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FakeQuantizePerTensorAffineCachemaskTensorQparamsBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mask_;
};
#ifdef _WIN32
struct FakeQuantizeLearnablePerTensorAffineBackward0 : public TraceableFunction {
TORCH_API FakeQuantizeLearnablePerTensorAffineBackward0() = default;
#else
struct TORCH_API FakeQuantizeLearnablePerTensorAffineBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FakeQuantizeLearnablePerTensorAffineBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
scale_.reset_data();
self_.reset_data();
zero_point_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double grad_factor;
int64_t quant_max = 0;
int64_t quant_min = 0;
SavedVariable scale_;
SavedVariable self_;
SavedVariable zero_point_;
};
#ifdef _WIN32
struct FakeQuantizePerChannelAffineCachemaskBackward0 : public TraceableFunction {
TORCH_API FakeQuantizePerChannelAffineCachemaskBackward0() = default;
#else
struct TORCH_API FakeQuantizePerChannelAffineCachemaskBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FakeQuantizePerChannelAffineCachemaskBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mask_;
};
#ifdef _WIN32
struct FakeQuantizeLearnablePerChannelAffineBackward0 : public TraceableFunction {
TORCH_API FakeQuantizeLearnablePerChannelAffineBackward0() = default;
#else
struct TORCH_API FakeQuantizeLearnablePerChannelAffineBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FakeQuantizeLearnablePerChannelAffineBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
scale_.reset_data();
self_.reset_data();
zero_point_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t axis = 0;
double grad_factor;
int64_t quant_max = 0;
int64_t quant_min = 0;
SavedVariable scale_;
SavedVariable self_;
SavedVariable zero_point_;
};
#ifdef _WIN32
struct FusedMovingAvgObsFqHelperBackward0 : public TraceableFunction {
TORCH_API FusedMovingAvgObsFqHelperBackward0() = default;
#else
struct TORCH_API FusedMovingAvgObsFqHelperBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FusedMovingAvgObsFqHelperBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mask_;
};
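// All the fake-quantize "cachemask" nodes (including the fused
// moving-average observer helper) share one trick: the forward caches a
// boolean mask of the elements that fell inside [quant_min, quant_max], so
// the backward reduces to grad * mask with no need to re-derive the
// clamping decision. The learnable variants instead save scale, zero_point
// and self, since gradients flow to the quantization parameters as well.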
#ifdef _WIN32
struct FillBackward0 : public TraceableFunction {
TORCH_API FillBackward0() = default;
#else
struct TORCH_API FillBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FillBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct FillBackward1 : public TraceableFunction {
TORCH_API FillBackward1() = default;
#else
struct TORCH_API FillBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FillBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct FillBackward2 : public TraceableFunction {
TORCH_API FillBackward2() = default;
#else
struct TORCH_API FillBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FillBackward2"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct FillBackward3 : public TraceableFunction {
TORCH_API FillBackward3() = default;
#else
struct TORCH_API FillBackward3 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FillBackward3"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct FloorBackward0 : public TraceableFunction {
TORCH_API FloorBackward0() = default;
#else
struct TORCH_API FloorBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FloorBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct FmodBackward0 : public TraceableFunction {
TORCH_API FmodBackward0() = default;
#else
struct TORCH_API FmodBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FmodBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct FmodBackward1 : public TraceableFunction {
TORCH_API FmodBackward1() = default;
#else
struct TORCH_API FmodBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FmodBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct FracBackward0 : public TraceableFunction {
TORCH_API FracBackward0() = default;
#else
struct TORCH_API FracBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FracBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct FrexpBackward0 : public TraceableFunction {
TORCH_API FrexpBackward0() = default;
#else
struct TORCH_API FrexpBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FrexpBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
exponent_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable exponent_;
};
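// frexp decomposes self into (mantissa, exponent) with
// self = mantissa * 2^exponent. Only the exponent needs saving: treating
// the integer exponent as locally constant, d(mantissa)/d(self) is
// 2^(-exponent), so apply just rescales the incoming mantissa gradient,
// and the exponent output itself receives no gradient.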
#ifdef _WIN32
struct GatherBackward0 : public TraceableFunction {
TORCH_API GatherBackward0() = default;
#else
struct TORCH_API GatherBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GatherBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable index_;
SavedVariable self_;
bool sparse_grad;
};
#ifdef _WIN32
struct GeBackward0 : public TraceableFunction {
TORCH_API GeBackward0() = default;
#else
struct TORCH_API GeBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GeBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct GeBackward1 : public TraceableFunction {
TORCH_API GeBackward1() = default;
#else
struct TORCH_API GeBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GeBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize other_info;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct GeometricBackward0 : public TraceableFunction {
TORCH_API GeometricBackward0() = default;
#else
struct TORCH_API GeometricBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GeometricBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct GeqrfBackward0 : public TraceableFunction {
TORCH_API GeqrfBackward0() = default;
#else
struct TORCH_API GeqrfBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GeqrfBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct GridSampler2DBackward0 : public TraceableFunction {
TORCH_API GridSampler2DBackward0() = default;
#else
struct TORCH_API GridSampler2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GridSampler2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grid_.reset_data();
input_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
SavedVariable grid_;
SavedVariable input_;
int64_t interpolation_mode = 0;
int64_t padding_mode = 0;
};
#ifdef _WIN32
struct GridSampler3DBackward0 : public TraceableFunction {
TORCH_API GridSampler3DBackward0() = default;
#else
struct TORCH_API GridSampler3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GridSampler3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grid_.reset_data();
input_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
SavedVariable grid_;
SavedVariable input_;
int64_t interpolation_mode = 0;
int64_t padding_mode = 0;
};
#ifdef _WIN32
struct GridSampler2DCpuFallbackBackward0 : public TraceableFunction {
TORCH_API GridSampler2DCpuFallbackBackward0() = default;
#else
struct TORCH_API GridSampler2DCpuFallbackBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GridSampler2DCpuFallbackBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grid_.reset_data();
input_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
SavedVariable grid_;
SavedVariable input_;
int64_t interpolation_mode = 0;
int64_t padding_mode = 0;
};
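// The three grid-sampler nodes save both `input` and `grid`: the gradient
// with respect to each operand depends on the other (and on the
// interpolation/padding enums), so neither can be dropped. The CPU fallback
// variant records exactly the same state as the native kernels.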
#ifdef _WIN32
struct GtBackward0 : public TraceableFunction {
TORCH_API GtBackward0() = default;
#else
struct TORCH_API GtBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GtBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct GtBackward1 : public TraceableFunction {
TORCH_API GtBackward1() = default;
#else
struct TORCH_API GtBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GtBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize other_info;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct HardsigmoidBackward0 : public TraceableFunction {
TORCH_API HardsigmoidBackward0() = default;
#else
struct TORCH_API HardsigmoidBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "HardsigmoidBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct HardswishBackward0 : public TraceableFunction {
TORCH_API HardswishBackward0() = default;
#else
struct TORCH_API HardswishBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "HardswishBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct HardswishBackwardBackward0 : public TraceableFunction {
TORCH_API HardswishBackwardBackward0() = default;
#else
struct TORCH_API HardswishBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "HardswishBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_output_;
SavedVariable self_;
at::TensorOptions self_options;
};
#ifdef _WIN32
struct HypotBackward0 : public TraceableFunction {
TORCH_API HypotBackward0() = default;
#else
struct TORCH_API HypotBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "HypotBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct I0Backward0 : public TraceableFunction {
TORCH_API I0Backward0() = default;
#else
struct TORCH_API I0Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "I0Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct SpecialI0EBackward0 : public TraceableFunction {
TORCH_API SpecialI0EBackward0() = default;
#else
struct TORCH_API SpecialI0EBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialI0EBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct SpecialI1Backward0 : public TraceableFunction {
TORCH_API SpecialI1Backward0() = default;
#else
struct TORCH_API SpecialI1Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialI1Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct SpecialI1EBackward0 : public TraceableFunction {
TORCH_API SpecialI1EBackward0() = default;
#else
struct TORCH_API SpecialI1EBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialI1EBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct IgammaBackward0 : public TraceableFunction {
TORCH_API IgammaBackward0() = default;
#else
struct TORCH_API IgammaBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IgammaBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct IgammacBackward0 : public TraceableFunction {
TORCH_API IgammacBackward0() = default;
#else
struct TORCH_API IgammacBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IgammacBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct IndexBackward0 : public TraceableFunction {
TORCH_API IndexBackward0() = default;
#else
struct TORCH_API IndexBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IndexBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.clear();
indices_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> indices_;
bool indices_released_ = false;
at::TensorOptions self_options;
std::vector<c10::SymInt> self_sym_sizes;
};
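// IndexBackward0 shows the pattern for saving a *list* of tensors: a
// std::vector<SavedVariable> plus an explicit `indices_released_` flag,
// because once the vector is cleared an empty list would otherwise be
// indistinguishable from "nothing was ever saved".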
#ifdef _WIN32
struct UnsafeIndexBackward0 : public TraceableFunction {
TORCH_API UnsafeIndexBackward0() = default;
#else
struct TORCH_API UnsafeIndexBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnsafeIndexBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.clear();
indices_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> indices_;
bool indices_released_ = false;
at::TensorOptions self_options;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UnsafeMaskedIndexBackward0 : public TraceableFunction {
TORCH_API UnsafeMaskedIndexBackward0() = default;
#else
struct TORCH_API UnsafeMaskedIndexBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnsafeMaskedIndexBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.clear();
indices_released_ = true;
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> indices_;
bool indices_released_ = false;
SavedVariable mask_;
at::TensorOptions self_options;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UnsafeMaskedIndexPutAccumulateBackward0 : public TraceableFunction {
TORCH_API UnsafeMaskedIndexPutAccumulateBackward0() = default;
#else
struct TORCH_API UnsafeMaskedIndexPutAccumulateBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnsafeMaskedIndexPutAccumulateBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.clear();
indices_released_ = true;
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> indices_;
bool indices_released_ = false;
SavedVariable mask_;
};
#ifdef _WIN32
struct IndexAddBackward0 : public TraceableFunction {
TORCH_API IndexAddBackward0() = default;
#else
struct TORCH_API IndexAddBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IndexAddBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
source_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
int64_t dim = 0;
SavedVariable index_;
SavedVariable source_;
int64_t source_dim = 0;
};
#ifdef _WIN32
struct IndexReduceBackward0 : public TraceableFunction {
TORCH_API IndexReduceBackward0() = default;
#else
struct TORCH_API IndexReduceBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IndexReduceBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
self_.reset_data();
source_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
bool include_self;
SavedVariable index_;
std::string reduce;
SavedVariable self_;
SavedVariable source_;
SavedVariable result_;
};
#ifdef _WIN32
struct IndexCopyBackward0 : public TraceableFunction {
TORCH_API IndexCopyBackward0() = default;
#else
struct TORCH_API IndexCopyBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IndexCopyBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
source_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable index_;
SavedVariable source_;
int64_t source_dim = 0;
};
#ifdef _WIN32
struct IndexFillBackward0 : public TraceableFunction {
TORCH_API IndexFillBackward0() = default;
#else
struct TORCH_API IndexFillBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IndexFillBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable index_;
};
#ifdef _WIN32
struct IndexFillBackward1 : public TraceableFunction {
TORCH_API IndexFillBackward1() = default;
#else
struct TORCH_API IndexFillBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IndexFillBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable index_;
};
#ifdef _WIN32
struct IndexPutBackward0 : public TraceableFunction {
TORCH_API IndexPutBackward0() = default;
#else
struct TORCH_API IndexPutBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IndexPutBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.clear();
indices_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool accumulate;
std::vector<SavedVariable> indices_;
bool indices_released_ = false;
torch::autograd::generated::TypeAndSize values_info;
};
#ifdef _WIN32
struct UnsafeIndexPutBackward0 : public TraceableFunction {
TORCH_API UnsafeIndexPutBackward0() = default;
#else
struct TORCH_API UnsafeIndexPutBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnsafeIndexPutBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.clear();
indices_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool accumulate;
std::vector<SavedVariable> indices_;
bool indices_released_ = false;
torch::autograd::generated::TypeAndSize values_info;
};
#ifdef _WIN32
struct IndexPutImplBackward0 : public TraceableFunction {
TORCH_API IndexPutImplBackward0() = default;
#else
struct TORCH_API IndexPutImplBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IndexPutImplBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.clear();
indices_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool accumulate;
std::vector<SavedVariable> indices_;
bool indices_released_ = false;
torch::autograd::generated::TypeAndSize values_info;
};
#ifdef _WIN32
struct IndexSelectBackward0 : public TraceableFunction {
TORCH_API IndexSelectBackward0() = default;
#else
struct TORCH_API IndexSelectBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "IndexSelectBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable index_;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct LinalgInvExBackward0 : public TraceableFunction {
TORCH_API LinalgInvExBackward0() = default;
#else
struct TORCH_API LinalgInvExBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgInvExBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
inverse_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable inverse_;
};
#ifdef _WIN32
struct LinalgPinvBackward0 : public TraceableFunction {
TORCH_API LinalgPinvBackward0() = default;
#else
struct TORCH_API LinalgPinvBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgPinvBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct KthvalueBackward0 : public TraceableFunction {
TORCH_API KthvalueBackward0() = default;
#else
struct TORCH_API KthvalueBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "KthvalueBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
bool keepdim;
std::vector<c10::SymInt> self_sym_sizes;
SavedVariable indices_;
};
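// Comparison ops (le/lt/ne, ...) are not differentiable; their backward
// nodes save no tensors at all, only TypeAndSize metadata describing the
// inputs, so that apply() can return appropriately shaped zero or undefined
// gradients.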
#ifdef _WIN32
struct LeBackward0 : public TraceableFunction {
TORCH_API LeBackward0() = default;
#else
struct TORCH_API LeBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LeBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct LeBackward1 : public TraceableFunction {
TORCH_API LeBackward1() = default;
#else
struct TORCH_API LeBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LeBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize other_info;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct LerpBackward0 : public TraceableFunction {
TORCH_API LerpBackward0() = default;
#else
struct TORCH_API LerpBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LerpBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar weight;
};
#ifdef _WIN32
struct LerpBackward1 : public TraceableFunction {
TORCH_API LerpBackward1() = default;
#else
struct TORCH_API LerpBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LerpBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
end_.reset_data();
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable end_;
SavedVariable self_;
SavedVariable weight_;
};
#ifdef _WIN32
struct LgammaBackward0 : public TraceableFunction {
TORCH_API LgammaBackward0() = default;
#else
struct TORCH_API LgammaBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LgammaBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct DigammaBackward0 : public TraceableFunction {
TORCH_API DigammaBackward0() = default;
#else
struct TORCH_API DigammaBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DigammaBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct PolygammaBackward0 : public TraceableFunction {
TORCH_API PolygammaBackward0() = default;
#else
struct TORCH_API PolygammaBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PolygammaBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t n = 0;
SavedVariable self_;
};
#ifdef _WIN32
struct PolygammaBackward1 : public TraceableFunction {
TORCH_API PolygammaBackward1() = default;
#else
struct TORCH_API PolygammaBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PolygammaBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t n = 0;
SavedVariable self_;
};
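// The log-family nodes below are plain pointwise backwards that only need
// the saved input. For example, LogBackward0::apply presumably reduces to
// grad_self = grad / self (since d/dx log(x) = 1/x); the log10/log2 variants
// should differ only by a constant factor of ln(10) or ln(2).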
#ifdef _WIN32
struct LogBackward0 : public TraceableFunction {
TORCH_API LogBackward0() = default;
#else
struct TORCH_API LogBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LogBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct Log10Backward0 : public TraceableFunction {
TORCH_API Log10Backward0() = default;
#else
struct TORCH_API Log10Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Log10Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct Log1PBackward0 : public TraceableFunction {
TORCH_API Log1PBackward0() = default;
#else
struct TORCH_API Log1PBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Log1PBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct Log2Backward0 : public TraceableFunction {
TORCH_API Log2Backward0() = default;
#else
struct TORCH_API Log2Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Log2Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct LogaddexpBackward0 : public TraceableFunction {
TORCH_API LogaddexpBackward0() = default;
#else
struct TORCH_API LogaddexpBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LogaddexpBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct Logaddexp2Backward0 : public TraceableFunction {
TORCH_API Logaddexp2Backward0() = default;
#else
struct TORCH_API Logaddexp2Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Logaddexp2Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct XlogyBackward0 : public TraceableFunction {
TORCH_API XlogyBackward0() = default;
#else
struct TORCH_API XlogyBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "XlogyBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
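// Each Tensor/Scalar overload of a binary op gets its own node: Backward0 is
// the tensor-tensor form, while Backward1/Backward2 fix one operand as an
// at::Scalar stored by value, so only the remaining tensor operand needs to
// be saved.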
#ifdef _WIN32
struct XlogyBackward1 : public TraceableFunction {
TORCH_API XlogyBackward1() = default;
#else
struct TORCH_API XlogyBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "XlogyBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
at::Scalar self;
};
#ifdef _WIN32
struct XlogyBackward2 : public TraceableFunction {
TORCH_API XlogyBackward2() = default;
#else
struct TORCH_API XlogyBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "XlogyBackward2"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar other;
SavedVariable self_;
};
#ifdef _WIN32
struct SpecialXlog1PyBackward0 : public TraceableFunction {
TORCH_API SpecialXlog1PyBackward0() = default;
#else
struct TORCH_API SpecialXlog1PyBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialXlog1PyBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct SpecialXlog1PyBackward1 : public TraceableFunction {
TORCH_API SpecialXlog1PyBackward1() = default;
#else
struct TORCH_API SpecialXlog1PyBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialXlog1PyBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
at::Scalar self;
};
#ifdef _WIN32
struct SpecialXlog1PyBackward2 : public TraceableFunction {
TORCH_API SpecialXlog1PyBackward2() = default;
#else
struct TORCH_API SpecialXlog1PyBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialXlog1PyBackward2"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar other;
SavedVariable self_;
};
#ifdef _WIN32
struct SpecialZetaBackward0 : public TraceableFunction {
TORCH_API SpecialZetaBackward0() = default;
#else
struct TORCH_API SpecialZetaBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialZetaBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct SpecialZetaBackward1 : public TraceableFunction {
TORCH_API SpecialZetaBackward1() = default;
#else
struct TORCH_API SpecialZetaBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialZetaBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
at::Scalar self;
};
#ifdef _WIN32
struct SpecialZetaBackward2 : public TraceableFunction {
TORCH_API SpecialZetaBackward2() = default;
#else
struct TORCH_API SpecialZetaBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialZetaBackward2"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct LogNormalBackward0 : public TraceableFunction {
TORCH_API LogNormalBackward0() = default;
#else
struct TORCH_API LogNormalBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LogNormalBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct LogsumexpBackward0 : public TraceableFunction {
TORCH_API LogsumexpBackward0() = default;
#else
struct TORCH_API LogsumexpBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LogsumexpBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
bool keepdim;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct LinalgLstsqBackward0 : public TraceableFunction {
TORCH_API LinalgLstsqBackward0() = default;
#else
struct TORCH_API LinalgLstsqBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgLstsqBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
b_.reset_data();
self_.reset_data();
solution_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable b_;
SavedVariable self_;
SavedVariable solution_;
};
#ifdef _WIN32
struct LtBackward0 : public TraceableFunction {
TORCH_API LtBackward0() = default;
#else
struct TORCH_API LtBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LtBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct LtBackward1 : public TraceableFunction {
TORCH_API LtBackward1() = default;
#else
struct TORCH_API LtBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LtBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize other_info;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct LinalgLuFactorExBackward0 : public TraceableFunction {
TORCH_API LinalgLuFactorExBackward0() = default;
#else
struct TORCH_API LinalgLuFactorExBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgLuFactorExBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
LU_.reset_data();
pivots_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool pivot;
SavedVariable LU_;
SavedVariable pivots_;
};
#ifdef _WIN32
struct LinalgLuFactorBackward0 : public TraceableFunction {
TORCH_API LinalgLuFactorBackward0() = default;
#else
struct TORCH_API LinalgLuFactorBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgLuFactorBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
LU_.reset_data();
pivots_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool pivot;
SavedVariable LU_;
SavedVariable pivots_;
};
#ifdef _WIN32
struct LinalgLuBackward0 : public TraceableFunction {
TORCH_API LinalgLuBackward0() = default;
#else
struct TORCH_API LinalgLuBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgLuBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
L_.reset_data();
P_.reset_data();
U_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool pivot;
SavedVariable L_;
SavedVariable P_;
SavedVariable U_;
};
#ifdef _WIN32
struct LinalgLuSolveBackward0 : public TraceableFunction {
TORCH_API LinalgLuSolveBackward0() = default;
#else
struct TORCH_API LinalgLuSolveBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgLuSolveBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
LU_.reset_data();
pivots_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable LU_;
bool adjoint;
bool left;
SavedVariable pivots_;
SavedVariable result_;
};
#ifdef _WIN32
struct LuUnpackBackward0 : public TraceableFunction {
TORCH_API LuUnpackBackward0() = default;
#else
struct TORCH_API LuUnpackBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LuUnpackBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::SymInt LU_data_sym_argsize_minus_1;
c10::SymInt LU_data_sym_argsize_minus_2;
};
#ifdef _WIN32
struct MaskedFillBackward0 : public TraceableFunction {
TORCH_API MaskedFillBackward0() = default;
#else
struct TORCH_API MaskedFillBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaskedFillBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mask_;
};
#ifdef _WIN32
struct MaskedFillBackward1 : public TraceableFunction {
TORCH_API MaskedFillBackward1() = default;
#else
struct TORCH_API MaskedFillBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaskedFillBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mask_;
};
#ifdef _WIN32
struct MaskedScatterBackward0 : public TraceableFunction {
TORCH_API MaskedScatterBackward0() = default;
#else
struct TORCH_API MaskedScatterBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaskedScatterBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mask_;
std::vector<c10::SymInt> source_sym_sizes;
};
#ifdef _WIN32
struct MaskedScatterBackwardBackward0 : public TraceableFunction {
TORCH_API MaskedScatterBackwardBackward0() = default;
#else
struct TORCH_API MaskedScatterBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaskedScatterBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize grad_output_info;
SavedVariable mask_;
};
#ifdef _WIN32
struct MaskedSelectBackward0 : public TraceableFunction {
TORCH_API MaskedSelectBackward0() = default;
#else
struct TORCH_API MaskedSelectBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaskedSelectBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mask_;
SavedVariable self_;
};
#ifdef _WIN32
struct LinalgMatrixExpBackward0 : public TraceableFunction {
TORCH_API LinalgMatrixExpBackward0() = default;
#else
struct TORCH_API LinalgMatrixExpBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgMatrixExpBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct MaxBackward0 : public TraceableFunction {
TORCH_API MaxBackward0() = default;
#else
struct TORCH_API MaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
bool keepdim;
std::vector<c10::SymInt> self_sym_sizes;
SavedVariable indices_;
};
#ifdef _WIN32
struct MaxBackward1 : public TraceableFunction {
TORCH_API MaxBackward1() = default;
#else
struct TORCH_API MaxBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaxBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct MaximumBackward0 : public TraceableFunction {
TORCH_API MaximumBackward0() = default;
#else
struct TORCH_API MaximumBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaximumBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct FmaxBackward0 : public TraceableFunction {
TORCH_API FmaxBackward0() = default;
#else
struct TORCH_API FmaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FmaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
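// MeanBackward0 saves no tensors, only symbolic metadata: the gradient of a
// full mean is presumably just grad expanded to self_sym_sizes and divided
// by self_sym_numel, so c10::SymInt values suffice and keep the node usable
// under dynamic shapes.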
#ifdef _WIN32
struct MeanBackward0 : public TraceableFunction {
TORCH_API MeanBackward0() = default;
#else
struct TORCH_API MeanBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MeanBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::SymInt self_sym_numel;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct MeanBackwardAutogradNestedTensor0 : public TraceableFunction {
TORCH_API MeanBackwardAutogradNestedTensor0() = default;
#else
struct TORCH_API MeanBackwardAutogradNestedTensor0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MeanBackwardAutogradNestedTensor0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
c10::SymInt self_sym_numel;
};
#ifdef _WIN32
struct MeanBackward1 : public TraceableFunction {
TORCH_API MeanBackward1() = default;
#else
struct TORCH_API MeanBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MeanBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<int64_t> dim;
bool keepdim;
c10::SymInt self_sym_numel;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct MedianBackward0 : public TraceableFunction {
TORCH_API MedianBackward0() = default;
#else
struct TORCH_API MedianBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MedianBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct NanmedianBackward0 : public TraceableFunction {
TORCH_API NanmedianBackward0() = default;
#else
struct TORCH_API NanmedianBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NanmedianBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct MedianBackward1 : public TraceableFunction {
TORCH_API MedianBackward1() = default;
#else
struct TORCH_API MedianBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MedianBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
bool keepdim;
std::vector<c10::SymInt> self_sym_sizes;
SavedVariable indices_;
};
#ifdef _WIN32
struct NanmedianBackward1 : public TraceableFunction {
TORCH_API NanmedianBackward1() = default;
#else
struct TORCH_API NanmedianBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NanmedianBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
bool keepdim;
std::vector<c10::SymInt> self_sym_sizes;
SavedVariable indices_;
};
#ifdef _WIN32
struct MinBackward0 : public TraceableFunction {
TORCH_API MinBackward0() = default;
#else
struct TORCH_API MinBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MinBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
bool keepdim;
std::vector<c10::SymInt> self_sym_sizes;
SavedVariable indices_;
};
#ifdef _WIN32
struct MinBackward1 : public TraceableFunction {
TORCH_API MinBackward1() = default;
#else
struct TORCH_API MinBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MinBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct MinimumBackward0 : public TraceableFunction {
TORCH_API MinimumBackward0() = default;
#else
struct TORCH_API MinimumBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MinimumBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct FminBackward0 : public TraceableFunction {
TORCH_API FminBackward0() = default;
#else
struct TORCH_API FminBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FminBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct AmaxBackward0 : public TraceableFunction {
TORCH_API AmaxBackward0() = default;
#else
struct TORCH_API AmaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AmaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
bool keepdim;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct AminBackward0 : public TraceableFunction {
TORCH_API AminBackward0() = default;
#else
struct TORCH_API AminBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AminBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
bool keepdim;
SavedVariable self_;
SavedVariable result_;
};
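// MmBackward0 additionally records each operand's layout, sizes, and
// strides. This extra metadata is presumably used when forming the matmul
// gradients, e.g. to handle sparse layouts and to choose an efficient
// gradient computation for a given memory layout.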
#ifdef _WIN32
struct MmBackward0 : public TraceableFunction {
TORCH_API MmBackward0() = default;
#else
struct TORCH_API MmBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MmBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mat2_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mat2_;
at::Layout mat2_layout;
std::vector<c10::SymInt> mat2_sym_sizes;
std::vector<c10::SymInt> mat2_sym_strides;
SavedVariable self_;
at::Layout self_layout;
std::vector<c10::SymInt> self_sym_sizes;
std::vector<c10::SymInt> self_sym_strides;
};
#ifdef _WIN32
struct ModeBackward0 : public TraceableFunction {
TORCH_API ModeBackward0() = default;
#else
struct TORCH_API ModeBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ModeBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
bool keepdim;
std::vector<c10::SymInt> self_sym_sizes;
SavedVariable indices_;
};
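// MulBackward0 remembers each operand's ScalarType in addition to the
// operand itself, presumably so that gradients computed in the promoted
// dtype can be cast back to the original input dtypes.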
#ifdef _WIN32
struct MulBackward0 : public TraceableFunction {
TORCH_API MulBackward0() = default;
#else
struct TORCH_API MulBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MulBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
at::ScalarType other_scalar_type;
SavedVariable self_;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct MulBackward1 : public TraceableFunction {
TORCH_API MulBackward1() = default;
#else
struct TORCH_API MulBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MulBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar other;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct MvBackward0 : public TraceableFunction {
TORCH_API MvBackward0() = default;
#else
struct TORCH_API MvBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MvBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
vec_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable vec_;
};
#ifdef _WIN32
struct MvlgammaBackward0 : public TraceableFunction {
TORCH_API MvlgammaBackward0() = default;
#else
struct TORCH_API MvlgammaBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MvlgammaBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t p = 0;
SavedVariable self_;
};
#ifdef _WIN32
struct NanToNumBackward0 : public TraceableFunction {
TORCH_API NanToNumBackward0() = default;
#else
struct TORCH_API NanToNumBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NanToNumBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
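// The normalization backwards below save the forward's auxiliary outputs
// result1_/result2_ (presumably the saved mean and inverse standard
// deviation) alongside the input, weight, and running statistics; the
// `training` / `train` flag selects between batch statistics and the running
// estimates when computing gradients.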
#ifdef _WIN32
struct NativeBatchNormBackward0 : public TraceableFunction {
TORCH_API NativeBatchNormBackward0() = default;
#else
struct TORCH_API NativeBatchNormBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NativeBatchNormBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double eps;
SavedVariable input_;
SavedVariable running_mean_;
SavedVariable running_var_;
bool training;
SavedVariable weight_;
SavedVariable result1_;
SavedVariable result2_;
};
#ifdef _WIN32
struct NativeBatchNormLegitBackward0 : public TraceableFunction {
TORCH_API NativeBatchNormLegitBackward0() = default;
#else
struct TORCH_API NativeBatchNormLegitBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NativeBatchNormLegitBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double eps;
SavedVariable input_;
SavedVariable running_mean_;
SavedVariable running_var_;
bool training;
SavedVariable weight_;
SavedVariable result1_;
SavedVariable result2_;
};
#ifdef _WIN32
struct NativeBatchNormLegitNoTrainingBackward0 : public TraceableFunction {
TORCH_API NativeBatchNormLegitNoTrainingBackward0() = default;
#else
struct TORCH_API NativeBatchNormLegitNoTrainingBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NativeBatchNormLegitNoTrainingBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double eps;
SavedVariable input_;
SavedVariable running_mean_;
SavedVariable running_var_;
SavedVariable weight_;
SavedVariable result1_;
SavedVariable result2_;
};
#ifdef _WIN32
struct NativeBatchNormLegitBackward1 : public TraceableFunction {
TORCH_API NativeBatchNormLegitBackward1() = default;
#else
struct TORCH_API NativeBatchNormLegitBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NativeBatchNormLegitBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double eps;
SavedVariable input_;
bool training;
SavedVariable weight_;
SavedVariable result1_;
SavedVariable result2_;
};
#ifdef _WIN32
struct NativeBatchNormBackwardBackward0 : public TraceableFunction {
TORCH_API NativeBatchNormBackwardBackward0() = default;
#else
struct TORCH_API NativeBatchNormBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NativeBatchNormBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_out_.reset_data();
input_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
save_invstd_.reset_data();
save_mean_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double eps;
SavedVariable grad_out_;
SavedVariable input_;
SavedVariable running_mean_;
SavedVariable running_var_;
SavedVariable save_invstd_;
SavedVariable save_mean_;
bool train;
SavedVariable weight_;
};
#ifdef _WIN32
struct NativeLayerNormBackward0 : public TraceableFunction {
TORCH_API NativeLayerNormBackward0() = default;
#else
struct TORCH_API NativeLayerNormBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NativeLayerNormBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
bias_.reset_data();
input_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable bias_;
SavedVariable input_;
std::vector<c10::SymInt> normalized_shape;
SavedVariable weight_;
SavedVariable result1_;
SavedVariable result2_;
};
#ifdef _WIN32
struct NativeLayerNormBackwardBackward0 : public TraceableFunction {
TORCH_API NativeLayerNormBackwardBackward0() = default;
#else
struct TORCH_API NativeLayerNormBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NativeLayerNormBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_out_.reset_data();
input_.reset_data();
mean_.reset_data();
rstd_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_out_;
SavedVariable input_;
SavedVariable mean_;
std::vector<c10::SymInt> normalized_shape;
SavedVariable rstd_;
SavedVariable weight_;
};
#ifdef _WIN32
struct NativeGroupNormBackward0 : public TraceableFunction {
TORCH_API NativeGroupNormBackward0() = default;
#else
struct TORCH_API NativeGroupNormBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NativeGroupNormBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::SymInt C;
c10::SymInt HxW;
c10::SymInt N;
double eps;
int64_t group = 0;
SavedVariable input_;
SavedVariable weight_;
SavedVariable result1_;
SavedVariable result2_;
};
#ifdef _WIN32
struct NeBackward0 : public TraceableFunction {
TORCH_API NeBackward0() = default;
#else
struct TORCH_API NeBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NeBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct NeBackward1 : public TraceableFunction {
TORCH_API NeBackward1() = default;
#else
struct TORCH_API NeBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NeBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize other_info;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct NegBackward0 : public TraceableFunction {
TORCH_API NegBackward0() = default;
#else
struct TORCH_API NegBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NegBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
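// Unlike most nodes in this file, the batch-norm nodes below also override
// will_release_variables(): the engine calls it when the saved variables
// will be freed after this apply (i.e. retain_graph=false), flipping
// `retain_variables` to false, which presumably lets apply() consume the
// saved state destructively instead of copying it.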
#ifdef _WIN32
struct BatchNormWithUpdateBackward0 : public TraceableFunction {
TORCH_API BatchNormWithUpdateBackward0() = default;
#else
struct TORCH_API BatchNormWithUpdateBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BatchNormWithUpdateBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
result3_.reset_data();
}
bool retain_variables = true;
void will_release_variables() override {
retain_variables = false;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double eps;
SavedVariable input_;
SavedVariable running_mean_;
SavedVariable running_var_;
SavedVariable weight_;
SavedVariable result1_;
SavedVariable result2_;
SavedVariable result3_;
};
#ifdef _WIN32
struct BatchNormNoUpdateBackward0 : public TraceableFunction {
TORCH_API BatchNormNoUpdateBackward0() = default;
#else
struct TORCH_API BatchNormNoUpdateBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BatchNormNoUpdateBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
result3_.reset_data();
}
bool retain_variables = true;
void will_release_variables() override {
retain_variables = false;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double eps;
SavedVariable input_;
SavedVariable running_mean_;
SavedVariable running_var_;
SavedVariable weight_;
SavedVariable result1_;
SavedVariable result2_;
SavedVariable result3_;
};
#ifdef _WIN32
struct BatchNormBackwardBackward0 : public TraceableFunction {
TORCH_API BatchNormBackwardBackward0() = default;
#else
struct TORCH_API BatchNormBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BatchNormBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_out_.reset_data();
input_.reset_data();
reserve_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
save_mean_.reset_data();
save_var_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double eps;
SavedVariable grad_out_;
SavedVariable input_;
SavedVariable reserve_;
SavedVariable running_mean_;
SavedVariable running_var_;
SavedVariable save_mean_;
SavedVariable save_var_;
bool update;
SavedVariable weight_;
};
#ifdef _WIN32
struct NextafterBackward0 : public TraceableFunction {
TORCH_API NextafterBackward0() = default;
#else
struct TORCH_API NextafterBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NextafterBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
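// The trailing digit in a node's name distinguishes ATen overloads, not
// versions: judging by the saved fields, NormBackward0 is norm(self, p),
// NormBackward1/3 are the dim/keepdim overloads, and NormBackward2 the
// optional-p variant. For the common p == 2 case the gradient is the usual
//   grad_self = grad * self / result
// which is why self_ and result_ are saved.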
#ifdef _WIN32
struct NormBackward0 : public TraceableFunction {
TORCH_API NormBackward0() = default;
#else
struct TORCH_API NormBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NormBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar p;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct NormBackward1 : public TraceableFunction {
TORCH_API NormBackward1() = default;
#else
struct TORCH_API NormBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NormBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
bool keepdim;
::std::optional<at::Scalar> p;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct NormBackward2 : public TraceableFunction {
TORCH_API NormBackward2() = default;
#else
struct TORCH_API NormBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NormBackward2"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
::std::optional<at::Scalar> p;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct NormBackward3 : public TraceableFunction {
TORCH_API NormBackward3() = default;
#else
struct TORCH_API NormBackward3 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NormBackward3"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
bool keepdim;
::std::optional<at::Scalar> p;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct LinalgVectorNormBackward0 : public TraceableFunction {
TORCH_API LinalgVectorNormBackward0() = default;
#else
struct TORCH_API LinalgVectorNormBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgVectorNormBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<int64_t> dim;
bool keepdim;
at::Scalar ord;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct PdistBackward0 : public TraceableFunction {
TORCH_API PdistBackward0() = default;
#else
struct TORCH_API PdistBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PdistBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double p;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct PdistBackwardBackward0 : public TraceableFunction {
TORCH_API PdistBackwardBackward0() = default;
#else
struct TORCH_API PdistBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PdistBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct EuclideanDistBackward0 : public TraceableFunction {
TORCH_API EuclideanDistBackward0() = default;
#else
struct TORCH_API EuclideanDistBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EuclideanDistBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
x1_.reset_data();
x2_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable x1_;
SavedVariable x2_;
SavedVariable result_;
};
#ifdef _WIN32
struct CdistBackward0 : public TraceableFunction {
TORCH_API CdistBackward0() = default;
#else
struct TORCH_API CdistBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CdistBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
x1_.reset_data();
x2_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double p;
SavedVariable x1_;
SavedVariable x2_;
SavedVariable result_;
};
#ifdef _WIN32
struct CdistBackwardBackward0 : public TraceableFunction {
TORCH_API CdistBackwardBackward0() = default;
#else
struct TORCH_API CdistBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CdistBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
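// Sampling ops such as normal() are treated as constants w.r.t. their
// distribution parameters here: their backwards return zeros shaped like
// mean/std, which is presumably why only mean_sym_sizes/std_sym_sizes are
// saved rather than any tensor data.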
#ifdef _WIN32
struct NormalBackward0 : public TraceableFunction {
TORCH_API NormalBackward0() = default;
#else
struct TORCH_API NormalBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NormalBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct NormalBackward1 : public TraceableFunction {
TORCH_API NormalBackward1() = default;
#else
struct TORCH_API NormalBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NormalBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> mean_sym_sizes;
};
#ifdef _WIN32
struct NormalBackward2 : public TraceableFunction {
TORCH_API NormalBackward2() = default;
#else
struct TORCH_API NormalBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NormalBackward2"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> std_sym_sizes;
};
#ifdef _WIN32
struct NormalBackward3 : public TraceableFunction {
TORCH_API NormalBackward3() = default;
#else
struct TORCH_API NormalBackward3 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NormalBackward3"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> mean_sym_sizes;
std::vector<c10::SymInt> std_sym_sizes;
};
#ifdef _WIN32
struct LinalgHouseholderProductBackward0 : public TraceableFunction {
TORCH_API LinalgHouseholderProductBackward0() = default;
#else
struct TORCH_API LinalgHouseholderProductBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgHouseholderProductBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
tau_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable input_;
SavedVariable tau_;
SavedVariable result_;
};
#ifdef _WIN32
struct OrmqrBackward0 : public TraceableFunction {
TORCH_API OrmqrBackward0() = default;
#else
struct TORCH_API OrmqrBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "OrmqrBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input2_.reset_data();
input3_.reset_data();
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable input2_;
SavedVariable input3_;
bool left;
SavedVariable self_;
bool transpose;
SavedVariable result_;
};
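// Backwards of pure view ops (PermuteBackward0 below, ReshapeAliasBackward0,
// SelectBackward0, SliceBackward0, ...) derive from Node directly and pull in
// its constructors, while most other nodes derive from TraceableFunction;
// the distinction appears to be that view backwards may be replayed outside
// a regular traced backward pass.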
#ifdef _WIN32
struct PermuteBackward0 : public Node {
TORCH_API PermuteBackward0() = default;
#else
struct TORCH_API PermuteBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PermuteBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dims;
};
#ifdef _WIN32
struct PoissonBackward0 : public TraceableFunction {
TORCH_API PoissonBackward0() = default;
#else
struct TORCH_API PoissonBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PoissonBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize self_info;
};
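// The three Pow nodes cover the usual derivative cases:
//   PowBackward0 (scalar exponent): grad * exponent * self^(exponent - 1)
//   PowBackward1 (tensor exponent): the same for self, plus
//                                   grad * result * log(self) for exponent
//   PowBackward2 (scalar base):     grad * result * log(self) for exponent
// which matches the SavedVariable members each of them keeps.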
#ifdef _WIN32
struct PowBackward0 : public TraceableFunction {
TORCH_API PowBackward0() = default;
#else
struct TORCH_API PowBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PowBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar exponent;
SavedVariable self_;
};
#ifdef _WIN32
struct PowBackward1 : public TraceableFunction {
TORCH_API PowBackward1() = default;
#else
struct TORCH_API PowBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PowBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
exponent_.reset_data();
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable exponent_;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct PowBackward2 : public TraceableFunction {
TORCH_API PowBackward2() = default;
#else
struct TORCH_API PowBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PowBackward2"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
exponent_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable exponent_;
at::Scalar self;
SavedVariable result_;
};
#ifdef _WIN32
struct ProdBackward0 : public TraceableFunction {
TORCH_API ProdBackward0() = default;
#else
struct TORCH_API ProdBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ProdBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct ProdBackward1 : public TraceableFunction {
TORCH_API ProdBackward1() = default;
#else
struct TORCH_API ProdBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ProdBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
bool keepdim;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct PutBackward0 : public TraceableFunction {
TORCH_API PutBackward0() = default;
#else
struct TORCH_API PutBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PutBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
source_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool accumulate;
SavedVariable index_;
SavedVariable source_;
torch::autograd::generated::TypeAndSize source_info;
};
#ifdef _WIN32
struct LinalgQrBackward0 : public TraceableFunction {
TORCH_API LinalgQrBackward0() = default;
#else
struct TORCH_API LinalgQrBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgQrBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
Q_.reset_data();
R_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::string mode;
SavedVariable Q_;
SavedVariable R_;
};
#ifdef _WIN32
struct Rad2DegBackward0 : public TraceableFunction {
TORCH_API Rad2DegBackward0() = default;
#else
struct TORCH_API Rad2DegBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Rad2DegBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct RandomBackward0 : public TraceableFunction {
TORCH_API RandomBackward0() = default;
#else
struct TORCH_API RandomBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RandomBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct RandomBackward1 : public TraceableFunction {
TORCH_API RandomBackward1() = default;
#else
struct TORCH_API RandomBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RandomBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct RandomBackward2 : public TraceableFunction {
TORCH_API RandomBackward2() = default;
#else
struct TORCH_API RandomBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RandomBackward2"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ReciprocalBackward0 : public TraceableFunction {
TORCH_API ReciprocalBackward0() = default;
#else
struct TORCH_API ReciprocalBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReciprocalBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
#ifdef _WIN32
struct RemainderBackward0 : public TraceableFunction {
TORCH_API RemainderBackward0() = default;
#else
struct TORCH_API RemainderBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RemainderBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct RemainderBackward1 : public TraceableFunction {
TORCH_API RemainderBackward1() = default;
#else
struct TORCH_API RemainderBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RemainderBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct RenormBackward0 : public TraceableFunction {
TORCH_API RenormBackward0() = default;
#else
struct TORCH_API RenormBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RenormBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::Scalar maxnorm;
at::Scalar p;
SavedVariable self_;
};
#ifdef _WIN32
struct RepeatBackward0 : public TraceableFunction {
TORCH_API RepeatBackward0() = default;
#else
struct TORCH_API RepeatBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RepeatBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> repeats;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SpecialEntrBackward0 : public TraceableFunction {
TORCH_API SpecialEntrBackward0() = default;
#else
struct TORCH_API SpecialEntrBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialEntrBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct SpecialNdtriBackward0 : public TraceableFunction {
TORCH_API SpecialNdtriBackward0() = default;
#else
struct TORCH_API SpecialNdtriBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialNdtriBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
#ifdef _WIN32
struct SpecialLogNdtrBackward0 : public TraceableFunction {
TORCH_API SpecialLogNdtrBackward0() = default;
#else
struct TORCH_API SpecialLogNdtrBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SpecialLogNdtrBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct ReshapeAliasBackward0 : public Node {
TORCH_API ReshapeAliasBackward0() = default;
#else
struct TORCH_API ReshapeAliasBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReshapeAliasBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct RoundBackward0 : public TraceableFunction {
TORCH_API RoundBackward0() = default;
#else
struct TORCH_API RoundBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RoundBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct RoundBackward1 : public TraceableFunction {
TORCH_API RoundBackward1() = default;
#else
struct TORCH_API RoundBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RoundBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct RsqrtBackward0 : public TraceableFunction {
TORCH_API RsqrtBackward0() = default;
#else
struct TORCH_API RsqrtBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RsqrtBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
#ifdef _WIN32
struct ScatterBackward0 : public TraceableFunction {
TORCH_API ScatterBackward0() = default;
#else
struct TORCH_API ScatterBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ScatterBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable index_;
};
#ifdef _WIN32
struct ScatterBackward1 : public TraceableFunction {
TORCH_API ScatterBackward1() = default;
#else
struct TORCH_API ScatterBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ScatterBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable index_;
};
#ifdef _WIN32
struct ScatterAddBackward0 : public TraceableFunction {
TORCH_API ScatterAddBackward0() = default;
#else
struct TORCH_API ScatterAddBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ScatterAddBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable index_;
};
#ifdef _WIN32
struct SelectBackward0 : public Node {
TORCH_API SelectBackward0() = default;
#else
struct TORCH_API SelectBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SelectBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
c10::SymInt index;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SelectBackwardAutogradNestedTensor0 : public Node {
TORCH_API SelectBackwardAutogradNestedTensor0() = default;
#else
struct TORCH_API SelectBackwardAutogradNestedTensor0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SelectBackwardAutogradNestedTensor0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
c10::SymInt index;
SavedVariable self_;
};
#ifdef _WIN32
struct SelectBackwardBackward0 : public TraceableFunction {
TORCH_API SelectBackwardBackward0() = default;
#else
struct TORCH_API SelectBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SelectBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
c10::SymInt index;
};
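// SigmoidBackward0 saves only the forward result: sigmoid'(x) = s * (1 - s)
// with s = sigmoid(x), so grad_self = grad * result * (1 - result) and the
// input itself is never needed. Reciprocal, rsqrt and sqrt follow the same
// result-only pattern.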
#ifdef _WIN32
struct SigmoidBackward0 : public TraceableFunction {
TORCH_API SigmoidBackward0() = default;
#else
struct TORCH_API SigmoidBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SigmoidBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
#ifdef _WIN32
struct LogitBackward0 : public TraceableFunction {
TORCH_API LogitBackward0() = default;
#else
struct TORCH_API LogitBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LogitBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
::std::optional<double> eps;
SavedVariable self_;
};
#ifdef _WIN32
struct SignBackward0 : public TraceableFunction {
TORCH_API SignBackward0() = default;
#else
struct TORCH_API SignBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SignBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct SgnBackward0 : public TraceableFunction {
TORCH_API SgnBackward0() = default;
#else
struct TORCH_API SgnBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SgnBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct SinBackward0 : public TraceableFunction {
TORCH_API SinBackward0() = default;
#else
struct TORCH_API SinBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SinBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct SincBackward0 : public TraceableFunction {
TORCH_API SincBackward0() = default;
#else
struct TORCH_API SincBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SincBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct SinhBackward0 : public TraceableFunction {
TORCH_API SinhBackward0() = default;
#else
struct TORCH_API SinhBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SinhBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
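// c10::SymInt (and std::vector<c10::SymInt>) members store possibly-symbolic
// sizes and indices so these nodes stay valid under dynamic shapes; concrete
// int64_t members such as dim are always static.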
#ifdef _WIN32
struct SliceBackward0 : public Node {
TORCH_API SliceBackward0() = default;
#else
struct TORCH_API SliceBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SliceBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
::std::optional<c10::SymInt> end;
std::vector<c10::SymInt> self_sym_sizes;
::std::optional<c10::SymInt> start;
c10::SymInt step;
};
#ifdef _WIN32
struct SliceBackwardBackward0 : public TraceableFunction {
TORCH_API SliceBackwardBackward0() = default;
#else
struct TORCH_API SliceBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SliceBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
c10::SymInt end;
c10::SymInt start;
c10::SymInt step;
};
#ifdef _WIN32
struct SliceInverseBackward0 : public Node {
TORCH_API SliceInverseBackward0() = default;
#else
struct TORCH_API SliceInverseBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SliceInverseBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
::std::optional<c10::SymInt> end;
torch::autograd::generated::TypeAndSize self_info;
::std::optional<c10::SymInt> start;
c10::SymInt step;
};
#ifdef _WIN32
struct SliceScatterBackward0 : public TraceableFunction {
TORCH_API SliceScatterBackward0() = default;
#else
struct TORCH_API SliceScatterBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SliceScatterBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
::std::optional<c10::SymInt> end;
torch::autograd::generated::TypeAndSize src_info;
::std::optional<c10::SymInt> start;
c10::SymInt step;
};
#ifdef _WIN32
struct SelectScatterBackward0 : public TraceableFunction {
TORCH_API SelectScatterBackward0() = default;
#else
struct TORCH_API SelectScatterBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SelectScatterBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
c10::SymInt index;
torch::autograd::generated::TypeAndSize src_info;
};
#ifdef _WIN32
struct DiagonalScatterBackward0 : public TraceableFunction {
TORCH_API DiagonalScatterBackward0() = default;
#else
struct TORCH_API DiagonalScatterBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DiagonalScatterBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim1 = 0;
int64_t dim2 = 0;
int64_t offset = 0;
torch::autograd::generated::TypeAndSize src_info;
};
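// at::TensorGeometry captures a tensor's sizes, strides and storage offset
// without owning the tensor, which is all an as_strided-style backward needs.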
#ifdef _WIN32
struct AsStridedScatterBackward0 : public TraceableFunction {
TORCH_API AsStridedScatterBackward0() = default;
#else
struct TORCH_API AsStridedScatterBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AsStridedScatterBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::TensorGeometry self_geometry;
std::vector<c10::SymInt> size;
at::TensorGeometry src_geometry;
::std::optional<c10::SymInt> storage_offset;
std::vector<c10::SymInt> stride;
};
#ifdef _WIN32
struct LinalgSolveExBackward0 : public TraceableFunction {
TORCH_API LinalgSolveExBackward0() = default;
#else
struct TORCH_API LinalgSolveExBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgSolveExBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
A_.reset_data();
LU_.reset_data();
pivots_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable A_;
bool left;
SavedVariable LU_;
SavedVariable pivots_;
SavedVariable result_;
};
#ifdef _WIN32
struct SortBackward0 : public TraceableFunction {
TORCH_API SortBackward0() = default;
#else
struct TORCH_API SortBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SortBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
std::vector<c10::SymInt> self_sym_sizes;
SavedVariable indices_;
};
#ifdef _WIN32
struct SortBackward1 : public TraceableFunction {
TORCH_API SortBackward1() = default;
#else
struct TORCH_API SortBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SortBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
std::vector<c10::SymInt> self_sym_sizes;
SavedVariable indices_;
};
#ifdef _WIN32
struct SplitBackward0 : public Node {
TORCH_API SplitBackward0() = default;
#else
struct TORCH_API SplitBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SplitBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::TensorOptions self_options;
std::vector<c10::SymInt> self_sym_sizes;
c10::SymInt split_size;
};
#ifdef _WIN32
struct UnsafeSplitBackward0 : public TraceableFunction {
TORCH_API UnsafeSplitBackward0() = default;
#else
struct TORCH_API UnsafeSplitBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnsafeSplitBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::TensorOptions self_options;
std::vector<c10::SymInt> self_sym_sizes;
c10::SymInt split_size;
};
#ifdef _WIN32
struct SplitWithSizesBackward0 : public Node {
TORCH_API SplitWithSizesBackward0() = default;
#else
struct TORCH_API SplitWithSizesBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SplitWithSizesBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::TensorOptions self_options;
std::vector<c10::SymInt> self_sym_sizes;
std::vector<c10::SymInt> split_sizes;
};
#ifdef _WIN32
struct SplitWithSizesBackwardAutogradNestedTensor0 : public Node {
TORCH_API SplitWithSizesBackwardAutogradNestedTensor0() = default;
#else
struct TORCH_API SplitWithSizesBackwardAutogradNestedTensor0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SplitWithSizesBackwardAutogradNestedTensor0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
at::TensorOptions self_options;
std::vector<c10::SymInt> split_sizes;
};
#ifdef _WIN32
struct UnsafeSplitWithSizesBackward0 : public TraceableFunction {
TORCH_API UnsafeSplitWithSizesBackward0() = default;
#else
struct TORCH_API UnsafeSplitWithSizesBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnsafeSplitWithSizesBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::TensorOptions self_options;
std::vector<c10::SymInt> self_sym_sizes;
std::vector<c10::SymInt> split_sizes;
};
#ifdef _WIN32
struct SqrtBackward0 : public TraceableFunction {
TORCH_API SqrtBackward0() = default;
#else
struct TORCH_API SqrtBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqrtBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
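// The many Squeeze nodes again reflect overloads and dispatch keys: variants
// 0-2 (plus the NestedTensor ones) are view backwards deriving from Node,
// while 3-5 appear to belong to the in-place/functional overloads and derive
// from TraceableFunction.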
#ifdef _WIN32
struct SqueezeBackward0 : public Node {
TORCH_API SqueezeBackward0() = default;
#else
struct TORCH_API SqueezeBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SqueezeBackward1 : public Node {
TORCH_API SqueezeBackward1() = default;
#else
struct TORCH_API SqueezeBackward1 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SqueezeBackwardAutogradNestedTensor0 : public Node {
TORCH_API SqueezeBackwardAutogradNestedTensor0() = default;
#else
struct TORCH_API SqueezeBackwardAutogradNestedTensor0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackwardAutogradNestedTensor0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
};
#ifdef _WIN32
struct SqueezeBackward2 : public Node {
TORCH_API SqueezeBackward2() = default;
#else
struct TORCH_API SqueezeBackward2 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackward2"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SqueezeBackwardAutogradNestedTensor1 : public Node {
TORCH_API SqueezeBackwardAutogradNestedTensor1() = default;
#else
struct TORCH_API SqueezeBackwardAutogradNestedTensor1 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackwardAutogradNestedTensor1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
int64_t self_dim = 0;
};
#ifdef _WIN32
struct SqueezeBackward3 : public TraceableFunction {
TORCH_API SqueezeBackward3() = default;
#else
struct TORCH_API SqueezeBackward3 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackward3"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SqueezeBackward4 : public TraceableFunction {
TORCH_API SqueezeBackward4() = default;
#else
struct TORCH_API SqueezeBackward4 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackward4"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SqueezeBackward5 : public TraceableFunction {
TORCH_API SqueezeBackward5() = default;
#else
struct TORCH_API SqueezeBackward5 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackward5"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct StdBackward0 : public TraceableFunction {
TORCH_API StdBackward0() = default;
#else
struct TORCH_API StdBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "StdBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
::std::optional<at::Scalar> correction;
c10::OptionalArray<int64_t> dim;
bool keepdim;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct StdMeanBackward0 : public TraceableFunction {
TORCH_API StdMeanBackward0() = default;
#else
struct TORCH_API StdMeanBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "StdMeanBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result0_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
::std::optional<at::Scalar> correction;
c10::OptionalArray<int64_t> dim;
bool keepdim;
SavedVariable self_;
SavedVariable result0_;
};
#ifdef _WIN32
struct SubBackward0 : public TraceableFunction {
TORCH_API SubBackward0() = default;
#else
struct TORCH_API SubBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SubBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::ScalarType other_scalar_type;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct SubBackward1 : public TraceableFunction {
TORCH_API SubBackward1() = default;
#else
struct TORCH_API SubBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SubBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct RsubBackward0 : public TraceableFunction {
TORCH_API RsubBackward0() = default;
#else
struct TORCH_API RsubBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RsubBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::ScalarType other_scalar_type;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct RsubBackward1 : public TraceableFunction {
TORCH_API RsubBackward1() = default;
#else
struct TORCH_API RsubBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RsubBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::ScalarType self_scalar_type;
};
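// sum() needs no saved tensors: its backward just broadcasts grad back to
// self_sym_sizes (unsqueezing reduced dims when keepdim is false). The
// NestedTensor variants keep self_ instead, presumably because nested sizes
// cannot be expressed as a flat SymInt list.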
#ifdef _WIN32
struct SumBackward0 : public TraceableFunction {
TORCH_API SumBackward0() = default;
#else
struct TORCH_API SumBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SumBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SumBackwardAutogradNestedTensor0 : public TraceableFunction {
TORCH_API SumBackwardAutogradNestedTensor0() = default;
#else
struct TORCH_API SumBackwardAutogradNestedTensor0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SumBackwardAutogradNestedTensor0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct SumBackward1 : public TraceableFunction {
TORCH_API SumBackward1() = default;
#else
struct TORCH_API SumBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SumBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<int64_t> dim;
bool keepdim;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SumBackwardAutogradNestedTensor1 : public TraceableFunction {
TORCH_API SumBackwardAutogradNestedTensor1() = default;
#else
struct TORCH_API SumBackwardAutogradNestedTensor1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SumBackwardAutogradNestedTensor1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<int64_t> dim;
bool keepdim;
SavedVariable self_;
};
#ifdef _WIN32
struct NansumBackward0 : public TraceableFunction {
TORCH_API NansumBackward0() = default;
#else
struct TORCH_API NansumBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NansumBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<int64_t> dim;
bool keepdim;
SavedVariable self_;
at::ScalarType self_scalar_type;
};
#ifdef _WIN32
struct LinalgSvdBackward0 : public TraceableFunction {
TORCH_API LinalgSvdBackward0() = default;
#else
struct TORCH_API LinalgSvdBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgSvdBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
S_.reset_data();
U_.reset_data();
Vh_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool full_matrices;
SavedVariable S_;
c10::SymInt S_sym_argsize_minus_1;
SavedVariable U_;
SavedVariable Vh_;
};
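// LinalgSvdBackward0 keeps all three SVD factors: the backward of A = U S Vh
// needs U, S, and Vh, plus full_matrices to know whether the extra columns of
// U/V carry gradient. S_sym_argsize_minus_1 is presumably S.sym_size(-1),
// recorded separately so the shape logic works without keeping the tensor.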
#ifdef _WIN32
struct LinalgEighBackward0 : public TraceableFunction {
TORCH_API LinalgEighBackward0() = default;
#else
struct TORCH_API LinalgEighBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgEighBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
eigenvalues_.reset_data();
eigenvectors_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable eigenvalues_;
SavedVariable eigenvectors_;
};
#ifdef _WIN32
struct LinalgEigBackward0 : public TraceableFunction {
TORCH_API LinalgEigBackward0() = default;
#else
struct TORCH_API LinalgEigBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgEigBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
eigenvalues_.reset_data();
eigenvectors_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::ScalarType self_scalar_type;
SavedVariable eigenvalues_;
SavedVariable eigenvectors_;
};
#ifdef _WIN32
struct TBackward0 : public Node {
TORCH_API TBackward0() = default;
#else
struct TORCH_API TBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct TBackward1 : public TraceableFunction {
TORCH_API TBackward1() = default;
#else
struct TORCH_API TBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct FlipBackward0 : public TraceableFunction {
TORCH_API FlipBackward0() = default;
#else
struct TORCH_API FlipBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FlipBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dims;
};
#ifdef _WIN32
struct RollBackward0 : public TraceableFunction {
TORCH_API RollBackward0() = default;
#else
struct TORCH_API RollBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RollBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dims;
std::vector<c10::SymInt> shifts;
};
#ifdef _WIN32
struct Rot90Backward0 : public TraceableFunction {
TORCH_API Rot90Backward0() = default;
#else
struct TORCH_API Rot90Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Rot90Backward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dims;
int64_t k = 0;
};
#ifdef _WIN32
struct TakeBackward0 : public TraceableFunction {
TORCH_API TakeBackward0() = default;
#else
struct TORCH_API TakeBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TakeBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable index_;
SavedVariable self_;
};
#ifdef _WIN32
struct TanBackward0 : public TraceableFunction {
TORCH_API TanBackward0() = default;
#else
struct TORCH_API TanBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TanBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
#ifdef _WIN32
struct TanhBackward0 : public TraceableFunction {
TORCH_API TanhBackward0() = default;
#else
struct TORCH_API TanhBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TanhBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
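// TanhBackward0 saves the forward output rather than the input: with
// y = tanh(x), dy/dx = 1 - y^2, so result_ is all the backward needs (and it
// is cheaper than recomputing tanh). A hedged sketch of the gradient:
//
//   // grad_input = grad_output * (1 - result^2)
//   auto grad_input = grads[0] * (1 - result * result);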
#ifdef _WIN32
struct TopkBackward0 : public TraceableFunction {
TORCH_API TopkBackward0() = default;
#else
struct TORCH_API TopkBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TopkBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
std::vector<c10::SymInt> self_sym_sizes;
SavedVariable indices_;
};
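// TopkBackward0: the gradient is nonzero only at the selected positions, so the
// node keeps indices_ plus the input sizes; presumably the incoming grad is
// scattered along `dim` into a zero tensor of shape self_sym_sizes.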
#ifdef _WIN32
struct TraceBackward0 : public TraceableFunction {
TORCH_API TraceBackward0() = default;
#else
struct TORCH_API TraceBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TraceBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct TransposeBackward0 : public Node {
TORCH_API TransposeBackward0() = default;
#else
struct TORCH_API TransposeBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TransposeBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim0 = 0;
int64_t dim1 = 0;
};
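// Note the base class: view-like backwards such as TBackward0 and
// TransposeBackward0 derive from Node directly, while backwards of out-of-place
// ops derive from TraceableFunction (a Node whose is_traceable() returns true).
// The split appears to track whether the forward was a view operation.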
#ifdef _WIN32
struct TransposeBackward1 : public TraceableFunction {
TORCH_API TransposeBackward1() = default;
#else
struct TORCH_API TransposeBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TransposeBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim0 = 0;
int64_t dim1 = 0;
};
#ifdef _WIN32
struct TriangularSolveBackward0 : public TraceableFunction {
TORCH_API TriangularSolveBackward0() = default;
#else
struct TORCH_API TriangularSolveBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TriangularSolveBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
A_.reset_data();
self_.reset_data();
solution_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable A_;
SavedVariable self_;
bool transpose;
bool unitriangular;
bool upper;
SavedVariable solution_;
};
#ifdef _WIN32
struct LinalgSolveTriangularBackward0 : public TraceableFunction {
TORCH_API LinalgSolveTriangularBackward0() = default;
#else
struct TORCH_API LinalgSolveTriangularBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinalgSolveTriangularBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool left;
SavedVariable self_;
bool unitriangular;
bool upper;
SavedVariable result_;
};
#ifdef _WIN32
struct TrilBackward0 : public TraceableFunction {
TORCH_API TrilBackward0() = default;
#else
struct TORCH_API TrilBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TrilBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t diagonal = 0;
};
#ifdef _WIN32
struct TriuBackward0 : public TraceableFunction {
TORCH_API TriuBackward0() = default;
#else
struct TORCH_API TriuBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TriuBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t diagonal = 0;
};
#ifdef _WIN32
struct TruncBackward0 : public TraceableFunction {
TORCH_API TruncBackward0() = default;
#else
struct TORCH_API TruncBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TruncBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ToDenseBackward0 : public TraceableFunction {
TORCH_API ToDenseBackward0() = default;
#else
struct TORCH_API ToDenseBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ToDenseBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
::std::optional<bool> masked_grad;
SavedVariable self_;
};
#ifdef _WIN32
struct ToSparseBackward0 : public TraceableFunction {
TORCH_API ToSparseBackward0() = default;
#else
struct TORCH_API ToSparseBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ToSparseBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Layout self_layout;
c10::OptionalArray<c10::SymInt> self_self_sym_blocksize_opt;
};
#ifdef _WIN32
struct ToSparseBackward1 : public TraceableFunction {
TORCH_API ToSparseBackward1() = default;
#else
struct TORCH_API ToSparseBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ToSparseBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Layout self_layout;
c10::OptionalArray<c10::SymInt> self_self_sym_blocksize_opt;
};
#ifdef _WIN32
struct ToSparseCsrBackward0 : public TraceableFunction {
TORCH_API ToSparseCsrBackward0() = default;
#else
struct TORCH_API ToSparseCsrBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ToSparseCsrBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Layout self_layout;
c10::OptionalArray<c10::SymInt> self_self_sym_blocksize_opt;
};
#ifdef _WIN32
struct ToSparseCscBackward0 : public TraceableFunction {
TORCH_API ToSparseCscBackward0() = default;
#else
struct TORCH_API ToSparseCscBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ToSparseCscBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Layout self_layout;
c10::OptionalArray<c10::SymInt> self_self_sym_blocksize_opt;
};
#ifdef _WIN32
struct ToSparseBsrBackward0 : public TraceableFunction {
TORCH_API ToSparseBsrBackward0() = default;
#else
struct TORCH_API ToSparseBsrBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ToSparseBsrBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Layout self_layout;
c10::OptionalArray<c10::SymInt> self_self_sym_blocksize_opt;
};
#ifdef _WIN32
struct ToSparseBscBackward0 : public TraceableFunction {
TORCH_API ToSparseBscBackward0() = default;
#else
struct TORCH_API ToSparseBscBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ToSparseBscBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Layout self_layout;
c10::OptionalArray<c10::SymInt> self_self_sym_blocksize_opt;
};
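// The to_sparse*/to_dense family above all record the same two fields: the
// original layout and, where relevant, the original block size. That matches
// the natural gradient of a layout conversion, which is presumably just
// converting the incoming grad back to self_layout (using
// self_self_sym_blocksize_opt for the blocked BSR/BSC formats).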
#ifdef _WIN32
struct ToMkldnnBackward0 : public TraceableFunction {
TORCH_API ToMkldnnBackward0() = default;
#else
struct TORCH_API ToMkldnnBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ToMkldnnBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct UnfoldBackward0 : public Node {
TORCH_API UnfoldBackward0() = default;
#else
struct TORCH_API UnfoldBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnfoldBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dimension = 0;
std::vector<c10::SymInt> self_sym_sizes;
int64_t size = 0;
int64_t step = 0;
};
#ifdef _WIN32
struct UnfoldBackwardBackward0 : public TraceableFunction {
TORCH_API UnfoldBackwardBackward0() = default;
#else
struct TORCH_API UnfoldBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnfoldBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
int64_t size = 0;
int64_t step = 0;
};
#ifdef _WIN32
struct UniformBackward0 : public TraceableFunction {
TORCH_API UniformBackward0() = default;
#else
struct TORCH_API UniformBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UniformBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct UniqueBackward0 : public TraceableFunction {
TORCH_API UniqueBackward0() = default;
#else
struct TORCH_API UniqueBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UniqueBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct UniqueDimBackward0 : public TraceableFunction {
TORCH_API UniqueDimBackward0() = default;
#else
struct TORCH_API UniqueDimBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UniqueDimBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct UniqueConsecutiveBackward0 : public TraceableFunction {
TORCH_API UniqueConsecutiveBackward0() = default;
#else
struct TORCH_API UniqueConsecutiveBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UniqueConsecutiveBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct UniqueDimConsecutiveBackward0 : public TraceableFunction {
TORCH_API UniqueDimConsecutiveBackward0() = default;
#else
struct TORCH_API UniqueDimConsecutiveBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UniqueDimConsecutiveBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct Unique2Backward0 : public TraceableFunction {
TORCH_API Unique2Backward0() = default;
#else
struct TORCH_API Unique2Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Unique2Backward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct UnsafeViewBackward0 : public TraceableFunction {
TORCH_API UnsafeViewBackward0() = default;
#else
struct TORCH_API UnsafeViewBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnsafeViewBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct LiftBackward0 : public TraceableFunction {
TORCH_API LiftBackward0() = default;
#else
struct TORCH_API LiftBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LiftBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct LiftFreshBackward0 : public TraceableFunction {
TORCH_API LiftFreshBackward0() = default;
#else
struct TORCH_API LiftFreshBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LiftFreshBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct UnsqueezeBackward0 : public Node {
TORCH_API UnsqueezeBackward0() = default;
#else
struct TORCH_API UnsqueezeBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnsqueezeBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
};
#ifdef _WIN32
struct UnsqueezeBackward1 : public TraceableFunction {
TORCH_API UnsqueezeBackward1() = default;
#else
struct TORCH_API UnsqueezeBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnsqueezeBackward1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
};
#ifdef _WIN32
struct VarBackward0 : public TraceableFunction {
TORCH_API VarBackward0() = default;
#else
struct TORCH_API VarBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "VarBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
::std::optional<at::Scalar> correction;
c10::OptionalArray<int64_t> dim;
bool keepdim;
SavedVariable self_;
};
#ifdef _WIN32
struct VarMeanBackward0 : public TraceableFunction {
TORCH_API VarMeanBackward0() = default;
#else
struct TORCH_API VarMeanBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "VarMeanBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
::std::optional<at::Scalar> correction;
c10::OptionalArray<int64_t> dim;
bool keepdim;
SavedVariable self_;
};
#ifdef _WIN32
struct ViewBackward0 : public Node {
TORCH_API ViewBackward0() = default;
#else
struct TORCH_API ViewBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ViewBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
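// ViewBackward0, like SumBackward0 earlier, needs no tensors at all: reshaping
// is a pure metadata change, so the backward can presumably be written as
//
//   grads[0].reshape_symint(self_sym_sizes)
//
// i.e. the grad viewed back to the original (symbolic) sizes.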
#ifdef _WIN32
struct ViewBackwardAutogradNestedTensor0 : public Node {
TORCH_API ViewBackwardAutogradNestedTensor0() = default;
#else
struct TORCH_API ViewBackwardAutogradNestedTensor0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ViewBackwardAutogradNestedTensor0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct ViewAsRealBackward0 : public Node {
TORCH_API ViewAsRealBackward0() = default;
#else
struct TORCH_API ViewAsRealBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ViewAsRealBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ViewAsComplexBackward0 : public Node {
TORCH_API ViewAsComplexBackward0() = default;
#else
struct TORCH_API ViewAsComplexBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ViewAsComplexBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct WhereBackward0 : public TraceableFunction {
TORCH_API WhereBackward0() = default;
#else
struct TORCH_API WhereBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "WhereBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
condition_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable condition_;
};
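// WhereBackward0 only needs the mask: for out = where(cond, self, other) the
// grads route by condition. A hedged sketch (exact generated form may differ):
//
//   auto grad_self  = at::where(condition, grads[0], at::zeros_like(grads[0]));
//   auto grad_other = at::where(condition, at::zeros_like(grads[0]), grads[0]);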
#ifdef _WIN32
struct WeightNormInterfaceBackward0 : public TraceableFunction {
TORCH_API WeightNormInterfaceBackward0() = default;
#else
struct TORCH_API WeightNormInterfaceBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "WeightNormInterfaceBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
g_.reset_data();
v_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable g_;
SavedVariable v_;
SavedVariable result1_;
};
#ifdef _WIN32
struct ZeroBackward0 : public TraceableFunction {
TORCH_API ZeroBackward0() = default;
#else
struct TORCH_API ZeroBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ZeroBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct SparseMaskBackward0 : public TraceableFunction {
TORCH_API SparseMaskBackward0() = default;
#else
struct TORCH_API SparseMaskBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SparseMaskBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable mask_;
at::Layout self_layout;
};
#ifdef _WIN32
struct SparseCooTensorWithDimsAndTensorsBackward0 : public TraceableFunction {
TORCH_API SparseCooTensorWithDimsAndTensorsBackward0() = default;
#else
struct TORCH_API SparseCooTensorWithDimsAndTensorsBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SparseCooTensorWithDimsAndTensorsBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
#ifdef _WIN32
struct SparseCompressedTensorBackward0 : public TraceableFunction {
TORCH_API SparseCompressedTensorBackward0() = default;
#else
struct TORCH_API SparseCompressedTensorBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SparseCompressedTensorBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
values_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable values_;
SavedVariable result_;
};
#ifdef _WIN32
struct SparseSumBackward0 : public TraceableFunction {
TORCH_API SparseSumBackward0() = default;
#else
struct TORCH_API SparseSumBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SparseSumBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
SavedVariable self_;
};
#ifdef _WIN32
struct StandardGammaBackward0 : public TraceableFunction {
TORCH_API StandardGammaBackward0() = default;
#else
struct TORCH_API StandardGammaBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "StandardGammaBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct StandardGammaGradBackward0 : public TraceableFunction {
TORCH_API StandardGammaGradBackward0() = default;
#else
struct TORCH_API StandardGammaGradBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "StandardGammaGradBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ValuesBackward0 : public Node {
TORCH_API ValuesBackward0() = default;
#else
struct TORCH_API ValuesBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ValuesBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct ValuesBackwardAutogradNestedTensor0 : public Node {
TORCH_API ValuesBackwardAutogradNestedTensor0() = default;
#else
struct TORCH_API ValuesBackwardAutogradNestedTensor0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ValuesBackwardAutogradNestedTensor0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct TrilinearBackward0 : public TraceableFunction {
TORCH_API TrilinearBackward0() = default;
#else
struct TORCH_API TrilinearBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TrilinearBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
i1_.reset_data();
i2_.reset_data();
i3_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> expand1;
std::vector<int64_t> expand2;
std::vector<int64_t> expand3;
SavedVariable i1_;
SavedVariable i2_;
SavedVariable i3_;
std::vector<int64_t> sumdim;
};
#ifdef _WIN32
struct ConstantPadNdBackward0 : public TraceableFunction {
TORCH_API ConstantPadNdBackward0() = default;
#else
struct TORCH_API ConstantPadNdBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConstantPadNdBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> pad;
};
#ifdef _WIN32
struct BinaryCrossEntropyBackward0 : public TraceableFunction {
TORCH_API BinaryCrossEntropyBackward0() = default;
#else
struct TORCH_API BinaryCrossEntropyBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BinaryCrossEntropyBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
target_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
SavedVariable weight_;
};
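// For the loss nodes here (BinaryCrossEntropy*, NllLoss*, MseLoss*, ...), the
// int64_t `reduction` field stores the at::Reduction enum (None = 0, Mean = 1,
// Sum = 2), which the backward needs in order to decide whether to spread the
// incoming grad back over the batch.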
#ifdef _WIN32
struct BinaryCrossEntropyBackwardBackward0 : public TraceableFunction {
TORCH_API BinaryCrossEntropyBackwardBackward0() = default;
#else
struct TORCH_API BinaryCrossEntropyBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BinaryCrossEntropyBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
target_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_output_;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
SavedVariable weight_;
};
#ifdef _WIN32
struct BinaryCrossEntropyWithLogitsBackward0 : public TraceableFunction {
TORCH_API BinaryCrossEntropyWithLogitsBackward0() = default;
#else
struct TORCH_API BinaryCrossEntropyWithLogitsBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "BinaryCrossEntropyWithLogitsBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
pos_weight_.reset_data();
self_.reset_data();
target_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable pos_weight_;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
SavedVariable weight_;
};
#ifdef _WIN32
struct EmbeddingBackward0 : public TraceableFunction {
TORCH_API EmbeddingBackward0() = default;
#else
struct TORCH_API EmbeddingBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EmbeddingBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
c10::SymInt padding_idx;
bool scale_grad_by_freq;
bool sparse;
c10::SymInt weight_sym_argsize_0;
};
#ifdef _WIN32
struct EmbeddingDenseBackwardBackward0 : public TraceableFunction {
TORCH_API EmbeddingDenseBackwardBackward0() = default;
#else
struct TORCH_API EmbeddingDenseBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EmbeddingDenseBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
c10::SymInt padding_idx;
};
#ifdef _WIN32
struct EmbeddingBagBackward0 : public TraceableFunction {
TORCH_API EmbeddingBagBackward0() = default;
#else
struct TORCH_API EmbeddingBagBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EmbeddingBagBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
offsets_.reset_data();
per_sample_weights_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
result3_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
int64_t mode = 0;
SavedVariable offsets_;
int64_t padding_idx = 0;
SavedVariable per_sample_weights_;
bool scale_grad_by_freq;
bool sparse;
SavedVariable weight_;
c10::SymInt weight_sym_argsize_0;
SavedVariable result1_;
SavedVariable result2_;
SavedVariable result3_;
};
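// EmbeddingBagBackward0 is among the heaviest nodes here: besides the inputs it
// keeps result1_..result3_, presumably the auxiliary outputs of _embedding_bag
// (offset2bag, bag_size, max_indices) that the fused backward kernel consumes.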
#ifdef _WIN32
struct EmbeddingBagBackwardBackward0 : public TraceableFunction {
TORCH_API EmbeddingBagBackwardBackward0() = default;
#else
struct TORCH_API EmbeddingBagBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EmbeddingBagBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct EmbeddingBagDenseBackwardBackward0 : public TraceableFunction {
TORCH_API EmbeddingBagDenseBackwardBackward0() = default;
#else
struct TORCH_API EmbeddingBagDenseBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EmbeddingBagDenseBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct EmbeddingRenormBackward0 : public TraceableFunction {
TORCH_API EmbeddingRenormBackward0() = default;
#else
struct TORCH_API EmbeddingRenormBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EmbeddingRenormBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct MseLossBackward0 : public TraceableFunction {
TORCH_API MseLossBackward0() = default;
#else
struct TORCH_API MseLossBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MseLossBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
target_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
};
#ifdef _WIN32
struct MultiMarginLossBackward0 : public TraceableFunction {
TORCH_API MultiMarginLossBackward0() = default;
#else
struct TORCH_API MultiMarginLossBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MultiMarginLossBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
target_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar margin;
at::Scalar p;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
SavedVariable weight_;
};
#ifdef _WIN32
struct MultilabelMarginLossBackward0 : public TraceableFunction {
TORCH_API MultilabelMarginLossBackward0() = default;
#else
struct TORCH_API MultilabelMarginLossBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MultilabelMarginLossBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
target_.reset_data();
is_target_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
SavedVariable is_target_;
};
#ifdef _WIN32
struct NllLossBackward0 : public TraceableFunction {
TORCH_API NllLossBackward0() = default;
#else
struct TORCH_API NllLossBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NllLossBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
target_.reset_data();
weight_.reset_data();
total_weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::SymInt ignore_index;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
SavedVariable weight_;
SavedVariable total_weight_;
};
#ifdef _WIN32
struct NllLoss2DBackward0 : public TraceableFunction {
TORCH_API NllLoss2DBackward0() = default;
#else
struct TORCH_API NllLoss2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NllLoss2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
target_.reset_data();
weight_.reset_data();
total_weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::SymInt ignore_index;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
SavedVariable weight_;
SavedVariable total_weight_;
};
#ifdef _WIN32
struct SmoothL1LossBackward0 : public TraceableFunction {
TORCH_API SmoothL1LossBackward0() = default;
#else
struct TORCH_API SmoothL1LossBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SmoothL1LossBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
target_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double beta;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
};
#ifdef _WIN32
struct HuberLossBackward0 : public TraceableFunction {
TORCH_API HuberLossBackward0() = default;
#else
struct TORCH_API HuberLossBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "HuberLossBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
target_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double delta;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
};
#ifdef _WIN32
struct SoftMarginLossBackward0 : public TraceableFunction {
TORCH_API SoftMarginLossBackward0() = default;
#else
struct TORCH_API SoftMarginLossBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SoftMarginLossBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
target_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
};
#ifdef _WIN32
struct ReluBackward0 : public TraceableFunction {
TORCH_API ReluBackward0() = default;
#else
struct TORCH_API ReluBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReluBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable result_;
};
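// ReluBackward0 saves the output: relu's derivative is 1 where the result is
// positive and 0 elsewhere, so the input is never needed. A hedged sketch:
//
//   // grad_input = grad_output where result > 0, else 0
//   auto grad_input = grads[0] * (result > 0).to(grads[0].scalar_type());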
#ifdef _WIN32
struct SiluBackward0 : public TraceableFunction {
TORCH_API SiluBackward0() = default;
#else
struct TORCH_API SiluBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SiluBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct MishBackward0 : public TraceableFunction {
TORCH_API MishBackward0() = default;
#else
struct TORCH_API MishBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MishBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct EluBackward0 : public TraceableFunction {
TORCH_API EluBackward0() = default;
#else
struct TORCH_API EluBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EluBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::Scalar input_scale;
at::Scalar scale;
SavedVariable self_;
};
#ifdef _WIN32
struct EluBackward1 : public TraceableFunction {
TORCH_API EluBackward1() = default;
#else
struct TORCH_API EluBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EluBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::Scalar input_scale;
at::Scalar scale;
SavedVariable result_;
};
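// The 0/1 suffix pairs here (EluBackward0/EluBackward1, CeluBackward0/1,
// LeakyReluBackward0/1) presumably distinguish the out-of-place forward, whose
// backward reads the saved input self_, from the in-place forward, which has
// destroyed its input and must derive the grad from the saved result_ instead.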
#ifdef _WIN32
struct CeluBackward0 : public TraceableFunction {
TORCH_API CeluBackward0() = default;
#else
struct TORCH_API CeluBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CeluBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
SavedVariable self_;
};
#ifdef _WIN32
struct CeluBackward1 : public TraceableFunction {
TORCH_API CeluBackward1() = default;
#else
struct TORCH_API CeluBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CeluBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
SavedVariable result_;
};
#ifdef _WIN32
struct GeluBackward0 : public TraceableFunction {
TORCH_API GeluBackward0() = default;
#else
struct TORCH_API GeluBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GeluBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::string approximate;
SavedVariable self_;
};
#ifdef _WIN32
struct GeluBackwardBackward0 : public TraceableFunction {
TORCH_API GeluBackwardBackward0() = default;
#else
struct TORCH_API GeluBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GeluBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::string approximate;
SavedVariable grad_output_;
SavedVariable self_;
};
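// XBackwardBackward0 nodes implement double backward: they are the autograd
// nodes of a backward op itself, which is why GeluBackwardBackward0 captures
// grad_output_ as one of its own differentiable inputs.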
#ifdef _WIN32
struct GluBackward0 : public TraceableFunction {
TORCH_API GluBackward0() = default;
#else
struct TORCH_API GluBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GluBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
};
#ifdef _WIN32
struct HardshrinkBackward0 : public TraceableFunction {
TORCH_API HardshrinkBackward0() = default;
#else
struct TORCH_API HardshrinkBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "HardshrinkBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar lambd;
SavedVariable self_;
};
#ifdef _WIN32
struct HardshrinkBackwardBackward0 : public TraceableFunction {
TORCH_API HardshrinkBackwardBackward0() = default;
#else
struct TORCH_API HardshrinkBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "HardshrinkBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar lambd;
SavedVariable self_;
};
#ifdef _WIN32
struct HardtanhBackward0 : public TraceableFunction {
TORCH_API HardtanhBackward0() = default;
#else
struct TORCH_API HardtanhBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "HardtanhBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar max_val;
at::Scalar min_val;
SavedVariable self_;
};
#ifdef _WIN32
struct LeakyReluBackward0 : public TraceableFunction {
TORCH_API LeakyReluBackward0() = default;
#else
struct TORCH_API LeakyReluBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LeakyReluBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar negative_slope;
SavedVariable self_;
};
#ifdef _WIN32
struct LeakyReluBackward1 : public TraceableFunction {
TORCH_API LeakyReluBackward1() = default;
#else
struct TORCH_API LeakyReluBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LeakyReluBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar negative_slope;
SavedVariable result_;
};
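// `LeakyReluBackward0` / `LeakyReluBackward1` illustrate the saved-input vs.
// saved-output split used throughout this file: the `*Backward0` variant
// keeps the forward input (`self_`) and derives the gradient mask from it,
// while the `*Backward1` variant, generated for the in-place overload where
// the input has been overwritten, keeps `result_` instead and reconstructs
// the mask from the output. The same pairing appears above for
// `EluBackward1` and `CeluBackward1`.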
#ifdef _WIN32
struct LogSigmoidBackward0 : public TraceableFunction {
TORCH_API LogSigmoidBackward0() = default;
#else
struct TORCH_API LogSigmoidBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LogSigmoidBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
buffer_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable buffer_;
};
#ifdef _WIN32
struct LogSoftmaxBackward0 : public TraceableFunction {
TORCH_API LogSoftmaxBackward0() = default;
#else
struct TORCH_API LogSoftmaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LogSoftmaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::ScalarType self_scalar_type;
SavedVariable result_;
};
#ifdef _WIN32
struct SparseLogSoftmaxBackward0 : public TraceableFunction {
TORCH_API SparseLogSoftmaxBackward0() = default;
#else
struct TORCH_API SparseLogSoftmaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SparseLogSoftmaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct MaskedSoftmaxBackward0 : public TraceableFunction {
TORCH_API MaskedSoftmaxBackward0() = default;
#else
struct TORCH_API MaskedSoftmaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaskedSoftmaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mask_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
::std::optional<int64_t> dim;
SavedVariable mask_;
SavedVariable result_;
};
#ifdef _WIN32
struct PreluKernelBackward0 : public TraceableFunction {
TORCH_API PreluKernelBackward0() = default;
#else
struct TORCH_API PreluKernelBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PreluKernelBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable weight_;
};
#ifdef _WIN32
struct PreluKernelBackwardBackward0 : public TraceableFunction {
TORCH_API PreluKernelBackwardBackward0() = default;
#else
struct TORCH_API PreluKernelBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PreluKernelBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_output_;
at::TensorOptions grad_output_options;
SavedVariable self_;
torch::autograd::generated::TypeAndSize self_info;
at::TensorOptions self_options;
SavedVariable weight_;
at::TensorOptions weight_options;
};
#ifdef _WIN32
struct RreluWithNoiseBackward0 : public TraceableFunction {
TORCH_API RreluWithNoiseBackward0() = default;
#else
struct TORCH_API RreluWithNoiseBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RreluWithNoiseBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
noise_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar lower;
SavedVariable noise_;
SavedVariable self_;
bool training;
at::Scalar upper;
};
#ifdef _WIN32
struct RreluWithNoiseBackward1 : public TraceableFunction {
TORCH_API RreluWithNoiseBackward1() = default;
#else
struct TORCH_API RreluWithNoiseBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RreluWithNoiseBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
noise_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar lower;
SavedVariable noise_;
bool training;
at::Scalar upper;
SavedVariable result_;
};
#ifdef _WIN32
struct RreluWithNoiseFunctionalBackward0 : public TraceableFunction {
TORCH_API RreluWithNoiseFunctionalBackward0() = default;
#else
struct TORCH_API RreluWithNoiseFunctionalBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RreluWithNoiseFunctionalBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
noise_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar lower;
SavedVariable noise_;
SavedVariable self_;
bool training;
at::Scalar upper;
};
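// All three rrelu_with_noise variants save `noise_`: the random slopes drawn
// in the forward pass must be reused verbatim by the backward, so they cannot
// be recomputed. The `training` flag together with (`lower`, `upper`) selects
// between the stochastic path and the deterministic eval-mode slope of
// (lower + upper) / 2.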
#ifdef _WIN32
struct SoftmaxBackward0 : public TraceableFunction {
TORCH_API SoftmaxBackward0() = default;
#else
struct TORCH_API SoftmaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SoftmaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::ScalarType self_scalar_type;
SavedVariable result_;
};
#ifdef _WIN32
struct SparseSoftmaxBackward0 : public TraceableFunction {
TORCH_API SparseSoftmaxBackward0() = default;
#else
struct TORCH_API SparseSoftmaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SparseSoftmaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
SavedVariable result_;
};
#ifdef _WIN32
struct SparseSparseMatmulBackward0 : public TraceableFunction {
TORCH_API SparseSparseMatmulBackward0() = default;
#else
struct TORCH_API SparseSparseMatmulBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SparseSparseMatmulBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
SavedVariable self_;
};
#ifdef _WIN32
struct SoftplusBackward0 : public TraceableFunction {
TORCH_API SoftplusBackward0() = default;
#else
struct TORCH_API SoftplusBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SoftplusBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar beta;
SavedVariable self_;
at::Scalar threshold;
};
#ifdef _WIN32
struct SoftshrinkBackward0 : public TraceableFunction {
TORCH_API SoftshrinkBackward0() = default;
#else
struct TORCH_API SoftshrinkBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SoftshrinkBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar lambd;
SavedVariable self_;
};
#ifdef _WIN32
struct ThresholdBackward0 : public TraceableFunction {
TORCH_API ThresholdBackward0() = default;
#else
struct TORCH_API ThresholdBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ThresholdBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
at::Scalar threshold;
};
#ifdef _WIN32
struct ThresholdBackward1 : public TraceableFunction {
TORCH_API ThresholdBackward1() = default;
#else
struct TORCH_API ThresholdBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ThresholdBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
at::Scalar threshold;
};
#ifdef _WIN32
struct ReflectionPad1DBackward0 : public TraceableFunction {
TORCH_API ReflectionPad1DBackward0() = default;
#else
struct TORCH_API ReflectionPad1DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReflectionPad1DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
SavedVariable self_;
};
#ifdef _WIN32
struct ReflectionPad2DBackward0 : public TraceableFunction {
TORCH_API ReflectionPad2DBackward0() = default;
#else
struct TORCH_API ReflectionPad2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReflectionPad2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
SavedVariable self_;
};
#ifdef _WIN32
struct ReflectionPad3DBackward0 : public TraceableFunction {
TORCH_API ReflectionPad3DBackward0() = default;
#else
struct TORCH_API ReflectionPad3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReflectionPad3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
SavedVariable self_;
};
#ifdef _WIN32
struct ReplicationPad1DBackward0 : public TraceableFunction {
TORCH_API ReplicationPad1DBackward0() = default;
#else
struct TORCH_API ReplicationPad1DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReplicationPad1DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
SavedVariable self_;
};
#ifdef _WIN32
struct ReplicationPad2DBackward0 : public TraceableFunction {
TORCH_API ReplicationPad2DBackward0() = default;
#else
struct TORCH_API ReplicationPad2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReplicationPad2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
SavedVariable self_;
};
#ifdef _WIN32
struct ReplicationPad3DBackward0 : public TraceableFunction {
TORCH_API ReplicationPad3DBackward0() = default;
#else
struct TORCH_API ReplicationPad3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReplicationPad3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
SavedVariable self_;
};
#ifdef _WIN32
struct UpsampleLinear1DBackward0 : public TraceableFunction {
TORCH_API UpsampleLinear1DBackward0() = default;
#else
struct TORCH_API UpsampleLinear1DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleLinear1DBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales;
std::vector<c10::SymInt> self_sym_sizes;
};
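// The upsample nodes in this stretch save no tensors at all (note the empty
// `release_variables()`): their backwards only need the output size, the
// optional scale factors, and the symbolic input sizes (`self_sym_sizes`) to
// route gradients back, and the interpolation weights are recomputed from
// those.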
#ifdef _WIN32
struct UpsampleBilinear2DBackward0 : public TraceableFunction {
TORCH_API UpsampleBilinear2DBackward0() = default;
#else
struct TORCH_API UpsampleBilinear2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleBilinear2DBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UpsampleBilinear2DAaBackward0 : public TraceableFunction {
TORCH_API UpsampleBilinear2DAaBackward0() = default;
#else
struct TORCH_API UpsampleBilinear2DAaBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleBilinear2DAaBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UpsampleBicubic2DBackward0 : public TraceableFunction {
TORCH_API UpsampleBicubic2DBackward0() = default;
#else
struct TORCH_API UpsampleBicubic2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleBicubic2DBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UpsampleBicubic2DAaBackward0 : public TraceableFunction {
TORCH_API UpsampleBicubic2DAaBackward0() = default;
#else
struct TORCH_API UpsampleBicubic2DAaBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleBicubic2DAaBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UpsampleTrilinear3DBackward0 : public TraceableFunction {
TORCH_API UpsampleTrilinear3DBackward0() = default;
#else
struct TORCH_API UpsampleTrilinear3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleTrilinear3DBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_d;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UpsampleNearest1DBackward0 : public TraceableFunction {
TORCH_API UpsampleNearest1DBackward0() = default;
#else
struct TORCH_API UpsampleNearest1DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearest1DBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UpsampleNearestExact1DBackward0 : public TraceableFunction {
TORCH_API UpsampleNearestExact1DBackward0() = default;
#else
struct TORCH_API UpsampleNearestExact1DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearestExact1DBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UpsampleNearest2DBackward0 : public TraceableFunction {
TORCH_API UpsampleNearest2DBackward0() = default;
#else
struct TORCH_API UpsampleNearest2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearest2DBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UpsampleNearestExact2DBackward0 : public TraceableFunction {
TORCH_API UpsampleNearestExact2DBackward0() = default;
#else
struct TORCH_API UpsampleNearestExact2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearestExact2DBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UpsampleNearest3DBackward0 : public TraceableFunction {
TORCH_API UpsampleNearest3DBackward0() = default;
#else
struct TORCH_API UpsampleNearest3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearest3DBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_d;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct UpsampleNearestExact3DBackward0 : public TraceableFunction {
TORCH_API UpsampleNearestExact3DBackward0() = default;
#else
struct TORCH_API UpsampleNearestExact3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearestExact3DBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_d;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct PixelShuffleBackward0 : public TraceableFunction {
TORCH_API PixelShuffleBackward0() = default;
#else
struct TORCH_API PixelShuffleBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PixelShuffleBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t upscale_factor = 0;
};
#ifdef _WIN32
struct PixelUnshuffleBackward0 : public TraceableFunction {
TORCH_API PixelUnshuffleBackward0() = default;
#else
struct TORCH_API PixelUnshuffleBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PixelUnshuffleBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t downscale_factor = 0;
};
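// pixel_shuffle and pixel_unshuffle are pure rearrangements that invert each
// other, so their backward nodes carry only the integer factor and no saved
// tensors. As a hedged sketch (hypothetical standalone code, not part of this
// generated header), the two gradients reduce to:
//
//   at::Tensor grad_self = at::pixel_unshuffle(grad_output, upscale_factor);
//   at::Tensor grad_self = at::pixel_shuffle(grad_output, downscale_factor);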
#ifdef _WIN32
struct ChannelShuffleBackward0 : public TraceableFunction {
TORCH_API ChannelShuffleBackward0() = default;
#else
struct TORCH_API ChannelShuffleBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ChannelShuffleBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::SymInt groups;
};
#ifdef _WIN32
struct AdaptiveAvgPool2DBackward0 : public TraceableFunction {
TORCH_API AdaptiveAvgPool2DBackward0() = default;
#else
struct TORCH_API AdaptiveAvgPool2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AdaptiveAvgPool2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct AdaptiveAvgPool3DBackward0 : public TraceableFunction {
TORCH_API AdaptiveAvgPool3DBackward0() = default;
#else
struct TORCH_API AdaptiveAvgPool3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AdaptiveAvgPool3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct AdaptiveMaxPool2DBackward0 : public TraceableFunction {
TORCH_API AdaptiveMaxPool2DBackward0() = default;
#else
struct TORCH_API AdaptiveMaxPool2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AdaptiveMaxPool2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result1_;
};
#ifdef _WIN32
struct AdaptiveMaxPool3DBackward0 : public TraceableFunction {
TORCH_API AdaptiveMaxPool3DBackward0() = default;
#else
struct TORCH_API AdaptiveMaxPool3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AdaptiveMaxPool3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable result1_;
};
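// The adaptive max-pool nodes save the forward input (`self_`) and the
// second forward output (`result1_`, the argmax indices), letting the
// backward scatter incoming gradients straight to the winning locations
// instead of recomputing the pooling; the fractional max-pool nodes further
// below follow the same scheme.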
#ifdef _WIN32
struct AvgPool2DBackward0 : public TraceableFunction {
TORCH_API AvgPool2DBackward0() = default;
#else
struct TORCH_API AvgPool2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AvgPool2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool ceil_mode;
bool count_include_pad;
::std::optional<int64_t> divisor_override;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
SavedVariable self_;
std::vector<int64_t> stride;
};
#ifdef _WIN32
struct AvgPool3DBackward0 : public TraceableFunction {
TORCH_API AvgPool3DBackward0() = default;
#else
struct TORCH_API AvgPool3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AvgPool3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool ceil_mode;
bool count_include_pad;
::std::optional<int64_t> divisor_override;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
SavedVariable self_;
std::vector<int64_t> stride;
};
#ifdef _WIN32
struct FractionalMaxPool2DBackward0 : public TraceableFunction {
TORCH_API FractionalMaxPool2DBackward0() = default;
#else
struct TORCH_API FractionalMaxPool2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FractionalMaxPool2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> kernel_size;
std::vector<int64_t> output_size;
SavedVariable self_;
SavedVariable result1_;
};
#ifdef _WIN32
struct FractionalMaxPool3DBackward0 : public TraceableFunction {
TORCH_API FractionalMaxPool3DBackward0() = default;
#else
struct TORCH_API FractionalMaxPool3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FractionalMaxPool3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> kernel_size;
std::vector<int64_t> output_size;
SavedVariable self_;
SavedVariable result1_;
};
#ifdef _WIN32
struct LinearBackward0 : public TraceableFunction {
TORCH_API LinearBackward0() = default;
#else
struct TORCH_API LinearBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinearBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable input_;
SavedVariable weight_;
};
#ifdef _WIN32
struct LinearBackwardBackward0 : public TraceableFunction {
TORCH_API LinearBackwardBackward0() = default;
#else
struct TORCH_API LinearBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LinearBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_output_;
SavedVariable self_;
SavedVariable weight_;
};
#ifdef _WIN32
struct MaxPool2DBackward0 : public TraceableFunction {
TORCH_API MaxPool2DBackward0() = default;
#else
struct TORCH_API MaxPool2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaxPool2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool ceil_mode;
std::vector<int64_t> dilation;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
SavedVariable self_;
std::vector<int64_t> stride;
};
#ifdef _WIN32
struct MpsConvolutionBackward0 : public TraceableFunction {
TORCH_API MpsConvolutionBackward0() = default;
#else
struct TORCH_API MpsConvolutionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MpsConvolutionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct MpsConvolutionBackwardBackward0 : public TraceableFunction {
TORCH_API MpsConvolutionBackwardBackward0() = default;
#else
struct TORCH_API MpsConvolutionBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MpsConvolutionBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> dilation;
SavedVariable grad_output_;
c10::SymInt groups;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct MaxPool2DWithIndicesBackward0 : public TraceableFunction {
TORCH_API MaxPool2DWithIndicesBackward0() = default;
#else
struct TORCH_API MaxPool2DWithIndicesBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaxPool2DWithIndicesBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool ceil_mode;
std::vector<int64_t> dilation;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
SavedVariable self_;
std::vector<int64_t> stride;
SavedVariable result1_;
};
#ifdef _WIN32
struct MaxPool3DWithIndicesBackward0 : public TraceableFunction {
TORCH_API MaxPool3DWithIndicesBackward0() = default;
#else
struct TORCH_API MaxPool3DWithIndicesBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaxPool3DWithIndicesBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool ceil_mode;
std::vector<int64_t> dilation;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
SavedVariable self_;
std::vector<int64_t> stride;
SavedVariable result1_;
};
#ifdef _WIN32
struct MaxUnpool2DBackward0 : public TraceableFunction {
TORCH_API MaxUnpool2DBackward0() = default;
#else
struct TORCH_API MaxUnpool2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaxUnpool2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
};
#ifdef _WIN32
struct MaxUnpool3DBackward0 : public TraceableFunction {
TORCH_API MaxUnpool3DBackward0() = default;
#else
struct TORCH_API MaxUnpool3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaxUnpool3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
};
#ifdef _WIN32
struct ConvolutionBackward0 : public TraceableFunction {
TORCH_API ConvolutionBackward0() = default;
#else
struct TORCH_API ConvolutionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConvolutionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
SavedVariable input_;
std::vector<c10::SymInt> output_padding;
std::vector<c10::SymInt> padding;
std::vector<c10::SymInt> stride;
bool transposed;
SavedVariable weight_;
};
#ifdef _WIN32
struct ConvolutionBackward1 : public TraceableFunction {
TORCH_API ConvolutionBackward1() = default;
#else
struct TORCH_API ConvolutionBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConvolutionBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
SavedVariable input_;
std::vector<c10::SymInt> output_padding;
std::vector<c10::SymInt> padding;
std::vector<c10::SymInt> stride;
bool transposed;
SavedVariable weight_;
};
#ifdef _WIN32
struct ConvolutionBackwardBackward0 : public TraceableFunction {
TORCH_API ConvolutionBackwardBackward0() = default;
#else
struct TORCH_API ConvolutionBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConvolutionBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
input_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> dilation;
SavedVariable grad_output_;
c10::SymInt groups;
SavedVariable input_;
std::vector<c10::SymInt> output_padding;
std::vector<c10::SymInt> padding;
std::vector<c10::SymInt> stride;
bool transposed;
SavedVariable weight_;
};
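// The convolution family records its full hyperparameter set (stride,
// padding, dilation, output_padding, groups, transposed) as `c10::SymInt`s so
// the nodes stay valid under symbolic shape tracing. First-order nodes save
// `input_` and `weight_`; the double-backward nodes additionally save
// `grad_output_`, since differentiating the backward depends on it.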
#ifdef _WIN32
struct ConvolutionOverrideableBackward0 : public TraceableFunction {
TORCH_API ConvolutionOverrideableBackward0() = default;
#else
struct TORCH_API ConvolutionOverrideableBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConvolutionOverrideableBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
SavedVariable input_;
std::vector<c10::SymInt> output_padding;
std::vector<c10::SymInt> padding;
std::vector<c10::SymInt> stride;
bool transposed;
SavedVariable weight_;
};
#ifdef _WIN32
struct ConvolutionBackwardOverrideableBackward0 : public TraceableFunction {
TORCH_API ConvolutionBackwardOverrideableBackward0() = default;
#else
struct TORCH_API ConvolutionBackwardOverrideableBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConvolutionBackwardOverrideableBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
input_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> dilation;
SavedVariable grad_output_;
c10::SymInt groups;
SavedVariable input_;
std::vector<c10::SymInt> output_padding;
std::vector<c10::SymInt> padding;
std::vector<c10::SymInt> stride;
bool transposed;
SavedVariable weight_;
};
#ifdef _WIN32
struct SlowConvTranspose2DBackward0 : public TraceableFunction {
TORCH_API SlowConvTranspose2DBackward0() = default;
#else
struct TORCH_API SlowConvTranspose2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SlowConvTranspose2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
std::vector<c10::SymInt> output_padding;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct SlowConvTranspose3DBackward0 : public TraceableFunction {
TORCH_API SlowConvTranspose3DBackward0() = default;
#else
struct TORCH_API SlowConvTranspose3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SlowConvTranspose3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
std::vector<c10::SymInt> output_padding;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct SlowConv2DBackward0 : public TraceableFunction {
TORCH_API SlowConv2DBackward0() = default;
#else
struct TORCH_API SlowConv2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SlowConv2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> kernel_size;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct SlowConv2DBackwardBackward0 : public TraceableFunction {
TORCH_API SlowConv2DBackwardBackward0() = default;
#else
struct TORCH_API SlowConv2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SlowConv2DBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_output_;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct ConvDepthwise2DBackward0 : public TraceableFunction {
TORCH_API ConvDepthwise2DBackward0() = default;
#else
struct TORCH_API ConvDepthwise2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConvDepthwise2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct ConvDepthwise3DBackward0 : public TraceableFunction {
TORCH_API ConvDepthwise3DBackward0() = default;
#else
struct TORCH_API ConvDepthwise3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConvDepthwise3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct SlowConv3DBackward0 : public TraceableFunction {
TORCH_API SlowConv3DBackward0() = default;
#else
struct TORCH_API SlowConv3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SlowConv3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct SlowConvDilated2DBackward0 : public TraceableFunction {
TORCH_API SlowConvDilated2DBackward0() = default;
#else
struct TORCH_API SlowConvDilated2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SlowConvDilated2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct SlowConvDilated3DBackward0 : public TraceableFunction {
TORCH_API SlowConvDilated3DBackward0() = default;
#else
struct TORCH_API SlowConvDilated3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SlowConvDilated3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct Col2ImBackward0 : public TraceableFunction {
TORCH_API Col2ImBackward0() = default;
#else
struct TORCH_API Col2ImBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Col2ImBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dilation;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
std::vector<int64_t> stride;
};
#ifdef _WIN32
struct Im2ColBackward0 : public TraceableFunction {
TORCH_API Im2ColBackward0() = default;
#else
struct TORCH_API Im2ColBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "Im2ColBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dilation;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
c10::SymInt self_sym_argsize_minus_1;
c10::SymInt self_sym_argsize_minus_2;
std::vector<int64_t> stride;
};
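// im2col and col2im are adjoint rearrangements: the gradient of im2col is
// col2im over the original spatial extent (hence the saved
// `self_sym_argsize_minus_1` / `_minus_2`), and the gradient of col2im is
// im2col with the same kernel, stride, padding and dilation, so neither node
// saves tensor data.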
#ifdef _WIN32
struct AdaptiveAvgPool2DBackwardBackward0 : public TraceableFunction {
TORCH_API AdaptiveAvgPool2DBackwardBackward0() = default;
#else
struct TORCH_API AdaptiveAvgPool2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AdaptiveAvgPool2DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::SymInt grad_output_sym_argsize_minus_1;
c10::SymInt grad_output_sym_argsize_minus_2;
torch::autograd::generated::TypeAndSize self_info;
};
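// Note: TypeAndSize stores only a tensor's metadata (sizes plus options),
// enough to materialize a zero-filled gradient of the right shape and dtype
// on demand without keeping the tensor itself alive; hence the empty
// release_variables() on nodes that save nothing else.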
#ifdef _WIN32
struct AdaptiveAvgPool3DBackwardBackward0 : public TraceableFunction {
TORCH_API AdaptiveAvgPool3DBackwardBackward0() = default;
#else
struct TORCH_API AdaptiveAvgPool3DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AdaptiveAvgPool3DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::SymInt grad_output_sym_argsize_minus_1;
c10::SymInt grad_output_sym_argsize_minus_2;
c10::SymInt grad_output_sym_argsize_minus_3;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct AdaptiveMaxPool2DBackwardBackward0 : public TraceableFunction {
TORCH_API AdaptiveMaxPool2DBackwardBackward0() = default;
#else
struct TORCH_API AdaptiveMaxPool2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AdaptiveMaxPool2DBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct AdaptiveMaxPool3DBackwardBackward0 : public TraceableFunction {
TORCH_API AdaptiveMaxPool3DBackwardBackward0() = default;
#else
struct TORCH_API AdaptiveMaxPool3DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AdaptiveMaxPool3DBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct AvgPool2DBackwardBackward0 : public TraceableFunction {
TORCH_API AvgPool2DBackwardBackward0() = default;
#else
struct TORCH_API AvgPool2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AvgPool2DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool ceil_mode;
bool count_include_pad;
::std::optional<int64_t> divisor_override;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
torch::autograd::generated::TypeAndSize self_info;
std::vector<int64_t> stride;
};
#ifdef _WIN32
struct AvgPool3DBackwardBackward0 : public TraceableFunction {
TORCH_API AvgPool3DBackwardBackward0() = default;
#else
struct TORCH_API AvgPool3DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AvgPool3DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool ceil_mode;
bool count_include_pad;
::std::optional<int64_t> divisor_override;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
torch::autograd::generated::TypeAndSize self_info;
std::vector<int64_t> stride;
};
#ifdef _WIN32
struct EluBackwardBackward0 : public TraceableFunction {
TORCH_API EluBackwardBackward0() = default;
#else
struct TORCH_API EluBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EluBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_or_result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
SavedVariable grad_output_;
at::Scalar input_scale;
bool is_result;
at::Scalar scale;
SavedVariable self_or_result_;
};
#ifdef _WIN32
struct FractionalMaxPool2DBackwardBackward0 : public TraceableFunction {
TORCH_API FractionalMaxPool2DBackwardBackward0() = default;
#else
struct TORCH_API FractionalMaxPool2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FractionalMaxPool2DBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct FractionalMaxPool3DBackwardBackward0 : public TraceableFunction {
TORCH_API FractionalMaxPool3DBackwardBackward0() = default;
#else
struct TORCH_API FractionalMaxPool3DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FractionalMaxPool3DBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct GluBackwardBackward0 : public TraceableFunction {
TORCH_API GluBackwardBackward0() = default;
#else
struct TORCH_API GluBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "GluBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable grad_output_;
SavedVariable self_;
};
#ifdef _WIN32
struct HardtanhBackwardBackward0 : public TraceableFunction {
TORCH_API HardtanhBackwardBackward0() = default;
#else
struct TORCH_API HardtanhBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "HardtanhBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar max_val;
at::Scalar min_val;
SavedVariable self_;
};
#ifdef _WIN32
struct LogSigmoidBackwardBackward0 : public TraceableFunction {
TORCH_API LogSigmoidBackwardBackward0() = default;
#else
struct TORCH_API LogSigmoidBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LogSigmoidBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
buffer_.reset_data();
grad_output_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable buffer_;
SavedVariable grad_output_;
SavedVariable self_;
};
#ifdef _WIN32
struct LogSoftmaxBackwardDataBackward0 : public TraceableFunction {
TORCH_API LogSoftmaxBackwardDataBackward0() = default;
#else
struct TORCH_API LogSoftmaxBackwardDataBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LogSoftmaxBackwardDataBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
output_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable grad_output_;
SavedVariable output_;
};
#ifdef _WIN32
struct LeakyReluBackwardBackward0 : public TraceableFunction {
TORCH_API LeakyReluBackwardBackward0() = default;
#else
struct TORCH_API LeakyReluBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LeakyReluBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar negative_slope;
SavedVariable self_;
};
#ifdef _WIN32
struct MaxPool2DBackwardBackward0 : public TraceableFunction {
TORCH_API MaxPool2DBackwardBackward0() = default;
#else
struct TORCH_API MaxPool2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaxPool2DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct MaxPool2DWithIndicesBackwardBackward0 : public TraceableFunction {
TORCH_API MaxPool2DWithIndicesBackwardBackward0() = default;
#else
struct TORCH_API MaxPool2DWithIndicesBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaxPool2DWithIndicesBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct MaxPool3DWithIndicesBackwardBackward0 : public TraceableFunction {
TORCH_API MaxPool3DWithIndicesBackwardBackward0() = default;
#else
struct TORCH_API MaxPool3DWithIndicesBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MaxPool3DWithIndicesBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
indices_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable indices_;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct MseLossBackwardBackward0 : public TraceableFunction {
TORCH_API MseLossBackwardBackward0() = default;
#else
struct TORCH_API MseLossBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MseLossBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
target_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_output_;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
};
#ifdef _WIN32
struct NllLossBackwardBackward0 : public TraceableFunction {
TORCH_API NllLossBackwardBackward0() = default;
#else
struct TORCH_API NllLossBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NllLossBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
target_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::SymInt ignore_index;
int64_t reduction = 0;
SavedVariable target_;
SavedVariable weight_;
};
#ifdef _WIN32
struct NllLoss2DBackwardBackward0 : public TraceableFunction {
TORCH_API NllLoss2DBackwardBackward0() = default;
#else
struct TORCH_API NllLoss2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NllLoss2DBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
target_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::SymInt ignore_index;
int64_t reduction = 0;
SavedVariable target_;
SavedVariable weight_;
};
#ifdef _WIN32
struct RreluWithNoiseBackwardBackward0 : public TraceableFunction {
TORCH_API RreluWithNoiseBackwardBackward0() = default;
#else
struct TORCH_API RreluWithNoiseBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "RreluWithNoiseBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
noise_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar lower;
SavedVariable noise_;
SavedVariable self_;
bool training;
at::Scalar upper;
};
#ifdef _WIN32
struct ReflectionPad1DBackwardBackward0 : public TraceableFunction {
TORCH_API ReflectionPad1DBackwardBackward0() = default;
#else
struct TORCH_API ReflectionPad1DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReflectionPad1DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct ReflectionPad2DBackwardBackward0 : public TraceableFunction {
TORCH_API ReflectionPad2DBackwardBackward0() = default;
#else
struct TORCH_API ReflectionPad2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReflectionPad2DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct ReflectionPad3DBackwardBackward0 : public TraceableFunction {
TORCH_API ReflectionPad3DBackwardBackward0() = default;
#else
struct TORCH_API ReflectionPad3DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReflectionPad3DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct ReplicationPad1DBackwardBackward0 : public TraceableFunction {
TORCH_API ReplicationPad1DBackwardBackward0() = default;
#else
struct TORCH_API ReplicationPad1DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReplicationPad1DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct ReplicationPad2DBackwardBackward0 : public TraceableFunction {
TORCH_API ReplicationPad2DBackwardBackward0() = default;
#else
struct TORCH_API ReplicationPad2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReplicationPad2DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct ReplicationPad3DBackwardBackward0 : public TraceableFunction {
TORCH_API ReplicationPad3DBackwardBackward0() = default;
#else
struct TORCH_API ReplicationPad3DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReplicationPad3DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padding;
torch::autograd::generated::TypeAndSize self_info;
};
#ifdef _WIN32
struct SparseSampledAddmmBackward0 : public TraceableFunction {
TORCH_API SparseSampledAddmmBackward0() = default;
#else
struct TORCH_API SparseSampledAddmmBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SparseSampledAddmmBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
mat1_.reset_data();
mat2_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
at::Scalar beta;
SavedVariable mat1_;
SavedVariable mat2_;
SavedVariable self_;
};
#ifdef _WIN32
struct SparseMmReduceImplBackward0 : public TraceableFunction {
TORCH_API SparseMmReduceImplBackward0() = default;
#else
struct TORCH_API SparseMmReduceImplBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SparseMmReduceImplBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
std::string reduce;
SavedVariable self_;
SavedVariable result1_;
};
#ifdef _WIN32
struct SmoothL1LossBackwardBackward0 : public TraceableFunction {
TORCH_API SmoothL1LossBackwardBackward0() = default;
#else
struct TORCH_API SmoothL1LossBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SmoothL1LossBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
target_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double beta;
SavedVariable grad_output_;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
};
#ifdef _WIN32
struct HuberLossBackwardBackward0 : public TraceableFunction {
TORCH_API HuberLossBackwardBackward0() = default;
#else
struct TORCH_API HuberLossBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "HuberLossBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
target_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double delta;
SavedVariable grad_output_;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
};
#ifdef _WIN32
struct SoftplusBackwardBackward0 : public TraceableFunction {
TORCH_API SoftplusBackwardBackward0() = default;
#else
struct TORCH_API SoftplusBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SoftplusBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar beta;
SavedVariable grad_output_;
SavedVariable self_;
at::Scalar threshold;
};
#ifdef _WIN32
struct SoftmaxBackwardDataBackward0 : public TraceableFunction {
TORCH_API SoftmaxBackwardDataBackward0() = default;
#else
struct TORCH_API SoftmaxBackwardDataBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SoftmaxBackwardDataBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
output_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable grad_output_;
at::ScalarType input_dtype;
SavedVariable output_;
};
#ifdef _WIN32
struct SoftMarginLossBackwardBackward0 : public TraceableFunction {
TORCH_API SoftMarginLossBackwardBackward0() = default;
#else
struct TORCH_API SoftMarginLossBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SoftMarginLossBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
self_.reset_data();
target_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_output_;
int64_t reduction = 0;
SavedVariable self_;
SavedVariable target_;
};
#ifdef _WIN32
struct SoftshrinkBackwardBackward0 : public TraceableFunction {
TORCH_API SoftshrinkBackwardBackward0() = default;
#else
struct TORCH_API SoftshrinkBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SoftshrinkBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar lambd;
SavedVariable self_;
};
#ifdef _WIN32
struct ThresholdBackwardBackward0 : public TraceableFunction {
TORCH_API ThresholdBackwardBackward0() = default;
#else
struct TORCH_API ThresholdBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ThresholdBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
at::Scalar threshold;
};
#ifdef _WIN32
struct UpsampleLinear1DBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleLinear1DBackwardBackward0() = default;
#else
struct TORCH_API UpsampleLinear1DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleLinear1DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales;
};
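// Note: across the upsample*BackwardBackward nodes, output_size is the
// symbolic target size and the optional scales / scales_d / scales_h /
// scales_w fields are the user-provided scale factors, presumably
// std::nullopt when the caller passed explicit sizes instead of scales.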
#ifdef _WIN32
struct UpsampleBilinear2DBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleBilinear2DBackwardBackward0() = default;
#else
struct TORCH_API UpsampleBilinear2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleBilinear2DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
};
#ifdef _WIN32
struct UpsampleBilinear2DAaBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleBilinear2DAaBackwardBackward0() = default;
#else
struct TORCH_API UpsampleBilinear2DAaBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleBilinear2DAaBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
};
#ifdef _WIN32
struct UpsampleBicubic2DBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleBicubic2DBackwardBackward0() = default;
#else
struct TORCH_API UpsampleBicubic2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleBicubic2DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
};
#ifdef _WIN32
struct UpsampleBicubic2DAaBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleBicubic2DAaBackwardBackward0() = default;
#else
struct TORCH_API UpsampleBicubic2DAaBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleBicubic2DAaBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
};
#ifdef _WIN32
struct UpsampleTrilinear3DBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleTrilinear3DBackwardBackward0() = default;
#else
struct TORCH_API UpsampleTrilinear3DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleTrilinear3DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool align_corners;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_d;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
};
#ifdef _WIN32
struct UpsampleNearest1DBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleNearest1DBackwardBackward0() = default;
#else
struct TORCH_API UpsampleNearest1DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearest1DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales;
};
#ifdef _WIN32
struct UpsampleNearestExact1DBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleNearestExact1DBackwardBackward0() = default;
#else
struct TORCH_API UpsampleNearestExact1DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearestExact1DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales;
};
#ifdef _WIN32
struct UpsampleNearest2DBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleNearest2DBackwardBackward0() = default;
#else
struct TORCH_API UpsampleNearest2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearest2DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
};
#ifdef _WIN32
struct UpsampleNearestExact2DBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleNearestExact2DBackwardBackward0() = default;
#else
struct TORCH_API UpsampleNearestExact2DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearestExact2DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
};
#ifdef _WIN32
struct UpsampleNearest3DBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleNearest3DBackwardBackward0() = default;
#else
struct TORCH_API UpsampleNearest3DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearest3DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_d;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
};
#ifdef _WIN32
struct UpsampleNearestExact3DBackwardBackward0 : public TraceableFunction {
TORCH_API UpsampleNearestExact3DBackwardBackward0() = default;
#else
struct TORCH_API UpsampleNearestExact3DBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UpsampleNearestExact3DBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> output_size;
::std::optional<double> scales_d;
::std::optional<double> scales_h;
::std::optional<double> scales_w;
};
#ifdef _WIN32
struct SigmoidBackwardBackward0 : public TraceableFunction {
TORCH_API SigmoidBackwardBackward0() = default;
#else
struct TORCH_API SigmoidBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SigmoidBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
output_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_output_;
SavedVariable output_;
};
#ifdef _WIN32
struct TanhBackwardBackward0 : public TraceableFunction {
TORCH_API TanhBackwardBackward0() = default;
#else
struct TORCH_API TanhBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TanhBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
output_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grad_output_;
SavedVariable output_;
};
#ifdef _WIN32
struct CudnnCtcLossBackward0 : public TraceableFunction {
TORCH_API CudnnCtcLossBackward0() = default;
#else
struct TORCH_API CudnnCtcLossBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CudnnCtcLossBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result0_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool zero_infinity;
SavedVariable result0_;
SavedVariable result1_;
};
#ifdef _WIN32
struct CudnnCtcLossBackward1 : public TraceableFunction {
TORCH_API CudnnCtcLossBackward1() = default;
#else
struct TORCH_API CudnnCtcLossBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CudnnCtcLossBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result0_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool zero_infinity;
SavedVariable result0_;
SavedVariable result1_;
};
#ifdef _WIN32
struct CudnnConvolutionTransposeBackward0 : public TraceableFunction {
TORCH_API CudnnConvolutionTransposeBackward0() = default;
#else
struct TORCH_API CudnnConvolutionTransposeBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CudnnConvolutionTransposeBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
std::vector<c10::SymInt> output_padding;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct MpsConvolutionTransposeBackward0 : public TraceableFunction {
TORCH_API MpsConvolutionTransposeBackward0() = default;
#else
struct TORCH_API MpsConvolutionTransposeBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MpsConvolutionTransposeBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
std::vector<c10::SymInt> output_padding;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct CudnnConvolutionBackward0 : public TraceableFunction {
TORCH_API CudnnConvolutionBackward0() = default;
#else
struct TORCH_API CudnnConvolutionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CudnnConvolutionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct CudnnGridSamplerBackward0 : public TraceableFunction {
TORCH_API CudnnGridSamplerBackward0() = default;
#else
struct TORCH_API CudnnGridSamplerBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CudnnGridSamplerBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grid_.reset_data();
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable grid_;
SavedVariable self_;
};
#ifdef _WIN32
struct CudnnAffineGridGeneratorBackward0 : public TraceableFunction {
TORCH_API CudnnAffineGridGeneratorBackward0() = default;
#else
struct TORCH_API CudnnAffineGridGeneratorBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CudnnAffineGridGeneratorBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t C = 0;
int64_t H = 0;
int64_t N = 0;
int64_t W = 0;
};
#ifdef _WIN32
struct CudnnBatchNormBackward0 : public TraceableFunction {
TORCH_API CudnnBatchNormBackward0() = default;
#else
struct TORCH_API CudnnBatchNormBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CudnnBatchNormBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
result3_.reset_data();
}
bool retain_variables = true;
void will_release_variables() override {
retain_variables = false;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double epsilon;
SavedVariable input_;
SavedVariable running_mean_;
SavedVariable running_var_;
bool training;
SavedVariable weight_;
SavedVariable result1_;
SavedVariable result2_;
SavedVariable result3_;
};
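// Note: retain_variables plus will_release_variables() is a less common
// variant: the engine calls will_release_variables() just before
// release_variables(), so apply() can observe whether the saved tensors will
// still be needed afterwards. The cudnn batch-norm and RNN backward formulas
// appear to branch on retain_variables (e.g. cloning a reserve-space buffer
// only when the graph is retained).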
#ifdef _WIN32
struct CudnnBatchNormBackwardBackward0 : public TraceableFunction {
TORCH_API CudnnBatchNormBackwardBackward0() = default;
#else
struct TORCH_API CudnnBatchNormBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CudnnBatchNormBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
input_.reset_data();
reserveSpace_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
save_mean_.reset_data();
save_var_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double epsilon;
SavedVariable grad_output_;
SavedVariable input_;
SavedVariable reserveSpace_;
SavedVariable running_mean_;
SavedVariable running_var_;
SavedVariable save_mean_;
SavedVariable save_var_;
SavedVariable weight_;
};
#ifdef _WIN32
struct NnpackSpatialConvolutionBackward0 : public TraceableFunction {
TORCH_API NnpackSpatialConvolutionBackward0() = default;
#else
struct TORCH_API NnpackSpatialConvolutionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NnpackSpatialConvolutionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
SavedVariable input_;
std::vector<c10::SymInt> padding;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct LstmMpsBackward0 : public TraceableFunction {
TORCH_API LstmMpsBackward0() = default;
#else
struct TORCH_API LstmMpsBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LstmMpsBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
hx_.clear();
hx_released_ = true;
input_.reset_data();
params_.clear();
params_released_ = true;
result3_.reset_data();
result4_.reset_data();
result5_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool batch_first;
bool bidirectional;
double dropout;
bool has_biases;
std::vector<SavedVariable> hx_;
bool hx_released_ = false;
SavedVariable input_;
int64_t num_layers = 0;
std::vector<SavedVariable> params_;
bool params_released_ = false;
bool train;
SavedVariable result3_;
SavedVariable result4_;
SavedVariable result5_;
size_t hx_size_;
size_t params_size_;
};
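// Note: TensorList arguments are saved as std::vector<SavedVariable> with a
// matching *_released_ flag; release_variables() can only clear() the
// vector, so the flag presumably records that unpacking is no longer legal
// (backward through the graph a second time). The *_size_ members keep the
// original list lengths so the right number of gradient slots can still be
// produced after release.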
#ifdef _WIN32
struct CudnnRnnBackward0 : public TraceableFunction {
TORCH_API CudnnRnnBackward0() = default;
#else
struct TORCH_API CudnnRnnBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CudnnRnnBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
cx_.reset_data();
dropout_state_.reset_data();
hx_.reset_data();
input_.reset_data();
weight_.clear();
weight_released_ = true;
result0_.reset_data();
result3_.reset_data();
result4_.reset_data();
}
bool retain_variables = true;
void will_release_variables() override {
retain_variables = false;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool batch_first;
std::vector<c10::SymInt> batch_sizes;
bool bidirectional;
SavedVariable cx_;
double dropout;
SavedVariable dropout_state_;
c10::SymInt hidden_size;
SavedVariable hx_;
SavedVariable input_;
int64_t mode = 0;
int64_t num_layers = 0;
c10::SymInt proj_size;
bool train;
std::vector<SavedVariable> weight_;
bool weight_released_ = false;
int64_t weight_stride0 = 0;
SavedVariable result0_;
SavedVariable result3_;
SavedVariable result4_;
size_t weight_size_;
};
#ifdef _WIN32
struct CudnnRnnBackwardBackward0 : public TraceableFunction {
TORCH_API CudnnRnnBackwardBackward0() = default;
#else
struct TORCH_API CudnnRnnBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "CudnnRnnBackwardBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
size_t weight_size_;
};
#ifdef _WIN32
struct MiopenConvolutionTransposeBackward0 : public TraceableFunction {
TORCH_API MiopenConvolutionTransposeBackward0() = default;
#else
struct TORCH_API MiopenConvolutionTransposeBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MiopenConvolutionTransposeBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
std::vector<c10::SymInt> output_padding;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct MiopenConvolutionBackward0 : public TraceableFunction {
TORCH_API MiopenConvolutionBackward0() = default;
#else
struct TORCH_API MiopenConvolutionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MiopenConvolutionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct MiopenDepthwiseConvolutionBackward0 : public TraceableFunction {
TORCH_API MiopenDepthwiseConvolutionBackward0() = default;
#else
struct TORCH_API MiopenDepthwiseConvolutionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MiopenDepthwiseConvolutionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct MiopenBatchNormBackward0 : public TraceableFunction {
TORCH_API MiopenBatchNormBackward0() = default;
#else
struct TORCH_API MiopenBatchNormBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MiopenBatchNormBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
input_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
weight_.reset_data();
result1_.reset_data();
result2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double epsilon;
SavedVariable input_;
SavedVariable running_mean_;
SavedVariable running_var_;
bool training;
SavedVariable weight_;
SavedVariable result1_;
SavedVariable result2_;
};
#ifdef _WIN32
struct MiopenBatchNormBackwardBackward0 : public TraceableFunction {
TORCH_API MiopenBatchNormBackwardBackward0() = default;
#else
struct TORCH_API MiopenBatchNormBackwardBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MiopenBatchNormBackwardBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
grad_output_.reset_data();
input_.reset_data();
running_mean_.reset_data();
running_var_.reset_data();
save_mean_.reset_data();
save_var_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double epsilon;
SavedVariable grad_output_;
SavedVariable input_;
SavedVariable running_mean_;
SavedVariable running_var_;
SavedVariable save_mean_;
SavedVariable save_var_;
SavedVariable weight_;
};
#ifdef _WIN32
struct MiopenRnnBackward0 : public TraceableFunction {
TORCH_API MiopenRnnBackward0() = default;
#else
struct TORCH_API MiopenRnnBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MiopenRnnBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
cx_.reset_data();
dropout_state_.reset_data();
hx_.reset_data();
input_.reset_data();
weight_.clear();
weight_released_ = true;
result0_.reset_data();
result3_.reset_data();
result4_.reset_data();
}
bool retain_variables = true;
void will_release_variables() override {
retain_variables = false;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool batch_first;
std::vector<int64_t> batch_sizes;
bool bidirectional;
SavedVariable cx_;
double dropout;
SavedVariable dropout_state_;
int64_t hidden_size = 0;
SavedVariable hx_;
SavedVariable input_;
int64_t mode = 0;
int64_t num_layers = 0;
bool train;
std::vector<SavedVariable> weight_;
bool weight_released_ = false;
int64_t weight_stride0 = 0;
SavedVariable result0_;
SavedVariable result3_;
SavedVariable result4_;
size_t weight_size_;
};
#ifdef _WIN32
struct MkldnnRnnLayerBackward0 : public TraceableFunction {
TORCH_API MkldnnRnnLayerBackward0() = default;
#else
struct TORCH_API MkldnnRnnLayerBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MkldnnRnnLayerBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
cx__.reset_data();
hx__.reset_data();
input_.reset_data();
weight0_.reset_data();
weight1_.reset_data();
weight2_.reset_data();
weight3_.reset_data();
result0_.reset_data();
result1_.reset_data();
result2_.reset_data();
result3_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool batch_first;
std::vector<int64_t> batch_sizes;
bool bidirectional;
SavedVariable cx__;
bool has_biases;
int64_t hidden_size = 0;
SavedVariable hx__;
SavedVariable input_;
int64_t mode = 0;
int64_t num_layers = 0;
bool reverse;
bool train;
SavedVariable weight0_;
SavedVariable weight1_;
SavedVariable weight2_;
SavedVariable weight3_;
SavedVariable result0_;
SavedVariable result1_;
SavedVariable result2_;
SavedVariable result3_;
};
#ifdef _WIN32
struct MkldnnConvolutionBackward0 : public TraceableFunction {
TORCH_API MkldnnConvolutionBackward0() = default;
#else
struct TORCH_API MkldnnConvolutionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MkldnnConvolutionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
c10::OptionalArray<c10::SymInt> bias_sym_sizes_opt;
std::vector<c10::SymInt> dilation;
c10::SymInt groups;
std::vector<c10::SymInt> padding;
SavedVariable self_;
std::vector<c10::SymInt> stride;
SavedVariable weight_;
};
#ifdef _WIN32
struct MkldnnLinearBackward0 : public TraceableFunction {
TORCH_API MkldnnLinearBackward0() = default;
#else
struct TORCH_API MkldnnLinearBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MkldnnLinearBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
weight_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
SavedVariable weight_;
};
#ifdef _WIN32
struct MkldnnMaxPool2DBackward0 : public TraceableFunction {
TORCH_API MkldnnMaxPool2DBackward0() = default;
#else
struct TORCH_API MkldnnMaxPool2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MkldnnMaxPool2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool ceil_mode;
std::vector<int64_t> dilation;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
SavedVariable self_;
std::vector<int64_t> stride;
SavedVariable result_;
};
#ifdef _WIN32
struct MkldnnMaxPool3DBackward0 : public TraceableFunction {
TORCH_API MkldnnMaxPool3DBackward0() = default;
#else
struct TORCH_API MkldnnMaxPool3DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MkldnnMaxPool3DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool ceil_mode;
std::vector<int64_t> dilation;
std::vector<int64_t> kernel_size;
std::vector<int64_t> padding;
SavedVariable self_;
std::vector<int64_t> stride;
SavedVariable result_;
};
#ifdef _WIN32
struct MkldnnAdaptiveAvgPool2DBackward0 : public TraceableFunction {
TORCH_API MkldnnAdaptiveAvgPool2DBackward0() = default;
#else
struct TORCH_API MkldnnAdaptiveAvgPool2DBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MkldnnAdaptiveAvgPool2DBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct MkldnnReshapeBackward0 : public TraceableFunction {
TORCH_API MkldnnReshapeBackward0() = default;
#else
struct TORCH_API MkldnnReshapeBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "MkldnnReshapeBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
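// Nested-tensor nodes. Note the split between construction ops, which derive
// from TraceableFunction, and view-style ops (NestedViewFromBufferBackward0,
// NestedViewFromJaggedBackward0, NestedGetValuesBackward0), which derive from
// Node directly because they correspond to differentiable views of their
// input rather than out-of-place results.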
#ifdef _WIN32
struct NestedTensorFromTensorListBackward0 : public TraceableFunction {
TORCH_API NestedTensorFromTensorListBackward0() = default;
#else
struct TORCH_API NestedTensorFromTensorListBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NestedTensorFromTensorListBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
list_.clear();
list_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> list_;
bool list_released_ = false;
size_t list_size_;
};
#ifdef _WIN32
struct NestedTensorFromMaskBackward0 : public TraceableFunction {
TORCH_API NestedTensorFromMaskBackward0() = default;
#else
struct TORCH_API NestedTensorFromMaskBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NestedTensorFromMaskBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> t_sym_sizes;
};
#ifdef _WIN32
struct NestedFromPaddedBackward0 : public TraceableFunction {
TORCH_API NestedFromPaddedBackward0() = default;
#else
struct TORCH_API NestedFromPaddedBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NestedFromPaddedBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
padded_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool fuse_transform_0213;
SavedVariable padded_;
};
#ifdef _WIN32
struct ToPaddedTensorBackward0 : public TraceableFunction {
TORCH_API ToPaddedTensorBackward0() = default;
#else
struct TORCH_API ToPaddedTensorBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ToPaddedTensorBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
at::Layout self_layout;
};
#ifdef _WIN32
struct NestedFromPaddedTensorBackward0 : public TraceableFunction {
TORCH_API NestedFromPaddedTensorBackward0() = default;
#else
struct TORCH_API NestedFromPaddedTensorBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NestedFromPaddedTensorBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> padded_sym_sizes;
};
#ifdef _WIN32
struct NestedViewFromBufferBackward0 : public Node {
TORCH_API NestedViewFromBufferBackward0() = default;
#else
struct TORCH_API NestedViewFromBufferBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NestedViewFromBufferBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct NestedViewFromJaggedBackward0 : public Node {
TORCH_API NestedViewFromJaggedBackward0() = default;
#else
struct TORCH_API NestedViewFromJaggedBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NestedViewFromJaggedBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct NestedGetValuesBackward0 : public Node {
TORCH_API NestedGetValuesBackward0() = default;
#else
struct TORCH_API NestedGetValuesBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NestedGetValuesBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct SafeSoftmaxBackward0 : public TraceableFunction {
TORCH_API SafeSoftmaxBackward0() = default;
#else
struct TORCH_API SafeSoftmaxBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SafeSoftmaxBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::ScalarType self_scalar_type;
SavedVariable result_;
};
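// Fused scaled-dot-product attention backwards. Besides query/key/value and
// the forward outputs (output_, logsumexp), each variant saves its RNG state
// (Philox seed/offset or rng_state_) so the dropout mask applied in the
// forward pass can be regenerated bit-exactly during backward.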
#ifdef _WIN32
struct ScaledDotProductEfficientAttentionBackward0 : public TraceableFunction {
TORCH_API ScaledDotProductEfficientAttentionBackward0() = default;
#else
struct TORCH_API ScaledDotProductEfficientAttentionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ScaledDotProductEfficientAttentionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
attn_bias_.reset_data();
key_.reset_data();
query_.reset_data();
value_.reset_data();
log_sumexp_.reset_data();
output_.reset_data();
philox_offset_.reset_data();
philox_seed_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable attn_bias_;
double dropout_p;
bool is_causal;
SavedVariable key_;
SavedVariable query_;
::std::optional<double> scale;
SavedVariable value_;
SavedVariable log_sumexp_;
SavedVariable output_;
SavedVariable philox_offset_;
SavedVariable philox_seed_;
};
#ifdef _WIN32
struct ScaledDotProductFlashAttentionBackward0 : public TraceableFunction {
TORCH_API ScaledDotProductFlashAttentionBackward0() = default;
#else
struct TORCH_API ScaledDotProductFlashAttentionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ScaledDotProductFlashAttentionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
key_.reset_data();
query_.reset_data();
value_.reset_data();
cum_seq_k_.reset_data();
cum_seq_q_.reset_data();
logsumexp_.reset_data();
output_.reset_data();
rng_state_.reset_data();
unused_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
double dropout_p;
bool is_causal;
SavedVariable key_;
SavedVariable query_;
::std::optional<double> scale;
SavedVariable value_;
SavedVariable cum_seq_k_;
SavedVariable cum_seq_q_;
SavedVariable logsumexp_;
c10::SymInt max_k;
c10::SymInt max_q;
SavedVariable output_;
SavedVariable rng_state_;
SavedVariable unused_;
};
#ifdef _WIN32
struct ScaledDotProductFlashAttentionForCpuBackward0 : public TraceableFunction {
TORCH_API ScaledDotProductFlashAttentionForCpuBackward0() = default;
#else
struct TORCH_API ScaledDotProductFlashAttentionForCpuBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ScaledDotProductFlashAttentionForCpuBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
attn_mask_.reset_data();
key_.reset_data();
query_.reset_data();
value_.reset_data();
logsumexp_.reset_data();
output_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable attn_mask_;
double dropout_p;
bool is_causal;
SavedVariable key_;
SavedVariable query_;
::std::optional<double> scale;
SavedVariable value_;
SavedVariable logsumexp_;
SavedVariable output_;
};
#ifdef _WIN32
struct FlashAttentionBackward0 : public TraceableFunction {
TORCH_API FlashAttentionBackward0() = default;
#else
struct TORCH_API FlashAttentionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FlashAttentionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
cum_seq_k_.reset_data();
cum_seq_q_.reset_data();
key_.reset_data();
query_.reset_data();
value_.reset_data();
output_.reset_data();
rng_state_.reset_data();
softmax_logsumexp_.reset_data();
unused_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable cum_seq_k_;
SavedVariable cum_seq_q_;
double dropout_p;
bool is_causal;
SavedVariable key_;
c10::SymInt max_k;
c10::SymInt max_q;
SavedVariable query_;
::std::optional<double> scale;
SavedVariable value_;
::std::optional<c10::SymInt> window_size_left;
::std::optional<c10::SymInt> window_size_right;
SavedVariable output_;
SavedVariable rng_state_;
SavedVariable softmax_logsumexp_;
SavedVariable unused_;
};
#ifdef _WIN32
struct EfficientAttentionBackward0 : public TraceableFunction {
TORCH_API EfficientAttentionBackward0() = default;
#else
struct TORCH_API EfficientAttentionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "EfficientAttentionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
bias_.reset_data();
cu_seqlens_k_.reset_data();
cu_seqlens_q_.reset_data();
key_.reset_data();
query_.reset_data();
value_.reset_data();
logsumexp_.reset_data();
output_.reset_data();
philox_offset_.reset_data();
philox_seed_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable bias_;
SavedVariable cu_seqlens_k_;
SavedVariable cu_seqlens_q_;
int64_t custom_mask_type = 0;
double dropout_p;
SavedVariable key_;
SavedVariable query_;
::std::optional<double> scale;
SavedVariable value_;
SavedVariable logsumexp_;
c10::SymInt max_seqlen_batch_k;
c10::SymInt max_seqlen_batch_q;
SavedVariable output_;
SavedVariable philox_offset_;
SavedVariable philox_seed_;
};
#ifdef _WIN32
struct ScaledDotProductCudnnAttentionBackward0 : public TraceableFunction {
TORCH_API ScaledDotProductCudnnAttentionBackward0() = default;
#else
struct TORCH_API ScaledDotProductCudnnAttentionBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ScaledDotProductCudnnAttentionBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
attn_bias_.reset_data();
key_.reset_data();
query_.reset_data();
value_.reset_data();
cum_seq_k_.reset_data();
cum_seq_q_.reset_data();
logsumexp_.reset_data();
output_.reset_data();
philox_offset_.reset_data();
philox_seed_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable attn_bias_;
double dropout_p;
bool is_causal;
SavedVariable key_;
SavedVariable query_;
::std::optional<double> scale;
SavedVariable value_;
SavedVariable cum_seq_k_;
SavedVariable cum_seq_q_;
SavedVariable logsumexp_;
c10::SymInt max_k;
c10::SymInt max_q;
SavedVariable output_;
SavedVariable philox_offset_;
SavedVariable philox_seed_;
};
#ifdef _WIN32
struct ScaledDotProductFusedAttentionOverrideableBackward0 : public TraceableFunction {
TORCH_API ScaledDotProductFusedAttentionOverrideableBackward0() = default;
#else
struct TORCH_API ScaledDotProductFusedAttentionOverrideableBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ScaledDotProductFusedAttentionOverrideableBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
attn_bias_.reset_data();
key_.reset_data();
query_.reset_data();
value_.reset_data();
cum_seq_k_.reset_data();
cum_seq_q_.reset_data();
logsumexp_.reset_data();
output_.reset_data();
philox_offset_.reset_data();
philox_seed_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable attn_bias_;
double dropout_p;
bool is_causal;
SavedVariable key_;
SavedVariable query_;
::std::optional<double> scale;
SavedVariable value_;
SavedVariable cum_seq_k_;
SavedVariable cum_seq_q_;
SavedVariable logsumexp_;
c10::SymInt max_k;
c10::SymInt max_q;
SavedVariable output_;
SavedVariable philox_offset_;
SavedVariable philox_seed_;
};
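// FFT backwards save little: fft_c2r and fft_c2c keep only transform
// metadata (hence their empty release_variables() bodies), while fft_r2c
// additionally saves the input tensor for use in its gradient formula.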
#ifdef _WIN32
struct FftR2CBackward0 : public TraceableFunction {
TORCH_API FftR2CBackward0() = default;
#else
struct TORCH_API FftR2CBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FftR2CBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
int64_t normalization = 0;
bool onesided;
SavedVariable self_;
};
#ifdef _WIN32
struct FftC2RBackward0 : public TraceableFunction {
TORCH_API FftC2RBackward0() = default;
#else
struct TORCH_API FftC2RBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FftC2RBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
int64_t normalization = 0;
};
#ifdef _WIN32
struct FftC2CBackward0 : public TraceableFunction {
TORCH_API FftC2CBackward0() = default;
#else
struct TORCH_API FftC2CBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "FftC2CBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> dim;
bool forward;
int64_t normalization = 0;
};
#ifdef _WIN32
struct UnbindBackward0 : public Node {
TORCH_API UnbindBackward0() = default;
#else
struct TORCH_API UnbindBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnbindBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
};
#ifdef _WIN32
struct UnbindBackwardAutogradNestedTensor0 : public Node {
TORCH_API UnbindBackwardAutogradNestedTensor0() = default;
#else
struct TORCH_API UnbindBackwardAutogradNestedTensor0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnbindBackwardAutogradNestedTensor0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
at::Layout self_layout;
at::TensorOptions self_options;
};
#ifdef _WIN32
struct StackBackward0 : public TraceableFunction {
TORCH_API StackBackward0() = default;
#else
struct TORCH_API StackBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "StackBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
::std::vector<at::ScalarType> tensors_args_scalartypes;
size_t tensors_size_;
};
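// Fused RNN cell backwards (LSTM/GRU). The saved input_gates_/hidden_gates_
// appear to be the pre-activation gate tensors from the forward; together
// with the optional biases and the auxiliary result outputs they let the
// backward kernel recompute the pointwise gate derivatives.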
#ifdef _WIN32
struct ThnnFusedLstmCellBackward0 : public TraceableFunction {
TORCH_API ThnnFusedLstmCellBackward0() = default;
#else
struct TORCH_API ThnnFusedLstmCellBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ThnnFusedLstmCellBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
cx_.reset_data();
hidden_bias_.reset_data();
hidden_gates_.reset_data();
input_bias_.reset_data();
input_gates_.reset_data();
result1_.reset_data();
result2_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable cx_;
SavedVariable hidden_bias_;
SavedVariable hidden_gates_;
SavedVariable input_bias_;
SavedVariable input_gates_;
SavedVariable result1_;
SavedVariable result2_;
};
#ifdef _WIN32
struct ThnnFusedGruCellBackward0 : public TraceableFunction {
TORCH_API ThnnFusedGruCellBackward0() = default;
#else
struct TORCH_API ThnnFusedGruCellBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ThnnFusedGruCellBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
hidden_bias_.reset_data();
hidden_gates_.reset_data();
hx_.reset_data();
input_bias_.reset_data();
input_gates_.reset_data();
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable hidden_bias_;
SavedVariable hidden_gates_;
SavedVariable hx_;
SavedVariable input_bias_;
SavedVariable input_gates_;
SavedVariable result1_;
};
#ifdef _WIN32
struct PackPaddedSequenceBackward0 : public TraceableFunction {
TORCH_API PackPaddedSequenceBackward0() = default;
#else
struct TORCH_API PackPaddedSequenceBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PackPaddedSequenceBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result1_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
bool batch_first;
std::vector<c10::SymInt> input_sym_sizes;
SavedVariable result1_;
};
#ifdef _WIN32
struct SegmentReduceBackward0 : public TraceableFunction {
TORCH_API SegmentReduceBackward0() = default;
#else
struct TORCH_API SegmentReduceBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SegmentReduceBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
data_.reset_data();
lengths_.reset_data();
offsets_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t axis = 0;
SavedVariable data_;
::std::optional<at::Scalar> initial;
SavedVariable lengths_;
SavedVariable offsets_;
std::string reduce;
SavedVariable result_;
};
#ifdef _WIN32
struct PinMemoryBackward0 : public TraceableFunction {
TORCH_API PinMemoryBackward0() = default;
#else
struct TORCH_API PinMemoryBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PinMemoryBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
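// The Test* nodes back operators that exist only for autograd's test suite:
// _test_warn_in_autograd and the _test_autograd_multiple_dispatch family,
// which exercise per-dispatch-key derivative formulas (e.g. separate
// AutogradCUDA and AutogradNestedTensor variants of one op).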
#ifdef _WIN32
struct TestWarnInAutogradBackward0 : public TraceableFunction {
TORCH_API TestWarnInAutogradBackward0() = default;
#else
struct TORCH_API TestWarnInAutogradBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TestWarnInAutogradBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct TestAutogradMultipleDispatchBackward0 : public TraceableFunction {
TORCH_API TestAutogradMultipleDispatchBackward0() = default;
#else
struct TORCH_API TestAutogradMultipleDispatchBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TestAutogradMultipleDispatchBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct TestAutogradMultipleDispatchBackwardAutogradNestedTensor0 : public TraceableFunction {
TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor0() = default;
#else
struct TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TestAutogradMultipleDispatchBackwardAutogradNestedTensor0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct TestAutogradMultipleDispatchBackwardAutogradCUDA0 : public TraceableFunction {
TORCH_API TestAutogradMultipleDispatchBackwardAutogradCUDA0() = default;
#else
struct TORCH_API TestAutogradMultipleDispatchBackwardAutogradCUDA0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TestAutogradMultipleDispatchBackwardAutogradCUDA0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct TestAutogradMultipleDispatchBackwardAutogradNestedTensor1 : public TraceableFunction {
TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor1() = default;
#else
struct TORCH_API TestAutogradMultipleDispatchBackwardAutogradNestedTensor1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TestAutogradMultipleDispatchBackwardAutogradNestedTensor1"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct TestAutogradMultipleDispatchViewBackward0 : public Node {
TORCH_API TestAutogradMultipleDispatchViewBackward0() = default;
#else
struct TORCH_API TestAutogradMultipleDispatchViewBackward0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TestAutogradMultipleDispatchViewBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct TestAutogradMultipleDispatchViewBackwardAutogradCUDA0 : public Node {
TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0() = default;
#else
struct TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0 : public Node {
#endif
using Node::Node;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TestAutogradMultipleDispatchViewBackwardAutogradCUDA0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct ScatterReduceBackward0 : public TraceableFunction {
TORCH_API ScatterReduceBackward0() = default;
#else
struct TORCH_API ScatterReduceBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ScatterReduceBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
index_.reset_data();
self_.reset_data();
src_.reset_data();
result_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
bool include_self;
SavedVariable index_;
std::string reduce;
SavedVariable self_;
SavedVariable src_;
SavedVariable result_;
};
#ifdef _WIN32
struct ReshapeCopyBackward0 : public TraceableFunction {
TORCH_API ReshapeCopyBackward0() = default;
#else
struct TORCH_API ReshapeCopyBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReshapeCopyBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
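// _foreach_* backwards: tensor-list ops save std::vector<SavedVariable>
// lists plus a *_size_ element count. release_variables() clears each list
// and records the fact in the matching *_released_ flag, so a second
// backward pass fails with a clear error instead of reading freed state
// (plain std::vector<at::Scalar> lists are simply cleared).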
#ifdef _WIN32
struct ForeachDivBackward0 : public TraceableFunction {
TORCH_API ForeachDivBackward0() = default;
#else
struct TORCH_API ForeachDivBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachDivBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.clear();
other_released_ = true;
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> other_;
bool other_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
size_t other_size_;
};
#ifdef _WIN32
struct ForeachPowBackward0 : public TraceableFunction {
TORCH_API ForeachPowBackward0() = default;
#else
struct TORCH_API ForeachPowBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachPowBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
exponent_.clear();
exponent_released_ = true;
self_.clear();
self_released_ = true;
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> exponent_;
bool exponent_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
size_t exponent_size_;
};
#ifdef _WIN32
struct ForeachPowBackward1 : public TraceableFunction {
TORCH_API ForeachPowBackward1() = default;
#else
struct TORCH_API ForeachPowBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachPowBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
exponent.clear();
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::Scalar> exponent;
bool exponent_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachPowBackward2 : public TraceableFunction {
TORCH_API ForeachPowBackward2() = default;
#else
struct TORCH_API ForeachPowBackward2 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachPowBackward2"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
exponent_.clear();
exponent_released_ = true;
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> exponent_;
bool exponent_released_ = false;
at::Scalar self;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t exponent_size_;
};
#ifdef _WIN32
struct ForeachMinimumBackward0 : public TraceableFunction {
TORCH_API ForeachMinimumBackward0() = default;
#else
struct TORCH_API ForeachMinimumBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMinimumBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar scalar;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachMinimumBackward1 : public TraceableFunction {
TORCH_API ForeachMinimumBackward1() = default;
#else
struct TORCH_API ForeachMinimumBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMinimumBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
scalars.clear();
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::Scalar> scalars;
bool scalars_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachMaximumBackward0 : public TraceableFunction {
TORCH_API ForeachMaximumBackward0() = default;
#else
struct TORCH_API ForeachMaximumBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMaximumBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar scalar;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachMaximumBackward1 : public TraceableFunction {
TORCH_API ForeachMaximumBackward1() = default;
#else
struct TORCH_API ForeachMaximumBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMaximumBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
scalars.clear();
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::Scalar> scalars;
bool scalars_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachNormBackward0 : public TraceableFunction {
TORCH_API ForeachNormBackward0() = default;
#else
struct TORCH_API ForeachNormBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachNormBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar ord;
std::vector<SavedVariable> self_;
bool self_released_ = false;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
};
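// *_copy nodes: backwards for the out-of-place *_copy counterparts of view
// ops (alias_copy, expand_copy, view_copy, ...). Their gradients match the
// view versions earlier in the file, but they derive from TraceableFunction
// since the forward materializes a fresh tensor rather than a view.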
#ifdef _WIN32
struct AliasBackward0_copy : public TraceableFunction {
TORCH_API AliasBackward0_copy() = default;
#else
struct TORCH_API AliasBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AliasBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct AsStridedBackward0_copy : public TraceableFunction {
TORCH_API AsStridedBackward0_copy() = default;
#else
struct TORCH_API AsStridedBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "AsStridedBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::TensorGeometry self_geometry;
std::vector<c10::SymInt> size;
::std::optional<c10::SymInt> storage_offset;
std::vector<c10::SymInt> stride;
};
#ifdef _WIN32
struct ConjBackward0_copy : public TraceableFunction {
TORCH_API ConjBackward0_copy() = default;
#else
struct TORCH_API ConjBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ConjBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct NegViewBackward0_copy : public TraceableFunction {
TORCH_API NegViewBackward0_copy() = default;
#else
struct TORCH_API NegViewBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NegViewBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct DiagonalBackward0_copy : public TraceableFunction {
TORCH_API DiagonalBackward0_copy() = default;
#else
struct TORCH_API DiagonalBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "DiagonalBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim1 = 0;
int64_t dim2 = 0;
int64_t offset = 0;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct ExpandBackward0_copy : public TraceableFunction {
TORCH_API ExpandBackward0_copy() = default;
#else
struct TORCH_API ExpandBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ExpandBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct PermuteBackward0_copy : public TraceableFunction {
TORCH_API PermuteBackward0_copy() = default;
#else
struct TORCH_API PermuteBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "PermuteBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dims;
};
#ifdef _WIN32
struct ReshapeAliasBackward0_copy : public TraceableFunction {
TORCH_API ReshapeAliasBackward0_copy() = default;
#else
struct TORCH_API ReshapeAliasBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ReshapeAliasBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SelectBackward0_copy : public TraceableFunction {
TORCH_API SelectBackward0_copy() = default;
#else
struct TORCH_API SelectBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SelectBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
c10::SymInt index;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SelectBackwardAutogradNestedTensor0_copy : public TraceableFunction {
TORCH_API SelectBackwardAutogradNestedTensor0_copy() = default;
#else
struct TORCH_API SelectBackwardAutogradNestedTensor0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SelectBackwardAutogradNestedTensor0_copy"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
c10::SymInt index;
SavedVariable self_;
};
#ifdef _WIN32
struct SliceBackward0_copy : public TraceableFunction {
TORCH_API SliceBackward0_copy() = default;
#else
struct TORCH_API SliceBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SliceBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
::std::optional<c10::SymInt> end;
std::vector<c10::SymInt> self_sym_sizes;
::std::optional<c10::SymInt> start;
c10::SymInt step;
};
#ifdef _WIN32
struct SplitBackward0_copy : public TraceableFunction {
TORCH_API SplitBackward0_copy() = default;
#else
struct TORCH_API SplitBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SplitBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::TensorOptions self_options;
std::vector<c10::SymInt> self_sym_sizes;
c10::SymInt split_size;
};
#ifdef _WIN32
struct SplitWithSizesBackward0_copy : public TraceableFunction {
TORCH_API SplitWithSizesBackward0_copy() = default;
#else
struct TORCH_API SplitWithSizesBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SplitWithSizesBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
at::TensorOptions self_options;
std::vector<c10::SymInt> self_sym_sizes;
std::vector<c10::SymInt> split_sizes;
};
#ifdef _WIN32
struct SplitWithSizesBackwardAutogradNestedTensor0_copy : public TraceableFunction {
TORCH_API SplitWithSizesBackwardAutogradNestedTensor0_copy() = default;
#else
struct TORCH_API SplitWithSizesBackwardAutogradNestedTensor0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SplitWithSizesBackwardAutogradNestedTensor0_copy"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
at::TensorOptions self_options;
std::vector<c10::SymInt> split_sizes;
};
#ifdef _WIN32
struct SqueezeBackward0_copy : public TraceableFunction {
TORCH_API SqueezeBackward0_copy() = default;
#else
struct TORCH_API SqueezeBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SqueezeBackward1_copy : public TraceableFunction {
TORCH_API SqueezeBackward1_copy() = default;
#else
struct TORCH_API SqueezeBackward1_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackward1_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SqueezeBackwardAutogradNestedTensor0_copy : public TraceableFunction {
TORCH_API SqueezeBackwardAutogradNestedTensor0_copy() = default;
#else
struct TORCH_API SqueezeBackwardAutogradNestedTensor0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackwardAutogradNestedTensor0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
};
#ifdef _WIN32
struct SqueezeBackward2_copy : public TraceableFunction {
TORCH_API SqueezeBackward2_copy() = default;
#else
struct TORCH_API SqueezeBackward2_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackward2_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct SqueezeBackwardAutogradNestedTensor1_copy : public TraceableFunction {
TORCH_API SqueezeBackwardAutogradNestedTensor1_copy() = default;
#else
struct TORCH_API SqueezeBackwardAutogradNestedTensor1_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "SqueezeBackwardAutogradNestedTensor1_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<int64_t> dim;
int64_t self_dim = 0;
};
#ifdef _WIN32
struct TBackward0_copy : public TraceableFunction {
TORCH_API TBackward0_copy() = default;
#else
struct TORCH_API TBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct TransposeBackward0_copy : public TraceableFunction {
TORCH_API TransposeBackward0_copy() = default;
#else
struct TORCH_API TransposeBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TransposeBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim0 = 0;
int64_t dim1 = 0;
};
#ifdef _WIN32
struct UnfoldBackward0_copy : public TraceableFunction {
TORCH_API UnfoldBackward0_copy() = default;
#else
struct TORCH_API UnfoldBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnfoldBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dimension = 0;
std::vector<c10::SymInt> self_sym_sizes;
int64_t size = 0;
int64_t step = 0;
};
#ifdef _WIN32
struct LiftFreshBackward0_copy : public TraceableFunction {
TORCH_API LiftFreshBackward0_copy() = default;
#else
struct TORCH_API LiftFreshBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "LiftFreshBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct UnsqueezeBackward0_copy : public TraceableFunction {
TORCH_API UnsqueezeBackward0_copy() = default;
#else
struct TORCH_API UnsqueezeBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnsqueezeBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
};
#ifdef _WIN32
struct ViewBackward0_copy : public TraceableFunction {
TORCH_API ViewBackward0_copy() = default;
#else
struct TORCH_API ViewBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ViewBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<c10::SymInt> self_sym_sizes;
};
#ifdef _WIN32
struct ViewBackwardAutogradNestedTensor0_copy : public TraceableFunction {
TORCH_API ViewBackwardAutogradNestedTensor0_copy() = default;
#else
struct TORCH_API ViewBackwardAutogradNestedTensor0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ViewBackwardAutogradNestedTensor0_copy"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct ViewAsRealBackward0_copy : public TraceableFunction {
TORCH_API ViewAsRealBackward0_copy() = default;
#else
struct TORCH_API ViewAsRealBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ViewAsRealBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ViewAsComplexBackward0_copy : public TraceableFunction {
TORCH_API ViewAsComplexBackward0_copy() = default;
#else
struct TORCH_API ViewAsComplexBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ViewAsComplexBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct ValuesBackward0_copy : public TraceableFunction {
TORCH_API ValuesBackward0_copy() = default;
#else
struct TORCH_API ValuesBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ValuesBackward0_copy"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct ValuesBackwardAutogradNestedTensor0_copy : public TraceableFunction {
TORCH_API ValuesBackwardAutogradNestedTensor0_copy() = default;
#else
struct TORCH_API ValuesBackwardAutogradNestedTensor0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ValuesBackwardAutogradNestedTensor0_copy"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct NestedViewFromBufferBackward0_copy : public TraceableFunction {
TORCH_API NestedViewFromBufferBackward0_copy() = default;
#else
struct TORCH_API NestedViewFromBufferBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NestedViewFromBufferBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct NestedViewFromJaggedBackward0_copy : public TraceableFunction {
TORCH_API NestedViewFromJaggedBackward0_copy() = default;
#else
struct TORCH_API NestedViewFromJaggedBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NestedViewFromJaggedBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
};
#ifdef _WIN32
struct NestedGetValuesBackward0_copy : public TraceableFunction {
TORCH_API NestedGetValuesBackward0_copy() = default;
#else
struct TORCH_API NestedGetValuesBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "NestedGetValuesBackward0_copy"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct UnbindBackward0_copy : public TraceableFunction {
TORCH_API UnbindBackward0_copy() = default;
#else
struct TORCH_API UnbindBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnbindBackward0_copy"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
};
#ifdef _WIN32
struct UnbindBackwardAutogradNestedTensor0_copy : public TraceableFunction {
TORCH_API UnbindBackwardAutogradNestedTensor0_copy() = default;
#else
struct TORCH_API UnbindBackwardAutogradNestedTensor0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "UnbindBackwardAutogradNestedTensor0_copy"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
int64_t dim = 0;
SavedVariable self_;
at::Layout self_layout;
at::TensorOptions self_options;
};
#ifdef _WIN32
struct TestAutogradMultipleDispatchViewBackward0_copy : public TraceableFunction {
TORCH_API TestAutogradMultipleDispatchViewBackward0_copy() = default;
#else
struct TORCH_API TestAutogradMultipleDispatchViewBackward0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TestAutogradMultipleDispatchViewBackward0_copy"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
#ifdef _WIN32
struct TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy : public TraceableFunction {
TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy() = default;
#else
struct TORCH_API TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "TestAutogradMultipleDispatchViewBackwardAutogradCUDA0_copy"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.reset_data();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable self_;
};
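// Editor's note: the Foreach* nodes below back the fused _foreach_* ops. They
// save whole tensor lists as std::vector<SavedVariable> guarded by a
// *_released_ flag, and record *_size_ so apply() can carve the flattened
// incoming gradient list back into per-tensor pieces.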
#ifdef _WIN32
struct ForeachAbsBackward0 : public TraceableFunction {
TORCH_API ForeachAbsBackward0() = default;
#else
struct TORCH_API ForeachAbsBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAbsBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachAcosBackward0 : public TraceableFunction {
TORCH_API ForeachAcosBackward0() = default;
#else
struct TORCH_API ForeachAcosBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAcosBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachAddBackward1Scalar : public TraceableFunction {
TORCH_API ForeachAddBackward1Scalar() = default;
#else
struct TORCH_API ForeachAddBackward1Scalar : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAddBackward1Scalar"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachAddBackward0List : public TraceableFunction {
TORCH_API ForeachAddBackward0List() = default;
#else
struct TORCH_API ForeachAddBackward0List : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAddBackward0List"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.clear();
other_released_ = true;
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
std::vector<SavedVariable> other_;
bool other_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
size_t other_size_;
};
#ifdef _WIN32
struct ForeachAddBackward1ScalarList : public TraceableFunction {
TORCH_API ForeachAddBackward1ScalarList() = default;
#else
struct TORCH_API ForeachAddBackward1ScalarList : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAddBackward1ScalarList"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachAddBackward0Tensor : public TraceableFunction {
TORCH_API ForeachAddBackward0Tensor() = default;
#else
struct TORCH_API ForeachAddBackward0Tensor : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAddBackward0Tensor"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
SavedVariable other_;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachAddcdivBackward0Scalar : public TraceableFunction {
TORCH_API ForeachAddcdivBackward0Scalar() = default;
#else
struct TORCH_API ForeachAddcdivBackward0Scalar : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAddcdivBackward0Scalar"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
tensor1_.clear();
tensor1_released_ = true;
tensor2_.clear();
tensor2_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
std::vector<SavedVariable> tensor1_;
bool tensor1_released_ = false;
std::vector<SavedVariable> tensor2_;
bool tensor2_released_ = false;
at::Scalar value;
size_t self_size_;
size_t tensor1_size_;
size_t tensor2_size_;
};
#ifdef _WIN32
struct ForeachAddcdivBackward0ScalarList : public TraceableFunction {
TORCH_API ForeachAddcdivBackward0ScalarList() = default;
#else
struct TORCH_API ForeachAddcdivBackward0ScalarList : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAddcdivBackward0ScalarList"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
scalars.clear();
self_.clear();
self_released_ = true;
tensor1_.clear();
tensor1_released_ = true;
tensor2_.clear();
tensor2_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::Scalar> scalars;
bool scalars_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
std::vector<SavedVariable> tensor1_;
bool tensor1_released_ = false;
std::vector<SavedVariable> tensor2_;
bool tensor2_released_ = false;
size_t self_size_;
size_t tensor1_size_;
size_t tensor2_size_;
};
#ifdef _WIN32
struct ForeachAddcmulBackward0Scalar : public TraceableFunction {
TORCH_API ForeachAddcmulBackward0Scalar() = default;
#else
struct TORCH_API ForeachAddcmulBackward0Scalar : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAddcmulBackward0Scalar"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
tensor1_.clear();
tensor1_released_ = true;
tensor2_.clear();
tensor2_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
std::vector<SavedVariable> tensor1_;
bool tensor1_released_ = false;
std::vector<SavedVariable> tensor2_;
bool tensor2_released_ = false;
at::Scalar value;
size_t self_size_;
size_t tensor1_size_;
size_t tensor2_size_;
};
#ifdef _WIN32
struct ForeachAddcmulBackward0ScalarList : public TraceableFunction {
TORCH_API ForeachAddcmulBackward0ScalarList() = default;
#else
struct TORCH_API ForeachAddcmulBackward0ScalarList : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAddcmulBackward0ScalarList"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
scalars.clear();
self_.clear();
self_released_ = true;
tensor1_.clear();
tensor1_released_ = true;
tensor2_.clear();
tensor2_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::Scalar> scalars;
bool scalars_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
std::vector<SavedVariable> tensor1_;
bool tensor1_released_ = false;
std::vector<SavedVariable> tensor2_;
bool tensor2_released_ = false;
size_t self_size_;
size_t tensor1_size_;
size_t tensor2_size_;
};
#ifdef _WIN32
struct ForeachAsinBackward0 : public TraceableFunction {
TORCH_API ForeachAsinBackward0() = default;
#else
struct TORCH_API ForeachAsinBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAsinBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachAtanBackward0 : public TraceableFunction {
TORCH_API ForeachAtanBackward0() = default;
#else
struct TORCH_API ForeachAtanBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachAtanBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachCeilBackward0 : public TraceableFunction {
TORCH_API ForeachCeilBackward0() = default;
#else
struct TORCH_API ForeachCeilBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachCeilBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
size_t self_size_;
};
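// Editor's note: nodes such as ForeachCeilBackward0 (and Floor, Round, Sign,
// Trunc, Neg below) have gradients that depend on neither input nor output,
// so nothing is saved: release_variables() is empty and only self_size_ is
// kept to produce a gradient list of the right length.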
#ifdef _WIN32
struct ForeachClampMaxBackward0Scalar : public TraceableFunction {
TORCH_API ForeachClampMaxBackward0Scalar() = default;
#else
struct TORCH_API ForeachClampMaxBackward0Scalar : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachClampMaxBackward0Scalar"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar scalar;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachClampMaxBackward1List : public TraceableFunction {
TORCH_API ForeachClampMaxBackward1List() = default;
#else
struct TORCH_API ForeachClampMaxBackward1List : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachClampMaxBackward1List"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.clear();
other_released_ = true;
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> other_;
bool other_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
size_t other_size_;
};
#ifdef _WIN32
struct ForeachClampMaxBackward0ScalarList : public TraceableFunction {
TORCH_API ForeachClampMaxBackward0ScalarList() = default;
#else
struct TORCH_API ForeachClampMaxBackward0ScalarList : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachClampMaxBackward0ScalarList"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
scalars.clear();
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::Scalar> scalars;
bool scalars_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachClampMinBackward0Scalar : public TraceableFunction {
TORCH_API ForeachClampMinBackward0Scalar() = default;
#else
struct TORCH_API ForeachClampMinBackward0Scalar : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachClampMinBackward0Scalar"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar scalar;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachClampMinBackward1List : public TraceableFunction {
TORCH_API ForeachClampMinBackward1List() = default;
#else
struct TORCH_API ForeachClampMinBackward1List : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachClampMinBackward1List"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.clear();
other_released_ = true;
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> other_;
bool other_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
size_t other_size_;
};
#ifdef _WIN32
struct ForeachClampMinBackward0ScalarList : public TraceableFunction {
TORCH_API ForeachClampMinBackward0ScalarList() = default;
#else
struct TORCH_API ForeachClampMinBackward0ScalarList : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachClampMinBackward0ScalarList"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
scalars.clear();
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::Scalar> scalars;
bool scalars_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachCosBackward0 : public TraceableFunction {
TORCH_API ForeachCosBackward0() = default;
#else
struct TORCH_API ForeachCosBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachCosBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachCoshBackward0 : public TraceableFunction {
TORCH_API ForeachCoshBackward0() = default;
#else
struct TORCH_API ForeachCoshBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachCoshBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachDivBackward1Scalar : public TraceableFunction {
TORCH_API ForeachDivBackward1Scalar() = default;
#else
struct TORCH_API ForeachDivBackward1Scalar : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachDivBackward1Scalar"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar scalar;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachDivBackward1ScalarList : public TraceableFunction {
TORCH_API ForeachDivBackward1ScalarList() = default;
#else
struct TORCH_API ForeachDivBackward1ScalarList : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachDivBackward1ScalarList"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
scalars.clear();
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::Scalar> scalars;
bool scalars_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachDivBackward0Tensor : public TraceableFunction {
TORCH_API ForeachDivBackward0Tensor() = default;
#else
struct TORCH_API ForeachDivBackward0Tensor : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachDivBackward0Tensor"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachErfBackward0 : public TraceableFunction {
TORCH_API ForeachErfBackward0() = default;
#else
struct TORCH_API ForeachErfBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachErfBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachErfcBackward0 : public TraceableFunction {
TORCH_API ForeachErfcBackward0() = default;
#else
struct TORCH_API ForeachErfcBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachErfcBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachExpBackward0 : public TraceableFunction {
TORCH_API ForeachExpBackward0() = default;
#else
struct TORCH_API ForeachExpBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachExpBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
};
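// Editor's note: where the derivative is naturally expressed via the forward
// output (exp, expm1, reciprocal, rsqrt, sigmoid, sqrt, tan, tanh), the
// codegen saves result_ rather than self_; e.g. d/dx exp(x) = exp(x), which
// is exactly the saved result.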
#ifdef _WIN32
struct ForeachExpm1Backward0 : public TraceableFunction {
TORCH_API ForeachExpm1Backward0() = default;
#else
struct TORCH_API ForeachExpm1Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachExpm1Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachFloorBackward0 : public TraceableFunction {
TORCH_API ForeachFloorBackward0() = default;
#else
struct TORCH_API ForeachFloorBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachFloorBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachFracBackward0 : public TraceableFunction {
TORCH_API ForeachFracBackward0() = default;
#else
struct TORCH_API ForeachFracBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachFracBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachLerpBackward1List : public TraceableFunction {
TORCH_API ForeachLerpBackward1List() = default;
#else
struct TORCH_API ForeachLerpBackward1List : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachLerpBackward1List"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
tensors1_.clear();
tensors1_released_ = true;
weights_.clear();
weights_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
std::vector<SavedVariable> tensors1_;
bool tensors1_released_ = false;
std::vector<SavedVariable> weights_;
bool weights_released_ = false;
size_t self_size_;
size_t tensors1_size_;
size_t weights_size_;
};
#ifdef _WIN32
struct ForeachLerpBackward0Scalar : public TraceableFunction {
TORCH_API ForeachLerpBackward0Scalar() = default;
#else
struct TORCH_API ForeachLerpBackward0Scalar : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachLerpBackward0Scalar"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar weight;
size_t self_size_;
size_t tensors1_size_;
};
#ifdef _WIN32
struct ForeachLerpBackward0ScalarList : public TraceableFunction {
TORCH_API ForeachLerpBackward0ScalarList() = default;
#else
struct TORCH_API ForeachLerpBackward0ScalarList : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachLerpBackward0ScalarList"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
weight.clear();
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::Scalar> weight;
bool weight_released_ = false;
size_t self_size_;
size_t tensors1_size_;
};
#ifdef _WIN32
struct ForeachLgammaBackward0 : public TraceableFunction {
TORCH_API ForeachLgammaBackward0() = default;
#else
struct TORCH_API ForeachLgammaBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachLgammaBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachLogBackward0 : public TraceableFunction {
TORCH_API ForeachLogBackward0() = default;
#else
struct TORCH_API ForeachLogBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachLogBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachLog10Backward0 : public TraceableFunction {
TORCH_API ForeachLog10Backward0() = default;
#else
struct TORCH_API ForeachLog10Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachLog10Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachLog1PBackward0 : public TraceableFunction {
TORCH_API ForeachLog1PBackward0() = default;
#else
struct TORCH_API ForeachLog1PBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachLog1PBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachLog2Backward0 : public TraceableFunction {
TORCH_API ForeachLog2Backward0() = default;
#else
struct TORCH_API ForeachLog2Backward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachLog2Backward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachMaxBackward1 : public TraceableFunction {
TORCH_API ForeachMaxBackward1() = default;
#else
struct TORCH_API ForeachMaxBackward1 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMaxBackward1"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachMaximumBackward0List : public TraceableFunction {
TORCH_API ForeachMaximumBackward0List() = default;
#else
struct TORCH_API ForeachMaximumBackward0List : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMaximumBackward0List"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.clear();
other_released_ = true;
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> other_;
bool other_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
size_t other_size_;
};
#ifdef _WIN32
struct ForeachMinimumBackward0List : public TraceableFunction {
TORCH_API ForeachMinimumBackward0List() = default;
#else
struct TORCH_API ForeachMinimumBackward0List : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMinimumBackward0List"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.clear();
other_released_ = true;
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> other_;
bool other_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
size_t other_size_;
};
#ifdef _WIN32
struct ForeachMulBackward1Scalar : public TraceableFunction {
TORCH_API ForeachMulBackward1Scalar() = default;
#else
struct TORCH_API ForeachMulBackward1Scalar : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMulBackward1Scalar"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar scalar;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachMulBackward0List : public TraceableFunction {
TORCH_API ForeachMulBackward0List() = default;
#else
struct TORCH_API ForeachMulBackward0List : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMulBackward0List"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.clear();
other_released_ = true;
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> other_;
bool other_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
size_t other_size_;
};
#ifdef _WIN32
struct ForeachMulBackward1ScalarList : public TraceableFunction {
TORCH_API ForeachMulBackward1ScalarList() = default;
#else
struct TORCH_API ForeachMulBackward1ScalarList : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMulBackward1ScalarList"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
scalars.clear();
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<at::Scalar> scalars;
bool scalars_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachMulBackward0Tensor : public TraceableFunction {
TORCH_API ForeachMulBackward0Tensor() = default;
#else
struct TORCH_API ForeachMulBackward0Tensor : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachMulBackward0Tensor"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.reset_data();
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
SavedVariable other_;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachNegBackward0 : public TraceableFunction {
TORCH_API ForeachNegBackward0() = default;
#else
struct TORCH_API ForeachNegBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachNegBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachPowBackward0Scalar : public TraceableFunction {
TORCH_API ForeachPowBackward0Scalar() = default;
#else
struct TORCH_API ForeachPowBackward0Scalar : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachPowBackward0Scalar"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar exponent;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachReciprocalBackward0 : public TraceableFunction {
TORCH_API ForeachReciprocalBackward0() = default;
#else
struct TORCH_API ForeachReciprocalBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachReciprocalBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachRoundBackward0 : public TraceableFunction {
TORCH_API ForeachRoundBackward0() = default;
#else
struct TORCH_API ForeachRoundBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachRoundBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachRsqrtBackward0 : public TraceableFunction {
TORCH_API ForeachRsqrtBackward0() = default;
#else
struct TORCH_API ForeachRsqrtBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachRsqrtBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachSigmoidBackward0 : public TraceableFunction {
TORCH_API ForeachSigmoidBackward0() = default;
#else
struct TORCH_API ForeachSigmoidBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachSigmoidBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachSignBackward0 : public TraceableFunction {
TORCH_API ForeachSignBackward0() = default;
#else
struct TORCH_API ForeachSignBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachSignBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachSinBackward0 : public TraceableFunction {
TORCH_API ForeachSinBackward0() = default;
#else
struct TORCH_API ForeachSinBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachSinBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachSinhBackward0 : public TraceableFunction {
TORCH_API ForeachSinhBackward0() = default;
#else
struct TORCH_API ForeachSinhBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachSinhBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachSqrtBackward0 : public TraceableFunction {
TORCH_API ForeachSqrtBackward0() = default;
#else
struct TORCH_API ForeachSqrtBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachSqrtBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachSubBackward1Scalar : public TraceableFunction {
TORCH_API ForeachSubBackward1Scalar() = default;
#else
struct TORCH_API ForeachSubBackward1Scalar : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachSubBackward1Scalar"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachSubBackward0List : public TraceableFunction {
TORCH_API ForeachSubBackward0List() = default;
#else
struct TORCH_API ForeachSubBackward0List : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachSubBackward0List"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
other_.clear();
other_released_ = true;
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
at::Scalar alpha;
std::vector<SavedVariable> other_;
bool other_released_ = false;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
size_t other_size_;
};
#ifdef _WIN32
struct ForeachSubBackward1ScalarList : public TraceableFunction {
TORCH_API ForeachSubBackward1ScalarList() = default;
#else
struct TORCH_API ForeachSubBackward1ScalarList : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachSubBackward1ScalarList"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
self_.clear();
self_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> self_;
bool self_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachTanBackward0 : public TraceableFunction {
TORCH_API ForeachTanBackward0() = default;
#else
struct TORCH_API ForeachTanBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachTanBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachTanhBackward0 : public TraceableFunction {
TORCH_API ForeachTanhBackward0() = default;
#else
struct TORCH_API ForeachTanhBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachTanhBackward0"; }
void release_variables() override {
std::lock_guard<std::mutex> lock(mutex_);
result_.clear();
result_released_ = true;
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
std::vector<SavedVariable> result_;
bool result_released_ = false;
size_t self_size_;
};
#ifdef _WIN32
struct ForeachTruncBackward0 : public TraceableFunction {
TORCH_API ForeachTruncBackward0() = default;
#else
struct TORCH_API ForeachTruncBackward0 : public TraceableFunction {
#endif
using TraceableFunction::TraceableFunction;
variable_list apply(variable_list&& grads) override;
std::string name() const override { return "ForeachTruncBackward0"; }
void release_variables() override {
}
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(const variable_list& inputs, SwapSavedVariables& saved) override;
size_t self_size_;
};
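// Editor's sketch (illustrative only, not part of the generated file): the
// generated VariableType kernels typically wire one of these nodes up roughly
// like this, using helpers from VariableTypeUtils.h / function.h:
//
//   auto grad_fn = std::shared_ptr<ForeachAbsBackward0>(
//       new ForeachAbsBackward0(), deleteNode);
//   grad_fn->set_next_edges(collect_next_edges(self));
//   grad_fn->self_ = make_saved_variable_list(self);
//   grad_fn->self_size_ = self.size();
//   // ... run the actual _foreach_abs kernel to get `result` ...
//   set_history(flatten_tensor_args(result), grad_fn);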
}}} // namespace torch::autograd::generated
```
|
==================================================================================================================================================
SOURCE CODE FILE: VariableType.h
LINES: 1
SIZE: 1.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\generated\VariableType.h
ENCODING: utf-8
```h
#pragma once
// @generated from ..\tools\autograd\templates/VariableType.h
#include <ATen/core/Tensor.h>
#include <ATen/Context.h>
#include <c10/util/intrusive_ptr.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/autograd_not_implemented_fallback.h>
#include <cstddef> // for size_t
#include <functional> // for function
#include <memory> // for unique_ptr
#include <string>
#include <vector>
namespace at {
struct Quantizer;
}
namespace torch { namespace autograd {
using Variable = at::Tensor;
using at::Context;
using at::Device;
using at::Dimname;
using at::DimnameList;
using at::Generator;
using at::IntArrayRef;
using at::MemoryFormat;
using at::QScheme;
using at::Scalar;
using at::ScalarType;
using at::Storage;
using at::Tensor;
using at::TensorList;
using at::TensorOptions;
using at::Quantizer;
using std::optional;
namespace VariableType {
TORCH_API std::vector<at::DeprecatedTypeProperties*> allCUDATypes();
TORCH_API std::vector<at::DeprecatedTypeProperties*> allXPUTypes();
TORCH_API std::vector<at::DeprecatedTypeProperties*> allCPUTypes();
TORCH_API std::vector<at::DeprecatedTypeProperties*> allPrivateUser1Types();
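// Editor's comment: the unpack() overloads below validate tensor arguments
// before a kernel runs; judging by their signatures they report the offending
// argument via `name` and `pos` when a required tensor is undefined, while
// unpack_opt() tolerates undefined tensors (see the generated .cpp for the
// exact checks).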
at::Tensor & unpack(Tensor & t, const char * name, int pos);
const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char * name, int pos);
}
}} // namespace torch::autograd
```
|
===============================================================================================================================================
SOURCE CODE FILE: ViewFuncs.h
LINES: 1
SIZE: 37.40 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\generated\ViewFuncs.h
ENCODING: utf-8
```h
#pragma once
// @generated from ..\tools\autograd\templates/ViewFuncs.h
#include <torch/library.h>
#include <torch/csrc/autograd/variable.h>
#include <c10/core/SymIntArrayRef.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
#include <ATen/ops/_conj_ops.h>
#include <ATen/ops/_indices_ops.h>
#include <ATen/ops/_neg_view_ops.h>
#include <ATen/ops/_nested_get_values_ops.h>
#include <ATen/ops/_nested_view_from_buffer_ops.h>
#include <ATen/ops/_nested_view_from_jagged_ops.h>
#include <ATen/ops/_reshape_alias_ops.h>
#include <ATen/ops/_test_autograd_multiple_dispatch_view_ops.h>
#include <ATen/ops/_values_ops.h>
#include <ATen/ops/alias_ops.h>
#include <ATen/ops/as_strided_ops.h>
#include <ATen/ops/ccol_indices_ops.h>
#include <ATen/ops/chunk_ops.h>
#include <ATen/ops/col_indices_ops.h>
#include <ATen/ops/crow_indices_ops.h>
#include <ATen/ops/diagonal_ops.h>
#include <ATen/ops/expand_ops.h>
#include <ATen/ops/indices_ops.h>
#include <ATen/ops/narrow_ops.h>
#include <ATen/ops/permute_ops.h>
#include <ATen/ops/row_indices_ops.h>
#include <ATen/ops/select_ops.h>
#include <ATen/ops/slice_ops.h>
#include <ATen/ops/slice_inverse_ops.h>
#include <ATen/ops/split_ops.h>
#include <ATen/ops/split_with_sizes_ops.h>
#include <ATen/ops/squeeze_ops.h>
#include <ATen/ops/t_ops.h>
#include <ATen/ops/transpose_ops.h>
#include <ATen/ops/unbind_ops.h>
#include <ATen/ops/unfold_ops.h>
#include <ATen/ops/unsqueeze_ops.h>
#include <ATen/ops/values_ops.h>
#include <ATen/ops/view_ops.h>
#include <ATen/ops/view_as_complex_ops.h>
#include <ATen/ops/view_as_real_ops.h>
#endif
namespace torch::autograd::generated {
using at::Scalar;
using at::Tensor;
using at::IntArrayRef;
using at::ArrayRef;
using at::Type;
using at::ScalarType;
using std::optional;
using c10::fmap;
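// Each functor below reifies one view operation for autograd's view-replay
// machinery: operator() re-applies the recorded view to a given base tensor,
// get_symints()/get_tensors() (with their num_* counterparts) expose the
// saved call arguments, and clone_and_set() returns a copy of the functor
// with that saved state optionally replaced.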
#define _CONJ_VIEW_FUNC_AVAILABLE
struct _ConjViewFunc : public torch::autograd::ViewFunc {
_ConjViewFunc()
{}
virtual ~_ConjViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define _INDICES_VIEW_FUNC_AVAILABLE
struct _IndicesViewFunc : public torch::autograd::ViewFunc {
_IndicesViewFunc()
{}
virtual ~_IndicesViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define _NEG_VIEW_VIEW_FUNC_AVAILABLE
struct _NegViewViewFunc : public torch::autograd::ViewFunc {
_NegViewViewFunc()
{}
virtual ~_NegViewViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define _NESTED_GET_VALUES_VIEW_FUNC_AVAILABLE
struct _NestedGetValuesViewFunc : public torch::autograd::ViewFunc {
_NestedGetValuesViewFunc()
{}
virtual ~_NestedGetValuesViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define _NESTED_VIEW_FROM_BUFFER_VIEW_FUNC_AVAILABLE
struct _NestedViewFromBufferViewFunc : public torch::autograd::ViewFunc {
_NestedViewFromBufferViewFunc(const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) : nested_size(nested_size), nested_strides(nested_strides), offsets(offsets)
{}
virtual ~_NestedViewFromBufferViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
at::Tensor nested_size;
at::Tensor nested_strides;
at::Tensor offsets;
};
#define _NESTED_VIEW_FROM_JAGGED_VIEW_FUNC_AVAILABLE
struct _NestedViewFromJaggedViewFunc : public torch::autograd::ViewFunc {
_NestedViewFromJaggedViewFunc(const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional<at::Tensor> & lengths, int64_t ragged_idx, const ::std::optional<at::Tensor> & min_seqlen, const ::std::optional<at::Tensor> & max_seqlen) : offsets(offsets), dummy(dummy), lengths(lengths), ragged_idx(ragged_idx), min_seqlen(min_seqlen), max_seqlen(max_seqlen)
{}
virtual ~_NestedViewFromJaggedViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
at::Tensor offsets;
at::Tensor dummy;
::std::optional<at::Tensor> lengths;
int64_t ragged_idx;
::std::optional<at::Tensor> min_seqlen;
::std::optional<at::Tensor> max_seqlen;
};
#define _RESHAPE_ALIAS_VIEW_FUNC_AVAILABLE
struct _ReshapeAliasViewFunc : public torch::autograd::ViewFunc {
_ReshapeAliasViewFunc(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) : size(size.vec()), stride(stride.vec())
{}
virtual ~_ReshapeAliasViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
::std::vector<c10::SymInt> size;
::std::vector<c10::SymInt> stride;
};
#define _TEST_AUTOGRAD_MULTIPLE_DISPATCH_VIEW_VIEW_FUNC_AVAILABLE
struct _TestAutogradMultipleDispatchViewViewFunc : public torch::autograd::ViewFunc {
_TestAutogradMultipleDispatchViewViewFunc()
{}
virtual ~_TestAutogradMultipleDispatchViewViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define _VALUES_VIEW_FUNC_AVAILABLE
struct _ValuesViewFunc : public torch::autograd::ViewFunc {
_ValuesViewFunc()
{}
virtual ~_ValuesViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define ALIAS_VIEW_FUNC_AVAILABLE
struct AliasViewFunc : public torch::autograd::ViewFunc {
AliasViewFunc()
{}
virtual ~AliasViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define AS_STRIDED_VIEW_FUNC_AVAILABLE
struct AsStridedViewFunc : public torch::autograd::ViewFunc {
AsStridedViewFunc(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset) : size(size.vec()), stride(stride.vec()), storage_offset(storage_offset)
{}
virtual ~AsStridedViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
::std::vector<c10::SymInt> size;
::std::vector<c10::SymInt> stride;
::std::optional<c10::SymInt> storage_offset;
};
#define CCOL_INDICES_VIEW_FUNC_AVAILABLE
struct CcolIndicesViewFunc : public torch::autograd::ViewFunc {
CcolIndicesViewFunc()
{}
virtual ~CcolIndicesViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define CHUNK_VIEW_FUNC_AVAILABLE
struct ChunkViewFunc : public torch::autograd::ViewFunc {
ChunkViewFunc(int64_t chunks, int64_t dim, int64_t view_idx) : chunks(chunks), dim(dim), view_idx(view_idx)
{}
virtual ~ChunkViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
int64_t chunks;
int64_t dim;
int64_t view_idx;
};
#define COL_INDICES_VIEW_FUNC_AVAILABLE
struct ColIndicesViewFunc : public torch::autograd::ViewFunc {
ColIndicesViewFunc()
{}
virtual ~ColIndicesViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define CROW_INDICES_VIEW_FUNC_AVAILABLE
struct CrowIndicesViewFunc : public torch::autograd::ViewFunc {
CrowIndicesViewFunc()
{}
virtual ~CrowIndicesViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define DIAGONAL_VIEW_FUNC_AVAILABLE
struct DiagonalViewFunc : public torch::autograd::ViewFunc {
DiagonalViewFunc(int64_t offset, int64_t dim1, int64_t dim2) : offset(offset), dim1(dim1), dim2(dim2)
{}
virtual ~DiagonalViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
int64_t offset;
int64_t dim1;
int64_t dim2;
};
#define EXPAND_VIEW_FUNC_AVAILABLE
struct ExpandViewFunc : public torch::autograd::ViewFunc {
ExpandViewFunc(c10::SymIntArrayRef size, bool implicit) : size(size.vec()), implicit(implicit)
{}
virtual ~ExpandViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
::std::vector<c10::SymInt> size;
bool implicit;
};
#define INDICES_VIEW_FUNC_AVAILABLE
struct IndicesViewFunc : public torch::autograd::ViewFunc {
IndicesViewFunc()
{}
virtual ~IndicesViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define NARROW_VIEW_FUNC_AVAILABLE
struct NarrowViewFunc : public torch::autograd::ViewFunc {
NarrowViewFunc(int64_t dim, c10::SymInt start, c10::SymInt length) : dim(dim), start(start), length(length)
{}
virtual ~NarrowViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
int64_t dim;
c10::SymInt start;
c10::SymInt length;
};
#define PERMUTE_VIEW_FUNC_AVAILABLE
struct PermuteViewFunc : public torch::autograd::ViewFunc {
PermuteViewFunc(at::IntArrayRef dims) : dims(dims.vec())
{}
virtual ~PermuteViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
::std::vector<int64_t> dims;
};
#define ROW_INDICES_VIEW_FUNC_AVAILABLE
struct RowIndicesViewFunc : public torch::autograd::ViewFunc {
RowIndicesViewFunc()
{}
virtual ~RowIndicesViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define SELECT_INT_VIEW_FUNC_AVAILABLE
struct SelectIntViewFunc : public torch::autograd::ViewFunc {
SelectIntViewFunc(int64_t dim, c10::SymInt index) : dim(dim), index(index)
{}
virtual ~SelectIntViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
int64_t dim;
c10::SymInt index;
};
#define SLICE_TENSOR_VIEW_FUNC_AVAILABLE
struct SliceTensorViewFunc : public torch::autograd::ViewFunc {
SliceTensorViewFunc(int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) : dim(dim), start(start), end(end), step(step)
{}
virtual ~SliceTensorViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
int64_t dim;
::std::optional<c10::SymInt> start;
::std::optional<c10::SymInt> end;
c10::SymInt step;
};
#define SLICE_INVERSE_VIEW_FUNC_AVAILABLE
struct SliceInverseViewFunc : public torch::autograd::ViewFunc {
SliceInverseViewFunc(const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step) : src(src), dim(dim), start(start), end(end), step(step)
{}
virtual ~SliceInverseViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
at::Tensor src;
int64_t dim;
::std::optional<c10::SymInt> start;
::std::optional<c10::SymInt> end;
c10::SymInt step;
};
#define SPLIT_TENSOR_VIEW_FUNC_AVAILABLE
struct SplitTensorViewFunc : public torch::autograd::ViewFunc {
SplitTensorViewFunc(c10::SymInt split_size, int64_t dim, int64_t view_idx) : split_size(split_size), dim(dim), view_idx(view_idx)
{}
virtual ~SplitTensorViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
c10::SymInt split_size;
int64_t dim;
int64_t view_idx;
};
#define SPLIT_WITH_SIZES_VIEW_FUNC_AVAILABLE
struct SplitWithSizesViewFunc : public torch::autograd::ViewFunc {
SplitWithSizesViewFunc(c10::SymIntArrayRef split_sizes, int64_t dim, int64_t view_idx) : split_sizes(split_sizes.vec()), dim(dim), view_idx(view_idx)
{}
virtual ~SplitWithSizesViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
::std::vector<c10::SymInt> split_sizes;
int64_t dim;
int64_t view_idx;
};
#define SQUEEZE_VIEW_FUNC_AVAILABLE
struct SqueezeViewFunc : public torch::autograd::ViewFunc {
SqueezeViewFunc()
{}
virtual ~SqueezeViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define SQUEEZE_DIM_VIEW_FUNC_AVAILABLE
struct SqueezeDimViewFunc : public torch::autograd::ViewFunc {
SqueezeDimViewFunc(int64_t dim) : dim(dim)
{}
virtual ~SqueezeDimViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
int64_t dim;
};
#define SQUEEZE_DIMS_VIEW_FUNC_AVAILABLE
struct SqueezeDimsViewFunc : public torch::autograd::ViewFunc {
SqueezeDimsViewFunc(at::IntArrayRef dim) : dim(dim.vec())
{}
virtual ~SqueezeDimsViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
::std::vector<int64_t> dim;
};
#define T_VIEW_FUNC_AVAILABLE
struct TViewFunc : public torch::autograd::ViewFunc {
TViewFunc()
{}
virtual ~TViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define TRANSPOSE_INT_VIEW_FUNC_AVAILABLE
struct TransposeIntViewFunc : public torch::autograd::ViewFunc {
TransposeIntViewFunc(int64_t dim0, int64_t dim1) : dim0(dim0), dim1(dim1)
{}
virtual ~TransposeIntViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
int64_t dim0;
int64_t dim1;
};
#define UNBIND_INT_VIEW_FUNC_AVAILABLE
struct UnbindIntViewFunc : public torch::autograd::ViewFunc {
UnbindIntViewFunc(int64_t dim, int64_t view_idx) : dim(dim), view_idx(view_idx)
{}
virtual ~UnbindIntViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
int64_t dim;
int64_t view_idx;
};
#define UNFOLD_VIEW_FUNC_AVAILABLE
struct UnfoldViewFunc : public torch::autograd::ViewFunc {
UnfoldViewFunc(int64_t dimension, int64_t size, int64_t step) : dimension(dimension), size(size), step(step)
{}
virtual ~UnfoldViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
int64_t dimension;
int64_t size;
int64_t step;
};
#define UNSQUEEZE_VIEW_FUNC_AVAILABLE
struct UnsqueezeViewFunc : public torch::autograd::ViewFunc {
UnsqueezeViewFunc(int64_t dim) : dim(dim)
{}
virtual ~UnsqueezeViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
int64_t dim;
};
#define VALUES_VIEW_FUNC_AVAILABLE
struct ValuesViewFunc : public torch::autograd::ViewFunc {
ValuesViewFunc()
{}
virtual ~ValuesViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define VIEW_VIEW_FUNC_AVAILABLE
struct ViewViewFunc : public torch::autograd::ViewFunc {
ViewViewFunc(c10::SymIntArrayRef size) : size(size.vec())
{}
virtual ~ViewViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
::std::vector<c10::SymInt> size;
};
#define VIEW_DTYPE_VIEW_FUNC_AVAILABLE
struct ViewDtypeViewFunc : public torch::autograd::ViewFunc {
ViewDtypeViewFunc(at::ScalarType dtype) : dtype(dtype)
{}
virtual ~ViewDtypeViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
at::ScalarType dtype;
};
#define VIEW_AS_COMPLEX_VIEW_FUNC_AVAILABLE
struct ViewAsComplexViewFunc : public torch::autograd::ViewFunc {
ViewAsComplexViewFunc()
{}
virtual ~ViewAsComplexViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
#define VIEW_AS_REAL_VIEW_FUNC_AVAILABLE
struct ViewAsRealViewFunc : public torch::autograd::ViewFunc {
ViewAsRealViewFunc()
{}
virtual ~ViewAsRealViewFunc() override = default;
virtual std::vector<c10::SymInt> get_symints() const override;
virtual size_t num_symints() const override;
virtual std::vector<at::Tensor> get_tensors() const override;
virtual size_t num_tensors() const override;
virtual at::Tensor operator()(const at::Tensor&) const override;
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = ::std::nullopt,
std::optional<std::vector<at::Tensor>> = ::std::nullopt) const override;
protected:
virtual void set_symints(std::vector<c10::SymInt>) override;
virtual void set_tensors(std::vector<at::Tensor>) override;
private:
};
} // namespace torch::autograd::generated
```
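A minimal, illustrative sketch of driving one of these functors directly (in practice they are created and replayed inside autograd; this assumes a linked libtorch build):

```cpp
#include <torch/torch.h>
#include <torch/csrc/autograd/generated/ViewFuncs.h>

#include <memory>

int main() {
  at::Tensor base = torch::arange(6).reshape({2, 3});

  // Reify transpose(0, 1) as a functor and replay it on `base`.
  torch::autograd::generated::TransposeIntViewFunc transpose_fn(
      /*dim0=*/0, /*dim1=*/1);
  at::Tensor view = transpose_fn(base);  // same view as base.transpose(0, 1)

  // clone_and_set() copies the functor; with no arguments, the saved state
  // (here: dim0, dim1) is carried over unchanged.
  std::unique_ptr<torch::autograd::ViewFunc> copy = transpose_fn.clone_and_set();
  at::Tensor view2 = (*copy)(base);
  return 0;
}
```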
|
======================================================================================================================================================
SOURCE CODE FILE: python_functions.h
LINES: 1
SIZE: 0.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\generated\python_functions.h
ENCODING: utf-8
```h
#pragma once
#include <Python.h>
// @generated from ..\tools\autograd\templates/python_functions.h
// Python bindings for automatically generated autograd functions
namespace torch { namespace autograd { namespace generated {
void initialize_autogenerated_functions_0(PyObject* module);
void initialize_autogenerated_functions_1(PyObject* module);
void initialize_autogenerated_functions_2(PyObject* module);
void initialize_autogenerated_functions_3(PyObject* module);
void initialize_autogenerated_functions_4(PyObject* module);
inline void initialize_autogenerated_functions(PyObject* module) {
initialize_autogenerated_functions_0(module);
initialize_autogenerated_functions_1(module);
initialize_autogenerated_functions_2(module);
initialize_autogenerated_functions_3(module);
initialize_autogenerated_functions_4(module);
}
}}} // namespace torch::autograd::generated
```
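The five numbered initializers shard the generated bindings across multiple translation units to keep each one at a compilable size. A hedged sketch of the intended call site (assumed context: invoked once while the `torch._C` extension module is initialized; the helper name is hypothetical):

```cpp
#include <Python.h>

#include <torch/csrc/autograd/generated/python_functions.h>

// Hypothetical helper: registers every generated autograd-function Python
// binding on `module` by delegating to the aggregate initializer.
void register_autogenerated_functions(PyObject* module) {
  torch::autograd::generated::initialize_autogenerated_functions(module);
}
```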
|
=========================================================================================================================================================
SOURCE CODE FILE: python_return_types.h
LINES: 1
SIZE: 4.06 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\generated\python_return_types.h
ENCODING: utf-8
```h
#pragma once
#include <Python.h>
namespace torch {
namespace autograd {
namespace generated {
PyTypeObject* get__fake_quantize_per_tensor_affine_cachemask_tensor_qparams_structseq();
PyTypeObject* get__fused_moving_avg_obs_fq_helper_structseq();
PyTypeObject* get__linalg_det_structseq();
PyTypeObject* get__linalg_det_out_structseq();
PyTypeObject* get__linalg_eigh_structseq();
PyTypeObject* get__linalg_eigh_out_structseq();
PyTypeObject* get__linalg_slogdet_structseq();
PyTypeObject* get__linalg_slogdet_out_structseq();
PyTypeObject* get__linalg_solve_ex_structseq();
PyTypeObject* get__linalg_solve_ex_out_structseq();
PyTypeObject* get__linalg_svd_structseq();
PyTypeObject* get__linalg_svd_out_structseq();
PyTypeObject* get__lu_with_info_structseq();
PyTypeObject* get__scaled_dot_product_cudnn_attention_structseq();
PyTypeObject* get__scaled_dot_product_efficient_attention_structseq();
PyTypeObject* get__scaled_dot_product_flash_attention_structseq();
PyTypeObject* get__scaled_dot_product_flash_attention_for_cpu_structseq();
PyTypeObject* get__unpack_dual_structseq();
PyTypeObject* get_aminmax_structseq();
PyTypeObject* get_aminmax_out_structseq();
PyTypeObject* get_cummax_structseq();
PyTypeObject* get_cummax_out_structseq();
PyTypeObject* get_cummin_structseq();
PyTypeObject* get_cummin_out_structseq();
PyTypeObject* get_frexp_structseq();
PyTypeObject* get_frexp_out_structseq();
PyTypeObject* get_geqrf_out_structseq();
PyTypeObject* get_geqrf_structseq();
PyTypeObject* get_histogram_out_structseq();
PyTypeObject* get_histogram_structseq();
PyTypeObject* get_histogramdd_structseq();
PyTypeObject* get_kthvalue_structseq();
PyTypeObject* get_kthvalue_out_structseq();
PyTypeObject* get_linalg_cholesky_ex_structseq();
PyTypeObject* get_linalg_cholesky_ex_out_structseq();
PyTypeObject* get_linalg_eig_structseq();
PyTypeObject* get_linalg_eig_out_structseq();
PyTypeObject* get_linalg_eigh_structseq();
PyTypeObject* get_linalg_eigh_out_structseq();
PyTypeObject* get_linalg_inv_ex_structseq();
PyTypeObject* get_linalg_inv_ex_out_structseq();
PyTypeObject* get_linalg_ldl_factor_structseq();
PyTypeObject* get_linalg_ldl_factor_out_structseq();
PyTypeObject* get_linalg_ldl_factor_ex_structseq();
PyTypeObject* get_linalg_ldl_factor_ex_out_structseq();
PyTypeObject* get_linalg_lstsq_structseq();
PyTypeObject* get_linalg_lstsq_out_structseq();
PyTypeObject* get_linalg_lu_structseq();
PyTypeObject* get_linalg_lu_out_structseq();
PyTypeObject* get_linalg_lu_factor_structseq();
PyTypeObject* get_linalg_lu_factor_out_structseq();
PyTypeObject* get_linalg_lu_factor_ex_structseq();
PyTypeObject* get_linalg_lu_factor_ex_out_structseq();
PyTypeObject* get_linalg_qr_structseq();
PyTypeObject* get_linalg_qr_out_structseq();
PyTypeObject* get_linalg_slogdet_structseq();
PyTypeObject* get_linalg_slogdet_out_structseq();
PyTypeObject* get_linalg_solve_ex_structseq();
PyTypeObject* get_linalg_solve_ex_out_structseq();
PyTypeObject* get_linalg_svd_structseq();
PyTypeObject* get_linalg_svd_out_structseq();
PyTypeObject* get_lu_unpack_structseq();
PyTypeObject* get_lu_unpack_out_structseq();
PyTypeObject* get_max_structseq();
PyTypeObject* get_max_out_structseq();
PyTypeObject* get_median_structseq();
PyTypeObject* get_median_out_structseq();
PyTypeObject* get_min_structseq();
PyTypeObject* get_min_out_structseq();
PyTypeObject* get_mode_structseq();
PyTypeObject* get_mode_out_structseq();
PyTypeObject* get_nanmedian_structseq();
PyTypeObject* get_nanmedian_out_structseq();
PyTypeObject* get_qr_out_structseq();
PyTypeObject* get_qr_structseq();
PyTypeObject* get_slogdet_structseq();
PyTypeObject* get_slogdet_out_structseq();
PyTypeObject* get_sort_out_structseq();
PyTypeObject* get_sort_structseq();
PyTypeObject* get_svd_out_structseq();
PyTypeObject* get_svd_structseq();
PyTypeObject* get_topk_out_structseq();
PyTypeObject* get_topk_structseq();
PyTypeObject* get_triangular_solve_out_structseq();
PyTypeObject* get_triangular_solve_structseq();
} // namespace generated
void initReturnTypes(PyObject* module);
} // namespace autograd
} // namespace torch
```
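Each accessor returns the cached `PyStructSequence` type behind one of PyTorch's named-tuple return values (e.g. `torch.linalg.qr` returns a `(Q, R)` structseq). A hedged sketch of how binding code might use one (illustrative; `make_qr_result` is a hypothetical helper, not part of the header):

```cpp
#include <Python.h>

#include <torch/csrc/autograd/generated/python_return_types.h>

// Illustrative only: wrap already-created Q and R objects in the structseq
// type used for torch.linalg.qr. Steals the references passed in, per
// PyStructSequence_SET_ITEM semantics.
PyObject* make_qr_result(PyObject* q, PyObject* r) {
  PyTypeObject* type = torch::autograd::generated::get_linalg_qr_structseq();
  PyObject* result = PyStructSequence_New(type);
  if (result == nullptr) {
    return nullptr;
  }
  PyStructSequence_SET_ITEM(result, 0, q);
  PyStructSequence_SET_ITEM(result, 1, r);
  return result;
}
```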
|
========================================================================================================================================================
SOURCE CODE FILE: variable_factories.h
LINES: 1
SIZE: 55.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\generated\variable_factories.h
ENCODING: utf-8
```h
#pragma once
// @generated from ..\tools\autograd\templates/variable_factories.h
#include <ATen/core/Tensor.h>
#include <ATen/TracerMode.h>
#include <ATen/core/grad_mode.h>
#include <c10/util/ArrayRef.h>
#include <c10/core/MemoryFormat.h>
#include <torch/csrc/api/include/torch/detail/TensorDataContainer.h>
#include <torch/csrc/autograd/variable.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/from_blob.h>
#include <ATen/ops/_make_dep_token.h>
#include <ATen/ops/_cudnn_init_dropout_state.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/bartlett_window.h>
#include <ATen/ops/blackman_window.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_permuted.h>
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/empty_quantized.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/empty_strided.h>
#include <ATen/ops/eye.h>
#include <ATen/ops/full.h>
#include <ATen/ops/full_like.h>
#include <ATen/ops/from_file.h>
#include <ATen/ops/hann_window.h>
#include <ATen/ops/hamming_window.h>
#include <ATen/ops/kaiser_window.h>
#include <ATen/ops/linspace.h>
#include <ATen/ops/logspace.h>
#include <ATen/ops/ones.h>
#include <ATen/ops/ones_like.h>
#include <ATen/ops/scalar_tensor.h>
#include <ATen/ops/rand.h>
#include <ATen/ops/rand_like.h>
#include <ATen/ops/randint.h>
#include <ATen/ops/randint_like.h>
#include <ATen/ops/randn.h>
#include <ATen/ops/randn_like.h>
#include <ATen/ops/randperm.h>
#include <ATen/ops/range.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/_efficientzerotensor.h>
#include <ATen/ops/zeros_like.h>
#include <ATen/ops/_sparse_compressed_tensor_with_dims.h>
#include <ATen/ops/sparse_compressed_tensor.h>
#include <ATen/ops/sparse_csr_tensor.h>
#include <ATen/ops/sparse_csc_tensor.h>
#include <ATen/ops/sparse_bsr_tensor.h>
#include <ATen/ops/sparse_bsc_tensor.h>
#include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
#include <ATen/ops/_sparse_csr_tensor_unsafe.h>
#include <ATen/ops/_sparse_csc_tensor_unsafe.h>
#include <ATen/ops/_sparse_bsr_tensor_unsafe.h>
#include <ATen/ops/_sparse_bsc_tensor_unsafe.h>
#include <ATen/ops/sparse_coo_tensor.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims.h>
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
#include <ATen/ops/_to_copy.h>
#include <ATen/ops/tril_indices.h>
#include <ATen/ops/triu_indices.h>
#include <ATen/ops/normal.h>
#include <ATen/ops/fft_fftfreq.h>
#include <ATen/ops/fft_rfftfreq.h>
#endif
#include <functional>
#include <initializer_list>
#include <utility>
namespace torch {
/// NOTE: Currently `torch::tensor(...)` doesn't support mixed data types
/// (i.e. `torch::tensor({{bool, 2.0}})` doesn't work). We might be able to
/// support it in the future by iterating over all sub-lists to find
/// the largest data type that can represent all of the elements, or by using
/// variadic templates.
///
/// NOTE: C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / `std::vector` /
/// (nested) braced-init-list of floating-point types always produces a tensor of dtype
/// `torch::get_default_dtype()`, matching Python `torch.tensor` behavior.
///
/// NOTE: C++ `torch::tensor` with an integer type or an `at::ArrayRef` / `std::vector` /
/// (nested) braced-init-list of integer types always produces a tensor of dtype `at::kLong`
/// (aka. int64_t), matching Python `torch.tensor` behavior.
///
/// NOTE: The following dtypes are not supported by `torch::tensor` currently:
/// - `unsigned int`
/// - `unsigned long int`
/// - `unsigned long long int`
/// - `long long int`
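///
/// Example (illustrative):
/// ```
/// auto a = torch::tensor({1.5, 2.5});        // dtype: torch::get_default_dtype()
/// auto b = torch::tensor({1, 2});            // dtype: at::kLong
/// auto c = torch::tensor({1, 2}, at::kInt);  // explicit dtype override
/// ```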
inline at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const at::TensorOptions& options = {}) {
return autograd::make_variable(
// note: we remove the requires_grad setting from the TensorOptions because
// it is ignored anyways (and we actually have an assertion that it isn't set
// which would fail otherwise). We handle requires_grad explicitly here
// instead of passing it through to the kernel.
tensor_data_container.convert_to_tensor(options.requires_grad(::std::nullopt)),
options.requires_grad());
}
/// A generic deleter function.
using Deleter = std::function<void(void*)>;
using at::MemoryFormat;
/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor, `strides` the
/// stride in each dimension. The `deleter` function (a
/// `std::function<void(void*)>`) will be called on the `data` when the Tensor
/// data would normally be deallocated. The `TensorOptions` specify additional
/// configuration options for the returned tensor, such as what type to
/// interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntArrayRef sizes,
at::IntArrayRef strides,
const Deleter& deleter,
const at::TensorOptions& options = at::TensorOptions()) {
at::Tensor tensor = ([&]() {
at::AutoDispatchBelowAutograd guard; // TODO: remove
at::tracer::impl::NoTracerDispatchMode tracer_guard;
return at::from_blob(data, sizes, strides, deleter, options.requires_grad(::std::nullopt));
})();
return autograd::make_variable(tensor, options.requires_grad());
}
/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor, `strides` the
/// stride in each dimension. The `TensorOptions`
/// specify additional configuration options for the returned tensor, such as
/// what type to interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntArrayRef sizes,
at::IntArrayRef strides,
const at::TensorOptions& options = at::TensorOptions()) {
at::Tensor tensor = ([&]() {
at::AutoDispatchBelowAutograd guard; // TODO: remove
at::tracer::impl::NoTracerDispatchMode tracer_guard;
return at::from_blob(data, sizes, strides, options.requires_grad(::std::nullopt));
})();
return autograd::make_variable(tensor, options.requires_grad());
}
/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor. The `deleter`
/// (a `std::function<void(void*)>`) function will be called on the `data` when
/// the Tensor data would normally be deallocated. The `TensorOptions` specify
/// additional configuration options for the returned tensor, such as what type
/// to interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntArrayRef sizes,
const Deleter& deleter,
const at::TensorOptions& options = at::TensorOptions()) {
at::Tensor tensor = ([&]() {
at::AutoDispatchBelowAutograd guard; // TODO: remove
at::tracer::impl::NoTracerDispatchMode tracer_guard;
return at::from_blob(data, sizes, deleter, options.requires_grad(::std::nullopt));
})();
return autograd::make_variable(tensor, options.requires_grad());
}
/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor. The
/// `TensorOptions` specify additional configuration options for the returned
/// tensor, such as what type to interpret the `data` as.
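///
/// Example (illustrative); `t` aliases `buffer`, so `buffer` must outlive it:
/// ```
/// float buffer[4] = {1.f, 2.f, 3.f, 4.f};
/// at::Tensor t = torch::from_blob(
///     buffer, /*sizes=*/{2, 2}, at::TensorOptions().dtype(at::kFloat));
/// ```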
inline at::Tensor from_blob(
void* data,
at::IntArrayRef sizes,
const at::TensorOptions& options = at::TensorOptions()) {
at::Tensor tensor = ([&]() {
at::AutoDispatchBelowAutograd guard; // TODO: remove
at::tracer::impl::NoTracerDispatchMode tracer_guard;
return at::from_blob(data, sizes, options.requires_grad(::std::nullopt));
})();
return autograd::make_variable(tensor, options.requires_grad());
}
inline at::Tensor _make_dep_token(at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_make_dep_token(at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_cudnn_init_dropout_state(dropout, train, dropout_seed, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor arange(const at::Scalar & end, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::arange(end, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::arange(start, end, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::arange(start, end, step, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor bartlett_window(int64_t window_length, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::bartlett_window(window_length, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor bartlett_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::bartlett_window(window_length, periodic, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor blackman_window(int64_t window_length, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::blackman_window(window_length, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor blackman_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::blackman_window(window_length, periodic, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::empty(size, names, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty(at::IntArrayRef size, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::empty(size, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::empty_symint(size, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_permuted(at::IntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::empty_permuted(size, physical_layout, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_permuted_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::empty_permuted_symint(size, physical_layout, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, ::std::optional<at::MemoryFormat> memory_format = c10::MemoryFormat::Contiguous) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_empty_affine_quantized(size, at::TensorOptions(options).requires_grad(::std::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, ::std::optional<at::MemoryFormat> memory_format = c10::MemoryFormat::Contiguous) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_empty_affine_quantized_symint(size, at::TensorOptions(options).requires_grad(::std::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = c10::MemoryFormat::Contiguous) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = c10::MemoryFormat::Contiguous) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_empty_per_channel_affine_quantized_symint(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::empty_quantized(size, qtensor, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::empty_like(self, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::empty_strided(size, stride, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::empty_strided_symint(size, stride, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor eye(int64_t n, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::eye(n, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor eye_symint(c10::SymInt n, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::eye_symint(n, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor eye(int64_t n, int64_t m, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::eye(n, m, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor eye_symint(c10::SymInt n, c10::SymInt m, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::eye_symint(n, m, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::full(size, fill_value, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::full(size, fill_value, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::full_symint(size, fill_value, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::full_like(self, fill_value, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor from_file(c10::string_view filename, ::std::optional<bool> shared = ::std::nullopt, ::std::optional<int64_t> size = 0, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::from_file(filename, shared, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hann_window(int64_t window_length, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::hann_window(window_length, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::hann_window(window_length, periodic, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::hamming_window(window_length, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::hamming_window(window_length, periodic, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, beta, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::kaiser_window(window_length, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::kaiser_window(window_length, periodic, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::kaiser_window(window_length, periodic, beta, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::ones(size, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones(at::IntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::ones(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::ones_symint(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones_like(const at::Tensor & self, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::ones_like(self, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor scalar_tensor(const at::Scalar & s, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::scalar_tensor(s, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::rand(size, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::rand_symint(size, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::rand(size, generator, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::rand_symint(size, generator, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::rand(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::rand_symint(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::rand(size, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::rand_symint(size, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::rand_like(self, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint(high, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint_symint(high, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint(high, size, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint_symint(high, size, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint(low, high, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint_symint(low, high, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint(low, high, size, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint_symint(low, high, size, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint_like(self, high, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint_like_symint(self, high, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint_like(self, low, high, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randint_like_symint(self, low, high, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randn(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randn_symint(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randn(size, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randn_symint(size, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randn(size, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randn_symint(size, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randn(size, generator, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, ::std::optional<at::Generator> generator, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randn_symint(size, generator, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randn_like(self, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randperm(int64_t n, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randperm(n, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randperm_symint(c10::SymInt n, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randperm_symint(n, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randperm(int64_t n, ::std::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randperm(n, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randperm_symint(c10::SymInt n, ::std::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::randperm_symint(n, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step = 1, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::range(start, end, step, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::range(start, end, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros(at::IntArrayRef size, ::std::optional<at::DimnameList> names, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::zeros(size, names, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_efficientzerotensor(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_efficientzerotensor_symint(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::zeros(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::zeros_symint(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros_like(const at::Tensor & self, at::TensorOptions options = {}, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::zeros_like(self, at::TensorOptions(options).requires_grad(::std::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_compressed_tensor_with_dims(int64_t nnz, int64_t dense_dim, at::IntArrayRef size, at::IntArrayRef blocksize, at::ScalarType index_dtype, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_compressed_tensor_with_dims(nnz, dense_dim, size, blocksize, index_dtype, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_compressed_tensor_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_compressed_tensor_symint(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_compressed_tensor_unsafe_symint(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_coo_tensor(size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options = {}, ::std::optional<bool> is_coalesced = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_coo_tensor(indices, values, at::TensorOptions(options).requires_grad(::std::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}, ::std::optional<bool> is_coalesced = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::sparse_coo_tensor(indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}, ::std::optional<bool> is_coalesced = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_coo_tensor_unsafe(indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options = {}, ::std::optional<bool> is_coalesced = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_coo_tensor_unsafe_symint(indices, values, size, at::TensorOptions(options).requires_grad(::std::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, ::std::optional<bool> is_coalesced = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(::std::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, ::std::optional<bool> is_coalesced = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors_symint(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(::std::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options = {}, bool non_blocking = false, ::std::optional<at::MemoryFormat> memory_format = ::std::nullopt) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::_to_copy(self, at::TensorOptions(options).requires_grad(::std::nullopt), non_blocking, memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::tril_indices(row, col, offset, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::triu_indices(row, col, offset, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor normal(double mean, double std, at::IntArrayRef size, ::std::optional<at::Generator> generator = ::std::nullopt, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::normal(mean, std, size, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, ::std::optional<at::Generator> generator = ::std::nullopt, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::normal_symint(mean, std, size, generator, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor fft_fftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::fft_fftfreq(n, d, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor fft_rfftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) {
at::AutoDispatchBelowADInplaceOrView guard;
return autograd::make_variable(at::fft_rfftfreq(n, d, at::TensorOptions(options).requires_grad(::std::nullopt)), /*requires_grad=*/options.requires_grad());
}
} // namespace torch
```
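A minimal usage sketch for the factory wrappers above (not part of the header; it assumes a translation unit linked against libtorch, and the shapes and values are illustrative):
```cpp
#include <torch/torch.h>

int main() {
  // Each wrapper calls the ATen op with requires_grad stripped from the
  // options, then wraps the result in a Variable whose requires_grad flag
  // comes from the options the caller passed.
  auto w = torch::randn({3, 3},
                        torch::dtype(torch::kFloat32).requires_grad(true));
  auto idx = torch::randint(/*high=*/10, {5});         // dtype defaults to kLong
  auto win = torch::hann_window(/*window_length=*/16); // periodic by default
  auto grid = torch::linspace(/*start=*/0.0, /*end=*/1.0, /*steps=*/5);

  // Only w participates in autograd here.
  auto loss = (w * w).sum();
  loss.backward();
  return 0;
}
```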
|
=====================================================================================================================================
SOURCE CODE FILE: grad_mode.h
LINES: 1
SIZE: 0.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\grad_mode.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/grad_mode.h>
#include <torch/csrc/Export.h>
namespace torch::autograd {
using GradMode = at::GradMode;
using AutoGradMode = at::AutoGradMode;
} // namespace torch::autograd
```
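A small usage sketch (mine, not from the header): `AutoGradMode` is an RAII guard that toggles grad mode for the current thread, much like `torch.no_grad()` / `torch.enable_grad()` in Python:
```cpp
#include <torch/torch.h>

torch::Tensor inference_step(const torch::Tensor& input,
                             const torch::Tensor& weight) {
  // Disable gradient tracking for the duration of this scope.
  torch::autograd::AutoGradMode guard(/*enabled=*/false);
  auto out = torch::mm(input, weight);  // out.requires_grad() == false
  return out;
  // The previous grad mode is restored when `guard` is destroyed.
}
```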
|
======================================================================================================================================
SOURCE CODE FILE: graph_task.h
LINES: 1
SIZE: 9.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\graph_task.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ThreadLocalState.h>
#include <ATen/core/Tensor.h>
#include <c10/util/ThreadLocal.h>
#include <torch/csrc/autograd/input_buffer.h>
#include <torch/csrc/autograd/utils/warnings.h>
#include <vector>
namespace torch::autograd {
using edge_list = std::vector<Edge>;
struct ReadyQueue;
static constexpr int NO_DEVICE = -2;
static constexpr int CPU_DEVICE = -1;
// GraphTask holds metadata needed for a single execution of backward()
struct GraphTask : std::enable_shared_from_this<GraphTask> {
std::atomic<uint64_t> outstanding_tasks_{0};
// Indicates if an error occurred while executing any task. When this is
// true, it signals all threads to stop executing.
std::atomic_bool has_error_{false};
std::atomic_bool future_completed_{false};
// It is safe to read keep_graph_ without synchronization
bool keep_graph_;
// To protect reads/writes to not_ready_, dependencies_, captured_vars_,
// has_error_, future_result_, cpu_ready_queue_, and leaf_streams.
std::mutex mutex_;
std::unordered_map<Node*, InputBuffer> not_ready_;
std::unordered_map<Node*, int> dependencies_;
// Records the nodes that are in the graph
std::unordered_set<Node*> nodes_in_graph_;
c10::SmallVector<Node*, 4> graph_roots_;
// Note [Exec info]
// Exec info is created for each GraphTask, which allows filtering paths on
// the graph that are not needed. Its semantics are a bit involved. If it's
// empty, it means the task is run in a "default" mode, which means that all
// next_edges we encounter should get executed. If it's not empty, only
// functions that have an entry and this entry has needed == True should be
// executed. exec_info is only empty when the graph is executed via
// .backward() and the inputs parameter is not passed. Otherwise, when
// executed through .grad(), or when inputs arg is specified for .backward(),
// exec_info will be non-empty.
//
struct ExecInfo {
struct Capture {
Capture(const Capture&) = delete;
Capture(Capture&&) = default;
Capture& operator=(const Capture&) = delete;
Capture& operator=(Capture&&) = default;
~Capture() = default;
Capture(int input_idx, int output_idx)
: input_idx_(input_idx), output_idx_(output_idx) {}
int input_idx_; // within Node inputs
int output_idx_; // within the output vector of a GraphTask
// This hook will be executed after a grad is captured. The captured
// grad will be replaced by the return value of the hook.
struct GradCaptureHook {
virtual ~GradCaptureHook() = default;
virtual at::Tensor operator()(const at::Tensor& grad) = 0;
};
// NOTE [Deprecated capture hooks]
//
// The current status of capture hooks is that we continue to support
// the single usage of it by distributed in the dist_engine. If anyone
// else needs to use it for other purposes, they should file an issue.
//
// Capture hooks were originally created because there did not exist
// any way to register pre/post hooks to a grad_fn such that they
// would still be executed even if that is the grad_fn of a Tensor
// passed as the input= of .grad. As far as I know, only dist_engine uses
// this hook.
//
// However, there are other alternatives today like tensor hooks that can
// replace the usage that originally motivated its creation. Also,
// Capture hooks are an outlier in terms of the types of hook that
// autograd offers in how it is registered and behaves, e.g. it is a hook
// registered not to the graph, but to a particular graph_task! This makes
// it a burden to maintain.
//
// It would be very nice to clean up/do a migration from pre/post
// hooks used in distributed to use tensor hooks, but for now we just
// mark this method as deprecated to prevent additional usage.
//
// If you still think you really need to capture hooks, please file an
// issue (and tag autograd).
const std::vector<std::unique_ptr<GradCaptureHook>>&
DO_NOT_USE_DEPRECATED_get_capture_hooks() const {
return hooks_;
}
// See NOTE [deprecated capture hooks]
void DO_NOT_USE_DEPRECATED_register_capture_hook(
std::unique_ptr<GradCaptureHook> hook) {
hooks_.push_back(std::move(hook));
}
private:
// The hooks will be called one by one in the order as they were added.
// The input grad of a hook will be the output of its preceding hook. The
// first hook will take the captured grad as the input. The output of the
// last hook will replace the captured grad.
std::vector<std::unique_ptr<GradCaptureHook>> hooks_;
};
bool should_execute() const {
return needed_ || captures_;
}
bool needed_ = false;
std::unique_ptr<std::vector<Capture>> captures_;
};
// exec_info_ is safe to read without synchronization
std::unordered_map<Node*, ExecInfo> exec_info_;
// Captured variables are the grads that we capture and return to the user. After
// execution of the GraphTask is completed, the captured_vars_ are moved
// out of the GraphTask and are no longer valid.
std::vector<Variable> captured_vars_;
// Note: this field is not ready to be used until the proper
// `thread_locals_.set_grad_mode()` call in the constructor.
at::ThreadLocalState thread_locals_ = at::ThreadLocalState();
std::unordered_set<c10::Stream> leaf_streams;
// Per-device current streams of the execute() that called this GraphTask.
// These will be synced with leaf_streams in exec_post_processing.
std::vector<std::optional<c10::Stream>> caller_current_streams_;
// Collects caller_current_streams_ for the accelerator device.
void stash_current_streams();
void init_to_execute(
Node& graph_root,
const edge_list& outputs,
bool accumulate_grad,
uint64_t min_topo_nr);
// The value of worker_device in the thread that created this task.
// See Note [Reentrant backwards]
// Safe to read owner_ and reentrant_depth_ without synchronization
int owner_;
// The number of parent graph tasks for this graph task
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const int reentrant_depth_;
bool can_checkpoint() const {
return exec_info_.empty();
}
// check if the GraphTask is completed or not
bool completed();
// mark the graph task as completed and trigger post processing
void mark_as_completed_and_run_post_processing();
// Set an appropriate exception on this graph_task for an error encountered
// while running the provided function.
void set_exception(std::exception_ptr eptr, const std::shared_ptr<Node>& fn);
// Set an appropriate exception on this graph_task for an error encountered
// while running the provided function, but don't signal completion on
// 'future_result_' right away. The caller needs to explicitly mark
// 'future_result_' completed with an appropriate exception.
void set_exception_without_signal(const std::shared_ptr<Node>& fn);
// Whether or not to stop execution for this GraphTask when an error is
// encountered. When set to true, this would cause Engine::execute() to throw
// an exception as soon as the autograd engine receives an exception.
bool exit_on_error_;
// CPU threads are dedicated to processing CPU work for the backward they
// invoked. So any given graph task maintains its own cpu_ready_queue_ where
// you should send work for it to be done. We memoize the cpu_ready_queue_ per
// GraphTask so that we know which ready queue we should push to if we are on
// a device thread (i.e. GPU) but the next NodeTask should be run on CPU.
std::shared_ptr<ReadyQueue> cpu_ready_queue_;
// Future representing the completion of the graph task. Notified when all
// tasks are done.
c10::intrusive_ptr<at::ivalue::Future> future_result_;
// Final callbacks installed during execution of this GraphTask
std::vector<std::function<void()>> final_callbacks_;
// To protect reads and writes to final_callbacks_. Intentionally no reusing
// mutex_ as the two are protecting different data structures.
std::mutex final_callbacks_lock_;
utils::DelayWarningHandler warning_handler_;
uint64_t id_;
GraphTask(
bool keep_graph,
bool grad_mode,
int reentrant_depth,
std::shared_ptr<ReadyQueue> cpu_ready_queue,
c10::SmallVector<Node*, 4> graph_roots,
bool exit_on_error = false);
private:
// run GraphTask post processing
void exec_post_processing();
};
// The guard that sets and restores current_graph_task.
class GraphTaskGuard {
public:
explicit GraphTaskGuard(std::shared_ptr<GraphTask> graph_task);
~GraphTaskGuard();
void restore_current_graph_task();
private:
std::shared_ptr<GraphTask> last_graph_task_;
};
TORCH_API const std::unordered_map<Node*, GraphTask::ExecInfo>*
get_current_graph_task_exec_info();
TORCH_API const std::unordered_set<Node*>*
get_current_graph_task_nodes_in_graph();
TORCH_API bool get_current_graph_task_keep_graph();
TORCH_API std::vector<Node*> get_current_graph_task_execution_order();
TORCH_API int get_current_graph_task_id();
void add_node_to_current_graph_task_exec_info(Node* fn);
} // namespace torch::autograd
```
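To make the capture-hook note concrete, here is a hypothetical sketch of a `GradCaptureHook`; as the note says, only dist_engine realistically uses this, and registration goes through the deliberately awkward deprecated methods:
```cpp
#include <torch/csrc/autograd/graph_task.h>

#include <memory>

namespace {
// Hypothetical hook: the value returned here replaces the captured grad.
struct ScaleGradHook final
    : torch::autograd::GraphTask::ExecInfo::Capture::GradCaptureHook {
  at::Tensor operator()(const at::Tensor& grad) override {
    return grad * 0.5;
  }
};
} // namespace

// Assuming `capture` is an entry owned by some GraphTask's exec_info_:
void install_hook(torch::autograd::GraphTask::ExecInfo::Capture& capture) {
  capture.DO_NOT_USE_DEPRECATED_register_capture_hook(
      std::make_unique<ScaleGradHook>());
}
```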
|
========================================================================================================================================
SOURCE CODE FILE: input_buffer.h
LINES: 1
SIZE: 1.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\input_buffer.h
ENCODING: utf-8
```h
#pragma once
// The InputBuffer class accumulates a list of Variables for use by a
// function. It implements logic to avoid modifying the passed
// values in-place (adding an input twice will accumulate the result).
// This behaviour is needed and used only in backward graphs.
#include <utility>
#include <vector>
#include <c10/core/Stream.h>
#include <torch/csrc/autograd/variable.h>
#include <optional>
namespace torch::autograd {
struct InputBuffer {
explicit InputBuffer(size_t size) : buffer(size) {}
InputBuffer(const InputBuffer& other) = delete;
InputBuffer(InputBuffer&& other) = default;
explicit InputBuffer(variable_list&& inputs) : buffer(std::move(inputs)) {}
InputBuffer& operator=(InputBuffer&& other) = default;
// Accumulates the variable at a specified index.
// The optional CUDA streams determine which stream the accumulation
// is run on and how the addition is synchronized.
TORCH_API void add(
size_t pos,
Variable&& var,
const std::optional<c10::Stream>& opt_producer_stream,
const std::optional<c10::Stream>& opt_consumer_stream);
Variable operator[](size_t pos) {
return buffer[pos];
}
// Returns the inputs as a list of variables. Destroys the given InputBuffer.
static std::vector<Variable> variables(InputBuffer&& g);
std::vector<Variable> buffer;
};
} // namespace torch::autograd
```
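A brief sketch of the accumulation behavior (my example; CPU tensors, no stream synchronization, so both stream arguments are nullopt):
```cpp
#include <torch/csrc/autograd/input_buffer.h>
#include <torch/torch.h>

#include <optional>
#include <utility>

void demo() {
  using torch::autograd::InputBuffer;
  InputBuffer buffer(/*size=*/2);

  // Adding to the same slot twice accumulates instead of overwriting.
  buffer.add(/*pos=*/0, torch::ones({2}),
             /*opt_producer_stream=*/std::nullopt,
             /*opt_consumer_stream=*/std::nullopt);
  buffer.add(/*pos=*/0, torch::ones({2}), std::nullopt, std::nullopt);

  auto vars = InputBuffer::variables(std::move(buffer));
  // vars[0] is a tensor of 2s; vars[1] was never filled and is undefined.
}
```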
|
==========================================================================================================================================
SOURCE CODE FILE: input_metadata.h
LINES: 1
SIZE: 3.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\input_metadata.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ExpandUtils.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/core/Stream.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/util/DimVector.h>
#include <c10/util/Exception.h>
#include <c10/util/SmallVector.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/zeros.h>
#endif
namespace torch::autograd {
using SymIntSmallVec = c10::SmallVector<c10::SymInt, c10::kDimVectorStaticSize>;
using MetadataShape = std::variant<SymIntSmallVec, at::Tensor>;
/**
* Records TensorOptions, shape of the tensor, whether or not the Python
* dispatch key is set (tensor subclass), and, where applicable, the stream the
* corresponding operation took place on.
*
* If is_valid() is false, then the corresponding input is not used and may be
* an undefined tensor.
*/
struct TORCH_API InputMetadata {
InputMetadata() = default;
InputMetadata(
const at::TensorOptions& options,
MetadataShape input_shape,
bool is_tensor_subclass,
bool is_nested);
InputMetadata(const at::Tensor& t);
const at::TensorOptions& options() const {
return options_;
}
caffe2::TypeMeta dtype() const {
return options_.dtype();
}
at::Device device() const {
return options_.device();
}
at::Layout layout() const {
return options_.layout();
}
c10::Stream stream() const {
return stream_;
}
bool is_tensor_subclass() const {
return is_tensor_subclass_;
}
at::Tensor zeros_like() const;
bool is_same_shape(const at::Tensor& grad) const;
bool is_expandable_to_shape(const at::Tensor& grad) const;
at::Tensor reduce_grad(at::Tensor& grad) const;
at::Tensor maybe_reduce(
const size_t index,
at::Tensor grad,
const std::function<std::string(const std::string&)>& format_error) const;
std::stringstream incompatible_shape_error_message(
const size_t index,
const at::Tensor& grad) const;
bool was_default_constructed() const {
return was_default_constructed_;
}
bool is_cpp_nested_tensor() const;
bool is_nested_tensor() const {
return is_nested_;
}
c10::SymIntArrayRef shape_as_dim_vector() const;
// Danger: not thread safe, caller must protect with lock
SymIntSmallVec& mutable_shape_as_dim_vector();
private:
at::Tensor shape_as_tensor() const;
bool is_nestedness_same(const at::Tensor& grad) const;
bool maybe_expandable_to(const at::Tensor& grad) const;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const at::TensorOptions options_;
MetadataShape shape_;
c10::Stream stream_ = c10::Stream(c10::Stream::Default::DEFAULT, device());
bool is_tensor_subclass_ = false;
bool is_nested_ = false;
bool was_default_constructed_ = true;
};
} // namespace torch::autograd
```
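A short sketch of the tensor-based constructor and the shape queries (my example; this is an internal header, so the exact reduction semantics are best checked against the autograd engine's use of `maybe_reduce`):
```cpp
#include <torch/csrc/autograd/input_metadata.h>
#include <torch/torch.h>

void demo() {
  auto t = torch::zeros({2, 3});
  torch::autograd::InputMetadata meta(t);  // records options, shape, flags

  bool same = meta.is_same_shape(torch::ones({2, 3}));              // true
  bool expandable = meta.is_expandable_to_shape(torch::ones({4}));  // false here
  auto z = meta.zeros_like();  // zeros with the recorded shape and options
  (void)same; (void)expandable; (void)z;
}
```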
|
================================================================================================================================================
SOURCE CODE FILE: jit_decomp_interface.h
LINES: 1
SIZE: 1.80 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\jit_decomp_interface.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/core/function_schema.h>
#include <c10/macros/Export.h>
// NOTE: [Jit Decomposition Interface]
//
// For some context of why we need this at all, see NOTE: [forward-mode AD
// decompositions mechanism]
//
// Introducing that mechanism from the NOTE is problematic because:
// - it relies on TorchScript, so now VariableTypeX.cpp depends on TorchScript.
// - there exist internal builds like lite_trainer, which depend on VariableType
// but do not depend on TorchScript.
//
// For internal builds like lite_trainer builds to pass, and for OSS builds that
// do depend on TorchScript to still support the forward AD decomp mechanism, we
// implement a PImpl pattern to avoid a static dependency in favor of a dynamic
// one
// - during static initialization time, if the library is built with TorchScript,
// setJitDecompImpl is called in decomposition_registry.cpp, setting a global
// ptr to the impl
// - when the program is run, if getJitDecompImpl returns a non-null ptr, we can
// carry on normally; otherwise we gracefully error out
//
// For extra context, see VariableHooksInterface.h, where a similar technique
// is used
namespace torch::autograd::impl {
struct TORCH_API JitDecompInterface {
virtual ~JitDecompInterface() = default;
virtual bool has_jit_decomposition(
const c10::FunctionSchema& schema) const = 0;
virtual void run_jit_decomposition(
const c10::OperatorHandle& op,
jit::Stack* stack) const = 0;
};
TORCH_API void setJitDecompImpl(JitDecompInterface* impl);
TORCH_API JitDecompInterface* getJitDecompImpl();
struct TORCH_API JitDecompRegisterer {
explicit JitDecompRegisterer(JitDecompInterface* impl) {
setJitDecompImpl(impl);
}
};
} // namespace torch::autograd::impl
```
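A sketch of the PImpl registration the note describes; the impl class and its behavior are hypothetical stand-ins for what `decomposition_registry.cpp` does in the TorchScript-enabled build:
```cpp
#include <torch/csrc/autograd/jit_decomp_interface.h>

namespace {
// Hypothetical impl; a real one would consult the TorchScript
// decomposition registry instead of stubbing everything out.
struct StubJitDecompImpl final : torch::autograd::impl::JitDecompInterface {
  bool has_jit_decomposition(
      const c10::FunctionSchema& /*schema*/) const override {
    return false;  // report that no decomposition is available
  }
  void run_jit_decomposition(
      const c10::OperatorHandle& /*op*/,
      torch::jit::Stack* /*stack*/) const override {
    TORCH_INTERNAL_ASSERT(false, "no decompositions registered");
  }
};

StubJitDecompImpl impl;
// Runs at static initialization time; later, callers that find
// getJitDecompImpl() == nullptr gracefully error out instead.
torch::autograd::impl::JitDecompRegisterer registerer(&impl);
} // namespace
```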
|
====================================================================================================================================
SOURCE CODE FILE: profiler.h
LINES: 1
SIZE: 0.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\profiler.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/profiler_kineto.h>
#include <torch/csrc/autograd/profiler_legacy.h>
```
|
===========================================================================================================================================
SOURCE CODE FILE: profiler_kineto.h
LINES: 1
SIZE: 8.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\profiler_kineto.h
ENCODING: utf-8
```h
#pragma once
#include <string>
#include <vector>
#include <torch/csrc/profiler/api.h>
#include <torch/csrc/profiler/events.h>
#include <torch/csrc/profiler/stubs/base.h>
#include <torch/csrc/profiler/util.h>
namespace torch {
namespace profiler::impl {
struct Result;
namespace kineto {
struct ActivityTraceWrapper;
} // namespace kineto
} // namespace profiler::impl
namespace autograd::profiler {
using experimental_event_t = std::shared_ptr<torch::profiler::impl::Result>;
using extra_meta_t = std::unordered_map<std::string, std::string>;
struct TORCH_API KinetoEvent {
KinetoEvent(
const std::shared_ptr<const torch::profiler::impl::Result>&,
const bool verbose);
uint64_t startThreadId() const;
uint64_t endThreadId() const;
uint8_t activityType() const;
uint64_t fwdThreadId() const;
bool hasShapes() const;
const c10::ArrayRef<std::vector<int64_t>> shapes() const;
bool hasTypes() const;
const c10::ArrayRef<std::string> dtypes() const;
bool hasConcreteInputs() const;
const c10::ArrayRef<c10::IValue> concreteInputs() const;
bool hasKwinputs() const;
const std::unordered_map<std::string, c10::IValue> kwinputs() const;
uint64_t flops() const;
int64_t sequenceNr() const;
bool hasStack() const;
const c10::ArrayRef<std::string> stack() const;
uint8_t scope() const;
bool hasModuleHierarchy() const;
const c10::ArrayRef<std::string> moduleHierarchy() const;
int64_t debugHandle() const;
std::string name() const;
std::string overload_name() const;
c10::DeviceType deviceType() const;
int deviceIndex() const;
int64_t nBytes() const;
uint64_t startNs() const;
uint64_t endNs() const;
uint64_t durationNs() const;
bool isAsync() const;
uint64_t correlationId() const;
uint64_t linkedCorrelationId() const;
int64_t deviceResourceId() const;
std::string backend() const;
bool isPythonFunction() const;
int64_t cudaElapsedUs() const;
int64_t privateuse1ElapsedUs() const;
void getPerfEventCounters(torch::profiler::perf_counters_t&) const;
extra_meta_t extraMeta() const;
private:
torch::profiler::impl::ProfilerVoidEventStub fallbackStart() const;
torch::profiler::impl::ProfilerVoidEventStub fallbackEnd() const;
std::shared_ptr<const torch::profiler::impl::Result> result_;
std::vector<std::string> python_stack_;
// Copy fields from result so we can return ArrayRefs.
std::vector<std::vector<int64_t>> shapes_;
std::vector<std::string> dtypes_;
std::vector<c10::IValue> concrete_inputs_;
std::unordered_map<std::string, c10::IValue> kwinputs_;
};
// Consolidating events returned directly from Kineto
// with events manually created by us (e.g. start/stop marks,
// memory allocation events)
struct TORCH_API ProfilerResult {
ProfilerResult();
ProfilerResult(
uint64_t start_time,
std::vector<KinetoEvent> events,
std::unique_ptr<torch::profiler::impl::kineto::ActivityTraceWrapper>&&
trace,
std::vector<experimental_event_t>&& event_tree);
~ProfilerResult();
uint64_t trace_start_ns() const {
return trace_start_ns_;
}
const std::vector<KinetoEvent>& events() const {
return events_;
}
const std::vector<experimental_event_t>& event_tree() const {
return event_tree_;
}
void save(const std::string& path);
private:
uint64_t trace_start_ns_ = 0;
std::vector<KinetoEvent> events_;
std::unique_ptr<torch::profiler::impl::kineto::ActivityTraceWrapper> trace_;
std::vector<experimental_event_t> event_tree_;
};
/*
 * This API is used by backends to record the latency of events that
 * happened in the backend but were not visible to the PyTorch runtime.
 * For example, if part of the model is lowered to a DSP backend, the
 * execution of that part of the model is delegated to the backend.
 * When the backend finishes execution, it can optionally provide profiling
 * information (latency only at the moment) corresponding to the different
 * operators that were executed in the backend.
 * When such events are recorded by the backend using this API, the event
 * records are collected by the active Kineto profiler. If no Kineto
 * profiler is active, the event is ignored.
 * This gives us a way to generate all the profiling information for a
 * model regardless of where the model (or part of it) executed.
* @param start_time_us: start time in us of the event
* @param end_time_us: end time in us of the event
* @param debug_handle: debug handle to correlate this event/op with
* model level module/source information
* @param scope: scope of the event, e.g. LITE_INTERPRETER, RECORD_FN etc.
* @param event_name: name of the event, e.g. op name
* @param backend_name: name of the backend where the event took place.
*/
TORCH_API void reportBackendEventToActiveKinetoProfiler(
const int64_t start_time_us,
const int64_t end_time_us,
const int64_t debug_handle,
const at::RecordScope scope,
const std::string& event_name,
const std::string& backend_name);
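// Hedged example call (values illustrative): a delegate backend reporting
// one op's latency to the active Kineto profiler:
//
//   reportBackendEventToActiveKinetoProfiler(
//       /*start_time_us=*/begin_us,
//       /*end_time_us=*/end_us,
//       /*debug_handle=*/handle,
//       /*scope=*/at::RecordScope::LITE_INTERPRETER,
//       /*event_name=*/"aten::conv2d",
//       /*backend_name=*/"my_dsp_backend");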
TORCH_API void enableProfiler(
const torch::profiler::impl::ProfilerConfig& config,
const std::set<torch::profiler::impl::ActivityType>& activities,
const std::unordered_set<at::RecordScope>& scopes = {});
/*
 * Same as enableProfiler but with a callback to do post-processing of
 * KinetoEvents.
 * enableProfilerWithEventPostProcess enables the profiler to capture the
 * specified activities, with the specified RecordFunction scopes, if any.
 * Additionally, it takes a functor that does in-place post-processing of
 * events, e.g. populating stack trace or module hierarchy information
 * lazily using debug_handle.
 * An example use is the lite interpreter, which has a recording scope of
 * LITE_INTERPRETER. In this case the lite interpreter runtime records debug
 * handles in RecordFunction, along with other information. Debug handles are
 * eventually passed down to KinetoEvent and recorded as part of the event.
 * KinetoEdgeCPUProfiler, in torch/csrc/jit/mobile/profiler_edge.cpp, enables
 * the profiler with a post-processing callback, via
 * enableProfilerWithEventPostProcess, that takes these debug handles and
 * generates stack trace and module hierarchy information once profiling is
 * done.
*/
using post_process_t = std::function<void(
/*debug_handle */ int64_t,
/*jit_stack */ std::vector<std::string>&,
/*jit_modules */ std::vector<std::string>&)>;
TORCH_API void enableProfilerWithEventPostProcess(
const torch::profiler::impl::ProfilerConfig& config,
const std::set<torch::profiler::impl::ActivityType>& activities,
post_process_t&& cb,
const std::unordered_set<at::RecordScope>& scopes = {});
TORCH_API std::unique_ptr<ProfilerResult> disableProfiler();
TORCH_API void prepareProfiler(
const torch::profiler::impl::ProfilerConfig& config,
const std::set<torch::profiler::impl::ActivityType>& activities);
TORCH_API void toggleCollectionDynamic(
const bool enable,
const std::set<torch::profiler::impl::ActivityType>& activities);
/**
* When a C++ thread really has no control over how the profiler was enabled,
* for example, by some unreachable Python code, it can call these functions
* to test/join/unjoin itself into the collection set of a profiler, if any.
* Without calling these functions, the symptom may be "not seeing GPU events
 * from some child C++ threads". Here is an example of how to use them:
*
* using namespace torch::autograd::profiler;
* bool enabled = isProfilerEnabledInMainThread();
* if (enabled != saved_enabled_state) {
* if (enabled) {
* enableProfilerInChildThread();
* } else {
* disableProfilerInChildThread();
* }
* saved_enabled_state = enabled;
* }
*/
TORCH_API bool isProfilerEnabledInMainThread();
TORCH_API void enableProfilerInChildThread();
TORCH_API void disableProfilerInChildThread();
} // namespace autograd::profiler
namespace profiler::impl {
// Experimental.
TORCH_API void _reportVulkanEventToProfiler(vulkan_id_t id);
} // namespace profiler::impl
} // namespace torch
```
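
For orientation, here is a hedged sketch of driving this API end to end. It assumes a build with Kineto support; `ProfilerState::KINETO` and `ActivityType::CPU` are taken from torch/csrc/profiler/api.h rather than guaranteed by this header, and the file name is illustrative.

```cpp
// Minimal sketch: collect a CPU trace around a workload and save it.
#include <torch/csrc/autograd/profiler_kineto.h>

#include <memory>
#include <set>

void profile_workload() {
  using namespace torch::autograd::profiler;
  using torch::profiler::impl::ActivityType;
  using torch::profiler::impl::ProfilerConfig;
  using torch::profiler::impl::ProfilerState;

  ProfilerConfig config{ProfilerState::KINETO}; // assumed enumerator
  std::set<ActivityType> activities{ActivityType::CPU};

  prepareProfiler(config, activities);
  enableProfiler(config, activities);
  // ... run the code to be profiled ...
  std::unique_ptr<ProfilerResult> result = disableProfiler();
  result->save("trace.json"); // open in chrome://tracing or Perfetto
}
```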
|
===========================================================================================================================================
SOURCE CODE FILE: profiler_legacy.h
LINES: 1
SIZE: 10.82 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\profiler_legacy.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include <torch/csrc/Export.h>
#include <torch/csrc/profiler/api.h>
#include <torch/csrc/profiler/stubs/base.h>
#include <torch/csrc/profiler/util.h>
namespace torch::autograd::profiler {
enum class C10_API_ENUM EventKind : uint16_t {
Mark,
PushRange,
PopRange,
MemoryAlloc,
};
// To be deprecated, once we switch to Kineto profiling
struct TORCH_API LegacyEvent {
LegacyEvent(
EventKind kind,
at::StringView name,
uint16_t thread_id,
bool record_cuda,
at::RecordFunctionHandle handle = 0,
std::vector<std::vector<int64_t>>&& shapes = {},
int64_t node_id = -1,
bool is_async = false)
: name_(std::move(name)),
kind_(kind),
thread_id_(thread_id),
handle_(handle),
shapes_(std::move(shapes)),
node_id_(node_id),
is_async_(is_async) {
record(record_cuda);
}
// Constructor to be used in conjunction with LegacyEvent::fromIValue.
LegacyEvent(
EventKind kind,
at::StringView name,
uint16_t thread_id,
at::RecordFunctionHandle handle,
std::vector<std::vector<int64_t>>&& shapes,
int64_t node_id,
bool is_remote,
int64_t cpu_memory_usage,
int64_t cpu_ns,
bool cuda_recorded,
int64_t cuda_memory_usage = 0,
c10::DeviceIndex device = -1,
double cuda_us = -1)
: cpu_ns_(cpu_ns),
name_(std::move(name)),
kind_(kind),
thread_id_(thread_id),
handle_(handle),
shapes_(std::move(shapes)),
cpu_memory_usage_(cpu_memory_usage),
cuda_memory_usage_(cuda_memory_usage),
device_(device),
node_id_(node_id),
is_remote_(is_remote),
cuda_us_(static_cast<int64_t>(cuda_us)) {
// Sanity check values that were deserialized
TORCH_INTERNAL_ASSERT(cpu_ns_ > 0);
if (cuda_recorded) {
TORCH_INTERNAL_ASSERT(device_ >= 0);
TORCH_INTERNAL_ASSERT(cuda_us_ >= 0);
}
}
  // Returns IValues corresponding to the event structure, to be used for
  // serialization.
at::IValue toIValue() const;
// Reconstructs an event from IValues given by toIValue.
static LegacyEvent fromIValue(const at::IValue& eventIValue);
void record(bool record_cuda);
std::string kindStr() const {
switch (kind_) {
case EventKind::Mark:
return "mark";
case EventKind::PushRange:
return "push";
case EventKind::PopRange:
return "pop";
case EventKind::MemoryAlloc:
return "memory_alloc";
}
throw std::runtime_error("unknown event kind");
}
EventKind kind() const {
return kind_;
}
const char* name() const {
return name_.str();
}
uint64_t threadId() const {
return thread_id_;
}
std::vector<std::vector<int64_t>> shapes() const {
return shapes_;
}
double cpuElapsedUs(const LegacyEvent& e) const {
return static_cast<double>(e.cpu_ns_ - cpu_ns_) / (1000.0);
}
void setCpuUs(int64_t cpu_us) {
cpu_ns_ = cpu_us * 1000;
}
double cpuUs() const {
return static_cast<double>(cpu_ns_) / (1000.0);
}
double cudaElapsedUs(const LegacyEvent& e) const;
bool hasCuda() const {
return cuda_event != nullptr || (isRemote() && device_ != -1);
}
c10::DeviceIndex device() const {
return device_;
}
void updateMemoryStats(int64_t alloc_size, c10::Device device) {
if (device.is_cuda() || device.type() == c10::DeviceType::HIP) {
cuda_memory_usage_ = alloc_size;
} else if (
device.is_cpu() || device.type() == c10::DeviceType::MKLDNN ||
device.type() == c10::DeviceType::IDEEP) {
cpu_memory_usage_ = alloc_size;
} else {
LOG(WARNING) << "Unsupported memory profiling device: " << device;
}
}
int64_t cpuMemoryUsage() const {
return cpu_memory_usage_;
}
int64_t cudaMemoryUsage() const {
return cuda_memory_usage_;
}
at::RecordFunctionHandle handle() const {
return handle_;
}
// Node ID corresponding to this event.
int64_t nodeId() const {
return node_id_;
}
// Set Node ID on this event.
void setNodeId(int64_t node_id) {
node_id_ = node_id;
}
void setName(at::StringView newName_) {
name_ = std::move(newName_);
}
bool isRemote() const {
return is_remote_;
}
void setCudaUs(int64_t cuda_us) {
cuda_us_ = cuda_us;
}
void setSequenceNr(int64_t sequence_nr) {
sequence_nr_ = sequence_nr;
}
int64_t sequenceNr() const {
return sequence_nr_;
}
void setCorrelationId(uint64_t correlation_id) {
correlation_id_ = correlation_id;
}
uint64_t correlationId() const {
return correlation_id_;
}
const std::vector<std::string>& stack() const {
return stack_;
}
void setStack(const std::vector<std::string>& stack) {
stack_ = stack;
}
uint64_t fwdThreadId() const {
return fwd_thread_id_;
}
void setFwdThreadId(uint64_t fwd_thread_id) {
fwd_thread_id_ = fwd_thread_id;
}
uint8_t scope() const {
return scope_;
}
void setScope(uint8_t scope) {
scope_ = scope;
}
const std::unordered_map<std::string, c10::IValue>& extraArgs() const {
return extra_args_;
}
void setExtraArgs(std::unordered_map<std::string, c10::IValue>&& save_args) {
extra_args_ = std::move(save_args);
}
uint64_t flops() {
return flops_;
}
bool isAsync() {
return is_async_;
}
void setFlops(uint64_t flops) {
flops_ = flops;
}
private:
// signed to allow for negative intervals, initialized for safety.
int64_t cpu_ns_ = 0;
at::StringView name_;
EventKind kind_;
uint64_t thread_id_;
uint64_t fwd_thread_id_{0};
at::RecordFunctionHandle handle_{0};
std::vector<std::vector<int64_t>> shapes_;
int64_t cpu_memory_usage_ = 0;
int64_t cuda_memory_usage_ = 0;
c10::DeviceIndex device_ = -1;
torch::profiler::impl::ProfilerVoidEventStub cuda_event = nullptr;
int64_t node_id_ = 0;
bool is_remote_ = false;
int64_t cuda_us_ = -1;
int64_t sequence_nr_ = -1;
bool is_async_ = false;
std::vector<std::string> stack_;
uint8_t scope_{0};
uint64_t correlation_id_{0};
// Extra arguments for computing op flops
std::unordered_map<std::string, c10::IValue> extra_args_;
uint64_t flops_ = 0;
};
// An event list that reserves capacity up front, to avoid a std::vector
// resize from taking a large amount of time inside a profiling event
struct RangeEventList {
RangeEventList() {
events_.reserve(kReservedCapacity);
}
template <typename... Args>
void record(Args&&... args) {
std::lock_guard<std::mutex> guard(mutex_);
events_.emplace_back(std::forward<Args>(args)...);
}
std::vector<LegacyEvent> consolidate() {
std::lock_guard<std::mutex> lock(mutex_);
std::vector<LegacyEvent> result;
result.insert(
result.begin(),
std::make_move_iterator(events_.begin()),
std::make_move_iterator(events_.end()));
events_.erase(events_.begin(), events_.end());
return result;
}
size_t size() {
std::lock_guard<std::mutex> lock(mutex_);
return events_.size();
}
private:
// This mutex is used to serialize access when different threads are writing
// to the same instance of RangeEventList.
std::mutex mutex_;
std::vector<LegacyEvent> events_;
static const size_t kReservedCapacity = 1024;
};
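// Hedged usage sketch for RangeEventList (arguments forward to the
// LegacyEvent constructor; values illustrative):
//
//   RangeEventList list;
//   list.record(
//       EventKind::Mark,
//       at::StringView("step"),
//       /*thread_id=*/0,
//       /*record_cuda=*/false);
//   std::vector<LegacyEvent> events = list.consolidate();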
// A struct to control settings of disableProfiler options.
struct TORCH_API ProfilerDisableOptions {
ProfilerDisableOptions() = default;
ProfilerDisableOptions(bool shouldCleanupTLSState, bool shouldConsolidate)
: cleanupTLSState(shouldCleanupTLSState),
consolidate(shouldConsolidate) {}
// Whether we should clean up profiler states that are thread local, such as
// ThreadLocalDebugInfo and thread local RecordFunction callbacks.
bool cleanupTLSState = true;
// Whether we should consolidate all currently recorded profiled events. If
// false, will not consolidate and other threads can continue to write to the
// event lists.
bool consolidate = true;
};
// NOTE: profiler mode is thread local, with automatic propagation
// across thread boundaries (e.g. at::launch tasks)
TORCH_API void enableProfilerLegacy(
const torch::profiler::impl::ProfilerConfig&);
using thread_event_lists = std::vector<std::vector<LegacyEvent>>;
TORCH_API thread_event_lists disableProfilerLegacy(
std::optional<ProfilerDisableOptions> profilerDisableOptions =
std::nullopt);
// Adds profiledEvents to the current thread-local recorded events. Each
// event is expected to already carry the node ID of its originating node
// (see LegacyEvent::setNodeId).
TORCH_API void addEventList(std::vector<LegacyEvent>&& profiledEvents);
// Writes profiled events to a stream.
TORCH_API void writeProfilerEventsToStream(
std::ostream& out,
const std::vector<LegacyEvent*>& events);
// Usage:
// {
// RecordProfile guard("filename.trace");
// // code you want to profile
// }
// Then open filename.trace in chrome://tracing
struct TORCH_API RecordProfile {
RecordProfile(std::ostream& out);
RecordProfile(const std::string& filename);
~RecordProfile();
private:
void init();
std::unique_ptr<std::ofstream> file_;
std::ostream& out_;
void processEvents(const std::vector<LegacyEvent*>& events);
};
// A guard that enables the legacy profiler, taking in an optional callback
// to process the results. Usage:
// {
// TLSLegacyProfilerGuard g([](thread_event_lists profilerResults) {
// // process profilerResults
// });
// Code to profile
// }
struct TORCH_API TLSLegacyProfilerGuard {
explicit TLSLegacyProfilerGuard(
const torch::profiler::impl::ProfilerConfig& cfg,
std::optional<std::function<void(const thread_event_lists&)>>
resultCallback = std::nullopt,
std::optional<ProfilerDisableOptions> profilerDisableOptions =
std::nullopt)
: cb_(std::move(resultCallback)),
profilerDisableOptions_(profilerDisableOptions) {
enableProfilerLegacy(cfg);
}
~TLSLegacyProfilerGuard() {
thread_event_lists event_lists =
disableProfilerLegacy(profilerDisableOptions_);
if (cb_) {
try {
(*cb_)(event_lists);
} catch (const std::exception& e) {
LOG(ERROR) << "Got error processing profiler events: " << e.what();
}
}
}
private:
std::optional<std::function<void(const thread_event_lists&)>> cb_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::optional<ProfilerDisableOptions> profilerDisableOptions_;
};
} // namespace torch::autograd::profiler
```
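
A hedged, runnable companion to the usage comment on TLSLegacyProfilerGuard above; `ProfilerState::CPU` is assumed to be a valid legacy mode from torch/csrc/profiler/api.h, and the printing is illustrative.

```cpp
// Minimal sketch: record a region with the legacy profiler and inspect the
// per-thread event lists in the callback when the guard is destroyed.
#include <torch/csrc/autograd/profiler_legacy.h>

#include <iostream>

void profile_with_guard() {
  using namespace torch::autograd::profiler;
  torch::profiler::impl::ProfilerConfig cfg{
      torch::profiler::impl::ProfilerState::CPU}; // assumed enumerator

  TLSLegacyProfilerGuard guard(cfg, [](const thread_event_lists& lists) {
    // Each inner vector holds the LegacyEvents recorded by one thread.
    for (const auto& per_thread : lists) {
      for (const auto& evt : per_thread) {
        std::cout << evt.kindStr() << " " << evt.name() << "\n";
      }
    }
  });
  // ... code to profile; the callback runs in ~TLSLegacyProfilerGuard ...
}
```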
|
===========================================================================================================================================
SOURCE CODE FILE: profiler_python.h
LINES: 1
SIZE: 0.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\profiler_python.h
ENCODING: utf-8
```h
#pragma once
namespace torch::autograd::profiler::python_tracer {
void init();
}
```
|
===============================================================================================================================================
SOURCE CODE FILE: python_anomaly_mode.h
LINES: 1
SIZE: 1.20 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_anomaly_mode.h
ENCODING: utf-8
```h
#pragma once
#include <pybind11/pybind11.h>
#include <torch/csrc/autograd/anomaly_mode.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::autograd {
struct PyAnomalyMetadata : public AnomalyMetadata {
static constexpr const char* ANOMALY_TRACE_KEY = "traceback_";
static constexpr const char* ANOMALY_PARENT_KEY = "parent_";
PyAnomalyMetadata() {
pybind11::gil_scoped_acquire gil;
// NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
dict_ = PyDict_New();
}
// NOLINTNEXTLINE(bugprone-exception-escape)
~PyAnomalyMetadata() override {
    // If Python is already dead, leak the wrapped Python objects
if (Py_IsInitialized()) {
pybind11::gil_scoped_acquire gil;
Py_DECREF(dict_);
}
}
void store_stack() override;
void print_stack(const std::string& current_node_name) override;
void assign_parent(const std::shared_ptr<Node>& parent_node) override;
PyObject* dict() {
return dict_;
}
private:
PyObject* dict_{nullptr};
};
void _print_stack(
PyObject* trace_stack,
const std::string& current_node_name,
bool is_parent);
} // namespace torch::autograd
```
|
===========================================================================================================================================
SOURCE CODE FILE: python_autograd.h
LINES: 1
SIZE: 0.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_autograd.h
ENCODING: utf-8
```h
#ifndef THP_AUTOGRAD_H
#define THP_AUTOGRAD_H
#include <torch/csrc/utils/pythoncapi_compat.h>
PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused);
void THPAutograd_initFunctions();
namespace torch::autograd {
PyMethodDef* python_functions();
}
#include <torch/csrc/autograd/python_engine.h>
#include <torch/csrc/autograd/python_function.h>
#include <torch/csrc/autograd/python_variable.h>
#endif
```
|
===============================================================================================================================================
SOURCE CODE FILE: python_cpp_function.h
LINES: 1
SIZE: 5.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_cpp_function.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/python_headers.h>
#include <memory>
#include <typeinfo>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/utils/object_ptr.h>
namespace torch::autograd {
struct THPCppFunction {
PyObject_HEAD
std::shared_ptr<Node> cdata;
};
template <typename Ctor>
TORCH_PYTHON_API PyObject* CppFunction_pynew(
PyTypeObject* type,
PyObject* args,
PyObject* kwds) {
THPObjectPtr obj(type->tp_alloc(type, 0));
if (!obj)
return nullptr;
THPCppFunction* f = (THPCppFunction*)obj.get();
HANDLE_TH_ERRORS
new (&f->cdata) std::shared_ptr<Node>(Ctor()(args));
END_HANDLE_TH_ERRORS
if (!f->cdata) {
return nullptr;
}
return obj.release();
}
#define THP_FUNCTION_DEFAULT_METHODS \
{(char*)"_register_hook_dict", \
THPCppFunction_register_hook_dict, \
METH_O, \
nullptr}, \
{(char*)"register_hook", THPCppFunction_register_hook, METH_O, nullptr}, \
{(char*)"register_prehook", \
THPCppFunction_register_prehook, \
METH_O, \
nullptr}, \
{(char*)"name", THPCppFunction_name, METH_NOARGS, nullptr}, \
{(char*)"_sequence_nr", \
THPCppFunction_sequence_nr, \
METH_NOARGS, \
nullptr}, \
{ \
(char*)"_set_sequence_nr", THPCppFunction_set_sequence_nr, METH_O, nullptr \
}
#define THP_FUNCTION_DEFAULT_PROPERTIES \
{(char*)"next_functions", \
THPCppFunction_next_functions, \
nullptr, \
nullptr, \
nullptr}, \
{(char*)"requires_grad", \
THPCppFunction_requires_grad, \
nullptr, \
nullptr, \
nullptr}, \
{(char*)"metadata", THPCppFunction_metadata, nullptr, nullptr, nullptr}, \
{ \
(char*)"_input_metadata", THPCppFunction_input_metadata, nullptr, nullptr, \
nullptr \
}
TORCH_PYTHON_API PyObject* THPCppFunction_next_functions(
PyObject* self,
void* _unused);
TORCH_PYTHON_API PyObject* THPCppFunction_metadata(
PyObject* self,
void* _unused);
TORCH_PYTHON_API PyObject* THPCppFunction_requires_grad(
PyObject* self,
void* _unused);
TORCH_PYTHON_API PyObject* THPCppFunction_register_hook_dict(
PyObject* self,
PyObject* _var);
TORCH_PYTHON_API PyObject* THPCppFunction_register_hook(
PyObject* self,
PyObject* hook);
TORCH_PYTHON_API PyObject* THPCppFunction_register_prehook(
PyObject* self,
PyObject* hook);
TORCH_PYTHON_API PyObject* THPCppFunction_name(
PyObject* self,
PyObject* noargs);
TORCH_PYTHON_API PyObject* THPCppFunction_sequence_nr(
PyObject* self,
PyObject* noargs);
TORCH_PYTHON_API PyObject* THPCppFunction_input_metadata(
PyObject* self,
void* _unused);
TORCH_PYTHON_API PyTypeObject* _initFunctionPyTypeObject(
PyTypeObject& type,
const char* name,
PyGetSetDef* function_properties,
PyMethodDef* function_methods);
TORCH_PYTHON_API PyObject* registerFunctionHook(Node& fn, PyObject* hook);
TORCH_PYTHON_API PyObject* registerFunctionPreHook(Node& fn, PyObject* hook);
template <typename Ctor>
TORCH_PYTHON_API PyTypeObject* createForwardFunctionPyTypeObject(
PyTypeObject& type,
const char* name,
PyGetSetDef* function_properties = nullptr,
PyMethodDef* function_methods = nullptr) {
type.tp_new = &CppFunction_pynew<Ctor>;
return _initFunctionPyTypeObject(
type, name, function_properties, function_methods);
}
TORCH_PYTHON_API void registerCppFunction(
const std::type_info& type,
PyTypeObject* pytype);
TORCH_PYTHON_API PyObject* functionToPyObject(
const std::shared_ptr<Node>& cdata);
TORCH_PYTHON_API bool THPCppFunction_Check(PyObject* obj);
} // namespace torch::autograd
```
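
To make the `Ctor` template parameter concrete, here is a hedged sketch wiring a custom Node through createForwardFunctionPyTypeObject. `MyNode`, `MyNodeCtor`, and `register_my_node` are illustrative names, and the `Node::apply` signature is assumed from torch/csrc/autograd/function.h.

```cpp
// Hypothetical sketch: exposing a C++ autograd Node to Python via the
// helpers above. All `My*` names are illustrative assumptions.
#include <torch/csrc/autograd/python_cpp_function.h>

struct MyNode : torch::autograd::Node {
  // Identity function, just to have a complete Node (signature assumed).
  torch::autograd::variable_list apply(
      torch::autograd::variable_list&& inputs) override {
    return std::move(inputs);
  }
};

// The Ctor functor CppFunction_pynew expects: build a Node from the
// Python-level constructor arguments.
struct MyNodeCtor {
  std::shared_ptr<torch::autograd::Node> operator()(PyObject* /*args*/) {
    return std::make_shared<MyNode>();
  }
};

static PyTypeObject MyNodeType;

void register_my_node() {
  // Installs tp_new = CppFunction_pynew<MyNodeCtor> and finishes type init.
  torch::autograd::createForwardFunctionPyTypeObject<MyNodeCtor>(
      MyNodeType, "MyNode");
}
```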
|