diff --git a/ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..efe499ea49b2db57a61770dee54a2def8be87af1
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbdf1fb9a5b8ac37ddfed837889c081b64a5db654e8a9ef78f3b22987d43e9c8
+size 16778411
diff --git a/ckpts/universal/global_step20/zero/10.attention.dense.weight/fp32.pt b/ckpts/universal/global_step20/zero/10.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b9250951431e653d2625acadc620e980e5a1720f
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e13e8b70a66c4cab27f47edf14ac5f91abd7cc2d83249f10dcce09eaeb30cdb4
+size 16778317
diff --git a/ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dd5886a5998cc887b3ad0f5dcbbf939e8d6386e5
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:917ee73a5297f3461284b91cf58a301a61fd8a42ba6ac69c2e1cf2774274e5d8
+size 50332749
diff --git a/ckpts/universal/global_step20/zero/26.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/26.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4ed6a7ec1fcae602af146471c7f9e3678d907b2c
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/26.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3415c95ef414a2ccd8aa1150c87eb768868935f8a629cb7c06d6e8cb30700ac7
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/26.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/26.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f51d9b3fa09364ce2547edcae9894d236f4730ec
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/26.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd12c3657bc7194a6b9c0c827263e4c4dc56ad9f9044cd78b974d8aca8acdb42
+size 16778411
diff --git a/ckpts/universal/global_step20/zero/6.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/6.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..312e4125eaa3b1da7b3b2bd638a9842524631398
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/6.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:342b79bfd9865a5a64b86187205d4fb334acbfe992387aab3da642fd1afce0da
+size 9372
diff --git a/ckpts/universal/global_step20/zero/6.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/6.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..127ba2f337bb72fd3a5403f715180b8e63baf4f1
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/6.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aad901732dc8516cc5e419426f1c816e354ad6230a64ac8307015d0e7c0226f5
+size 9387
diff --git a/ckpts/universal/global_step20/zero/6.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/6.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d5e47ae50e36b36836ca7d819ab4fd89b3f4842e
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/6.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:163058f261d0ef55e08bf5ceb82343469521053139a2642245f2571a149bca76
+size 9293
diff --git a/ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2401b4d57fd542ab22373718226d8c9e1cf4b712
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/7.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0d72fd4978dfff4812935b6d22239bb4591b2b17d96727f0512dc988edbc3f7
+size 33555627
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h
new file mode 100644
index 0000000000000000000000000000000000000000..cd1dca9ff7a0ad235c024df08107c7d856e6756c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/any_module_holder.h
@@ -0,0 +1,133 @@
+#pragma once
+
+#include <torch/nn/modules/container/any_value.h>
+
+namespace torch {
+namespace nn {
+
+class Module;
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModulePlaceholder ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// The static type of the object we store in the `AnyModule`, which erases
+/// the actual type, but allows us to call `forward()` on the underlying
+/// module.
+struct AnyModulePlaceholder : public AnyValue::Placeholder {
+  using AnyValue::Placeholder::Placeholder;
+
+  /// The "erased" `forward()` method.
+  virtual AnyValue forward(std::vector<AnyValue>&& arguments) = 0;
+
+  /// Returns a `std::shared_ptr<Module>` pointing to the erased module.
+  virtual std::shared_ptr<Module> ptr() = 0;
+
+  /// Returns an `AnyModulePlaceholder` with a shallow copy of this `AnyModule`.
+  virtual std::unique_ptr<AnyModulePlaceholder> copy() const = 0;
+
+  /// Returns an `AnyModulePlaceholder` with a deep copy of this `AnyModule`.
+  virtual std::unique_ptr<AnyModulePlaceholder> clone_module(
+      optional<Device> device) const = 0;
+};
+
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AnyModuleHolder ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+/// The dynamic type of the object stored in the `AnyModule`. It contains the
+/// concrete instance to which all calls are forwarded. It is parameterized
+/// over the concrete type of the module, and the types of the arguments the
+/// module takes in its `forward()` method.
+template <typename ModuleType, typename... ArgumentTypes>
+struct AnyModuleHolder : public AnyModulePlaceholder {
+  /// \internal
+  struct CheckedGetter {
+    template <typename T>
+    decay_t<T>&& operator()(size_t index) {
+      AT_ASSERT(index < arguments_.size());
+      auto& value = arguments_[index];
+      if (auto* maybe_value = value.template try_get<decay_t<T>>()) {
+        return std::move(*maybe_value);
+      }
+      AT_ERROR(
+          "Expected argument #",
+          index,
+          " to be of type ",
+          c10::demangle(typeid(T).name()),
+          ", but received value of type ",
+          c10::demangle(value.type_info().name()));
+    }
+    std::vector<AnyValue>& arguments_;
+  };
+
+  /// \internal
+  struct InvokeForward {
+    template <typename... Ts>
+    AnyValue operator()(Ts&&... ts) {
+      return AnyValue(module_->forward(std::forward<Ts>(ts)...));
+    }
+    std::shared_ptr<ModuleType>& module_;
+  };
+
+  /// Constructs the `AnyModuleHolder` from a concrete module.
+  explicit AnyModuleHolder(std::shared_ptr<ModuleType>&& module_)
+      : AnyModulePlaceholder(typeid(ModuleType)), module(std::move(module_)) {}
+
+  /// Calls `forward()` on the underlying module, casting each `AnyValue` in
+  /// the argument vector to a concrete value.
+  AnyValue forward(std::vector<AnyValue>&& arguments) override {
+    if (module->_forward_has_default_args()) {
+      TORCH_CHECK(
+          arguments.size() >= module->_forward_num_required_args() &&
+              arguments.size() <= sizeof...(ArgumentTypes),
+          c10::demangle(type_info.name()),
+          "'s forward() method expects at least ",
+          module->_forward_num_required_args(),
+          " argument(s) and at most ",
+          sizeof...(ArgumentTypes),
+          " argument(s), but received ",
+          arguments.size(),
+          ".");
+      arguments = std::move(
+          module->_forward_populate_default_args(std::move(arguments)));
+    } else {
+      std::string use_default_args_macro_prompt = " If " +
+          c10::demangle(type_info.name()) +
+          "'s forward() method has default arguments, " +
+          "please make sure the forward() method is declared with a corresponding `FORWARD_HAS_DEFAULT_ARGS` macro.";
+      TORCH_CHECK(
+          arguments.size() == sizeof...(ArgumentTypes),
+          c10::demangle(type_info.name()),
+          "'s forward() method expects ",
+          sizeof...(ArgumentTypes),
+          " argument(s), but received ",
+          arguments.size(),
+          ".",
+          (arguments.size() < sizeof...(ArgumentTypes))
+              ? use_default_args_macro_prompt
+              : "");
+    }
+
+    // FYI: During invocation of a module's `forward()` method, the values live
+    // in the `arguments` vector inside this function.
+    return torch::unpack<AnyValue, ArgumentTypes...>(
+        InvokeForward{module}, CheckedGetter{arguments});
+  }
+
+  std::shared_ptr<Module> ptr() override {
+    return module;
+  }
+
+  std::unique_ptr<AnyModulePlaceholder> copy() const override {
+    return std::make_unique<AnyModuleHolder>(*this);
+  }
+
+  std::unique_ptr<AnyModulePlaceholder> clone_module(
+      optional<Device> device) const override {
+    return std::make_unique<AnyModuleHolder>(
+        std::dynamic_pointer_cast<ModuleType>(module->clone(device)));
+  }
+
+  /// The actual concrete module instance.
+  std::shared_ptr<ModuleType> module;
+};
+
+} // namespace nn
+} // namespace torch
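For orientation, the type erasure above is what lets `AnyModule` (and therefore `Sequential`) call `forward()` without knowing the concrete module type at the call site. A minimal sketch of how that surfaces in user code, assuming a standard libtorch build (module choice and shapes here are illustrative):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Wrapping a concrete module in AnyModule internally builds an
  // AnyModuleHolder<LinearImpl, const torch::Tensor&>.
  torch::nn::AnyModule any(torch::nn::Linear(4, 5));

  // forward() arguments are packed into AnyValues, checked by CheckedGetter,
  // and dispatched to LinearImpl::forward via InvokeForward. The default
  // requested return type is torch::Tensor; a mismatch throws at runtime.
  torch::Tensor out = any.forward(torch::ones({2, 4}));
  std::cout << out.sizes() << std::endl; // [2, 5]
}
```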
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h
new file mode 100644
index 0000000000000000000000000000000000000000..f201825deb5bad0bc8640b6d977e156d10a74435
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterdict.h
@@ -0,0 +1,148 @@
+#pragma once
+
+#include <torch/nn/cloneable.h>
+#include <torch/nn/pimpl.h>
+#include <torch/ordered_dict.h>
+#include <torch/types.h>
+#include <utility>
+
+namespace torch {
+namespace nn {
+
+class ParameterDictImpl : public Cloneable<ParameterDictImpl> {
+ public:
+  using Iterator = OrderedDict<std::string, Tensor>::Iterator;
+  using ConstIterator = OrderedDict<std::string, Tensor>::ConstIterator;
+
+  ParameterDictImpl() = default;
+
+  explicit ParameterDictImpl(
+      const torch::OrderedDict<std::string, torch::Tensor>& params) {
+    parameters_ = params;
+  }
+
+  /// `reset()` is empty for `ParameterDict`, since it does not have
+  /// parameters of its own.
+  void reset() override {}
+
+  /// Pretty prints the `ParameterDict` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override {
+    stream << "torch::nn::ParameterDict(" << std::endl;
+    for (const auto& pair : parameters_) {
+      stream << "(" << pair.key() << ")"
+             << ": Parameter containing: [" << pair.value().scalar_type()
+             << " of size " << pair.value().sizes() << "]";
+      stream << std::endl;
+    }
+    stream << ")";
+  }
+
+  /// Inserts the parameter along with the key into the ParameterDict.
+  /// The parameter keeps its `requires_grad` state.
+  Tensor& insert(std::string key, Tensor param) {
+    bool requires_grad = param.requires_grad();
+    return register_parameter(std::move(key), std::move(param), requires_grad);
+  }
+
+  /// Removes `key` from the ParameterDict and returns its value; throws an
+  /// exception if the key is not contained. Check contains(key) first for a
+  /// non-throwing access.
+  Tensor pop(const std::string& key) {
+    torch::Tensor v = parameters_[key];
+    parameters_.erase(key);
+    return v;
+  }
+
+  /// Returns the keys in the dict.
+  ::std::vector<std::string> keys() const {
+    return parameters_.keys();
+  }
+
+  /// Returns the values in the dict.
+  ::std::vector<torch::Tensor> values() const {
+    return parameters_.values();
+  }
+
+  /// Returns an iterator to the start of the ParameterDict.
+  Iterator begin() {
+    return parameters_.begin();
+  }
+
+  /// Returns a const iterator to the start of the ParameterDict.
+  ConstIterator begin() const {
+    return parameters_.begin();
+  }
+
+  /// Returns an iterator to the end of the ParameterDict.
+  Iterator end() {
+    return parameters_.end();
+  }
+
+  /// Returns a const iterator to the end of the ParameterDict.
+  ConstIterator end() const {
+    return parameters_.end();
+  }
+
+  /// Returns the number of items currently stored in the ParameterDict.
+  size_t size() const noexcept {
+    return parameters_.size();
+  }
+
+  /// Returns true if the ParameterDict is empty, otherwise returns false.
+  bool empty() const noexcept {
+    return parameters_.is_empty();
+  }
+
+  /// Updates the ParameterDict with the key-value pairs from
+  /// another ParameterDict, overwriting existing keys.
+  template <typename Container>
+  void update(const Container& container) {
+    for (auto& item : container) {
+      parameters_[item.key()] = item.value();
+    }
+  }
+
+  /// Removes all parameters in the ParameterDict.
+  void clear() {
+    parameters_.clear();
+  }
+
+  /// Checks whether a parameter with the given key is in the ParameterDict.
+  bool contains(const std::string& key) const noexcept {
+    return parameters_.contains(key);
+  }
+
+  /// Returns the value associated with the given `key`. Throws an exception
+  /// if no such key is stored in the `ParameterDict`. Check contains(key)
+  /// first for a non-throwing way of access.
+  const Tensor& get(const std::string& key) const {
+    return parameters_[key];
+  }
+
+  /// Returns the value associated with the given `key`. Throws an exception
+  /// if no such key is stored in the `ParameterDict`. Check contains(key)
+  /// first for a non-throwing way of access.
+  Tensor& get(const std::string& key) {
+    return parameters_[key];
+  }
+
+  /// Returns the value associated with the given `key`. Throws an exception
+  /// if no such key is stored in the `ParameterDict`. Check contains(key)
+  /// first for a non-throwing way of access.
+  Tensor& operator[](const std::string& key) {
+    return parameters_[key];
+  }
+
+  /// Returns the value associated with the given `key`. Throws an exception
+  /// if no such key is stored in the `ParameterDict`. Check contains(key)
+  /// first for a non-throwing way of access.
+  const Tensor& operator[](const std::string& key) const {
+    return parameters_[key];
+  }
+};
+
+TORCH_MODULE(ParameterDict);
+
+} // namespace nn
+} // namespace torch
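A short usage sketch for `ParameterDict` (keys and shapes are illustrative):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::nn::ParameterDict dict;
  dict->insert("weight", torch::randn({4, 4}));
  dict->insert("bias", torch::zeros(4));

  // Lookup is by key; contains() is the non-throwing check, while get()/pop()
  // throw on a missing key, as documented above.
  if (dict->contains("bias")) {
    torch::Tensor b = dict->get("bias");
    std::cout << b.sizes() << std::endl; // [4]
  }
  std::cout << dict->size() << " parameters" << std::endl;
}
```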
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h
new file mode 100644
index 0000000000000000000000000000000000000000..30b7eb89e48b8b9df8cd0ac7f4a337ba714831b4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/parameterlist.h
@@ -0,0 +1,169 @@
+#pragma once
+
+#include <torch/nn/cloneable.h>
+#include <torch/nn/pimpl.h>
+
+#include <vector>
+
+namespace torch {
+namespace nn {
+class ParameterListImpl : public Cloneable<ParameterListImpl> {
+ public:
+  using Iterator = typename std::vector<
+      OrderedDict<std::string, torch::Tensor>::Item>::iterator;
+  using ConstIterator = typename std::vector<
+      OrderedDict<std::string, torch::Tensor>::Item>::const_iterator;
+
+  ParameterListImpl() = default;
+
+  /// Constructs the `ParameterList` from a variadic list of tensors.
+  template <typename... Tensors>
+  explicit ParameterListImpl(Tensors&&... params) {
+    parameters_.reserve(sizeof...(Tensors));
+    push_back_var(std::forward<Tensors>(params)...);
+  }
+
+  template <typename... Tensors>
+  explicit ParameterListImpl(const Tensors&... params) {
+    parameters_.reserve(sizeof...(Tensors));
+    push_back_var(std::forward<const Tensors&>(params)...);
+  }
+
+  /// `reset()` is empty for `ParameterList`, since it does not have parameters
+  /// of its own.
+  void reset() override {}
+
+  /// Pretty prints the `ParameterList` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override {
+    stream << "torch::nn::ParameterList(" << std::endl;
+    for (const auto& pair : parameters_) {
+      stream << "(" << pair.key() << ")"
+             << ": Parameter containing: [" << pair.value().scalar_type()
+             << " of size " << pair.value().sizes() << "]";
+      stream << std::endl;
+    }
+    stream << ")";
+  }
+
+  /// Pushes a given parameter to the end of the list.
+  void append(torch::Tensor&& param) {
+    bool requires_grad = param.requires_grad();
+    register_parameter(
+        c10::to_string(parameters_.size()), std::move(param), requires_grad);
+  }
+
+  /// Pushes a given parameter to the end of the list.
+  void append(const torch::Tensor& param) {
+    bool requires_grad = param.requires_grad();
+    register_parameter(
+        c10::to_string(parameters_.size()), param, requires_grad);
+  }
+
+  /// Pushes a given parameter to the end of the list.
+  /// The key of the pair is discarded; only the value
+  /// is added to the `ParameterList`.
+  void append(const OrderedDict<std::string, torch::Tensor>::Item& pair) {
+    register_parameter(
+        c10::to_string(parameters_.size()),
+        pair.value(),
+        pair.value().requires_grad());
+  }
+
+  /// Extends parameters from a container to the end of the list.
+  template <typename Container>
+  void extend(const Container& container) {
+    for (const auto& param : container) {
+      append(param);
+    }
+  }
+
+  /// Returns an iterator to the start of the ParameterList;
+  /// the iterator's value type is `OrderedDict<std::string, Tensor>::Item`.
+  Iterator begin() {
+    return parameters_.begin();
+  }
+
+  /// Returns a const iterator to the start of the ParameterList;
+  /// the iterator's value type is `OrderedDict<std::string, Tensor>::Item`.
+  ConstIterator begin() const {
+    return parameters_.begin();
+  }
+
+  /// Returns an iterator to the end of the ParameterList;
+  /// the iterator's value type is `OrderedDict<std::string, Tensor>::Item`.
+  Iterator end() {
+    return parameters_.end();
+  }
+
+  /// Returns a const iterator to the end of the ParameterList;
+  /// the iterator's value type is `OrderedDict<std::string, Tensor>::Item`.
+  ConstIterator end() const {
+    return parameters_.end();
+  }
+
+  /// Returns the tensor at the given `idx`. Throws an exception if the index
+  /// is out of range. Check `idx < size()` first for a non-throwing way of
+  /// access.
+  at::Tensor& at(size_t idx) {
+    TORCH_CHECK(idx < size(), "Index out of range");
+    return parameters_[c10::to_string(idx)];
+  }
+
+  /// Returns the tensor at the given `idx`. Throws an exception if the index
+  /// is out of range. Check `idx < size()` first for a non-throwing way of
+  /// access.
+  const at::Tensor& at(size_t idx) const {
+    TORCH_CHECK(idx < size(), "Index out of range");
+    return parameters_[c10::to_string(idx)];
+  }
+
+  /// Returns the tensor at the given `idx`. Throws an exception if the index
+  /// is out of range.
+  at::Tensor& operator[](size_t idx) {
+    return at(idx);
+  }
+
+  /// Returns the tensor at the given `idx`. Throws an exception if the index
+  /// is out of range.
+  const at::Tensor& operator[](size_t idx) const {
+    return at(idx);
+  }
+
+  /// Returns the size of the ParameterList.
+  size_t size() const noexcept {
+    return parameters_.size();
+  }
+  /// True if the ParameterList is empty.
+  bool is_empty() const noexcept {
+    return parameters_.is_empty();
+  }
+
+  /// Overloads `+=` so that two ParameterLists can be added incrementally.
+  template <typename Container>
+  Container& operator+=(const Container& other) {
+    extend(other);
+    return *this;
+  }
+
+ private:
+  template <typename Head, typename... Tail>
+  void push_back_var(Head&& head, Tail&&... tail) {
+    append(std::forward<Head>(head));
+    // Recursively calls this method, until the parameter pack has only this
+    // entry left. Then calls `append()` a final time (above).
+    push_back_var(std::forward<Tail>(tail)...);
+  }
+
+  /// The base case, when the list of parameters is empty.
+  void push_back_var() {}
+};
+TORCH_MODULE(ParameterList);
+} // namespace nn
+} // namespace torch
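Likewise, a usage sketch for `ParameterList` (values are illustrative):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Variadic construction registers each tensor under its index ("0", "1", ...).
  torch::nn::ParameterList list(torch::randn({3, 3}), torch::randn(3));
  list->append(torch::randn({3, 3}));

  // Indexed access via at()/operator[]; size() reports the count.
  torch::Tensor first = list->at(0);
  std::cout << first.sizes() << " of " << list->size() << std::endl;
}
```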
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/sequential.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/sequential.h
new file mode 100644
index 0000000000000000000000000000000000000000..9494926eef3c6c7fb0faaac4b57a520a29388f15
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/modules/container/sequential.h
@@ -0,0 +1,390 @@
+#pragma once
+
+#include <torch/detail/static.h>
+#include <torch/nn/cloneable.h>
+#include <torch/nn/module.h>
+#include <torch/nn/modules/container/any.h>
+#include <torch/nn/modules/container/named_any.h>
+#include <torch/nn/pimpl.h>
+#include <torch/types.h>
+
+#include <c10/util/Exception.h>
+
+#include <cstdint>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace torch {
+namespace nn {
+
+/// A list of `Module`s that acts as a `Module` itself.
+///
+/// A `Sequential` is fundamentally a list of `Module`s, each with a `forward()`
+/// method. `Sequential` provides a `forward()` method of its own, which accepts
+/// any input and forwards it to the first module it stores. It then "chains"
+/// outputs to inputs sequentially for each subsequent module, finally returning
+/// the output of the last module. For example:
+///
+/// \rst
+/// .. code-block:: cpp
+///
+///   torch::nn::Sequential seq(
+///     torch::nn::Linear(3, 4),
+///     torch::nn::BatchNorm1d(4),
+///     torch::nn::Dropout(0.5)
+///   );
+///
+///   auto output = seq->forward(torch::ones(3));
+///
+/// \endrst
+///
+/// This can conceptually be thought of as the following loop (using Python as
+/// pseudocode):
+///
+/// \rst
+/// .. code-block:: python
+///
+///   def forward(sequential, input):
+///     for module in sequential:
+///       input = module(input)
+///     return input
+///
+/// \endrst
+///
+/// Why should you use `Sequential` instead of a simple `std::vector`? The value
+/// a `Sequential` provides over manually calling a sequence of modules is that
+/// it allows treating the whole container *as a single module*, such that
+/// performing a transformation on the `Sequential` applies to each of the
+/// modules it stores (which are each a registered submodule of the
+/// `Sequential`). For example, calling
+/// `.to(torch::kCUDA)` on a `Sequential` will move each module in the list to
+/// CUDA memory. For example:
+///
+/// \rst
+/// .. code-block:: cpp
+///
+///   torch::nn::Sequential seq(
+///     torch::nn::Linear(3, 4),
+///     torch::nn::BatchNorm1d(4),
+///     torch::nn::Dropout(0.5)
+///   );
+///
+///   // Convert all modules to CUDA.
+///   seq->to(torch::kCUDA);
+///
+/// \endrst
+///
+/// Finally, `Sequential` provides a lightweight container API, such as allowing
+/// iteration over submodules, positional access, adding a new module after
+/// construction via `push_back`, as well as joining two `Sequential`s via
+/// `extend`.
+///
+/// \rst
+/// .. attention::
+///   One current limitation of `Sequential` is that all except the first module
+///   must accept a single argument. If your modules need to take multiple
+///   arguments, you should define them to take and return tuples.
+/// \endrst
+class SequentialImpl : public Cloneable<SequentialImpl> {
+ public:
+  using Iterator = std::vector<AnyModule>::iterator;
+  using ConstIterator = std::vector<AnyModule>::const_iterator;
+
+  SequentialImpl() = default;
+
+  /// Constructs the `Sequential` from a variadic list of modules.
+  template <typename... Modules>
+  explicit SequentialImpl(Modules&&... modules) {
+    modules_.reserve(sizeof...(Modules));
+    push_back(std::forward<Modules>(modules)...);
+  }
+
+  /// Constructs the `Sequential` from an `OrderedDict` of named `AnyModule`s.
+  explicit SequentialImpl(
+      torch::OrderedDict<std::string, AnyModule>&& ordered_dict) {
+    modules_.reserve(ordered_dict.size());
+    for (auto& item : ordered_dict) {
+      push_back(item.key(), std::move(item.value()));
+    }
+  }
+
+  /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s.
+  /// It enables the following use case:
+  /// `Sequential sequential({{"m1", M(1)}, {"m2", M(2)}})`
+  explicit SequentialImpl(std::initializer_list<NamedAnyModule> named_modules) {
+    modules_.reserve(named_modules.size());
+    for (const auto& named_module : named_modules) {
+      push_back(named_module.name(), named_module.module());
+    }
+  }
+
+  /// Special cloning function for `Sequential` because it does not use
+  /// `reset()`.
+  std::shared_ptr<Module> clone(
+      const optional<Device>& device = nullopt) const override {
+    auto clone = std::make_shared<SequentialImpl>();
+    for (const auto& module : modules_) {
+      clone->push_back(module.clone(device));
+    }
+    return clone;
+  }
+
+  /// `reset()` is empty for `Sequential`, since it does not have parameters of
+  /// its own.
+  void reset() override {}
+
+  /// Pretty prints the `Sequential` module into the given `stream`.
+  void pretty_print(std::ostream& stream) const override {
+    stream << "torch::nn::Sequential";
+  }
+
+  /// Feeds `inputs` to the first module and then chains outputs to inputs,
+  /// returning the last output.
+  ///
+  /// Conceptually the following loop in Python:
+  ///
+  /// \rst
+  /// .. code-block:: python
+  ///
+  ///   def forward(sequential, input):
+  ///     for module in sequential:
+  ///       input = module(input)
+  ///     return input
+  ///
+  /// \endrst
+  ///
+  /// The return type is taken as the first template parameter. It defaults to
+  /// `Tensor`. If the last module in the `Sequential` returns another type `T`,
+  /// you should call `forward<T>(inputs)` instead of just `forward(inputs)`:
+  ///
+  /// \rst
+  /// .. code-block:: cpp
+  ///
+  ///   torch::Tensor tensor = sequential1->forward(inputs);
+  ///   int integer = sequential2->forward<int>(inputs);
+  ///   float value = sequential3->forward<float>(inputs);
+  ///
+  /// \endrst
+  template <typename ReturnType = Tensor, typename... InputTypes>
+  ReturnType forward(InputTypes&&... inputs) {
+    TORCH_CHECK(!is_empty(), "Cannot call forward() on an empty Sequential");
+
+    auto iterator = modules_.begin();
+    auto input = iterator->any_forward(std::forward<InputTypes>(inputs)...);
+
+    for (++iterator; iterator != modules_.end(); ++iterator) {
+      input = iterator->any_forward(std::move(input));
+    }
+
+    // Check the return value and give a nice error message if the requested
+    // return type was incorrect.
+    if (auto* return_value = input.template try_get<ReturnType>()) {
+      return std::move(*return_value);
+    }
+    AT_ERROR(
+        "The type of the return value is ",
+        c10::demangle(input.type_info().name()),
+        ", but you asked for type ",
+        c10::demangle(typeid(ReturnType).name()));
+  }
+
+  /// Adds a new (boxed) `Module` to the `Sequential` container.
+  template <typename ModuleType>
+  void push_back(std::shared_ptr<ModuleType> module_ptr) {
+    push_back(c10::to_string(modules_.size()), std::move(module_ptr));
+  }
+
+  /// Adds a new named (boxed) `Module` to the `Sequential` container.
+  template <typename ModuleType>
+  void push_back(std::string name, std::shared_ptr<ModuleType> module_ptr) {
+    push_back(std::move(name), AnyModule(std::move(module_ptr)));
+  }
+
+  /// Adds a new `Module` to the `Sequential` container, moving or copying it
+  /// into a `shared_ptr` internally. This method allows passing value types,
+  /// and letting the container deal with the boxing. This means you can write
+  /// `Sequential(Module(3, 4))` instead of
+  /// `Sequential(std::make_shared<Module>(3, 4))`.
+  template <typename M, typename = torch::detail::enable_if_module_t<M>>
+  void push_back(M&& module) {
+    push_back(c10::to_string(modules_.size()), std::forward<M>(module));
+  }
+
+  /// Adds a new named `Module` to the `Sequential` container, moving or
+  /// copying it into a `shared_ptr` internally. This method allows passing
+  /// value types, and letting the container deal with the boxing.
+  template <typename M, typename = torch::detail::enable_if_module_t<M>>
+  void push_back(std::string name, M&& module) {
+    using Type = typename std::remove_reference<M>::type;
+    push_back(std::move(name), std::make_shared<Type>(std::forward<M>(module)));
+  }
+
+  /// Unwraps the contained module of a `ModuleHolder` and adds it to the
+  /// `Sequential`.
+  template <typename M>
+  void push_back(const ModuleHolder<M>& module_holder) {
+    push_back(c10::to_string(modules_.size()), module_holder);
+  }
+
+  /// Unwraps the contained named module of a `ModuleHolder` and adds it to the
+  /// `Sequential`.
+  template <typename M>
+  void push_back(std::string name, const ModuleHolder<M>& module_holder) {
+    push_back(std::move(name), module_holder.ptr());
+  }
+
+  /// Iterates over the container and calls `push_back()` on each value.
+  template <typename Container>
+  void extend(const Container& container) {
+    for (const auto& module : container) {
+      push_back(module);
+    }
+  }
+
+  /// Adds a type-erased `AnyModule` to the `Sequential`.
+  void push_back(AnyModule any_module) {
+    push_back(c10::to_string(modules_.size()), std::move(any_module));
+  }
+
+  void push_back(std::string name, AnyModule any_module) {
+    modules_.push_back(std::move(any_module));
+    const auto index = modules_.size() - 1;
+    register_module(std::move(name), modules_[index].ptr());
+  }
+
+  /// Returns an iterator to the start of the `Sequential`.
+  Iterator begin() {
+    return modules_.begin();
+  }
+
+  /// Returns a const iterator to the start of the `Sequential`.
+  ConstIterator begin() const {
+    return modules_.begin();
+  }
+
+  /// Returns an iterator to the end of the `Sequential`.
+  Iterator end() {
+    return modules_.end();
+  }
+
+  /// Returns a const iterator to the end of the `Sequential`.
+  ConstIterator end() const {
+    return modules_.end();
+  }
+
+  /// Attempts to return the module at the given index as the requested type.
+  /// Throws an exception if the index is out of bounds or the types do not
+  /// match.
+  template <typename T>
+  T& at(size_t index) {
+    static_assert(
+        torch::detail::is_module<T>::value,
+        "Can only call Sequential::at with an nn::Module type");
+    TORCH_CHECK(index < size(), "Index out of range");
+    return modules_[index].get<T>();
+  }
+
+  /// Attempts to return the module at the given index as the requested type.
+  /// Throws an exception if the index is out of bounds or the types do not
+  /// match.
+  template <typename T>
+  const T& at(size_t index) const {
+    static_assert(
+        torch::detail::is_module<T>::value,
+        "Can only call Sequential::at with an nn::Module type");
+    TORCH_CHECK(index < size(), "Index out of range");
+    return modules_[index].get<T>();
+  }
+
+  /// Attempts to return a `std::shared_ptr` whose dynamic type is that of the
+  /// underlying module at the given index. Throws an exception if the index is
+  /// out of bounds.
+  std::shared_ptr<Module> ptr(size_t index) const {
+    TORCH_CHECK(index < size(), "Index out of range");
+    return modules_[index].ptr();
+  }
+
+  /// Attempts to return a `std::shared_ptr` whose type is the one provided.
+  /// Throws an exception if the index is out of bounds or the types do not
+  /// match.
+  template <typename T>
+  std::shared_ptr<T> ptr(size_t index) const {
+    static_assert(
+        torch::detail::is_module<T>::value,
+        "Can only call Sequential::ptr with an nn::Module type");
+    TORCH_CHECK(index < size(), "Index out of range");
+    return modules_[index].ptr<T>();
+  }
+
+  /// Like `ptr(index)`.
+  std::shared_ptr<Module> operator[](size_t index) const {
+    // This is the only method we can call without a type.
+    return ptr(index);
+  }
+
+  /// The current size of the `Sequential` container.
+  size_t size() const noexcept {
+    return modules_.size();
+  }
+
+  /// True if there are no modules in the `Sequential`.
+  bool is_empty() const noexcept {
+    return size() == 0;
+  }
+
+ private:
+  /// Takes a First *and* Second parameter, to avoid ambiguity when a parameter
+  /// pack has only one type, in which case the template would be preferred,
+  /// even if the other `push_back` functions are better fits (e.g. `unique_ptr`
+  /// -> `shared_ptr` overload).
+  /// NOTE: We explicitly avoid matching this template with
+  /// `push_back(std::string("name"), module)` or `push_back("name", module)`,
+  /// since they should be handled by their respective `push_back` functions.
+  template <
+      typename First,
+      typename Second,
+      typename... Rest,
+      typename = torch::disable_if_t<
+          std::is_same<First, std::string>::value ||
+          // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
+          std::is_same<
+              typename std::decay<First>::type,
+              std::decay<const char (&)[]>::type>::value>>
+  void push_back(First&& first, Second&& second, Rest&&... rest) {
+    push_back(std::forward<First>(first));
+    // Recursively calls this method, until the parameter pack has only this
+    // entry left. Then calls `push_back()` a final time (above).
+    push_back(std::forward<Second>(second), std::forward<Rest>(rest)...);
+  }
+
+  /// The base case, when the list of modules is empty.
+  void push_back() {}
+
+  // Box the AnyModules to give Sequential reference semantics, like the rest
+  // of the API. Note that this is not required otherwise; this could just be
+  // a `vector<AnyModule>`.
+  std::vector<AnyModule> modules_;
+};
+
+/// A `ModuleHolder` subclass for `SequentialImpl`.
+/// See the documentation for `SequentialImpl` class to learn what methods it
+/// provides, or the documentation for `ModuleHolder` to learn about PyTorch's
+/// module storage semantics.
+class Sequential : public torch::nn::ModuleHolder<SequentialImpl> {
+ public:
+  using torch::nn::ModuleHolder<SequentialImpl>::ModuleHolder;
+
+  Sequential() : ModuleHolder() {}
+
+  /// Constructs the `Sequential` from a braced-init-list of named `AnyModule`s.
+  /// It enables the following use case:
+  /// `Sequential sequential({{"m1", M(1)}, {"m2", M(2)}})`
+  Sequential(std::initializer_list<NamedAnyModule> named_modules)
+      : ModuleHolder(std::make_shared<SequentialImpl>(named_modules)) {}
+};
+} // namespace nn
+} // namespace torch
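A sketch of the non-default return type described in the `forward()` docs above; the tuple-returning module here is hypothetical, written only to illustrate `forward<T>()`:

```cpp
#include <torch/torch.h>
#include <iostream>
#include <tuple>

// Hypothetical module whose forward() returns something other than a Tensor.
struct SplitImpl : torch::nn::Module {
  std::tuple<torch::Tensor, torch::Tensor> forward(torch::Tensor x) {
    return {x, x * 2};
  }
};
TORCH_MODULE(Split);

int main() {
  torch::nn::Sequential seq(Split());
  // The requested return type is passed explicitly, as documented above;
  // asking for the wrong type throws at runtime via AT_ERROR.
  auto pair =
      seq->forward<std::tuple<torch::Tensor, torch::Tensor>>(torch::ones(2));
  std::cout << std::get<1>(pair) << std::endl;
}
```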
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/activation.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/activation.h
new file mode 100644
index 0000000000000000000000000000000000000000..e51805d3648521406b1ce3f72108ccf0ea7a94c0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/activation.h
@@ -0,0 +1,714 @@
+#pragma once
+
+#include <torch/arg.h>
+#include <torch/csrc/Export.h>
+#include <torch/enum.h>
+#include <torch/types.h>
+
+namespace torch {
+namespace nn {
+
+/// Options for the `ELU` module.
+///
+/// Example:
+/// ```
+/// ELU model(ELUOptions().alpha(42.42).inplace(true));
+/// ```
+struct TORCH_API ELUOptions {
+  /// The `alpha` value for the ELU formulation. Default: 1.0
+  TORCH_ARG(double, alpha) = 1.0;
+
+  /// can optionally do the operation in-place. Default: False
+  TORCH_ARG(bool, inplace) = false;
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::elu`.
+///
+/// See the documentation for `torch::nn::ELUOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::elu(x, F::ELUFuncOptions().alpha(0.42).inplace(true));
+/// ```
+using ELUFuncOptions = ELUOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `SELU` module.
+///
+/// Example:
+/// ```
+/// SELU model(SELUOptions().inplace(true));
+/// ```
+struct TORCH_API SELUOptions {
+  /* implicit */ SELUOptions(bool inplace = false);
+
+  /// can optionally do the operation in-place. Default: False
+  TORCH_ARG(bool, inplace);
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::selu`.
+///
+/// See the documentation for `torch::nn::SELUOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::selu(input, F::SELUFuncOptions(false));
+/// ```
+using SELUFuncOptions = SELUOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `GLU` module.
+///
+/// Example:
+/// ```
+/// GLU model(GLUOptions(1));
+/// ```
+struct TORCH_API GLUOptions {
+  /* implicit */ GLUOptions(int64_t dim = -1);
+
+  /// the dimension on which to split the input. Default: -1
+  TORCH_ARG(int64_t, dim);
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::glu`.
+///
+/// See the documentation for `torch::nn::GLUOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::glu(input, GLUFuncOptions(1));
+/// ```
+using GLUFuncOptions = GLUOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `GELU` module.
+///
+/// Example:
+/// ```
+/// GELU model(GELUOptions().approximate("none"));
+/// ```
+struct TORCH_API GELUOptions {
+  /// Specifies the approximation to apply to the output.
+  TORCH_ARG(std::string, approximate) = "none";
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::gelu`.
+///
+/// See the documentation for `torch::nn::GELUOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::gelu(input, F::GELUFuncOptions().approximate("none"));
+/// ```
+using GELUFuncOptions = GELUOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `Hardshrink` module.
+///
+/// Example:
+/// ```
+/// Hardshrink model(HardshrinkOptions().lambda(42.42));
+/// ```
+struct TORCH_API HardshrinkOptions {
+  /* implicit */ HardshrinkOptions(double lambda = 0.5);
+
+  /// the `lambda` value for the Hardshrink formulation. Default: 0.5
+  TORCH_ARG(double, lambda);
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::hardshrink`.
+///
+/// See the documentation for `torch::nn::HardshrinkOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::hardshrink(x, F::HardshrinkFuncOptions().lambda(0.42));
+/// ```
+using HardshrinkFuncOptions = HardshrinkOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `Hardtanh` module.
+///
+/// Example:
+/// ```
+/// Hardtanh
+/// model(HardtanhOptions().min_val(-42.42).max_val(0.42).inplace(true));
+/// ```
+struct TORCH_API HardtanhOptions {
+  /// minimum value of the linear region range. Default: -1
+  TORCH_ARG(double, min_val) = -1.0;
+
+  /// maximum value of the linear region range. Default: 1
+  TORCH_ARG(double, max_val) = 1.0;
+
+  /// can optionally do the operation in-place. Default: False
+  TORCH_ARG(bool, inplace) = false;
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::hardtanh`.
+///
+/// See the documentation for `torch::nn::HardtanhOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::hardtanh(x,
+/// F::HardtanhFuncOptions().min_val(-1.0).max_val(1.0).inplace(true));
+/// ```
+using HardtanhFuncOptions = HardtanhOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `LeakyReLU` module.
+///
+/// Example:
+/// ```
+/// LeakyReLU model(LeakyReLUOptions().negative_slope(0.42).inplace(true));
+/// ```
+struct TORCH_API LeakyReLUOptions {
+  /// Controls the angle of the negative slope. Default: 1e-2
+  TORCH_ARG(double, negative_slope) = 1e-2;
+
+  /// can optionally do the operation in-place. Default: False
+  TORCH_ARG(bool, inplace) = false;
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::leaky_relu`.
+///
+/// See the documentation for `torch::nn::LeakyReLUOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::leaky_relu(x,
+/// F::LeakyReLUFuncOptions().negative_slope(0.42).inplace(true));
+/// ```
+using LeakyReLUFuncOptions = LeakyReLUOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `Softmax` module.
+///
+/// Example:
+/// ```
+/// Softmax model(SoftmaxOptions(1));
+/// ```
+struct TORCH_API SoftmaxOptions {
+  SoftmaxOptions(int64_t dim);
+
+  /// Dimension along which Softmax will be computed.
+  TORCH_ARG(int64_t, dim);
+};
+
+// ============================================================================
+
+namespace functional {
+
+/// Options for `torch::nn::functional::softmax`.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::softmax(input, F::SoftmaxFuncOptions(1));
+/// ```
+struct TORCH_API SoftmaxFuncOptions {
+  SoftmaxFuncOptions(int64_t dim);
+
+  /// Dimension along which Softmax will be computed.
+  TORCH_ARG(int64_t, dim);
+
+  /// the desired data type of the returned tensor.
+  /// If specified, the input tensor is cast to `dtype` before the operation
+  /// is performed. This is useful for preventing data type overflows. Default:
+  /// None.
+  TORCH_ARG(c10::optional<torch::Dtype>, dtype) = c10::nullopt;
+};
+
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `Softmin` module.
+///
+/// Example:
+/// ```
+/// Softmin model(SoftminOptions(1));
+/// ```
+struct TORCH_API SoftminOptions {
+  SoftminOptions(int64_t dim);
+
+  /// Dimension along which Softmin will be computed.
+  TORCH_ARG(int64_t, dim);
+};
+
+// ============================================================================
+
+namespace functional {
+
+/// Options for `torch::nn::functional::softmin`.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::softmin(input, F::SoftminFuncOptions(1));
+/// ```
+struct TORCH_API SoftminFuncOptions {
+  SoftminFuncOptions(int64_t dim);
+
+  /// Dimension along which Softmin will be computed.
+  TORCH_ARG(int64_t, dim);
+
+  /// the desired data type of the returned tensor.
+  /// If specified, the input tensor is cast to `dtype` before the operation
+  /// is performed. This is useful for preventing data type overflows. Default:
+  /// None.
+  TORCH_ARG(c10::optional<torch::Dtype>, dtype) = c10::nullopt;
+};
+
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `LogSoftmax` module.
+///
+/// Example:
+/// ```
+/// LogSoftmax model(LogSoftmaxOptions(1));
+/// ```
+struct TORCH_API LogSoftmaxOptions {
+  LogSoftmaxOptions(int64_t dim);
+
+  /// Dimension along which LogSoftmax will be computed.
+  TORCH_ARG(int64_t, dim);
+};
+
+// ============================================================================
+
+namespace functional {
+
+/// Options for `torch::nn::functional::log_softmax`.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::log_softmax(input, LogSoftmaxFuncOptions(1));
+/// ```
+struct TORCH_API LogSoftmaxFuncOptions {
+  LogSoftmaxFuncOptions(int64_t dim);
+
+  /// Dimension along which LogSoftmax will be computed.
+  TORCH_ARG(int64_t, dim);
+
+  /// the desired data type of the returned tensor.
+  /// If specified, the input tensor is cast to `dtype` before the operation
+  /// is performed. This is useful for preventing data type overflows. Default:
+  /// None.
+  TORCH_ARG(c10::optional<torch::Dtype>, dtype) = c10::nullopt;
+};
+
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `PReLU` module.
+///
+/// Example:
+/// ```
+/// PReLU model(PReLUOptions().num_parameters(42));
+/// ```
+struct TORCH_API PReLUOptions {
+  /// number of `a` to learn. Although it takes an int as input, only two
+  /// values are legitimate: 1, or the number of channels at input. Default: 1
+  TORCH_ARG(int64_t, num_parameters) = 1;
+
+  /// the initial value of `a`. Default: 0.25
+  TORCH_ARG(double, init) = 0.25;
+};
+
+// ============================================================================
+
+/// Options for the `ReLU` module.
+///
+/// Example:
+/// ```
+/// ReLU model(ReLUOptions().inplace(true));
+/// ```
+struct TORCH_API ReLUOptions {
+  /* implicit */ ReLUOptions(bool inplace = false);
+
+  /// can optionally do the operation in-place. Default: False
+  TORCH_ARG(bool, inplace);
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::relu`.
+///
+/// See the documentation for `torch::nn::ReLUOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::relu(x, F::ReLUFuncOptions().inplace(true));
+/// ```
+using ReLUFuncOptions = ReLUOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `ReLU6` module.
+///
+/// Example:
+/// ```
+/// ReLU6 model(ReLU6Options().inplace(true));
+/// ```
+struct TORCH_API ReLU6Options {
+  /* implicit */ ReLU6Options(bool inplace = false);
+
+  /// can optionally do the operation in-place. Default: False
+  TORCH_ARG(bool, inplace);
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::relu6`.
+///
+/// See the documentation for `torch::nn::ReLU6Options` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::relu6(x, F::ReLU6FuncOptions().inplace(true));
+/// ```
+using ReLU6FuncOptions = ReLU6Options;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `RReLU` module.
+///
+/// Example:
+/// ```
+/// RReLU model(RReLUOptions().lower(0.24).upper(0.42).inplace(true));
+/// ```
+struct TORCH_API RReLUOptions {
+  /// lower bound of the uniform distribution. Default: 1/8
+  TORCH_ARG(double, lower) = 1.0 / 8.0;
+
+  /// upper bound of the uniform distribution. Default: 1/3
+  TORCH_ARG(double, upper) = 1.0 / 3.0;
+
+  /// can optionally do the operation in-place. Default: False
+  TORCH_ARG(bool, inplace) = false;
+};
+
+// ============================================================================
+
+namespace functional {
+
+/// Options for `torch::nn::functional::rrelu`.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::rrelu(x, F::RReLUFuncOptions().lower(0.1).upper(0.4).inplace(true));
+/// ```
+struct TORCH_API RReLUFuncOptions {
+  /// lower bound of the uniform distribution. Default: 1/8
+  TORCH_ARG(double, lower) = 1.0 / 8.0;
+
+  /// upper bound of the uniform distribution. Default: 1/3
+  TORCH_ARG(double, upper) = 1.0 / 3.0;
+
+  TORCH_ARG(bool, training) = false;
+
+  /// can optionally do the operation in-place. Default: False
+  TORCH_ARG(bool, inplace) = false;
+};
+
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `CELU` module.
+///
+/// Example:
+/// ```
+/// CELU model(CELUOptions().alpha(42.42).inplace(true));
+/// ```
+struct TORCH_API CELUOptions {
+  /// The `alpha` value for the CELU formulation. Default: 1.0
+  TORCH_ARG(double, alpha) = 1.0;
+
+  /// can optionally do the operation in-place. Default: False
+  TORCH_ARG(bool, inplace) = false;
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::celu`.
+///
+/// See the documentation for `torch::nn::CELUOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::celu(x, F::CELUFuncOptions().alpha(0.42).inplace(true));
+/// ```
+using CELUFuncOptions = CELUOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `Softplus` module.
+///
+/// Example:
+/// ```
+/// Softplus model(SoftplusOptions().beta(0.24).threshold(42.42));
+/// ```
+struct TORCH_API SoftplusOptions {
+  /// the `beta` value for the Softplus formulation. Default: 1
+  TORCH_ARG(double, beta) = 1.0;
+
+  /// values above this revert to a linear function. Default: 20
+  TORCH_ARG(double, threshold) = 20.0;
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::softplus`.
+///
+/// See the documentation for `torch::nn::SoftplusOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::softplus(x, F::SoftplusFuncOptions().beta(0.5).threshold(3.0));
+/// ```
+using SoftplusFuncOptions = SoftplusOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `Softshrink` module.
+///
+/// Example:
+/// ```
+/// Softshrink model(SoftshrinkOptions(42.42));
+/// ```
+struct TORCH_API SoftshrinkOptions {
+  /* implicit */ SoftshrinkOptions(double lambda = 0.5);
+
+  /// the `lambda` value for the Softshrink formulation. Default: 0.5
+  TORCH_ARG(double, lambda);
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::softshrink`.
+///
+/// See the documentation for `torch::nn::SoftshrinkOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::softshrink(x, F::SoftshrinkFuncOptions(0.42));
+/// ```
+using SoftshrinkFuncOptions = SoftshrinkOptions;
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `Threshold` module.
+///
+/// Example:
+/// ```
+/// Threshold model(ThresholdOptions(42.42, 24.24).inplace(true));
+/// ```
+struct TORCH_API ThresholdOptions {
+  ThresholdOptions(double threshold, double value)
+      : threshold_(threshold), value_(value) {}
+
+  /// The value to threshold at
+  TORCH_ARG(double, threshold);
+
+  /// The value to replace with
+  TORCH_ARG(double, value);
+
+  /// can optionally do the operation in-place. Default: False
+  TORCH_ARG(bool, inplace) = false;
+};
+
+namespace functional {
+/// Options for `torch::nn::functional::threshold`.
+///
+/// See the documentation for `torch::nn::ThresholdOptions` class to learn what
+/// arguments are supported.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::threshold(x, F::ThresholdFuncOptions(0.5, 0.5).inplace(true));
+/// ```
+using ThresholdFuncOptions = ThresholdOptions;
+} // namespace functional
+
+// ============================================================================
+
+namespace functional {
+
+/// Options for `torch::nn::functional::gumbel_softmax`.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(-1));
+/// ```
+struct TORCH_API GumbelSoftmaxFuncOptions {
+  /// non-negative scalar temperature
+  TORCH_ARG(double, tau) = 1.0;
+
+  /// returned samples will be discretized as one-hot vectors,
+  /// but will be differentiated as if they were the soft samples in autograd.
+  /// Default: False
+  TORCH_ARG(bool, hard) = false;
+
+  /// dimension along which softmax will be computed. Default: -1
+  TORCH_ARG(int, dim) = -1;
+};
+
+} // namespace functional
+
+// ============================================================================
+
+/// Options for the `MultiheadAttention` module.
+///
+/// Example:
+/// ```
+/// MultiheadAttention model(MultiheadAttentionOptions(20, 10).bias(false));
+/// ```
+struct TORCH_API MultiheadAttentionOptions {
+  MultiheadAttentionOptions(int64_t embed_dim, int64_t num_heads);
+
+  /// total dimension of the model.
+  TORCH_ARG(int64_t, embed_dim);
+
+  /// parallel attention heads.
+  TORCH_ARG(int64_t, num_heads);
+
+  /// a Dropout layer on attn_output_weights. Default: 0.0.
+  TORCH_ARG(double, dropout) = 0.0;
+
+  /// add bias as module parameter. Default: true.
+  TORCH_ARG(bool, bias) = true;
+
+  /// add bias to the key and value sequences at dim=0.
+  TORCH_ARG(bool, add_bias_kv) = false;
+
+  /// add a new batch of zeros to the key and value sequences at dim=1.
+  TORCH_ARG(bool, add_zero_attn) = false;
+
+  /// total number of features in key. Default: c10::nullopt.
+  TORCH_ARG(int64_t, kdim);
+
+  /// total number of features in value. Default: c10::nullopt.
+  TORCH_ARG(int64_t, vdim);
+};
+
+// ============================================================================
+
+namespace functional {
+
+/// Options for `torch::nn::functional::multi_head_attention_forward`
+struct TORCH_API MultiheadAttentionForwardFuncOptions {
+  MultiheadAttentionForwardFuncOptions(
+      int64_t embed_dim_to_check,
+      int64_t num_heads,
+      Tensor in_proj_weight,
+      Tensor in_proj_bias,
+      Tensor bias_k,
+      Tensor bias_v,
+      bool add_zero_attn,
+      double dropout_p,
+      Tensor out_proj_weight,
+      Tensor out_proj_bias);
+
+  TORCH_ARG(int64_t, embed_dim_to_check);
+
+  TORCH_ARG(int64_t, num_heads);
+
+  TORCH_ARG(Tensor, in_proj_weight);
+
+  TORCH_ARG(Tensor, in_proj_bias);
+
+  TORCH_ARG(Tensor, bias_k);
+
+  TORCH_ARG(Tensor, bias_v);
+
+  TORCH_ARG(bool, add_zero_attn);
+
+  TORCH_ARG(double, dropout_p);
+
+  TORCH_ARG(Tensor, out_proj_weight);
+
+  TORCH_ARG(Tensor, out_proj_bias);
+
+  TORCH_ARG(bool, training) = true;
+
+  TORCH_ARG(Tensor, key_padding_mask) = {};
+
+  TORCH_ARG(bool, need_weights) = true;
+
+  TORCH_ARG(Tensor, attn_mask) = {};
+
+  TORCH_ARG(bool, use_separate_proj_weight) = false;
+
+  TORCH_ARG(Tensor, q_proj_weight) = {};
+
+  TORCH_ARG(Tensor, k_proj_weight) = {};
+
+  TORCH_ARG(Tensor, v_proj_weight) = {};
+
+  TORCH_ARG(Tensor, static_k) = {};
+
+  TORCH_ARG(Tensor, static_v) = {};
+
+  TORCH_ARG(bool, average_attn_weights) = true;
+};
+
+} // namespace functional
+
+} // namespace nn
+} // namespace torch
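A quick sketch exercising a few of these option structs through the functional API (inputs are illustrative):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::randn({2, 5});

  // Builder-style options, as in the examples above.
  auto y = F::elu(x, F::ELUFuncOptions().alpha(0.5));

  // SoftmaxFuncOptions carries the dim plus an optional accumulation dtype.
  auto p = F::softmax(x, F::SoftmaxFuncOptions(1).dtype(torch::kFloat64));

  std::cout << p.sum(1) << std::endl; // each row sums to 1
}
```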
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/adaptive.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/adaptive.h
new file mode 100644
index 0000000000000000000000000000000000000000..d4754747a1d296b878231851af9d1b4caee94ab4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/adaptive.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#include <torch/arg.h>
+#include <torch/csrc/Export.h>
+#include <torch/types.h>
+
+namespace torch {
+namespace nn {
+
+/// Options for the `AdaptiveLogSoftmaxWithLoss` module.
+///
+/// Example:
+/// ```
+/// AdaptiveLogSoftmaxWithLoss model(AdaptiveLogSoftmaxWithLossOptions(8, 10,
+/// {4, 8}).div_value(2.).head_bias(true));
+/// ```
+struct TORCH_API AdaptiveLogSoftmaxWithLossOptions {
+  /* implicit */ AdaptiveLogSoftmaxWithLossOptions(
+      int64_t in_features,
+      int64_t n_classes,
+      std::vector<int64_t> cutoffs);
+
+  /// Number of features in the input tensor
+  TORCH_ARG(int64_t, in_features);
+
+  /// Number of classes in the dataset
+  TORCH_ARG(int64_t, n_classes);
+
+  /// Cutoffs used to assign targets to their buckets
+  TORCH_ARG(std::vector<int64_t>, cutoffs);
+
+  /// value used as an exponent to compute sizes of the clusters. Default: 4.0
+  TORCH_ARG(double, div_value) = 4.;
+
+  /// If ``true``, adds a bias term to the 'head' of
+  /// the adaptive softmax. Default: false
+  TORCH_ARG(bool, head_bias) = false;
+};
+
+} // namespace nn
+} // namespace torch
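A usage sketch for these options; the `forward(input, target)` call returning an output/loss pair follows the C++ frontend's `ASMoutput` struct, assuming this build matches upstream:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // 8 input features, 10 classes, cutoffs {4, 8}, as in the example above.
  torch::nn::AdaptiveLogSoftmaxWithLoss head(
      torch::nn::AdaptiveLogSoftmaxWithLossOptions(8, 10, {4, 8})
          .div_value(2.)
          .head_bias(true));

  auto input = torch::randn({4, 8});
  auto target = torch::randint(0, 10, {4}); // integer class indices
  auto result = head->forward(input, target); // ASMoutput{output, loss}
  std::cout << result.loss << std::endl;
}
```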
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/batchnorm.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/batchnorm.h
new file mode 100644
index 0000000000000000000000000000000000000000..cd2d7f164203e89545bd6fe63e25eded31b779bd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/batchnorm.h
@@ -0,0 +1,95 @@
+#pragma once
+
+#include <torch/arg.h>
+#include <torch/csrc/Export.h>
+#include <torch/types.h>
+
+namespace torch {
+namespace nn {
+
+/// Options for the `BatchNorm` module.
+struct TORCH_API BatchNormOptions {
+  /* implicit */ BatchNormOptions(int64_t num_features);
+
+  /// The number of features of the input tensor.
+  /// Changing this parameter after construction __has no effect__.
+  TORCH_ARG(int64_t, num_features);
+
+  /// The epsilon value added for numerical stability.
+  /// Changing this parameter after construction __is effective__.
+  TORCH_ARG(double, eps) = 1e-5;
+
+  /// A momentum multiplier for the mean and variance.
+  /// Changing this parameter after construction __is effective__.
+  TORCH_ARG(c10::optional<double>, momentum) = 0.1;
+
+  /// Whether to learn a scale and bias that are applied in an affine
+  /// transformation on the input.
+  /// Changing this parameter after construction __has no effect__.
+  TORCH_ARG(bool, affine) = true;
+
+  /// Whether to store and update batch statistics (mean and variance) in the
+  /// module.
+  /// Changing this parameter after construction __has no effect__.
+  TORCH_ARG(bool, track_running_stats) = true;
+};
+
+/// Options for the `BatchNorm1d` module.
+///
+/// Example:
+/// ```
+/// BatchNorm1d
+/// model(BatchNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+/// ```
+using BatchNorm1dOptions = BatchNormOptions;
+
+/// Options for the `BatchNorm2d` module.
+///
+/// Example:
+/// ```
+/// BatchNorm2d
+/// model(BatchNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+/// ```
+using BatchNorm2dOptions = BatchNormOptions;
+
+/// Options for the `BatchNorm3d` module.
+///
+/// Example:
+/// ```
+/// BatchNorm3d
+/// model(BatchNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true));
+/// ```
+using BatchNorm3dOptions = BatchNormOptions;
+
+// ============================================================================
+
+namespace functional {
+
+/// Options for `torch::nn::functional::batch_norm`.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::batch_norm(input, mean, variance,
+/// F::BatchNormFuncOptions().weight(weight).bias(bias).momentum(0.1).eps(1e-05).training(false));
+/// ```
+struct TORCH_API BatchNormFuncOptions {
+  TORCH_ARG(Tensor, weight) = Tensor();
+
+  TORCH_ARG(Tensor, bias) = Tensor();
+
+  TORCH_ARG(bool, training) = false;
+
+  /// A momentum multiplier for the mean and variance.
+  /// Changing this parameter after construction __is effective__.
+  TORCH_ARG(c10::optional<double>, momentum) = 0.1;
+
+  /// The epsilon value added for numerical stability.
+  /// Changing this parameter after construction __is effective__.
+  TORCH_ARG(double, eps) = 1e-5;
+};
+
+} // namespace functional
+
+} // namespace nn
+} // namespace torch
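A sketch of the functional form with explicitly managed running statistics (tensors are illustrative):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  namespace F = torch::nn::functional;
  auto input = torch::randn({4, 3, 8, 8});
  auto mean = torch::zeros(3);
  auto variance = torch::ones(3);

  // In inference mode the provided running stats are used directly;
  // training(true) would instead update them in-place using `momentum`.
  auto out = F::batch_norm(
      input,
      mean,
      variance,
      F::BatchNormFuncOptions().momentum(0.1).eps(1e-5).training(false));
  std::cout << out.sizes() << std::endl;
}
```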
+template +struct ConvNdOptions { + using padding_t = conv_padding_t; + ConvNdOptions( + int64_t in_channels, + int64_t out_channels, + ExpandingArray kernel_size) + : in_channels_(in_channels), + out_channels_(out_channels), + kernel_size_(std::move(kernel_size)) {} + + /// The number of channels the input volumes will have. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, in_channels); + + /// The number of output channels the convolution should produce. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, out_channels); + + /// The kernel size to use. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, kernel_size); + + /// The stride of the convolution. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, stride) = 1; + + /// The padding to add to the input volumes. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(padding_t, padding) = 0; + + public: + decltype(auto) padding(std::initializer_list il) { + return padding(IntArrayRef{il}); + } + + /// The kernel dilation. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, dilation) = 1; + + /// If true, convolutions will be transpose convolutions (a.k.a. + /// deconvolutions). + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(bool, transposed) = false; + + /// For transpose convolutions, the padding to add to output volumes. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, output_padding) = 0; + + /// The number of convolution groups. + /// This parameter __can__ be changed after construction. + TORCH_ARG(int64_t, groups) = 1; + + /// Whether to add a bias after individual applications of the kernel. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(bool, bias) = true; + + /// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or + /// `torch::kCircular`. Default: `torch::kZeros` + TORCH_ARG(conv_padding_mode_t, padding_mode) = torch::kZeros; +}; + +} // namespace detail + +// ============================================================================ + +/// Options for a `D`-dimensional convolution module. +template +struct ConvOptions { + using padding_mode_t = detail::conv_padding_mode_t; + using padding_t = detail::conv_padding_t; + + ConvOptions( + int64_t in_channels, + int64_t out_channels, + ExpandingArray kernel_size) + : in_channels_(in_channels), + out_channels_(out_channels), + kernel_size_(std::move(kernel_size)) {} + + /// The number of channels the input volumes will have. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, in_channels); + + /// The number of output channels the convolution should produce. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, out_channels); + + /// The kernel size to use. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. 
+ /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, kernel_size); + + /// The stride of the convolution. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, stride) = 1; + + /// The padding to add to the input volumes. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(padding_t, padding) = 0; + + public: + decltype(auto) padding(std::initializer_list il) { + return padding(IntArrayRef{il}); + } + + /// The kernel dilation. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, dilation) = 1; + + /// The number of convolution groups. + /// This parameter __can__ be changed after construction. + TORCH_ARG(int64_t, groups) = 1; + + /// Whether to add a bias after individual applications of the kernel. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(bool, bias) = true; + + /// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or + /// `torch::kCircular`. Default: `torch::kZeros` + TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros; +}; + +/// `ConvOptions` specialized for the `Conv1d` module. +/// +/// Example: +/// ``` +/// Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false)); +/// ``` +using Conv1dOptions = ConvOptions<1>; + +/// `ConvOptions` specialized for the `Conv2d` module. +/// +/// Example: +/// ``` +/// Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false)); +/// ``` +using Conv2dOptions = ConvOptions<2>; + +/// `ConvOptions` specialized for the `Conv3d` module. +/// +/// Example: +/// ``` +/// Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false)); +/// ``` +using Conv3dOptions = ConvOptions<3>; + +// ============================================================================ + +namespace functional { + +/// Options for a `D`-dimensional convolution functional. +template +struct ConvFuncOptions { + using padding_t = torch::nn::detail::conv_padding_t; + + /// optional bias of shape `(out_channels)`. Default: ``None`` + TORCH_ARG(torch::Tensor, bias) = Tensor(); + + /// The stride of the convolving kernel. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(ExpandingArray, stride) = 1; + + /// Implicit paddings on both sides of the input. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(padding_t, padding) = 0; + + public: + decltype(auto) padding(std::initializer_list il) { + return padding(IntArrayRef{il}); + } + + /// The spacing between kernel elements. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(ExpandingArray, dilation) = 1; + + /// Split input into groups, `in_channels` should be divisible by + /// the number of groups. + TORCH_ARG(int64_t, groups) = 1; +}; + +/// `ConvFuncOptions` specialized for `torch::nn::functional::conv1d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1)); +/// ``` +using Conv1dFuncOptions = ConvFuncOptions<1>; + +/// `ConvFuncOptions` specialized for `torch::nn::functional::conv2d`. 
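A short sketch of the functional conv options with `groups` and `dilation`; the tensor shapes are illustrative assumptions:
```
namespace F = torch::nn::functional;
auto input  = torch::randn({1, 4, 16, 16});
auto weight = torch::randn({8, 4 / 2, 3, 3});  // {out, in / groups, kH, kW}
auto out = F::conv2d(
    input, weight,
    F::Conv2dFuncOptions().groups(2).dilation(2).padding(2));  // -> {1, 8, 16, 16}
```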
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1)); +/// ``` +using Conv2dFuncOptions = ConvFuncOptions<2>; + +/// `ConvFuncOptions` specialized for `torch::nn::functional::conv3d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1)); +/// ``` +using Conv3dFuncOptions = ConvFuncOptions<3>; + +} // namespace functional + +// ============================================================================ + +template +struct ConvTransposeOptions { + using padding_mode_t = detail::conv_padding_mode_t; + + ConvTransposeOptions( + int64_t in_channels, + int64_t out_channels, + ExpandingArray kernel_size) + : in_channels_(in_channels), + out_channels_(out_channels), + kernel_size_(std::move(kernel_size)) {} + + /// The number of channels the input volumes will have. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, in_channels); + + /// The number of output channels the convolution should produce. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(int64_t, out_channels); + + /// The kernel size to use. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, kernel_size); + + /// The stride of the convolution. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, stride) = 1; + + /// The padding to add to the input volumes. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, padding) = 0; + + /// For transpose convolutions, the padding to add to output volumes. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, output_padding) = 0; + + /// The number of convolution groups. + /// This parameter __can__ be changed after construction. + TORCH_ARG(int64_t, groups) = 1; + + /// Whether to add a bias after individual applications of the kernel. + /// Changing this parameter after construction __has no effect__. + TORCH_ARG(bool, bias) = true; + + /// The kernel dilation. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + /// This parameter __can__ be changed after construction. + TORCH_ARG(ExpandingArray, dilation) = 1; + + /// Accepted values `torch::kZeros`, `torch::kReflect`, `torch::kReplicate` or + /// `torch::kCircular`. Default: `torch::kZeros` + TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros; +}; + +/// `ConvTransposeOptions` specialized for the `ConvTranspose1d` module. +/// +/// Example: +/// ``` +/// ConvTranspose1d model(ConvTranspose1dOptions(3, 2, +/// 3).stride(1).bias(false)); +/// ``` +using ConvTranspose1dOptions = ConvTransposeOptions<1>; + +/// `ConvTransposeOptions` specialized for the `ConvTranspose2d` module. +/// +/// Example: +/// ``` +/// ConvTranspose2d model(ConvTranspose2dOptions(3, 2, +/// 3).stride(1).bias(false)); +/// ``` +using ConvTranspose2dOptions = ConvTransposeOptions<2>; + +/// `ConvTransposeOptions` specialized for the `ConvTranspose3d` module. 
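A hedged sketch of the transpose-conv options, where `output_padding` disambiguates the output size when `stride > 1` (shapes are illustrative):
```
auto up = torch::nn::ConvTranspose2d(
    torch::nn::ConvTranspose2dOptions(16, 8, /*kernel_size=*/3)
        .stride(2).padding(1).output_padding(1));
auto y = up(torch::randn({1, 16, 14, 14}));  // -> {1, 8, 28, 28}
```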
+/// +/// Example: +/// ``` +/// ConvTranspose3d model(ConvTranspose3dOptions(2, 2, +/// 2).stride(1).bias(false)); +/// ``` +using ConvTranspose3dOptions = ConvTransposeOptions<3>; + +// ============================================================================ + +namespace functional { + +/// Options for a `D`-dimensional convolution functional. +template +struct ConvTransposeFuncOptions { + /// optional bias of shape `(out_channels)`. Default: ``None`` + TORCH_ARG(torch::Tensor, bias) = Tensor(); + + /// The stride of the convolving kernel. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(ExpandingArray, stride) = 1; + + /// Implicit paddings on both sides of the input. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(ExpandingArray, padding) = 0; + + /// Additional size added to one side of each dimension in the output shape. + /// Default: 0 + TORCH_ARG(ExpandingArray, output_padding) = 0; + + /// Split input into groups, `in_channels` should be divisible by + /// the number of groups. + TORCH_ARG(int64_t, groups) = 1; + + /// The spacing between kernel elements. + /// For a `D`-dim convolution, must be a single number or a list of `D` + /// numbers. + TORCH_ARG(ExpandingArray, dilation) = 1; +}; + +/// `ConvTransposeFuncOptions` specialized for +/// `torch::nn::functional::conv_transpose1d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1)); +/// ``` +using ConvTranspose1dFuncOptions = ConvTransposeFuncOptions<1>; + +/// `ConvTransposeFuncOptions` specialized for +/// `torch::nn::functional::conv_transpose2d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1)); +/// ``` +using ConvTranspose2dFuncOptions = ConvTransposeFuncOptions<2>; + +/// `ConvTransposeFuncOptions` specialized for +/// `torch::nn::functional::conv_transpose3d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1)); +/// ``` +using ConvTranspose3dFuncOptions = ConvTransposeFuncOptions<3>; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/distance.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/distance.h new file mode 100644 index 0000000000000000000000000000000000000000..654cd6626498db19fe1ec548e1d10c48cfeb390f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/distance.h @@ -0,0 +1,71 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `CosineSimilarity` module. +/// +/// Example: +/// ``` +/// CosineSimilarity model(CosineSimilarityOptions().dim(0).eps(0.5)); +/// ``` +struct TORCH_API CosineSimilarityOptions { + /// Dimension where cosine similarity is computed. Default: 1 + TORCH_ARG(int64_t, dim) = 1; + /// Small value to avoid division by zero. Default: 1e-8 + TORCH_ARG(double, eps) = 1e-8; +}; + +namespace functional { +/// Options for `torch::nn::functional::cosine_similarity`. +/// +/// See the documentation for `torch::nn::CosineSimilarityOptions` class to +/// learn what arguments are supported. 
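A matching sketch for the functional transpose conv; note the assumed weight layout `{in, out / groups, kH, kW}`, which is reversed relative to a forward conv:
```
namespace F = torch::nn::functional;
auto x = torch::randn({1, 16, 14, 14});
auto w = torch::randn({16, 8, 3, 3});  // {in, out / groups, kH, kW}
auto y = F::conv_transpose2d(
    x, w, F::ConvTranspose2dFuncOptions().stride(2).padding(1).output_padding(1));
```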
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cosine_similarity(input1, input2, +/// F::CosineSimilarityFuncOptions().dim(1)); +/// ``` +using CosineSimilarityFuncOptions = CosineSimilarityOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `PairwiseDistance` module. +/// +/// Example: +/// ``` +/// PairwiseDistance +/// model(PairwiseDistanceOptions().p(3).eps(0.5).keepdim(true)); +/// ``` +struct TORCH_API PairwiseDistanceOptions { + /// The norm degree. Default: 2 + TORCH_ARG(double, p) = 2.0; + /// Small value to avoid division by zero. Default: 1e-6 + TORCH_ARG(double, eps) = 1e-6; + /// Determines whether or not to keep the vector dimension. Default: false + TORCH_ARG(bool, keepdim) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::pairwise_distance`. +/// +/// See the documentation for `torch::nn::PairwiseDistanceOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1)); +/// ``` +using PairwiseDistanceFuncOptions = PairwiseDistanceOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/dropout.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/dropout.h new file mode 100644 index 0000000000000000000000000000000000000000..7f41f5672382c9e612b8c6f29c4ac261f02e328c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/dropout.h @@ -0,0 +1,130 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `Dropout` module. +/// +/// Example: +/// ``` +/// Dropout model(DropoutOptions().p(0.42).inplace(true)); +/// ``` +struct TORCH_API DropoutOptions { + /* implicit */ DropoutOptions(double p = 0.5); + + /// The probability of an element to be zeroed. Default: 0.5 + TORCH_ARG(double, p) = 0.5; + + /// can optionally do the operation in-place. Default: False + TORCH_ARG(bool, inplace) = false; +}; + +/// Options for the `Dropout2d` module. +/// +/// Example: +/// ``` +/// Dropout2d model(Dropout2dOptions().p(0.42).inplace(true)); +/// ``` +using Dropout2dOptions = DropoutOptions; + +/// Options for the `Dropout3d` module. +/// +/// Example: +/// ``` +/// Dropout3d model(Dropout3dOptions().p(0.42).inplace(true)); +/// ``` +using Dropout3dOptions = DropoutOptions; + +/// Options for the `AlphaDropout` module. +/// +/// Example: +/// ``` +/// AlphaDropout model(AlphaDropoutOptions(0.2).inplace(true)); +/// ``` +using AlphaDropoutOptions = DropoutOptions; + +/// Options for the `FeatureAlphaDropout` module. +/// +/// Example: +/// ``` +/// FeatureAlphaDropout model(FeatureAlphaDropoutOptions(0.2).inplace(true)); +/// ``` +using FeatureAlphaDropoutOptions = DropoutOptions; + +namespace functional { + +/// Options for `torch::nn::functional::dropout`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout(input, F::DropoutFuncOptions().p(0.5)); +/// ``` +struct TORCH_API DropoutFuncOptions { + /// The probability of an element to be zeroed. Default: 0.5 + TORCH_ARG(double, p) = 0.5; + + TORCH_ARG(bool, training) = true; + + /// can optionally do the operation in-place. 
Default: False + TORCH_ARG(bool, inplace) = false; +}; + +/// Options for `torch::nn::functional::dropout2d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout2d(input, F::Dropout2dFuncOptions().p(0.5)); +/// ``` +using Dropout2dFuncOptions = DropoutFuncOptions; + +/// Options for `torch::nn::functional::dropout3d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::dropout3d(input, F::Dropout3dFuncOptions().p(0.5)); +/// ``` +using Dropout3dFuncOptions = DropoutFuncOptions; + +/// Options for `torch::nn::functional::alpha_dropout`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::alpha_dropout(input, +/// F::AlphaDropoutFuncOptions().p(0.5).training(false)); +/// ``` +struct TORCH_API AlphaDropoutFuncOptions { + TORCH_ARG(double, p) = 0.5; + + TORCH_ARG(bool, training) = false; + + TORCH_ARG(bool, inplace) = false; +}; + +/// Options for `torch::nn::functional::feature_alpha_dropout`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::feature_alpha_dropout(input, +/// F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false)); +/// ``` +struct TORCH_API FeatureAlphaDropoutFuncOptions { + TORCH_ARG(double, p) = 0.5; + + TORCH_ARG(bool, training) = false; + + TORCH_ARG(bool, inplace) = false; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/embedding.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/embedding.h new file mode 100644 index 0000000000000000000000000000000000000000..d8d06716308e138559a04ec7804b226c768cf3e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/embedding.h @@ -0,0 +1,242 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `Embedding` module. +/// +/// Example: +/// ``` +/// Embedding model(EmbeddingOptions(10, +/// 2).padding_idx(3).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true)); +/// ``` +struct TORCH_API EmbeddingOptions { + EmbeddingOptions(int64_t num_embeddings, int64_t embedding_dim); + + /// The size of the dictionary of embeddings. + TORCH_ARG(int64_t, num_embeddings); + /// The size of each embedding vector. + TORCH_ARG(int64_t, embedding_dim); + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at `padding_idx` is not updated + /// during training, i.e. it remains as a fixed "pad". For a newly constructed + /// Embedding, the embedding vector at `padding_idx` will default to all + /// zeros, but can be updated to another value to be used as the padding + /// vector. + TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. 
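A small sketch of the `EmbeddingOptions` above, assuming the usual module call convention (indices and sizes are illustrative):
```
torch::nn::Embedding emb(
    torch::nn::EmbeddingOptions(10, 3).padding_idx(0).max_norm(2.0));
auto idx  = torch::tensor({{0, 2, 4}, {1, 3, 9}}, torch::kLong);
auto vecs = emb(idx);  // {2, 3, 3}; index 0 maps to the fixed all-zero pad vector
```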
+ TORCH_ARG(bool, sparse) = false; + /// The learnable weights of the module of shape (num_embeddings, + /// embedding_dim) + TORCH_ARG(torch::Tensor, _weight) = Tensor(); +}; + +// ============================================================================ + +/// Options for the `Embedding::from_pretrained` function. +struct TORCH_API EmbeddingFromPretrainedOptions { + /// If ``true``, the tensor does not get updated in the learning process. + /// Equivalent to ``embedding.weight.requires_grad_(false)``. Default: + /// ``true`` + TORCH_ARG(bool, freeze) = true; + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at `padding_idx` is not updated + /// during training, i.e. it remains as a fixed "pad". + TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. + TORCH_ARG(bool, sparse) = false; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::embedding`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::embedding(input, weight, +/// F::EmbeddingFuncOptions().norm_type(2.5).scale_grad_by_freq(true).sparse(true)); +/// ``` +struct TORCH_API EmbeddingFuncOptions { + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at `padding_idx` is not updated + /// during training, i.e. it remains as a fixed "pad". + TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. + TORCH_ARG(bool, sparse) = false; +}; + +} // namespace functional + +// ============================================================================ + +typedef std::variant + EmbeddingBagMode; + +/// Options for the `EmbeddingBag` module. +/// +/// Example: +/// ``` +/// EmbeddingBag model(EmbeddingBagOptions(10, +/// 2).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true).mode(torch::kSum)); +/// ``` +struct TORCH_API EmbeddingBagOptions { + EmbeddingBagOptions(int64_t num_embeddings, int64_t embedding_dim); + + /// The size of the dictionary of embeddings. + TORCH_ARG(int64_t, num_embeddings); + /// The size of each embedding vector. + TORCH_ARG(int64_t, embedding_dim); + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. 
Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. Note: this option is not + /// supported when ``mode="kMax"``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the + /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights` + /// into consideration. ``"kMean"`` computes the average of the values in the + /// bag, ``"kMax"`` computes the max value over each bag. + TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. + /// Note: this option is not supported when ``mode="kMax"``. + TORCH_ARG(bool, sparse) = false; + /// The learnable weights of the module of shape (num_embeddings, + /// embedding_dim) + TORCH_ARG(torch::Tensor, _weight) = Tensor(); + /// If ``true``, `offsets` has one additional element, where the last element + /// is equivalent to the size of `indices`. This matches the CSR format. + TORCH_ARG(bool, include_last_offset) = false; + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at padding_idx is not updated + /// during training, i.e. it remains as a fixed "pad". For a newly constructed + /// EmbeddingBag, the embedding vector at `padding_idx` will default to all + /// zeros, but can be updated to another value to be used as the padding + /// vector. Note that the embedding vector at `padding_idx` is excluded from + /// the reduction. + TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; +}; + +// ============================================================================ + +/// Options for the `EmbeddingBag::from_pretrained` function. +struct TORCH_API EmbeddingBagFromPretrainedOptions { + /// If ``true``, the tensor does not get updated in the learning process. + /// Equivalent to ``embeddingbag.weight.requires_grad_(false)``. Default: + /// ``true`` + TORCH_ARG(bool, freeze) = true; + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. Note: this option is not + /// supported when ``mode="kMax"``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the + /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights` + /// into consideration. ``"kMean"`` computes the average of the values in the + /// bag, ``"kMax"`` computes the max value over each bag. + TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. + /// Note: this option is not supported when ``mode="kMax"``. + TORCH_ARG(bool, sparse) = false; + /// If ``true``, `offsets` has one additional element, where the last element + /// is equivalent to the size of `indices`. This matches the CSR format. Note: + /// this option is currently only supported when ``mode="sum"``. 
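A hedged sketch of the `EmbeddingBagOptions` above with 1-D input plus `offsets` (data is illustrative; assumes the standard `bag(input, offsets)` call):
```
auto bag = torch::nn::EmbeddingBag(
    torch::nn::EmbeddingBagOptions(10, 3).mode(torch::kSum).padding_idx(0));
auto input   = torch::tensor({1, 2, 4, 5, 4, 3, 2, 9}, torch::kLong);
auto offsets = torch::tensor({0, 4}, torch::kLong);  // bags cover [0,4) and [4,8)
auto pooled  = bag(input, offsets);                  // shape {2, 3}
```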
+ TORCH_ARG(bool, include_last_offset) = false; + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at padding_idx is not updated + /// during training, i.e. it remains as a fixed "pad". Note that the embedding + /// vector at `padding_idx` is excluded from the reduction. + TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::embedding_bag`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::embedding_bag(input, weight, +/// F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets)); +/// ``` +struct TORCH_API EmbeddingBagFuncOptions { + /// Only used when `input` is 1D. `offsets` determines + /// the starting index position of each bag (sequence) in `input`. + TORCH_ARG(torch::Tensor, offsets) = Tensor(); + /// If given, each embedding vector with norm larger than `max_norm` is + /// renormalized to have norm `max_norm`. + TORCH_ARG(c10::optional, max_norm) = c10::nullopt; + /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. + TORCH_ARG(double, norm_type) = 2.; + /// If given, this will scale gradients by the inverse of frequency of the + /// words in the mini-batch. Default ``false``. Note: this option is not + /// supported when ``mode="kMax"``. + TORCH_ARG(bool, scale_grad_by_freq) = false; + /// ``"kSum"``, ``"kMean"`` or ``"kMax"``. Specifies the way to reduce the + /// bag. ``"kSum"`` computes the weighted sum, taking `per_sample_weights` + /// into consideration. ``"kMean"`` computes the average of the values in the + /// bag, ``"kMax"`` computes the max value over each bag. + TORCH_ARG(EmbeddingBagMode, mode) = torch::kMean; + /// If ``true``, gradient w.r.t. `weight` matrix will be a sparse tensor. + /// Note: this option is not supported when ``mode="kMax"``. + TORCH_ARG(bool, sparse) = false; + /// a tensor of float / double weights, or None to indicate all weights should + /// be taken to be 1. If specified, `per_sample_weights` must have exactly the + /// same shape as input and is treated as having the same `offsets`, if those + /// are not None. + TORCH_ARG(torch::Tensor, per_sample_weights) = Tensor(); + /// If ``true``, `offsets` has one additional element, where the last element + /// is equivalent to the size of `indices`. This matches the CSR format. Note: + /// this option is currently only supported when ``mode="sum"``. + TORCH_ARG(bool, include_last_offset) = false; + /// If specified, the entries at `padding_idx` do not contribute to the + /// gradient; therefore, the embedding vector at padding_idx is not updated + /// during training, i.e. it remains as a fixed "pad". Note that the embedding + /// vector at `padding_idx` is excluded from the reduction. 
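The functional form takes the weight matrix explicitly; a minimal sketch of the `EmbeddingBagFuncOptions` just defined (values are illustrative):
```
namespace F = torch::nn::functional;
auto weight  = torch::randn({10, 3});
auto input   = torch::tensor({1, 2, 4, 5}, torch::kLong);
auto offsets = torch::tensor({0, 2}, torch::kLong);
auto pooled  = F::embedding_bag(
    input, weight,
    F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets));  // {2, 3}
```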
+ TORCH_ARG(c10::optional, padding_idx) = c10::nullopt; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/fold.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/fold.h new file mode 100644 index 0000000000000000000000000000000000000000..21c24bff845acfcf94724c2b0dc717b7ca250d2a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/fold.h @@ -0,0 +1,99 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `Fold` module. +/// +/// Example: +/// ``` +/// Fold model(FoldOptions({8, 8}, {3, 3}).dilation(2).padding({2, +/// 1}).stride(2)); +/// ``` +struct TORCH_API FoldOptions { + FoldOptions(ExpandingArray<2> output_size, ExpandingArray<2> kernel_size) + : output_size_(std::move(output_size)), + kernel_size_(std::move(kernel_size)) {} + + /// describes the spatial shape of the large containing tensor of the sliding + /// local blocks. It is useful to resolve the ambiguity when multiple input + /// shapes map to same number of sliding blocks, e.g., with stride > 0. + TORCH_ARG(ExpandingArray<2>, output_size); + + /// the size of the sliding blocks + TORCH_ARG(ExpandingArray<2>, kernel_size); + + /// controls the spacing between the kernel points; also known as the à trous + /// algorithm. + TORCH_ARG(ExpandingArray<2>, dilation) = 1; + + /// controls the amount of implicit zero-paddings on both sides for padding + /// number of points for each dimension before reshaping. + TORCH_ARG(ExpandingArray<2>, padding) = 0; + + /// controls the stride for the sliding blocks. + TORCH_ARG(ExpandingArray<2>, stride) = 1; +}; + +namespace functional { +/// Options for `torch::nn::functional::fold`. +/// +/// See the documentation for `torch::nn::FoldOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2})); +/// ``` +using FoldFuncOptions = FoldOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `Unfold` module. +/// +/// Example: +/// ``` +/// Unfold model(UnfoldOptions({2, 4}).dilation(2).padding({2, 1}).stride(2)); +/// ``` +struct TORCH_API UnfoldOptions { + UnfoldOptions(ExpandingArray<2> kernel_size) + : kernel_size_(std::move(kernel_size)) {} + + /// the size of the sliding blocks + TORCH_ARG(ExpandingArray<2>, kernel_size); + + /// controls the spacing between the kernel points; also known as the à trous + /// algorithm. + TORCH_ARG(ExpandingArray<2>, dilation) = 1; + + /// controls the amount of implicit zero-paddings on both sides for padding + /// number of points for each dimension before reshaping. + TORCH_ARG(ExpandingArray<2>, padding) = 0; + + /// controls the stride for the sliding blocks. + TORCH_ARG(ExpandingArray<2>, stride) = 1; +}; + +namespace functional { +/// Options for `torch::nn::functional::unfold`. +/// +/// See the documentation for `torch::nn::UnfoldOptions` class to learn what +/// arguments are supported. 
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2)); +/// ``` +using UnfoldFuncOptions = UnfoldOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/instancenorm.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/instancenorm.h new file mode 100644 index 0000000000000000000000000000000000000000..d93e10d0c95a23befcfe0318ebc4ddcd96aa5e71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/instancenorm.h @@ -0,0 +1,89 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `InstanceNorm` module. +struct TORCH_API InstanceNormOptions { + /* implicit */ InstanceNormOptions(int64_t num_features); + + /// The number of features of the input tensor. + TORCH_ARG(int64_t, num_features); + + /// The epsilon value added for numerical stability. + TORCH_ARG(double, eps) = 1e-5; + + /// A momentum multiplier for the mean and variance. + TORCH_ARG(double, momentum) = 0.1; + + /// Whether to learn a scale and bias that are applied in an affine + /// transformation on the input. + TORCH_ARG(bool, affine) = false; + + /// Whether to store and update batch statistics (mean and variance) in the + /// module. + TORCH_ARG(bool, track_running_stats) = false; +}; + +/// Options for the `InstanceNorm1d` module. +/// +/// Example: +/// ``` +/// InstanceNorm1d +/// model(InstanceNorm1dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +using InstanceNorm1dOptions = InstanceNormOptions; + +/// Options for the `InstanceNorm2d` module. +/// +/// Example: +/// ``` +/// InstanceNorm2d +/// model(InstanceNorm2dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +using InstanceNorm2dOptions = InstanceNormOptions; + +/// Options for the `InstanceNorm3d` module. +/// +/// Example: +/// ``` +/// InstanceNorm3d +/// model(InstanceNorm3dOptions(4).eps(0.5).momentum(0.1).affine(false).track_running_stats(true)); +/// ``` +using InstanceNorm3dOptions = InstanceNormOptions; + +namespace functional { + +/// Options for `torch::nn::functional::instance_norm`. 
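A minimal sketch of the instance-norm module options defined above (shapes illustrative):
```
auto inorm = torch::nn::InstanceNorm2d(
    torch::nn::InstanceNorm2dOptions(3).affine(true));
auto y = inorm(torch::randn({2, 3, 8, 8}));  // each (sample, channel) plane normalized on its own
```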
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::instance_norm(input, +/// F::InstanceNormFuncOptions().running_mean(mean).running_var(variance).weight(weight).bias(bias).momentum(0.1).eps(1e-5)); +/// ``` +struct TORCH_API InstanceNormFuncOptions { + TORCH_ARG(Tensor, running_mean) = Tensor(); + + TORCH_ARG(Tensor, running_var) = Tensor(); + + TORCH_ARG(Tensor, weight) = Tensor(); + + TORCH_ARG(Tensor, bias) = Tensor(); + + TORCH_ARG(bool, use_input_stats) = true; + + TORCH_ARG(double, momentum) = 0.1; + + TORCH_ARG(double, eps) = 1e-5; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h new file mode 100644 index 0000000000000000000000000000000000000000..5952d97806b378814f8bb0c1ffa6cf783d2f8426 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/linear.h @@ -0,0 +1,95 @@ +#pragma once + +#include <torch/arg.h> +#include <torch/csrc/Export.h> +#include <torch/types.h> + +namespace torch { +namespace nn { + +/// Options for the `Linear` module. +/// +/// Example: +/// ``` +/// Linear model(LinearOptions(5, 2).bias(false)); +/// ``` +struct TORCH_API LinearOptions { + LinearOptions(int64_t in_features, int64_t out_features); + /// size of each input sample + TORCH_ARG(int64_t, in_features); + + /// size of each output sample + TORCH_ARG(int64_t, out_features); + + /// If set to false, the layer will not learn an additive bias. Default: true + TORCH_ARG(bool, bias) = true; +}; + +// ============================================================================ + +/// Options for the `Flatten` module. +/// +/// Example: +/// ``` +/// Flatten model(FlattenOptions().start_dim(2).end_dim(4)); +/// ``` +struct TORCH_API FlattenOptions { + /// first dim to flatten + TORCH_ARG(int64_t, start_dim) = 1; + /// last dim to flatten + TORCH_ARG(int64_t, end_dim) = -1; +}; + +// ============================================================================ + +/// Options for the `Unflatten` module. +/// +/// Note: If the input tensor is named, use the dimname and namedshape arguments. +/// +/// Example: +/// ``` +/// Unflatten unnamed_model(UnflattenOptions(0, {2, 2})); +/// Unflatten named_model(UnflattenOptions("B", {{"B1", 2}, {"B2", 2}})); +/// ``` +struct TORCH_API UnflattenOptions { + typedef std::vector<std::pair<std::string, int64_t>> namedshape_t; + + UnflattenOptions(int64_t dim, std::vector<int64_t> sizes); + UnflattenOptions(const char* dimname, namedshape_t namedshape); + UnflattenOptions(std::string dimname, namedshape_t namedshape); + + /// dim to unflatten + TORCH_ARG(int64_t, dim); + /// name of dim to unflatten, for use with named tensors + TORCH_ARG(std::string, dimname); + /// new shape of unflattened dim + TORCH_ARG(std::vector<int64_t>, sizes); + /// new shape of unflattened dim with names, for use with named tensors + TORCH_ARG(namedshape_t, namedshape); +}; + +// ============================================================================ + +/// Options for the `Bilinear` module. +/// +/// Example: +/// ``` +/// Bilinear model(BilinearOptions(3, 2, 4).bias(false)); +/// ``` +struct TORCH_API BilinearOptions { + BilinearOptions( + int64_t in1_features, + int64_t in2_features, + int64_t out_features); + /// The number of features in input 1 (columns of the input1 matrix). + TORCH_ARG(int64_t, in1_features); + /// The number of features in input 2 (columns of the input2 matrix).
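A shape round-trip sketch combining the `Flatten`, `Linear`, and `Unflatten` options above (sizes are illustrative):
```
auto flat  = torch::nn::Flatten(torch::nn::FlattenOptions().start_dim(1));
auto fc    = torch::nn::Linear(torch::nn::LinearOptions(3 * 4, 8).bias(false));
auto split = torch::nn::Unflatten(torch::nn::UnflattenOptions(1, {2, 4}));
auto x = torch::randn({5, 3, 4});
auto y = split(fc(flat(x)));  // {5,3,4} -> {5,12} -> {5,8} -> {5,2,4}
```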
+ TORCH_ARG(int64_t, in2_features); + /// The number of output features to produce (columns of the output matrix). + TORCH_ARG(int64_t, out_features); + /// Whether to learn and add a bias after the bilinear transformation. + TORCH_ARG(bool, bias) = true; +}; + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h new file mode 100644 index 0000000000000000000000000000000000000000..c9eb2b66f3e0b2122639f6354dadf539819efc48 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/loss.h @@ -0,0 +1,802 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `L1Loss` module. +/// +/// Example: +/// ``` +/// L1Loss model(L1LossOptions(torch::kNone)); +/// ``` +struct TORCH_API L1LossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3(L1LossOptions, reduction, kNone, kMean, kSum) + + /// Specifies the reduction to apply to the output. + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::l1_loss`. +/// +/// See the documentation for `torch::nn::L1LossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::l1_loss(input, target, F::L1LossFuncOptions(torch::kNone)); +/// ``` +using L1LossFuncOptions = L1LossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `KLDivLoss` module. +/// +/// Example: +/// ``` +/// KLDivLoss +/// model(KLDivLossOptions().reduction(torch::kNone).log_target(false)); +/// ``` +struct TORCH_API KLDivLossOptions { + typedef std::variant< + enumtype::kNone, + enumtype::kBatchMean, + enumtype::kSum, + enumtype::kMean> + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG4( + KLDivLossOptions, + reduction, + kNone, + kBatchMean, + kSum, + kMean) + + /// Specifies the reduction to apply to the output. + /// ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. Default: ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; + + /// Specifies whether `target` is accepted in the log space. Default: False + TORCH_ARG(bool, log_target) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::kl_div`. +/// +/// See the documentation for `torch::nn::KLDivLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::kl_div(input, target, +/// F::KLDivFuncOptions().reduction(torch::kNone).log_target(false)); +/// ``` +using KLDivFuncOptions = KLDivLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MSELoss` module. +/// +/// Example: +/// ``` +/// MSELoss model(MSELossOptions(torch::kNone)); +/// ``` +struct TORCH_API MSELossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3(MSELossOptions, reduction, kNone, kMean, kSum) + + /// Specifies the reduction to apply to the output. + /// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::mse_loss`. 
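A small sketch of the reduction variants used by the loss options above, taking `MSELoss` as the example (data is illustrative):
```
auto mse = torch::nn::MSELoss(torch::nn::MSELossOptions(torch::kNone));
auto x = torch::randn({4}), t = torch::randn({4});
auto per_elem = mse(x, t);        // torch::kNone: elementwise (x - t)^2, shape {4}
auto scalar   = per_elem.mean();  // what the default torch::kMean would return
```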
+/// +/// See the documentation for `torch::nn::MSELossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::mse_loss(input, target, F::MSELossFuncOptions(torch::kNone)); +/// ``` +using MSELossFuncOptions = MSELossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `BCELoss` module. +/// +/// Example: +/// ``` +/// BCELoss model(BCELossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API BCELossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to the loss of each batch element. + TORCH_ARG(Tensor, weight) = {}; + /// Specifies the reduction to apply to the output. + /// ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::binary_cross_entropy`. +/// +/// See the documentation for `torch::nn::BCELossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::binary_cross_entropy(input, target, +/// F::BinaryCrossEntropyFuncOptions().weight(weight)); +/// ``` +using BinaryCrossEntropyFuncOptions = BCELossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `HingeEmbeddingLoss` module. +/// +/// Example: +/// ``` +/// HingeEmbeddingLoss +/// model(HingeEmbeddingLossOptions().margin(4).reduction(torch::kNone)); +/// ``` +struct TORCH_API HingeEmbeddingLossOptions { + typedef std::variant + reduction_t; + + /// Specifies the threshold for which the distance of a negative sample must + /// reach in order to incur zero loss. Default: 1 + TORCH_ARG(double, margin) = 1.0; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::hinge_embedding_loss`. +/// +/// See the documentation for `torch::nn::HingeEmbeddingLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::hinge_embedding_loss(input, target, +/// F::HingeEmbeddingLossFuncOptions().margin(2)); +/// ``` +using HingeEmbeddingLossFuncOptions = HingeEmbeddingLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MultiMarginLoss` module. +/// +/// Example: +/// ``` +/// MultiMarginLoss model(MultiMarginLossOptions().margin(2).weight(weight)); +/// ``` +struct TORCH_API MultiMarginLossOptions { + typedef std::variant + reduction_t; + + /// Has a default value of :math:`1`. :math:`1` and :math:`2` + /// are the only supported values. + TORCH_ARG(int64_t, p) = 1; + /// Has a default value of :math:`1`. + TORCH_ARG(double, margin) = 1.0; + /// A manual rescaling weight given to each + /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is + /// treated as if having all ones. + TORCH_ARG(Tensor, weight) = Tensor(); + /// Specifies the reduction to apply to the output: + /// ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be + /// applied, + /// ``'mean'``: the sum of the output will be divided by the number of + /// elements in the output, ``'sum'``: the output will be summed. 
Default: + /// ``'mean'`` + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::multi_margin_loss`. +/// +/// See the documentation for `torch::nn::MultiMarginLossOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multi_margin_loss(input, target, +/// F::MultiMarginLossFuncOptions().margin(2).weight(weight)); +/// ``` +using MultiMarginLossFuncOptions = MultiMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CosineEmbeddingLoss` module. +/// +/// Example: +/// ``` +/// CosineEmbeddingLoss model(CosineEmbeddingLossOptions().margin(0.5)); +/// ``` +struct TORCH_API CosineEmbeddingLossOptions { + typedef std::variant + reduction_t; + + /// Specifies the threshold for which the distance of a negative sample must + /// reach in order to incur zero loss. Should be a number from -1 to 1, 0 + /// to 0.5 is suggested. Default: 0.0 + TORCH_ARG(double, margin) = 0.0; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::cosine_embedding_loss`. +/// +/// See the documentation for `torch::nn::CosineEmbeddingLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cosine_embedding_loss(input1, input2, target, +/// F::CosineEmbeddingLossFuncOptions().margin(0.5)); +/// ``` +using CosineEmbeddingLossFuncOptions = CosineEmbeddingLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MultiLabelMarginLoss` module. +/// +/// Example: +/// ``` +/// MultiLabelMarginLoss model(MultiLabelMarginLossOptions(torch::kNone)); +/// ``` +struct TORCH_API MultiLabelMarginLossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + MultiLabelMarginLossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::multilabel_margin_loss`. +/// +/// See the documentation for `torch::nn::MultiLabelMarginLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multilabel_margin_loss(input, target, +/// F::MultilabelMarginLossFuncOptions(torch::kNone)); +/// ``` +using MultilabelMarginLossFuncOptions = MultiLabelMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `SoftMarginLoss` module. +/// +/// Example: +/// ``` +/// SoftMarginLoss model(SoftMarginLossOptions(torch::kNone)); +/// ``` +struct TORCH_API SoftMarginLossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + SoftMarginLossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 
+ /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::soft_margin_loss`. +/// +/// See the documentation for `torch::nn::SoftMarginLossOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::soft_margin_loss(input, target, +/// F::SoftMarginLossFuncOptions(torch::kNone)); +/// ``` +using SoftMarginLossFuncOptions = SoftMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MultiLabelSoftMarginLoss` module. +/// +/// Example: +/// ``` +/// MultiLabelSoftMarginLoss +/// model(MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API MultiLabelSoftMarginLossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to each + /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is + /// treated as if having all ones. + TORCH_ARG(Tensor, weight) = Tensor(); + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::multilabel_soft_margin_loss`. +/// +/// See the documentation for `torch::nn::MultiLabelSoftMarginLossOptions` class +/// to learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::multilabel_soft_margin_loss(input, target, +/// F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight)); +/// ``` +using MultilabelSoftMarginLossFuncOptions = MultiLabelSoftMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `TripletMarginLoss` module. +/// +/// Example: +/// ``` +/// TripletMarginLoss +/// model(TripletMarginLossOptions().margin(3).p(2).eps(1e-06).swap(false)); +/// ``` +struct TORCH_API TripletMarginLossOptions { + typedef std::variant + reduction_t; + + /// Specifies the threshold for which the distance of a negative sample must + /// reach in order to incur zero loss. Default: 1 + TORCH_ARG(double, margin) = 1.0; + /// Specifies the norm degree for pairwise distance. Default: 2 + TORCH_ARG(double, p) = 2.0; + TORCH_ARG(double, eps) = 1e-6; + /// The distance swap is described in detail in the paper Learning shallow + /// convolutional feature descriptors with triplet losses by V. Balntas, + /// E. Riba et al. Default: False + TORCH_ARG(bool, swap) = false; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::triplet_margin_loss`. +/// +/// See the documentation for `torch::nn::TripletMarginLossOptions` class to +/// learn what arguments are supported. 
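A hedged sketch of `TripletMarginLossOptions` with `torch::kNone` to keep per-sample losses (shapes are illustrative):
```
auto triplet = torch::nn::TripletMarginLoss(
    torch::nn::TripletMarginLossOptions().margin(1.0).p(2).reduction(torch::kNone));
auto a = torch::randn({8, 16}), p = torch::randn({8, 16}), n = torch::randn({8, 16});
auto per_sample = triplet(a, p, n);  // torch::kNone keeps the {8} per-sample losses
```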
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::triplet_margin_loss(anchor, positive, negative, +/// F::TripletMarginLossFuncOptions().margin(1.0)); +/// ``` +using TripletMarginLossFuncOptions = TripletMarginLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `TripletMarginWithDistanceLoss` module. +/// +/// Example: +/// ``` +/// TripletMarginWithDistanceLoss +/// model(TripletMarginWithDistanceLossOptions().margin(3).swap(false)); +/// ``` +struct TORCH_API TripletMarginWithDistanceLossOptions { + typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum> + reduction_t; + typedef std::function<Tensor(const Tensor&, const Tensor&)> + distance_function_t; + + /// Specifies a nonnegative, real-valued function that quantifies the + /// closeness of two tensors. If not specified, `F::pairwise_distance` will + /// be used. Default: nullopt + TORCH_ARG(c10::optional<distance_function_t>, distance_function) = + c10::nullopt; + /// Specifies a nonnegative margin representing the minimum difference + /// between the positive and negative distances required for the loss to be 0. + /// Larger margins penalize cases where the negative examples are not distant + /// enough from the anchors, relative to the positives. Default: 1 + TORCH_ARG(double, margin) = 1.0; + /// Whether to use the distance swap described in the paper Learning shallow + /// convolutional feature descriptors with triplet losses by V. Balntas, + /// E. Riba et al. If True, and if the positive example is closer to the + /// negative example than the anchor is, swaps the positive example and the + /// anchor in the loss computation. Default: False + TORCH_ARG(bool, swap) = false; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::triplet_margin_with_distance_loss`. +/// +/// See the documentation for `torch::nn::TripletMarginWithDistanceLossOptions` +/// class to learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::triplet_margin_with_distance_loss(anchor, positive, negative, +/// F::TripletMarginWithDistanceLossFuncOptions().margin(1.0)); +/// ``` +using TripletMarginWithDistanceLossFuncOptions = + TripletMarginWithDistanceLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CTCLoss` module. +/// +/// Example: +/// ``` +/// CTCLoss +/// model(CTCLossOptions().blank(42).zero_infinity(false).reduction(torch::kSum)); +/// ``` +struct TORCH_API CTCLossOptions { + typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum> + reduction_t; + + /// blank label. Default `0`. + TORCH_ARG(int64_t, blank) = 0; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Whether to zero infinite losses and the associated gradients. + /// Default: `false`. Infinite losses mainly occur when the inputs are + /// too short to be aligned to the targets. + TORCH_ARG(bool, zero_infinity) = false; +}; + +namespace functional { +/// Options for `torch::nn::functional::ctc_loss`. +/// +/// See the documentation for `torch::nn::CTCLossOptions` class to learn what +/// arguments are supported.
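A hedged sketch of plugging a custom `distance_function` into the triplet-with-distance options above; the L1 lambda and shapes are illustrative assumptions:
```
auto l1 = [](const torch::Tensor& x, const torch::Tensor& y) {
  return (x - y).abs().sum(/*dim=*/1);  // per-pair L1 distance
};
auto loss = torch::nn::TripletMarginWithDistanceLoss(
    torch::nn::TripletMarginWithDistanceLossOptions()
        .distance_function(l1).margin(1.0).swap(true));
auto out = loss(torch::randn({8, 16}), torch::randn({8, 16}), torch::randn({8, 16}));
```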
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::ctc_loss(log_probs, targets, input_lengths, target_lengths, +/// F::CTCLossFuncOptions().reduction(torch::kNone)); +/// ``` +using CTCLossFuncOptions = CTCLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `SmoothL1Loss` module. +/// +/// Example: +/// ``` +/// SmoothL1Loss model(SmoothL1LossOptions().reduction(torch::kNone).beta(0.5)); +/// ``` +struct TORCH_API SmoothL1LossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + SmoothL1LossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Specifies the threshold at which to change between L1 and L2 loss. + /// If beta is not specified, a value of 1.0 will be used. + /// Default: nullopt + TORCH_ARG(c10::optional, beta) = c10::nullopt; +}; + +namespace functional { +/// Options for `torch::nn::functional::smooth_l1_loss`. +/// +/// See the documentation for `torch::nn::SmoothL1LossOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::smooth_l1_loss(input, target, F::SmoothL1LossFuncOptions(torch::kNone)); +/// ``` +using SmoothL1LossFuncOptions = SmoothL1LossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `HuberLoss` module. +/// +/// Example: +/// ``` +/// HuberLoss model(HuberLossOptions().reduction(torch::kNone).delta(0.5)); +/// ``` +struct TORCH_API HuberLossOptions { + typedef std::variant + reduction_t; + + TORCH_OPTIONS_CTOR_VARIANT_ARG3( + HuberLossOptions, + reduction, + kNone, + kMean, + kSum) + + /// Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. + /// 'none': no reduction will be applied, 'mean': the sum of the output will + /// be divided by the number of elements in the output, 'sum': the output will + /// be summed. Default: 'mean' + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Specifies the threshold at which to change between L1 and L2 loss. + /// Default: 1.0 + TORCH_ARG(double, delta) = 1.0; +}; + +namespace functional { +/// Options for `torch::nn::functional::huber_loss`. +/// +/// See the documentation for `torch::nn::HuberLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::huber_loss(input, target, F::HuberLossFuncOptions(torch::kNone)); +/// ``` +using HuberLossFuncOptions = HuberLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `PoissonNLLLoss` module. +/// +/// Example: +/// ``` +/// PoissonNLLLoss +/// model(PoissonNLLLossOptions().log_input(false).full(true).eps(0.42).reduction(torch::kSum)); +/// ``` +struct TORCH_API PoissonNLLLossOptions { + typedef std::variant + reduction_t; + + /// if true the loss is computed as `exp(input) - target * input`, + /// if false the loss is `input - target * log(input + eps)`. + TORCH_ARG(bool, log_input) = true; + /// whether to compute full loss, i.e. 
to add the Stirling approximation term + /// target * log(target) - target + 0.5 * log(2 * pi * target). + TORCH_ARG(bool, full) = false; + /// Small value to avoid evaluation of `log(0)` when `log_input = false`. + /// Default: 1e-8 + TORCH_ARG(double, eps) = 1e-8; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::poisson_nll_loss`. +/// +/// See the documentation for `torch::nn::PoissonNLLLossOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::poisson_nll_loss(input, target, +/// F::PoissonNLLLossFuncOptions().reduction(torch::kNone)); +/// ``` +using PoissonNLLLossFuncOptions = PoissonNLLLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `MarginRankingLoss` module. +/// +/// Example: +/// ``` +/// MarginRankingLoss +/// model(MarginRankingLossOptions().margin(0.5).reduction(torch::kSum)); +/// ``` +struct TORCH_API MarginRankingLossOptions { + typedef std::variant + reduction_t; + + /// Has a default value of `0`. + TORCH_ARG(double, margin) = 0; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::margin_ranking_loss`. +/// +/// See the documentation for `torch::nn::MarginRankingLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::margin_ranking_loss(input1, input2, target, +/// F::MarginRankingLossFuncOptions().margin(0.5).reduction(torch::kSum)); +/// ``` +using MarginRankingLossFuncOptions = MarginRankingLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `NLLLoss` module. +/// +/// Example: +/// ``` +/// NLLLoss model(NLLLossOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +struct TORCH_API NLLLossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to each + /// class. If given, it has to be a Tensor of size `C`. Otherwise, it is + /// treated as if having all ones. + TORCH_ARG(Tensor, weight) = {}; + /// Specifies a target value that is ignored + /// and does not contribute to the input gradient. + TORCH_ARG(int64_t, ignore_index) = -100; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; +}; + +namespace functional { +/// Options for `torch::nn::functional::nll_loss`. +/// +/// See the documentation for `torch::nn::NLLLossOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::nll_loss(input, target, +/// F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +using NLLLossFuncOptions = NLLLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CrossEntropyLoss` module. +/// +/// Example: +/// ``` +/// CrossEntropyLoss +/// model(CrossEntropyLossOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +struct TORCH_API CrossEntropyLossOptions { + typedef std::variant + reduction_t; + + /// A manual rescaling weight given to each class. 
If given, has to be a + /// Tensor of size C + TORCH_ARG(Tensor, weight) = {}; + /// Specifies a target value that is ignored + /// and does not contribute to the input gradient. + TORCH_ARG(int64_t, ignore_index) = -100; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// Specifies the amount of smoothing when computing the loss. Default: 0.0 + TORCH_ARG(double, label_smoothing) = 0.0; +}; + +namespace functional { +/// Options for `torch::nn::functional::cross_entropy`. +/// +/// See the documentation for `torch::nn::CrossEntropyLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::cross_entropy(input, target, +/// F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean)); +/// ``` +using CrossEntropyFuncOptions = CrossEntropyLossOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `BCEWithLogitsLoss` module. +/// +/// Example: +/// ``` +/// BCEWithLogitsLoss +/// model(BCEWithLogitsLossOptions().reduction(torch::kNone).weight(weight)); +/// ``` +struct TORCH_API BCEWithLogitsLossOptions { + typedef std::variant + reduction_t; + /// A manual rescaling weight given to the loss of each batch element. + /// If given, has to be a Tensor of size `nbatch`. + TORCH_ARG(Tensor, weight) = {}; + /// Specifies the reduction to apply to the output. Default: Mean + TORCH_ARG(reduction_t, reduction) = torch::kMean; + /// A weight of positive examples. + /// Must be a vector with length equal to the number of classes. + TORCH_ARG(Tensor, pos_weight) = {}; +}; + +namespace functional { +/// Options for `torch::nn::functional::binary_cross_entropy_with_logits`. +/// +/// See the documentation for `torch::nn::BCEWithLogitsLossOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::binary_cross_entropy_with_logits(input, target, +/// F::BinaryCrossEntropyWithLogitsFuncOptions().pos_weight(pos_weight).reduction(torch::kSum)); +/// ``` +using BinaryCrossEntropyWithLogitsFuncOptions = BCEWithLogitsLossOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/normalization.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/normalization.h new file mode 100644 index 0000000000000000000000000000000000000000..ae8c206736d50dee245565ddb201346f527925d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/normalization.h @@ -0,0 +1,192 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `LayerNorm` module. +/// +/// Example: +/// ``` +/// LayerNorm model(LayerNormOptions({2, +/// 2}).elementwise_affine(false).eps(2e-5)); +/// ``` +struct TORCH_API LayerNormOptions { + /* implicit */ LayerNormOptions(std::vector normalized_shape); + /// input shape from an expected input. + TORCH_ARG(std::vector, normalized_shape); + /// a value added to the denominator for numerical stability. ``Default: + /// 1e-5``. 
+ TORCH_ARG(double, eps) = 1e-5; + /// a boolean value that when set to ``true``, this module + /// has learnable per-element affine parameters initialized to ones (for + /// weights) and zeros (for biases). ``Default: true``. + TORCH_ARG(bool, elementwise_affine) = true; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::layer_norm`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5)); +/// ``` +struct TORCH_API LayerNormFuncOptions { + /* implicit */ LayerNormFuncOptions(std::vector normalized_shape); + /// input shape from an expected input. + TORCH_ARG(std::vector, normalized_shape); + + TORCH_ARG(Tensor, weight) = {}; + + TORCH_ARG(Tensor, bias) = {}; + + /// a value added to the denominator for numerical stability. ``Default: + /// 1e-5``. + TORCH_ARG(double, eps) = 1e-5; +}; + +} // namespace functional + +// ============================================================================ + +/// Options for the `LocalResponseNorm` module. +/// +/// Example: +/// ``` +/// LocalResponseNorm +/// model(LocalResponseNormOptions(2).alpha(0.0002).beta(0.85).k(2.)); +/// ``` +struct TORCH_API LocalResponseNormOptions { + /* implicit */ LocalResponseNormOptions(int64_t size) : size_(size) {} + /// amount of neighbouring channels used for normalization + TORCH_ARG(int64_t, size); + + /// multiplicative factor. Default: 1e-4 + TORCH_ARG(double, alpha) = 1e-4; + + /// exponent. Default: 0.75 + TORCH_ARG(double, beta) = 0.75; + + /// additive factor. Default: 1 + TORCH_ARG(double, k) = 1.; +}; + +namespace functional { +/// Options for `torch::nn::functional::local_response_norm`. +/// +/// See the documentation for `torch::nn::LocalResponseNormOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::local_response_norm(x, F::LocalResponseNormFuncOptions(2)); +/// ``` +using LocalResponseNormFuncOptions = LocalResponseNormOptions; +} // namespace functional + +// ============================================================================ + +/// Options for the `CrossMapLRN2d` module. +/// +/// Example: +/// ``` +/// CrossMapLRN2d model(CrossMapLRN2dOptions(3).alpha(1e-5).beta(0.1).k(10)); +/// ``` +struct TORCH_API CrossMapLRN2dOptions { + CrossMapLRN2dOptions(int64_t size); + + TORCH_ARG(int64_t, size); + + TORCH_ARG(double, alpha) = 1e-4; + + TORCH_ARG(double, beta) = 0.75; + + TORCH_ARG(int64_t, k) = 1; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::normalize`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1)); +/// ``` +struct TORCH_API NormalizeFuncOptions { + /// The exponent value in the norm formulation. Default: 2.0 + TORCH_ARG(double, p) = 2.0; + /// The dimension to reduce. Default: 1 + TORCH_ARG(int64_t, dim) = 1; + /// Small value to avoid division by zero. Default: 1e-12 + TORCH_ARG(double, eps) = 1e-12; + /// the output tensor. If `out` is used, this + /// operation won't be differentiable. + TORCH_ARG(c10::optional, out) = c10::nullopt; +}; + +} // namespace functional + +// ============================================================================ + +/// Options for the `GroupNorm` module. 
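+///
+/// GroupNorm normalizes jointly over groups of channels, so `num_channels`
+/// must be divisible by `num_groups`. A runnable sketch (the batch and
+/// spatial sizes are illustrative):
+/// ```
+/// // 2 groups over 4 channels; input is (N, C, H, W) = (8, 4, 5, 5)
+/// torch::nn::GroupNorm gn(torch::nn::GroupNormOptions(2, 4).eps(2e-5));
+/// auto y = gn(torch::randn({8, 4, 5, 5}));
+/// // y has the same shape; each group of 2 channels is normalized together
+/// ```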
+/// +/// Example: +/// ``` +/// GroupNorm model(GroupNormOptions(2, 2).eps(2e-5).affine(false)); +/// ``` +struct TORCH_API GroupNormOptions { + /* implicit */ GroupNormOptions(int64_t num_groups, int64_t num_channels); + + /// number of groups to separate the channels into + TORCH_ARG(int64_t, num_groups); + /// number of channels expected in input + TORCH_ARG(int64_t, num_channels); + /// a value added to the denominator for numerical stability. Default: 1e-5 + TORCH_ARG(double, eps) = 1e-5; + /// a boolean value that when set to ``true``, this module + /// has learnable per-channel affine parameters initialized to ones (for + /// weights) and zeros (for biases). Default: ``true``. + TORCH_ARG(bool, affine) = true; +}; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::group_norm`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5)); +/// ``` +struct TORCH_API GroupNormFuncOptions { + /* implicit */ GroupNormFuncOptions(int64_t num_groups); + + /// number of groups to separate the channels into + TORCH_ARG(int64_t, num_groups); + + TORCH_ARG(Tensor, weight) = {}; + + TORCH_ARG(Tensor, bias) = {}; + + /// a value added to the denominator for numerical stability. Default: 1e-5 + TORCH_ARG(double, eps) = 1e-5; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/padding.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/padding.h new file mode 100644 index 0000000000000000000000000000000000000000..8b8312f78ee649bd4127da10dd906790878dbd71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/padding.h @@ -0,0 +1,219 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for a `D`-dimensional ReflectionPad module. +template +struct TORCH_API ReflectionPadOptions { + ReflectionPadOptions(ExpandingArray padding) : padding_(padding) {} + + /// The size of the padding. + /// If it is `int`, uses the same padding in all boundaries. + /// If it is a 2-`tuple` (for ReflectionPad1d), uses (padding_left, + /// padding_right). If it is a 4-`tuple` (for ReflectionPad2d), uses + /// (padding_left, padding_right, padding_top, padding_bottom). If it is a + /// 6-`tuple` (for ReflectionPad3d), uses (padding_left, padding_right, + /// padding_top, padding_bottom, padding_front, padding_back). + + TORCH_ARG(ExpandingArray, padding); +}; + +/// `ReflectionPadOptions` specialized for the `ReflectionPad1d` module. +/// +/// Example: +/// ``` +/// ReflectionPad1d model(ReflectionPad1dOptions({3, 1})); +/// ``` +using ReflectionPad1dOptions = ReflectionPadOptions<1>; + +/// `ReflectionPadOptions` specialized for the `ReflectionPad2d` module. +/// +/// Example: +/// ``` +/// ReflectionPad2d model(ReflectionPad2dOptions({1, 1, 2, 0})); +/// ``` +using ReflectionPad2dOptions = ReflectionPadOptions<2>; + +/// `ReflectionPadOptions` specialized for the `ReflectionPad3d` module. 
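+///
+/// Reflection padding mirrors the input at each boundary, so each pad size
+/// must be smaller than the corresponding input dimension. A sketch with an
+/// illustrative (N, C, D, H, W) input:
+/// ```
+/// // pads: (left, right, top, bottom, front, back) = (1, 1, 2, 0, 1, 1)
+/// torch::nn::ReflectionPad3d pad(
+///     torch::nn::ReflectionPad3dOptions({1, 1, 2, 0, 1, 1}));
+/// auto y = pad(torch::randn({1, 2, 4, 4, 4}));
+/// // y.sizes() == (1, 2, 6, 6, 6): D += 1+1, H += 2+0, W += 1+1
+/// ```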
+///
+/// Example:
+/// ```
+/// ReflectionPad3d model(ReflectionPad3dOptions({1, 1, 2, 0, 1, 1}));
+/// ```
+using ReflectionPad3dOptions = ReflectionPadOptions<3>;
+
+// ============================================================================
+
+/// Options for a `D`-dimensional ReplicationPad module.
+template <size_t D>
+struct TORCH_API ReplicationPadOptions {
+ ReplicationPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}
+
+ /// The size of the padding.
+ /// - If it is `int`, uses the same padding in all boundaries.
+ /// - If it is a 2-`tuple` (for ReplicationPad1d), uses (padding_left,
+ /// padding_right).
+ /// - If it is a 4-`tuple` (for ReplicationPad2d), uses (padding_left,
+ /// padding_right, padding_top, padding_bottom).
+ /// - If it is a 6-`tuple` (for ReplicationPad3d), uses
+ /// (padding_left, padding_right, padding_top, padding_bottom,
+ /// padding_front, padding_back).
+ TORCH_ARG(ExpandingArray<D * 2>, padding);
+};
+
+/// `ReplicationPadOptions` specialized for the `ReplicationPad1d` module.
+///
+/// Example:
+/// ```
+/// ReplicationPad1d model(ReplicationPad1dOptions({3, 1}));
+/// ```
+using ReplicationPad1dOptions = ReplicationPadOptions<1>;
+
+/// `ReplicationPadOptions` specialized for the `ReplicationPad2d` module.
+///
+/// Example:
+/// ```
+/// ReplicationPad2d model(ReplicationPad2dOptions({1, 1, 2, 0}));
+/// ```
+using ReplicationPad2dOptions = ReplicationPadOptions<2>;
+
+/// `ReplicationPadOptions` specialized for the `ReplicationPad3d` module.
+///
+/// Example:
+/// ```
+/// ReplicationPad3d model(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2}));
+/// ```
+using ReplicationPad3dOptions = ReplicationPadOptions<3>;
+
+// ============================================================================
+
+/// Options for a `D`-dimensional ZeroPad module.
+template <size_t D>
+struct TORCH_API ZeroPadOptions {
+ ZeroPadOptions(ExpandingArray<D * 2> padding) : padding_(padding) {}
+
+ /// The size of the padding.
+ /// - If it is `int`, uses the same padding in all boundaries.
+ /// - If it is a 2-`tuple` (for ZeroPad1d), uses (padding_left,
+ /// padding_right).
+ /// - If it is a 4-`tuple` (for ZeroPad2d), uses (padding_left, padding_right,
+ /// padding_top, padding_bottom).
+ /// - If it is a 6-`tuple` (for ZeroPad3d), uses
+ /// (padding_left, padding_right, padding_top, padding_bottom,
+ /// padding_front, padding_back).
+ TORCH_ARG(ExpandingArray<D * 2>, padding);
+};
+
+/// `ZeroPadOptions` specialized for the `ZeroPad1d` module.
+///
+/// Example:
+/// ```
+/// ZeroPad1d model(ZeroPad1dOptions({3, 1}));
+/// ```
+using ZeroPad1dOptions = ZeroPadOptions<1>;
+
+/// `ZeroPadOptions` specialized for the `ZeroPad2d` module.
+///
+/// Example:
+/// ```
+/// ZeroPad2d model(ZeroPad2dOptions({1, 1, 2, 0}));
+/// ```
+using ZeroPad2dOptions = ZeroPadOptions<2>;
+
+/// `ZeroPadOptions` specialized for the `ZeroPad3d` module.
+///
+/// Example:
+/// ```
+/// ZeroPad3d model(ZeroPad3dOptions({1, 2, 1, 2, 1, 2}));
+/// ```
+using ZeroPad3dOptions = ZeroPadOptions<3>;
+
+// ============================================================================
+
+/// Options for a `D`-dimensional ConstantPad module.
+template <size_t D>
+struct TORCH_API ConstantPadOptions {
+ ConstantPadOptions(ExpandingArray<D * 2> padding, double value)
+ : padding_(padding), value_(value) {}
+
+ /// The size of the padding.
+ /// - If it is `int`, uses the same padding in all boundaries.
+ /// - If it is a 2-`tuple` (for ConstantPad1d), uses (padding_left,
+ /// padding_right).
+ /// - If it is a 4-`tuple` (for ConstantPad2d), uses (padding_left, + /// padding_right, padding_top, padding_bottom). + /// - If it is a 6-`tuple` (for ConstantPad3d), uses + /// (padding_left, padding_right, padding_top, padding_bottom, + /// padding_front, padding_back). + TORCH_ARG(ExpandingArray, padding); + + /// Fill value for constant padding. + TORCH_ARG(double, value); +}; + +/// `ConstantPadOptions` specialized for the `ConstantPad1d` module. +/// +/// Example: +/// ``` +/// ConstantPad1d model(ConstantPad1dOptions({3, 1}, 3.5)); +/// ``` +using ConstantPad1dOptions = ConstantPadOptions<1>; + +/// `ConstantPadOptions` specialized for the `ConstantPad2d` module. +/// +/// Example: +/// ``` +/// ConstantPad2d model(ConstantPad2dOptions({3, 0, 2, 1}, 3.5)); +/// ``` +using ConstantPad2dOptions = ConstantPadOptions<2>; + +/// `ConstantPadOptions` specialized for the `ConstantPad3d` module. +/// +/// Example: +/// ``` +/// ConstantPad3d model(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5)); +/// ``` +using ConstantPad3dOptions = ConstantPadOptions<3>; + +// ============================================================================ + +namespace functional { + +/// Options for `torch::nn::functional::pad`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1, +/// 2}).mode(torch::kReplicate)); +/// ``` +struct TORCH_API PadFuncOptions { + typedef std::variant< + enumtype::kConstant, + enumtype::kReflect, + enumtype::kReplicate, + enumtype::kCircular> + mode_t; + + PadFuncOptions(std::vector pad); + + /// m-elements tuple, where m/2 <= input dimensions and m is even. + TORCH_ARG(std::vector, pad); + + /// "constant", "reflect", "replicate" or "circular". Default: "constant" + TORCH_ARG(mode_t, mode) = torch::kConstant; + + /// fill value for "constant" padding. Default: 0 + TORCH_ARG(double, value) = 0; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pixelshuffle.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pixelshuffle.h new file mode 100644 index 0000000000000000000000000000000000000000..859da98616db155e1729b2bbc5e37dab5c529a44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pixelshuffle.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `PixelShuffle` module. +/// +/// Example: +/// ``` +/// PixelShuffle model(PixelShuffleOptions(5)); +/// ``` +struct TORCH_API PixelShuffleOptions { + PixelShuffleOptions(int64_t upscale_factor) + : upscale_factor_(upscale_factor) {} + + /// Factor to increase spatial resolution by + TORCH_ARG(int64_t, upscale_factor); +}; + +/// Options for the `PixelUnshuffle` module. +/// +/// Example: +/// ``` +/// PixelUnshuffle model(PixelUnshuffleOptions(5)); +/// ``` +struct TORCH_API PixelUnshuffleOptions { + /* implicit */ PixelUnshuffleOptions(int64_t downscale_factor) + : downscale_factor_(downscale_factor) {} + + /// Factor to decrease spatial resolution by + TORCH_ARG(int64_t, downscale_factor); +}; + +namespace functional { +/// Options for `torch::nn::functional::pixel_shuffle`. +/// +/// See the documentation for `torch::nn::PixelShuffleOptions` class to learn +/// what arguments are supported. 
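+///
+/// PixelShuffle rearranges a (N, C * r^2, H, W) tensor into (N, C, H * r,
+/// W * r). A sketch with illustrative sizes:
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto x = torch::randn({1, 8, 4, 4}); // C * r^2 = 8 with r = 2, so C = 2
+/// auto y = F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2));
+/// // y.sizes() == (1, 2, 8, 8)
+/// ```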
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pixel_shuffle(x, F::PixelShuffleFuncOptions(2)); +/// ``` +using PixelShuffleFuncOptions = PixelShuffleOptions; + +/// Options for `torch::nn::functional::pixel_unshuffle`. +/// +/// See the documentation for `torch::nn::PixelUnshuffleOptions` class to learn +/// what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::pixel_unshuffle(x, F::PixelUnshuffleFuncOptions(2)); +/// ``` +using PixelUnshuffleFuncOptions = PixelUnshuffleOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pooling.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pooling.h new file mode 100644 index 0000000000000000000000000000000000000000..41de605e90fb04509b317c7baf1f34d6a822ff97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/pooling.h @@ -0,0 +1,596 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +/// Options for a `D`-dimensional avgpool module. +template +struct AvgPoolOptions { + AvgPoolOptions(ExpandingArray kernel_size) + : kernel_size_(kernel_size), stride_(kernel_size) {} + + /// the size of the window to take an average over + TORCH_ARG(ExpandingArray, kernel_size); + + /// the stride of the window. Default value is `kernel_size` + TORCH_ARG(ExpandingArray, stride); + + /// implicit zero padding to be added on both sides + TORCH_ARG(ExpandingArray, padding) = 0; + + /// when True, will use `ceil` instead of `floor` to compute the output shape + TORCH_ARG(bool, ceil_mode) = false; + + /// when True, will include the zero-padding in the averaging calculation + TORCH_ARG(bool, count_include_pad) = true; + + /// if specified, it will be used as divisor, otherwise size of the pooling + /// region will be used. + + TORCH_ARG(c10::optional, divisor_override) = c10::nullopt; +}; + +/// `AvgPoolOptions` specialized for the `AvgPool1d` module. +/// +/// Example: +/// ``` +/// AvgPool1d model(AvgPool1dOptions(3).stride(2)); +/// ``` +using AvgPool1dOptions = AvgPoolOptions<1>; + +/// `AvgPoolOptions` specialized for the `AvgPool2d` module. +/// +/// Example: +/// ``` +/// AvgPool2d model(AvgPool2dOptions({3, 2}).stride({2, 2})); +/// ``` +using AvgPool2dOptions = AvgPoolOptions<2>; + +/// `AvgPoolOptions` specialized for the `AvgPool3d` module. +/// +/// Example: +/// ``` +/// AvgPool3d model(AvgPool3dOptions(5).stride(2)); +/// ``` +using AvgPool3dOptions = AvgPoolOptions<3>; + +namespace functional { +/// Options for `torch::nn::functional::avg_pool1d`. +/// +/// See the documentation for `torch::nn::AvgPool1dOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2)); +/// ``` +using AvgPool1dFuncOptions = AvgPool1dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::avg_pool2d`. +/// +/// See the documentation for `torch::nn::AvgPool2dOptions` class to learn what +/// arguments are supported. 
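+///
+/// A sketch showing the resulting spatial size (the input sizes are
+/// illustrative):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto x = torch::randn({1, 3, 8, 8});
+/// auto y = F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2).padding(1));
+/// // output side = floor((8 + 2*1 - 3) / 2) + 1 = 4, so y is (1, 3, 4, 4)
+/// ```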
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2)); +/// ``` +using AvgPool2dFuncOptions = AvgPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::avg_pool3d`. +/// +/// See the documentation for `torch::nn::AvgPool3dOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2)); +/// ``` +using AvgPool3dFuncOptions = AvgPool3dOptions; +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional maxpool module. +template +struct MaxPoolOptions { + MaxPoolOptions(ExpandingArray kernel_size) + : kernel_size_(kernel_size), stride_(kernel_size) {} + + /// the size of the window to take a max over + TORCH_ARG(ExpandingArray, kernel_size); + + /// the stride of the window. Default value is `kernel_size + TORCH_ARG(ExpandingArray, stride); + + /// implicit zero padding to be added on both sides + TORCH_ARG(ExpandingArray, padding) = 0; + + /// a parameter that controls the stride of elements in the window + TORCH_ARG(ExpandingArray, dilation) = 1; + + /// when True, will use `ceil` instead of `floor` to compute the output shape + TORCH_ARG(bool, ceil_mode) = false; +}; + +/// `MaxPoolOptions` specialized for the `MaxPool1d` module. +/// +/// Example: +/// ``` +/// MaxPool1d model(MaxPool1dOptions(3).stride(2)); +/// ``` +using MaxPool1dOptions = MaxPoolOptions<1>; + +/// `MaxPoolOptions` specialized for the `MaxPool2d` module. +/// +/// Example: +/// ``` +/// MaxPool2d model(MaxPool2dOptions({3, 2}).stride({2, 2})); +/// ``` +using MaxPool2dOptions = MaxPoolOptions<2>; + +/// `MaxPoolOptions` specialized for the `MaxPool3d` module. +/// +/// Example: +/// ``` +/// MaxPool3d model(MaxPool3dOptions(3).stride(2)); +/// ``` +using MaxPool3dOptions = MaxPoolOptions<3>; + +namespace functional { +/// Options for `torch::nn::functional::max_pool1d` and +/// `torch::nn::functional::max_pool1d_with_indices`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2)); +/// ``` +using MaxPool1dFuncOptions = MaxPool1dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::max_pool2d` and +/// `torch::nn::functional::max_pool2d_with_indices`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2)); +/// ``` +using MaxPool2dFuncOptions = MaxPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::max_pool3d` and +/// `torch::nn::functional::max_pool3d_with_indices`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2)); +/// ``` +using MaxPool3dFuncOptions = MaxPool3dOptions; +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional adaptive maxpool module. +template +struct AdaptiveMaxPoolOptions { + AdaptiveMaxPoolOptions(output_size_t output_size) + : output_size_(output_size) {} + + /// the target output size + TORCH_ARG(output_size_t, output_size); +}; + +/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool1d` module. 
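+///
+/// Adaptive pooling fixes the *output* size and derives the window sizes
+/// from the input, so inputs of different lengths map to the same output
+/// shape. A sketch (input sizes are illustrative):
+/// ```
+/// torch::nn::AdaptiveMaxPool1d pool(torch::nn::AdaptiveMaxPool1dOptions(3));
+/// auto y = pool(torch::randn({2, 4, 11})); // (N, C, L) -> (2, 4, 3)
+/// ```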
+/// +/// Example: +/// ``` +/// AdaptiveMaxPool1d model(AdaptiveMaxPool1dOptions(3)); +/// ``` +using AdaptiveMaxPool1dOptions = AdaptiveMaxPoolOptions>; + +/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool2d` module. +/// +/// Example: +/// ``` +/// AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2})); +/// ``` +using AdaptiveMaxPool2dOptions = + AdaptiveMaxPoolOptions>; + +/// `AdaptiveMaxPoolOptions` specialized for the `AdaptiveMaxPool3d` module. +/// +/// Example: +/// ``` +/// AdaptiveMaxPool3d model(AdaptiveMaxPool3dOptions(3)); +/// ``` +using AdaptiveMaxPool3dOptions = + AdaptiveMaxPoolOptions>; + +namespace functional { +/// Options for `torch::nn::functional::adaptive_max_pool1d` and +/// `torch::nn::functional::adaptive_max_pool1d_with_indices` +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3)); +/// ``` +using AdaptiveMaxPool1dFuncOptions = AdaptiveMaxPool1dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::adaptive_max_pool2d` and +/// `torch::nn::functional::adaptive_max_pool2d_with_indices` +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3)); +/// ``` +using AdaptiveMaxPool2dFuncOptions = AdaptiveMaxPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::adaptive_max_pool3d` and +/// `torch::nn::functional::adaptive_max_pool3d_with_indices` +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3)); +/// ``` +using AdaptiveMaxPool3dFuncOptions = AdaptiveMaxPool3dOptions; +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional adaptive avgpool module. +template +struct AdaptiveAvgPoolOptions { + AdaptiveAvgPoolOptions(output_size_t output_size) + : output_size_(output_size) {} + + /// the target output size + TORCH_ARG(output_size_t, output_size); +}; + +/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool1d` module. +/// +/// Example: +/// ``` +/// AdaptiveAvgPool1d model(AdaptiveAvgPool1dOptions(5)); +/// ``` +using AdaptiveAvgPool1dOptions = AdaptiveAvgPoolOptions>; + +/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool2d` module. +/// +/// Example: +/// ``` +/// AdaptiveAvgPool2d model(AdaptiveAvgPool2dOptions({3, 2})); +/// ``` +using AdaptiveAvgPool2dOptions = + AdaptiveAvgPoolOptions>; + +/// `AdaptiveAvgPoolOptions` specialized for the `AdaptiveAvgPool3d` module. +/// +/// Example: +/// ``` +/// AdaptiveAvgPool3d model(AdaptiveAvgPool3dOptions(3)); +/// ``` +using AdaptiveAvgPool3dOptions = + AdaptiveAvgPoolOptions>; + +namespace functional { +/// Options for `torch::nn::functional::adaptive_avg_pool1d`. +/// +/// See the documentation for `torch::nn::AdaptiveAvgPool1dOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3)); +/// ``` +using AdaptiveAvgPool1dFuncOptions = AdaptiveAvgPool1dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::adaptive_avg_pool2d`. +/// +/// See the documentation for `torch::nn::AdaptiveAvgPool2dOptions` class to +/// learn what arguments are supported. 
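+///
+/// With an output size of 1 this acts as a global average pool over the
+/// spatial dimensions, whatever the input resolution. A sketch:
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto x = torch::randn({1, 64, 13, 9});
+/// auto y = F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(1));
+/// // y.sizes() == (1, 64, 1, 1)
+/// ```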
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3)); +/// ``` +using AdaptiveAvgPool2dFuncOptions = AdaptiveAvgPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::adaptive_avg_pool3d`. +/// +/// See the documentation for `torch::nn::AdaptiveAvgPool3dOptions` class to +/// learn what arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3)); +/// ``` +using AdaptiveAvgPool3dFuncOptions = AdaptiveAvgPool3dOptions; +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional maxunpool module. +template +struct MaxUnpoolOptions { + MaxUnpoolOptions(ExpandingArray kernel_size) + : kernel_size_(kernel_size), stride_(kernel_size) {} + + /// the size of the window to take a max over + TORCH_ARG(ExpandingArray, kernel_size); + + /// the stride of the window. Default value is `kernel_size + TORCH_ARG(ExpandingArray, stride); + + /// implicit zero padding to be added on both sides + TORCH_ARG(ExpandingArray, padding) = 0; +}; + +/// `MaxUnpoolOptions` specialized for the `MaxUnpool1d` module. +/// +/// Example: +/// ``` +/// MaxUnpool1d model(MaxUnpool1dOptions(3).stride(2).padding(1)); +/// ``` +using MaxUnpool1dOptions = MaxUnpoolOptions<1>; + +/// `MaxUnpoolOptions` specialized for the `MaxUnpool2d` module. +/// +/// Example: +/// ``` +/// MaxUnpool2d model(MaxUnpool2dOptions(3).stride(2).padding(1)); +/// ``` +using MaxUnpool2dOptions = MaxUnpoolOptions<2>; + +/// `MaxUnpoolOptions` specialized for the `MaxUnpool3d` module. +/// +/// Example: +/// ``` +/// MaxUnpool3d model(MaxUnpool3dOptions(3).stride(2).padding(1)); +/// ``` +using MaxUnpool3dOptions = MaxUnpoolOptions<3>; + +// ============================================================================ + +namespace functional { + +/// Options for a `D`-dimensional maxunpool functional. +template +struct MaxUnpoolFuncOptions { + MaxUnpoolFuncOptions(ExpandingArray kernel_size) + : kernel_size_(kernel_size), stride_(kernel_size) {} + + /// the size of the window to take a max over + TORCH_ARG(ExpandingArray, kernel_size); + + /// the stride of the window. Default value is `kernel_size + TORCH_ARG(ExpandingArray, stride); + + /// implicit zero padding to be added on both sides + TORCH_ARG(ExpandingArray, padding) = 0; + + /// the targeted output size + TORCH_ARG(c10::optional>, output_size) = c10::nullopt; +}; + +/// `MaxUnpoolFuncOptions` specialized for +/// `torch::nn::functional::max_unpool1d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool1d(x, indices, +/// F::MaxUnpool1dFuncOptions(3).stride(2).padding(1)); +/// ``` +using MaxUnpool1dFuncOptions = MaxUnpoolFuncOptions<1>; + +/// `MaxUnpoolFuncOptions` specialized for +/// `torch::nn::functional::max_unpool2d`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool2d(x, indices, +/// F::MaxUnpool2dFuncOptions(3).stride(2).padding(1)); +/// ``` +using MaxUnpool2dFuncOptions = MaxUnpoolFuncOptions<2>; + +/// `MaxUnpoolFuncOptions` specialized for +/// `torch::nn::functional::max_unpool3d`. 
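+///
+/// Unpooling consumes the indices produced by the matching
+/// `max_pool3d_with_indices` call. A round-trip sketch (shapes are
+/// illustrative):
+/// ```
+/// namespace F = torch::nn::functional;
+/// auto x = torch::randn({1, 1, 4, 4, 4});
+/// auto [pooled, indices] =
+///     F::max_pool3d_with_indices(x, F::MaxPool3dFuncOptions(2));
+/// auto restored =
+///     F::max_unpool3d(pooled, indices, F::MaxUnpool3dFuncOptions(2));
+/// // restored has x's shape, with non-maximal entries set to zero
+/// ```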
+/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3)); +/// ``` +using MaxUnpool3dFuncOptions = MaxUnpoolFuncOptions<3>; + +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional fractional maxpool module. +template +struct FractionalMaxPoolOptions { + FractionalMaxPoolOptions(ExpandingArray kernel_size) + : kernel_size_(kernel_size) {} + + /// the size of the window to take a max over + TORCH_ARG(ExpandingArray, kernel_size); + + /// the target output size of the image + TORCH_ARG(c10::optional>, output_size) = c10::nullopt; + + /// If one wants to have an output size as a ratio of the input size, this + /// option can be given. This has to be a number or tuple in the range (0, 1) + using ExpandingArrayDouble = torch::ExpandingArray; + TORCH_ARG(c10::optional, output_ratio) = c10::nullopt; + + TORCH_ARG(torch::Tensor, _random_samples) = Tensor(); +}; + +/// `FractionalMaxPoolOptions` specialized for the `FractionalMaxPool2d` module. +/// +/// Example: +/// ``` +/// FractionalMaxPool2d model(FractionalMaxPool2dOptions(5).output_size(1)); +/// ``` +using FractionalMaxPool2dOptions = FractionalMaxPoolOptions<2>; + +/// `FractionalMaxPoolOptions` specialized for the `FractionalMaxPool3d` module. +/// +/// Example: +/// ``` +/// FractionalMaxPool3d model(FractionalMaxPool3dOptions(5).output_size(1)); +/// ``` +using FractionalMaxPool3dOptions = FractionalMaxPoolOptions<3>; + +namespace functional { +/// Options for `torch::nn::functional::fractional_max_pool2d` and +/// `torch::nn::functional::fractional_max_pool2d_with_indices` +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool2d(x, +/// F::FractionalMaxPool2dFuncOptions(3).output_size(2)); +/// ``` +using FractionalMaxPool2dFuncOptions = FractionalMaxPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::fractional_max_pool3d` and +/// `torch::nn::functional::fractional_max_pool3d_with_indices` +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::fractional_max_pool3d(x, +/// F::FractionalMaxPool3dFuncOptions(3).output_size(2)); +/// ``` +using FractionalMaxPool3dFuncOptions = FractionalMaxPool3dOptions; +} // namespace functional + +// ============================================================================ + +/// Options for a `D`-dimensional lppool module. +template +struct LPPoolOptions { + LPPoolOptions(double norm_type, ExpandingArray kernel_size) + : norm_type_(norm_type), + kernel_size_(kernel_size), + stride_(kernel_size) {} + + TORCH_ARG(double, norm_type); + + // the size of the window to take an average over + TORCH_ARG(ExpandingArray, kernel_size); + + // the stride of the window. Default value is `kernel_size` + TORCH_ARG(ExpandingArray, stride); + + // when True, will use `ceil` instead of `floor` to compute the output shape + TORCH_ARG(bool, ceil_mode) = false; +}; + +/// `LPPoolOptions` specialized for the `LPPool1d` module. +/// +/// Example: +/// ``` +/// LPPool1d model(LPPool1dOptions(1, 2).stride(5).ceil_mode(true)); +/// ``` +using LPPool1dOptions = LPPoolOptions<1>; + +/// `LPPoolOptions` specialized for the `LPPool2d` module. 
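+///
+/// LP pooling returns the p-norm of each window: with `norm_type` = 2 each
+/// output element is the square root of the sum of squares in its window,
+/// and `norm_type` = 1 amounts to sum pooling. A sketch with illustrative
+/// sizes:
+/// ```
+/// torch::nn::LPPool2d lp(torch::nn::LPPool2dOptions(2, {3, 3}).stride({2, 2}));
+/// auto y = lp(torch::randn({1, 3, 9, 9})); // -> (1, 3, 4, 4)
+/// ```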
+/// +/// Example: +/// ``` +/// LPPool2d model(LPPool2dOptions(1, std::vector({3, 4})).stride({5, +/// 6}).ceil_mode(true)); +/// ``` +using LPPool2dOptions = LPPoolOptions<2>; + +/// `LPPoolOptions` specialized for the `LPPool3d` module. +/// +/// Example: +/// ``` +/// LPPool3d model(LPPool3dOptions(1, std::vector({3, 4, 5})).stride( +/// {5, 6, 7}).ceil_mode(true)); +/// ``` +using LPPool3dOptions = LPPoolOptions<3>; + +namespace functional { +/// Options for `torch::nn::functional::lp_pool1d`. +/// +/// See the documentation for `torch::nn::LPPool1dOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::lp_pool1d(x, F::LPPool1dFuncOptions(2, 3).stride(2)); +/// ``` +using LPPool1dFuncOptions = LPPool1dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::lp_pool2d`. +/// +/// See the documentation for `torch::nn::LPPool2dOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::lp_pool2d(x, F::LPPool2dFuncOptions(2, {2, 3}).stride(2)); +/// ``` +using LPPool2dFuncOptions = LPPool2dOptions; +} // namespace functional + +namespace functional { +/// Options for `torch::nn::functional::lp_pool3d`. +/// +/// See the documentation for `torch::nn::LPPool3dOptions` class to learn what +/// arguments are supported. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::lp_pool3d(x, F::LPPool3dFuncOptions(2, {2, 3, 4}).stride(2)); +/// ``` +using LPPool3dFuncOptions = LPPool3dOptions; +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/rnn.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/rnn.h new file mode 100644 index 0000000000000000000000000000000000000000..133acc500276d2c0577e863f5bdad4ca6528cc14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/rnn.h @@ -0,0 +1,236 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { + +namespace detail { + +/// Common options for RNN, LSTM and GRU modules. +struct TORCH_API RNNOptionsBase { + typedef std::variant< + enumtype::kLSTM, + enumtype::kGRU, + enumtype::kRNN_TANH, + enumtype::kRNN_RELU> + rnn_options_base_mode_t; + + RNNOptionsBase( + rnn_options_base_mode_t mode, + int64_t input_size, + int64_t hidden_size); + + TORCH_ARG(rnn_options_base_mode_t, mode); + /// The number of features of a single sample in the input sequence `x`. + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h`. + TORCH_ARG(int64_t, hidden_size); + /// The number of recurrent layers (cells) to use. + TORCH_ARG(int64_t, num_layers) = 1; + /// Whether a bias term should be added to all linear operations. + TORCH_ARG(bool, bias) = true; + /// If true, the input sequence should be provided as `(batch, sequence, + /// features)`. If false (default), the expected layout is `(sequence, batch, + /// features)`. + TORCH_ARG(bool, batch_first) = false; + /// If non-zero, adds dropout with the given probability to the output of each + /// RNN layer, except the final layer. + TORCH_ARG(double, dropout) = 0.0; + /// Whether to make the RNN bidirectional. + TORCH_ARG(bool, bidirectional) = false; + /// Cell projection dimension. If 0, projections are not added. 
Can only be + /// used for LSTMs. + TORCH_ARG(int64_t, proj_size) = 0; +}; + +} // namespace detail + +/// Options for the `RNN` module. +/// +/// Example: +/// ``` +/// RNN model(RNNOptions(128, +/// 64).num_layers(3).dropout(0.2).nonlinearity(torch::kTanh)); +/// ``` +struct TORCH_API RNNOptions { + typedef std::variant nonlinearity_t; + + RNNOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// Number of recurrent layers. E.g., setting ``num_layers=2`` + /// would mean stacking two RNNs together to form a `stacked RNN`, + /// with the second RNN taking in outputs of the first RNN and + /// computing the final results. Default: 1 + TORCH_ARG(int64_t, num_layers) = 1; + /// The non-linearity to use. Can be either ``torch::kTanh`` or + /// ``torch::kReLU``. Default: ``torch::kTanh`` + TORCH_ARG(nonlinearity_t, nonlinearity) = torch::kTanh; + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; + /// If ``true``, then the input and output tensors are provided + /// as `(batch, seq, feature)`. Default: ``false`` + TORCH_ARG(bool, batch_first) = false; + /// If non-zero, introduces a `Dropout` layer on the outputs of each + /// RNN layer except the last layer, with dropout probability equal to + /// `dropout`. Default: 0 + TORCH_ARG(double, dropout) = 0.0; + /// If ``true``, becomes a bidirectional RNN. Default: ``false`` + TORCH_ARG(bool, bidirectional) = false; +}; + +/// Options for the `LSTM` module. +/// +/// Example: +/// ``` +/// LSTM model(LSTMOptions(2, +/// 4).num_layers(3).batch_first(false).bidirectional(true)); +/// ``` +struct TORCH_API LSTMOptions { + LSTMOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// Number of recurrent layers. E.g., setting ``num_layers=2`` + /// would mean stacking two LSTMs together to form a `stacked LSTM`, + /// with the second LSTM taking in outputs of the first LSTM and + /// computing the final results. Default: 1 + TORCH_ARG(int64_t, num_layers) = 1; + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; + /// If ``true``, then the input and output tensors are provided + /// as (batch, seq, feature). Default: ``false`` + TORCH_ARG(bool, batch_first) = false; + /// If non-zero, introduces a `Dropout` layer on the outputs of each + /// LSTM layer except the last layer, with dropout probability equal to + /// `dropout`. Default: 0 + TORCH_ARG(double, dropout) = 0.0; + /// If ``true``, becomes a bidirectional LSTM. Default: ``false`` + TORCH_ARG(bool, bidirectional) = false; + /// Cell projection dimension. If 0, projections are not added + TORCH_ARG(int64_t, proj_size) = 0; +}; + +/// Options for the `GRU` module. +/// +/// Example: +/// ``` +/// GRU model(GRUOptions(2, +/// 4).num_layers(3).batch_first(false).bidirectional(true)); +/// ``` +struct TORCH_API GRUOptions { + GRUOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// Number of recurrent layers. 
E.g., setting ``num_layers=2`` + /// would mean stacking two GRUs together to form a `stacked GRU`, + /// with the second GRU taking in outputs of the first GRU and + /// computing the final results. Default: 1 + TORCH_ARG(int64_t, num_layers) = 1; + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; + /// If ``true``, then the input and output tensors are provided + /// as (batch, seq, feature). Default: ``false`` + TORCH_ARG(bool, batch_first) = false; + /// If non-zero, introduces a `Dropout` layer on the outputs of each + /// GRU layer except the last layer, with dropout probability equal to + /// `dropout`. Default: 0 + TORCH_ARG(double, dropout) = 0.0; + /// If ``true``, becomes a bidirectional GRU. Default: ``false`` + TORCH_ARG(bool, bidirectional) = false; +}; + +namespace detail { + +/// Common options for RNNCell, LSTMCell and GRUCell modules +struct TORCH_API RNNCellOptionsBase { + RNNCellOptionsBase( + int64_t input_size, + int64_t hidden_size, + bool bias, + int64_t num_chunks); + TORCH_ARG(int64_t, input_size); + TORCH_ARG(int64_t, hidden_size); + TORCH_ARG(bool, bias); + TORCH_ARG(int64_t, num_chunks); +}; + +} // namespace detail + +/// Options for the `RNNCell` module. +/// +/// Example: +/// ``` +/// RNNCell model(RNNCellOptions(20, +/// 10).bias(false).nonlinearity(torch::kReLU)); +/// ``` +struct TORCH_API RNNCellOptions { + typedef std::variant nonlinearity_t; + + RNNCellOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; + /// The non-linearity to use. Can be either ``torch::kTanh`` or + /// ``torch::kReLU``. Default: ``torch::kTanh`` + TORCH_ARG(nonlinearity_t, nonlinearity) = torch::kTanh; +}; + +/// Options for the `LSTMCell` module. +/// +/// Example: +/// ``` +/// LSTMCell model(LSTMCellOptions(20, 10).bias(false)); +/// ``` +struct TORCH_API LSTMCellOptions { + LSTMCellOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. + /// Default: ``true`` + TORCH_ARG(bool, bias) = true; +}; + +/// Options for the `GRUCell` module. +/// +/// Example: +/// ``` +/// GRUCell model(GRUCellOptions(20, 10).bias(false)); +/// ``` +struct TORCH_API GRUCellOptions { + GRUCellOptions(int64_t input_size, int64_t hidden_size); + + /// The number of expected features in the input `x` + TORCH_ARG(int64_t, input_size); + /// The number of features in the hidden state `h` + TORCH_ARG(int64_t, hidden_size); + /// If ``false``, then the layer does not use bias weights `b_ih` and `b_hh`. 
+ /// Default: ``true`` + TORCH_ARG(bool, bias) = true; +}; + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h new file mode 100644 index 0000000000000000000000000000000000000000..41db38fe0757a72081b0125f8747bc0b65c16c85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformer.h @@ -0,0 +1,64 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `Transformer` module +/// +/// Example: +/// ``` +/// TransformerOptions options; +/// TransformerOptions options(16, 4); +/// auto options = TransformerOptions().d_model(4).nhead(2).dropout(0.0); +/// ``` +struct TORCH_API TransformerOptions { + // The following constructors are commonly used + // Please don't add more unless it is proved as a common usage + TransformerOptions() = default; + TransformerOptions(int64_t d_model, int64_t nhead); + TransformerOptions( + int64_t d_model, + int64_t nhead, + int64_t num_encoder_layers, + int64_t num_decoder_layers); + + /// the number of expected features in the encoder/decoder inputs + /// (default=512) + TORCH_ARG(int64_t, d_model) = 512; + + /// the number of heads in the multiheadattention models (default=8) + TORCH_ARG(int64_t, nhead) = 8; + + /// the number of sub-encoder-layers in the encoder (default=6) + TORCH_ARG(int64_t, num_encoder_layers) = 6; + + /// the number of sub-decoder-layers in the decoder (default=6) + TORCH_ARG(int64_t, num_decoder_layers) = 6; + + /// the dimension of the feedforward network model (default=2048) + TORCH_ARG(int64_t, dim_feedforward) = 2048; + + /// the dropout value (default=0.1) + TORCH_ARG(double, dropout) = 0.1; + + /// the activation function of encoder/decoder intermediate layer + /// (default=``torch::kReLU``) + TORCH_ARG(activation_t, activation) = torch::kReLU; + + /// custom encoder (default=None) + TORCH_ARG(AnyModule, custom_encoder); + + /// custom decoder (default=None) + TORCH_ARG(AnyModule, custom_decoder); +}; + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformercoder.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformercoder.h new file mode 100644 index 0000000000000000000000000000000000000000..64f6b998f4c657d1ca45a0123dbba379effd0979 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformercoder.h @@ -0,0 +1,76 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace nn { + +/// Options for the `TransformerEncoder` +/// +/// Example: +/// ``` +/// TransformerEncoderLayer encoderLayer(TransformerEncoderLayerOptions(512, +/// 8).dropout(0.1)); auto options = TransformerEncoderOptions(encoderLayer, +/// 6).norm(LayerNorm(LayerNormOptions({2}))); +/// ``` +struct TORCH_API TransformerEncoderOptions { + // This constructor will keep a shallow copy of encoder_layer, so it keeps all + // the data in encoder_layer. + TransformerEncoderOptions( + TransformerEncoderLayer encoder_layer, + int64_t num_layers); + // This constructor will create a new TransformerEncoderLayer obj based on + // passed in encoder_layer_options. 
+ TransformerEncoderOptions(
+ const TransformerEncoderLayerOptions& encoder_layer_options,
+ int64_t num_layers);
+
+ /// the transformer encoder layer
+ TORCH_ARG(TransformerEncoderLayer, encoder_layer) = nullptr;
+
+ /// number of encoder layers
+ TORCH_ARG(int64_t, num_layers);
+
+ /// normalization module
+ TORCH_ARG(AnyModule, norm);
+};
+
+/// Options for the `TransformerDecoder` module.
+///
+/// Example:
+/// ```
+/// TransformerDecoderLayer decoder_layer(TransformerDecoderLayerOptions(512,
+/// 8).dropout(0.1)); auto options = TransformerDecoderOptions(decoder_layer,
+/// 6).norm(LayerNorm(LayerNormOptions({2}))); TransformerDecoder
+/// transformer_decoder(options);
+/// ```
+struct TORCH_API TransformerDecoderOptions {
+ // This constructor will keep a reference to the passed-in decoder_layer,
+ // so it keeps all the data in decoder_layer.
+ TransformerDecoderOptions(
+ TransformerDecoderLayer decoder_layer,
+ int64_t num_layers);
+ // This constructor will create a new TransformerDecoderLayer object based
+ // on the passed-in decoder_layer_options.
+ TransformerDecoderOptions(
+ const TransformerDecoderLayerOptions& decoder_layer_options,
+ int64_t num_layers);
+
+ /// decoder layer to be cloned
+ TORCH_ARG(TransformerDecoderLayer, decoder_layer) = nullptr;
+
+ /// number of decoder layers
+ TORCH_ARG(int64_t, num_layers);
+
+ /// normalization module
+ TORCH_ARG(AnyModule, norm);
+};
+
+} // namespace nn
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformerlayer.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformerlayer.h
new file mode 100644
index 0000000000000000000000000000000000000000..cbd6af26a1da696ef118caa70b77d9c17a969dfd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/transformerlayer.h
@@ -0,0 +1,72 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+namespace torch {
+namespace nn {
+
+using activation_t = std::variant<
+ enumtype::kReLU,
+ enumtype::kGELU,
+ std::function<Tensor(const Tensor&)>>;
+
+/// Options for the `TransformerEncoderLayer` module.
+///
+/// Example:
+/// ```
+/// auto options = TransformerEncoderLayerOptions(512, 8).dropout(0.2);
+/// ```
+struct TORCH_API TransformerEncoderLayerOptions {
+ /* implicit */ TransformerEncoderLayerOptions(int64_t d_model, int64_t nhead);
+
+ /// the number of expected features in the input
+ TORCH_ARG(int64_t, d_model);
+
+ /// the number of heads in the multiheadattention models
+ TORCH_ARG(int64_t, nhead);
+
+ /// the dimension of the feedforward network model, default is 2048
+ TORCH_ARG(int64_t, dim_feedforward) = 2048;
+
+ /// the dropout value, default is 0.1
+ TORCH_ARG(double, dropout) = 0.1;
+
+ /// the activation function of intermediate layer, can be ``torch::kReLU``,
+ /// ``torch::kGELU``, or a unary callable. Default: ``torch::kReLU``
+ TORCH_ARG(activation_t, activation) = torch::kReLU;
+};
+
+// ============================================================================
+
+/// Options for the `TransformerDecoderLayer` module.
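+///
+/// A fuller sketch of building and running one decoder layer (sizes are
+/// illustrative; `activation` also accepts a unary callable per
+/// `activation_t` above):
+/// ```
+/// torch::nn::TransformerDecoderLayer layer(
+///     torch::nn::TransformerDecoderLayerOptions(512, 8)
+///         .dim_feedforward(1024)
+///         .activation(torch::kGELU));
+/// auto tgt = torch::randn({10, 4, 512});    // (T, N, E)
+/// auto memory = torch::randn({20, 4, 512}); // (S, N, E)
+/// auto out = layer(tgt, memory);            // same shape as tgt
+/// ```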
+///
+/// Example:
+/// ```
+/// TransformerDecoderLayer model(TransformerDecoderLayerOptions(512,
+/// 8).dropout(0.2));
+/// ```
+struct TORCH_API TransformerDecoderLayerOptions {
+  TransformerDecoderLayerOptions(int64_t d_model, int64_t nhead);
+
+  /// number of expected features in the input
+  TORCH_ARG(int64_t, d_model);
+
+  /// number of heads in the multiheadattention models
+  TORCH_ARG(int64_t, nhead);
+
+  /// dimension of the feedforward network model. Default: 2048
+  TORCH_ARG(int64_t, dim_feedforward) = 2048;
+
+  /// dropout value. Default: 0.1
+  TORCH_ARG(double, dropout) = 0.1;
+
+  /// activation function of intermediate layer, can be ``torch::kGELU``,
+  /// ``torch::kReLU``, or a unary callable. Default: ``torch::kReLU``
+  TORCH_ARG(activation_t, activation) = torch::kReLU;
+};
+
+} // namespace nn
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca793beb97725d62b8b3b66c3da19574723d5932
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/upsampling.h
@@ -0,0 +1,110 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+namespace torch {
+namespace nn {
+
+/// Options for the `Upsample` module.
+///
+/// Example:
+/// ```
+/// Upsample
+/// model(UpsampleOptions().scale_factor(std::vector({3})).mode(torch::kLinear).align_corners(false));
+/// ```
+struct TORCH_API UpsampleOptions {
+  /// output spatial sizes.
+  TORCH_ARG(c10::optional>, size) = c10::nullopt;
+
+  /// multiplier for spatial size.
+  TORCH_ARG(c10::optional>, scale_factor) = c10::nullopt;
+
+  /// the upsampling algorithm: one of "nearest", "linear", "bilinear",
+  /// "bicubic" and "trilinear". Default: "nearest"
+  typedef std::variant<
+      enumtype::kNearest,
+      enumtype::kLinear,
+      enumtype::kBilinear,
+      enumtype::kBicubic,
+      enumtype::kTrilinear>
+      mode_t;
+  TORCH_ARG(mode_t, mode) = torch::kNearest;
+
+  /// if "True", the corner pixels of the input and output tensors are
+  /// aligned, thus preserving the values at those pixels. This only has an
+  /// effect when :attr:`mode` is "linear", "bilinear", "bicubic", or
+  /// "trilinear". Default: "False"
+  TORCH_ARG(c10::optional, align_corners) = c10::nullopt;
+};
+
+namespace functional {
+
+/// Options for `torch::nn::functional::interpolate`.
+///
+/// Example:
+/// ```
+/// namespace F = torch::nn::functional;
+/// F::interpolate(input,
+/// F::InterpolateFuncOptions().size(std::vector({4})).mode(torch::kNearest));
+/// ```
+struct TORCH_API InterpolateFuncOptions {
+  typedef std::variant<
+      enumtype::kNearest,
+      enumtype::kLinear,
+      enumtype::kBilinear,
+      enumtype::kBicubic,
+      enumtype::kTrilinear,
+      enumtype::kArea,
+      enumtype::kNearestExact>
+      mode_t;
+
+  /// output spatial sizes.
+  TORCH_ARG(c10::optional>, size) = c10::nullopt;
+
+  /// multiplier for spatial size.
+  TORCH_ARG(c10::optional>, scale_factor) = c10::nullopt;
+
+  /// the upsampling algorithm: one of "nearest", "linear", "bilinear",
+  /// "bicubic", "trilinear", "area", "nearest-exact". Default: "nearest"
+  TORCH_ARG(mode_t, mode) = torch::kNearest;
+
+  /// Geometrically, we consider the pixels of the input and output as squares
If set to "True", the input and output tensors are + /// aligned by the center points of their corner pixels, preserving the values + /// at the corner pixels. If set to "False", the input and output tensors + /// are aligned by the corner points of their corner pixels, and the + /// interpolation uses edge value padding for out-of-boundary values, making + /// this operation *independent* of input size when `scale_factor` is + /// kept the same. It is *required* when interpolating mode is "linear", + /// "bilinear", "bicubic" or "trilinear". Default: "False" + TORCH_ARG(c10::optional, align_corners) = c10::nullopt; + + /// recompute the scale_factor for use in the + /// interpolation calculation. When `scale_factor` is passed as a parameter, + /// it is used to compute the `output_size`. If `recompute_scale_factor` is + /// `true` or not specified, a new `scale_factor` will be computed based on + /// the output and input sizes for use in the interpolation computation (i.e. + /// the computation will be identical to if the computed `output_size` were + /// passed-in explicitly). Otherwise, the passed-in `scale_factor` will be + /// used in the interpolation computation. Note that when `scale_factor` is + /// floating-point, the recomputed scale_factor may differ from the one passed + /// in due to rounding and precision issues. + TORCH_ARG(c10::optional, recompute_scale_factor) = c10::nullopt; + + /// flag to apply anti-aliasing. Using anti-alias + /// option together with :attr:`align_corners` equals "False", interpolation + /// result would match Pillow result for downsampling operation. Supported + /// modes: "bilinear". Default: "False". + TORCH_ARG(bool, antialias) = false; +}; + +} // namespace functional + +} // namespace nn +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/vision.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/vision.h new file mode 100644 index 0000000000000000000000000000000000000000..814f4b6684d96190e88b72d53a24eceac6eb5b0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn/options/vision.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace nn { +namespace functional { + +/// Options for `torch::nn::functional::grid_sample`. +/// +/// Example: +/// ``` +/// namespace F = torch::nn::functional; +/// F::grid_sample(input, grid, +/// F::GridSampleFuncOptions().mode(torch::kBilinear).padding_mode(torch::kZeros).align_corners(true)); +/// ``` +struct TORCH_API GridSampleFuncOptions { + typedef std::variant mode_t; + typedef std:: + variant + padding_mode_t; + + /// interpolation mode to calculate output values. Default: Bilinear + TORCH_ARG(mode_t, mode) = torch::kBilinear; + /// padding mode for outside grid values. Default: Zeros + TORCH_ARG(padding_mode_t, padding_mode) = torch::kZeros; + /// Specifies perspective to pixel as point. 
+  TORCH_ARG(c10::optional, align_corners) = c10::nullopt;
+};
+
+} // namespace functional
+} // namespace nn
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cache_entry.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cache_entry.h
new file mode 100644
index 0000000000000000000000000000000000000000..d15fc9896db2e100c8505aa54d2ad30fcce5863f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cache_entry.h
@@ -0,0 +1,69 @@
+#pragma once
+
+#include
+
+#ifdef __cplusplus
+
+#include
+#include
+#include
+
+namespace py = pybind11;
+
+extern "C" {
+
+#endif
+
+/*
+Our cache resides on the extra scratch space of the code object. The structure
+of the cache is as follows:
+
+-> ExtraState
+  -> CacheEntry (list)
+    -> check_fn
+    -> code
+  -> FrameState
+
+CacheEntry is a linked list node containing the check_fn for guards
+and the optimized code.
+
+The FrameState is a PyDict that enables sharing between different frames. This
+is used to detect dynamism in automatic dynamic shapes.
+
+These two are encapsulated into an ExtraState.
+*/
+
+typedef struct CacheEntry CacheEntry;
+typedef struct ExtraState ExtraState;
+
+#ifdef __cplusplus
+
+typedef struct VISIBILITY_HIDDEN CacheEntry {
+  // check the guards: lambda: <locals of frame>: bool
+  py::object check_fn;
+  // modified user bytecode (protected by check_fn's guards)
+  py::object code;
+  // Reference to owning ExtraState
+  ExtraState* _owner{nullptr};
+  // Reference to this CacheEntry's location in owner's linked list
+  std::list::iterator _owner_loc;
+
+  CacheEntry(const py::handle& guarded_code);
+  ~CacheEntry();
+
+  // Warning: returns a reference whose lifetime is controlled by C++
+  py::object next();
+} CacheEntry;
+
+#endif
+
+// Returns borrowed reference
+PyCodeObject* CacheEntry_get_code(CacheEntry* e);
+
+// Returns a borrowed reference to CacheEntry as a PyObject
+// Warning: lifetime is controlled by C++
+PyObject* CacheEntry_to_obj(CacheEntry* e);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/compiled_autograd.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/compiled_autograd.h
new file mode 100644
index 0000000000000000000000000000000000000000..63e9bbc03c0d7e1e825e19efa00927150c1b4f4a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/compiled_autograd.h
@@ -0,0 +1,713 @@
+#pragma once
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+// see [Note: Compiled Autograd]
+
+namespace torch::dynamo::autograd {
+using namespace torch::autograd;
+
+struct SizeInput {
+  // Note: int value is still needed when dynamic to pass as an arg
+  enum DynType : uint8_t { STATIC = 0, DYNAMIC = 1 };
+  SizeInput(DynType dt, int64_t v) : dyn_type(dt), value(v) {}
+  DynType dyn_type;
+  int64_t value;
+};
+
+struct CacheKeyBuffer {
+  CacheKeyBuffer(const uint8_t* key, uint16_t len) : data(new uint8_t[len]) {
+    std::memcpy(data.get(), key, len);
+  }
+  const uint8_t* get() const {
+    return data.get();
+  }
+
+ private:
+  std::unique_ptr data;
+};
+
+struct CacheKey {
+  // Key to find the next node in the shadow graph. We use C++ RTTI for the
+  // type of the node (ntype), then a key generated with a visitor pattern.
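+  //
+  // Illustrative sketch (comment only, not from the original header): two
+  // keys are equal only if the node type, key length, and key bytes all
+  // match; `a_buf`/`b_buf` below stand in for guard-specialization bytes.
+  //
+  //   uint8_t a_buf[] = {1, 2};
+  //   uint8_t b_buf[] = {1, 3};
+  //   CacheKey a(typeid(int), a_buf, 2);
+  //   CacheKey b(typeid(int), b_buf, 2);
+  //   bool eq = (a == b); // false: the second byte differs
+  //   bool lt = (a < b);  // true: memcmp orders a's bytes first
+  //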
+ CacheKey(const std::type_index& ntype, const uint8_t* key, uint16_t len) + : node_type(ntype), key_size(len), key(key) {} + + bool operator<(const CacheKey& other) const { + if (node_type != other.node_type) { + return node_type < other.node_type; + } + if (key_size != other.key_size) { + return key_size < other.key_size; + } + return std::memcmp(key, other.key, key_size) < 0; + } + + bool operator==(const CacheKey& other) const { + return node_type == other.node_type && key_size == other.key_size && + std::memcmp(key, other.key, key_size) == 0; + } + + size_t hash() const { + // don't bother hashing the key data, common case 1 cache entry per node + return std::hash()(node_type) ^ key_size; + } + + std::type_index node_type; + uint16_t key_size; + const uint8_t* key; +}; + +struct NodeCall { + NodeCall(uint32_t id_, std::shared_ptr node_) + : id(id_), node(std::move(node_)) {} + + void mark_output(int input_nr, int output_idx) { + graph_output.emplace_back(std::make_pair(input_nr, output_idx)); + } + + uint32_t id; + std::shared_ptr node; + std::vector> tensor_pre_hooks; + std::vector pre_hooks; + std::vector post_hooks; + std::vector post_acc_grad_hooks; + std::vector> graph_output; + bool needed = true; +}; + +struct NodeCalls : public std::unordered_map { + NodeCall& lookup(const std::shared_ptr& function) { + auto it = find(function.get()); + if (it == end()) { + it = emplace(function.get(), NodeCall(_next_id++, function)).first; + } + return it->second; + } + + private: + uint32_t _next_id = 0; +}; + +struct TensorArg { + // Represents a de-duplicated tensor that will be passed into the graph + TensorArg(uint32_t i = 0) : id(i) {} + uint32_t index() const { + TORCH_INTERNAL_ASSERT(defined()); + return id - 1; + } + bool defined() const { + return id != 0; + } + uint32_t id; + at::Tensor proxy_tensor; +}; + +struct TensorArgs { + // Manages a collection of TensorArgs and mappings from Tensors/SavedVariables + // to them. This also allows us to unpack SavedVariable exactly once and + // store the unpacked Tensor. + + TensorArg& lookup(const at::Tensor& tensor, bool create = false) { + if (!tensor.defined()) { + return _undefined; + } + auto impl = tensor.unsafeGetTensorImpl(); + auto it = _args.find(impl); + if (it == _args.end()) { + TORCH_INTERNAL_ASSERT(create && inputs.size() == _next_id - 1); + it = _args.emplace(impl, TensorArg(_next_id++)).first; + inputs.emplace_back(tensor); + } + return it->second; + } + + TensorArg& lookup(const SavedVariable& sv) { + auto it = _saved_variables.find(&sv); + TORCH_INTERNAL_ASSERT(it != _saved_variables.end()); + return *it->second; + } + + TensorArg& add(const at::Tensor& tensor) { + return lookup(tensor, true); + } + + TensorArg& add(const SavedVariable& sv, const std::shared_ptr& node) { + // TODO(jansel): Here we unpack the SavedVariable exactly once. This might + // fire SavedTensor hooks. In the future we should try to put saved tensor + // hooks into the graph. + at::Tensor tensor = sv.unpack(node); + TensorArg& arg = add(tensor); + _saved_variables.emplace(&sv, &arg); + return arg; + } + + // the concrete tensors that will get passed into the graph as inputs + std::vector inputs; + + private: + std::unordered_map _args; + // Every TensorArg from this is actually owned by _args (or _undefined) and + // that's why we have an un-owned pointer here. 
+ std::unordered_map _saved_variables; + TensorArg _undefined; + uint32_t _next_id = 1; // id=0 used by _undefined +}; + +struct AutogradCompilerCall { + void add_size_input(const c10::SymInt& s) { + all_size_inputs.emplace_back( + SizeInput(default_dyn_type, s.guard_int(__FILE__, __LINE__))); + } + + int emplace_hook(c10::SafePyObject&& fn) { + hooks.emplace_back(std::move(fn)); + return hooks.size() - 1; + } + + TensorArgs tensor_args; + std::vector all_size_inputs; + std::vector dyn_size_inputs; + std::vector hooks; + NodeCalls node_calls; + SizeInput::DynType default_dyn_type = SizeInput::STATIC; +}; + +class CompiledNodeArgs { + // CompiledNodeArgs builds a representation of the constant values found + // across all the nodes in the compiled graph, via 'collect' overloads. The + // collected constants are specialized on by concatenation into a cache key. + // Tensor, symint arguments (which are lifted to become graph inputs rather + // than specialized on) are forwarded to the compiler and not included in the + // key. + public: + void collect(const TensorArg& t) { + collect_size(t.id); + if (t.defined()) { + const at::Tensor& tensor = _compiler.tensor_args.inputs[t.index()]; + // including these in the cache key means dynamo-level tensor guards can + // be skipped + collect(tensor.device()); + collect(tensor.dtype()); + collect(tensor.requires_grad()); + } + } + + void collect(const at::Tensor& t) { + collect(_compiler.tensor_args.add(t)); + } + void collect(const SavedVariable& t) { + collect(_compiler.tensor_args.add(t, _node_call.node)); + } + void collect(const c10::SymInt& t) { + _compiler.add_size_input(t); + } + template + void collect(const std::vector& t) { + collect_size(t.size()); + for (const T& i : t) { + collect(i); + } + } + template + void collect(const c10::ArrayRef& t) { + collect_size(t.size()); + for (const T& i : t) { + collect(i); + } + } + template + void collect(const c10::OptionalArray& t) { + collect(t.list); + } + template + void collect(const c10::optional& t) { + if (cond(t.has_value())) { + collect(*t); + } + } + template + void collect(const std::pair& t) { + collect(t.first); + collect(t.second); + } + void collect(const c10::Scalar& t) { + auto type = t.type(); + specialize_on_bytes(type); + if (type == c10::ScalarType::Double) { + collect(t.toDouble()); + } else if (type == c10::ScalarType::Long) { + collect(t.toLong()); + } else if (type == c10::ScalarType::Bool) { + collect(t.toBool()); + } else if (type == c10::ScalarType::ComplexDouble) { + auto c = t.toComplexDouble(); + collect(c.real()); + collect(c.imag()); + } else { + TORCH_INTERNAL_ASSERT(false); + } + } + void collect(const c10::TensorOptions& t) { + collect(t.device()); + collect(t.dtype()); + collect(t.layout()); + collect(t.requires_grad()); + collect(t.pinned_memory()); + collect(t.memory_format_opt()); + } + void collect(const at::TensorGeometry& t) { + collect(t.sym_sizes()); + collect(t.sym_strides()); + collect(t.sym_storage_offset()); + } + void collect(const torch::autograd::TypeAndSize& t) { + collect(t.sym_sizes); + collect(t.options); + } + void collect(const c10::Device& t) { + collect(t.type()); + collect(t.index()); + } + void collect(const std::string& t) { + collect_size(t.size()); + for (char c : t) { + collect(c); + } + } + void collect(const caffe2::TypeMeta& t) { + specialize_on_bytes(t.id()); + } + void collect(const std::shared_ptr& t) { + // Note: this is only capturing the ID of the node not everything + // contained inside it. 
This is used for tracking connections between
+    // nodes and the actual details of the node itself must be handled by
+    // a separate call to `node->compiled_args()`.
+    if (cond((bool)t)) {
+      collect(_compiler.node_calls.lookup(t));
+    }
+  }
+  void collect(const NodeCall& t) {
+    collect_size(t.id);
+    collect(t.graph_output);
+    collect_hooks_from(t.node.get());
+  }
+  void collect(const Edge& t) {
+    if (cond(t.is_valid())) {
+      collect_size(_compiler.node_calls.lookup(t.function).id);
+      collect_size(t.input_nr);
+      collect(t.function->input_metadata(t.input_nr)); // for validate_outputs
+    }
+  }
+  void collect(const InputMetadata& t) {
+    TORCH_CHECK(!t.is_nested_tensor(), "NestedTensor not implemented");
+    collect(t.options());
+    collect(t.is_tensor_subclass());
+    collect(t.shape_as_dim_vector());
+  }
+  void collect(const VariableInfo& t) {
+    collect(t.layout);
+    collect(t.device);
+    collect(t.scalar_type);
+    collect(t.size);
+    collect(t.requires_grad);
+    collect(t.is_empty);
+  }
+  bool cond(bool cond) {
+    collect(cond);
+    return cond;
+  }
+
+#define COLLECT_AS_BYTES(T) \
+  void collect(T t) {       \
+    specialize_on_bytes(t); \
+  }
+  COLLECT_AS_BYTES(c10::ScalarType);
+  COLLECT_AS_BYTES(c10::DeviceType);
+  COLLECT_AS_BYTES(c10::Layout);
+  COLLECT_AS_BYTES(c10::MemoryFormat);
+  COLLECT_AS_BYTES(int8_t);
+  COLLECT_AS_BYTES(int16_t);
+  COLLECT_AS_BYTES(int32_t);
+  COLLECT_AS_BYTES(int64_t);
+  COLLECT_AS_BYTES(uint8_t);
+  COLLECT_AS_BYTES(uint16_t);
+  COLLECT_AS_BYTES(uint32_t);
+  COLLECT_AS_BYTES(uint64_t);
+  COLLECT_AS_BYTES(bool);
+  COLLECT_AS_BYTES(float);
+  COLLECT_AS_BYTES(double);
+#undef COLLECT_AS_BYTES
+
+  void collect_hooks_from(Node* fn) {
+    TORCH_CHECK(
+        fn->retains_grad_hooks().empty(),
+        "retains_grad_hooks not implemented for compiled autograd");
+    for (auto& i : fn->tensor_pre_hooks()) {
+      i->compiled_args(*this);
+    }
+    for (auto& i : fn->pre_hooks()) {
+      i->compiled_args(*this);
+    }
+    for (auto& i : fn->post_hooks()) {
+      i->compiled_args(*this);
+    }
+    collect_size(_node_call.tensor_pre_hooks.size());
+    collect_size(_node_call.pre_hooks.size());
+    collect_size(_node_call.post_hooks.size());
+    for (const auto& h : _node_call.tensor_pre_hooks) {
+      collect_size(h.second); // index
+    }
+  }
+
+  CacheKey key() const {
+    Node* node = _node_call.node.get();
+    return CacheKey(
+        typeid(*node), _specialization_key, _specialization_key_size);
+  }
+
+  int add_backward(c10::SafePyObject&& obj) {
+    return _compiler.emplace_hook(std::move(obj));
+  }
+
+  int add_backward_state(c10::SafePyObject&& obj) {
+    return _compiler.emplace_hook(std::move(obj));
+  }
+
+  void add_tensor_pre_hook(c10::SafePyObject&& obj, int index) {
+    auto fn_id = _compiler.emplace_hook(std::move(obj));
+    collect_size(fn_id);
+    _node_call.tensor_pre_hooks.emplace_back(std::make_pair(fn_id, index));
+  }
+
+  void add_pre_hook(c10::SafePyObject&& obj) {
+    auto fn_id = _compiler.emplace_hook(std::move(obj));
+    collect_size(fn_id);
+    _node_call.pre_hooks.emplace_back(fn_id);
+  }
+
+  void add_post_hook(c10::SafePyObject&& obj) {
+    auto fn_id = _compiler.emplace_hook(std::move(obj));
+    collect_size(fn_id);
+    _node_call.post_hooks.emplace_back(fn_id);
+  }
+
+  void add_post_acc_grad_hook(c10::SafePyObject&& obj) {
+    auto fn_id = _compiler.emplace_hook(std::move(obj));
+    collect_size(fn_id);
+    _node_call.post_acc_grad_hooks.emplace_back(fn_id);
+  }
+
+  void collect_size(size_t s) {
+    // we expect sizes to be small, so try to cram them into a single byte
+    constexpr uint8_t encode_as_u64 = std::numeric_limits::max();
+    constexpr uint8_t
encode_as_u32 = encode_as_u64 - 1; + constexpr uint8_t encode_as_u16 = encode_as_u64 - 2; + if (C10_UNLIKELY(s >= encode_as_u16)) { + // first write a byte indicating the path we followed, then the data + if (s <= std::numeric_limits::max()) { + // 3 bytes + specialize_on_bytes(encode_as_u16); + specialize_on_bytes(static_cast(s)); + } else if (s <= std::numeric_limits::max()) { + // 5 bytes + specialize_on_bytes(encode_as_u32); + specialize_on_bytes(static_cast(s)); + } else { + // 9 bytes + specialize_on_bytes(encode_as_u64); + specialize_on_bytes(s); + } + } else { + // happy case, 1 byte + specialize_on_bytes(static_cast(s)); + } + } + + SizeInput::DynType set_default_dyn_type(SizeInput::DynType default_dyn_type) { + return std::exchange(_compiler.default_dyn_type, default_dyn_type); + } + + CompiledNodeArgs(AutogradCompilerCall& compiler, NodeCall& node_call) + : _compiler(compiler), + _node_call(node_call), + _specialization_key_size(0), + _specialization_key_storage(1024), + _specialization_key( + (uint8_t*)std::malloc(_specialization_key_storage)) {} + ~CompiledNodeArgs() { + std::free(_specialization_key); + } + CompiledNodeArgs(const CompiledNodeArgs&) = delete; + + private: + template + void specialize_on_bytes(const T& t) { + while (C10_UNLIKELY( + _specialization_key_size + sizeof(T) > _specialization_key_storage)) { + _specialization_key_storage *= 2; + _specialization_key = (uint8_t*)std::realloc( + _specialization_key, _specialization_key_storage); + } + std::memcpy(_specialization_key + _specialization_key_size, &t, sizeof(T)); + _specialization_key_size += sizeof(T); + } + + AutogradCompilerCall& _compiler; + NodeCall& _node_call; + size_t _specialization_key_size; + size_t _specialization_key_storage; + uint8_t* _specialization_key; +}; + +struct TraceState { + TraceState( + const std::vector>& ss, + size_t num_outputs) + : sym_sizes_index(0), sym_sizes(ss), outputs(num_outputs) {} + + void debug_asserts() { + TORCH_INTERNAL_ASSERT(sym_sizes_index == sym_sizes.size()); + } + c10::optional next_sym_size() { + TORCH_INTERNAL_ASSERT(sym_sizes_index < sym_sizes.size()); + return sym_sizes[sym_sizes_index++]; + } + + size_t sym_sizes_index; + std::vector> sym_sizes; + variable_list outputs; +}; + +class SwapSavedVariables { + // SwapSavedVariables is used during the tracing/compilation phase after a + // cache-miss. It swaps any 'lifted' inputs (tensors, symints) to proxy nodes, + // allows tracing to happen, then swaps them back afterwards. 
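+  //
+  // Illustrative call pattern (comment only; a simplified sketch of the
+  // tracing loop, with `node`, `saved`, and `inputs` as hypothetical names):
+  //
+  //   SwapSavedVariables swap(compiler_call, state, py_compiler, node_call);
+  //   swap.before(saved);                 // stash real value, swap in proxy
+  //   variable_list out = node->apply(std::move(inputs)); // trace w/ proxies
+  //   swap.after(saved);                  // restore the stashed real value
+  //   swap.debug_asserts();               // everything must be restored
+  //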
+ public: + void before(at::Tensor& t) { + TensorArg& arg = compiler.tensor_args.lookup(t); + stashed_tensors.save(&t, std::move(t)); + if (arg.defined()) { + TORCH_INTERNAL_ASSERT(arg.proxy_tensor.defined()); + t = arg.proxy_tensor; + } + } + void after(at::Tensor& t) { + stashed_tensors.restore(&t); + } + + void before(SavedVariable& t) { + TensorArg& arg = compiler.tensor_args.lookup(t); + stashed_variables.save(&t, std::move(t)); + if (arg.defined()) { + TORCH_INTERNAL_ASSERT(arg.proxy_tensor.defined()); + t = SavedVariable(arg.proxy_tensor, false); + } + } + void after(SavedVariable& t) { + stashed_variables.restore(&t); + } + + void before(c10::SymInt& t) { + stashed_symints.save(&t, c10::SymInt(t)); + auto opt_value = state.next_sym_size(); + if (opt_value.has_value()) { + t = *opt_value; // dynamic shape + } + } + void after(c10::SymInt& t) { + stashed_symints.restore(&t); + } + + void before(Edge& t) { + if (t.is_valid()) { + // need for symints used by validate_outputs + before(t.function->mutable_input_metadata(t.input_nr)); + } + } + void after(Edge& t) { + if (t.is_valid()) { + after(t.function->mutable_input_metadata(t.input_nr)); + } + } + void before(InputMetadata& t) { + before(t.mutable_shape_as_dim_vector()); + } + void after(InputMetadata& t) { + after(t.mutable_shape_as_dim_vector()); + } + void before(at::TensorGeometry& t) { + before(t.mutable_sizes()); + before(t.mutable_strides()); + before(t.mutable_storage_offset()); + t.recompute(); + } + void after(at::TensorGeometry& t) { + after(t.mutable_sizes()); + after(t.mutable_strides()); + after(t.mutable_storage_offset()); + t.recompute(); + } + void before(torch::autograd::TypeAndSize& t) { + before(t.sym_sizes); + before(t.options); + } + void after(torch::autograd::TypeAndSize& t) { + after(t.sym_sizes); + after(t.options); + } + void before(VariableInfo& t) { + before(t.size); + } + void after(VariableInfo& t) { + after(t.size); + } + + template + void before(std::vector& t) { + for (T& i : t) { + before(i); + } + } + template + void after(std::vector& t) { + for (T& i : t) { + after(i); + } + } + template + void before(c10::SmallVector& t) { + for (T& i : t) { + before(i); + } + } + template + void after(c10::SmallVector& t) { + for (T& i : t) { + after(i); + } + } + + template + void before(c10::OptionalArray& t) { + before(t.list); + } + template + void after(c10::OptionalArray& t) { + after(t.list); + } + + template + void before(c10::optional& t) { + if (t.has_value()) { + before(*t); + } + } + template + void after(c10::optional& t) { + if (t.has_value()) { + after(*t); + } + } + +#define NO_OP_VISIT(T) \ + void before(const T&) {} \ + void after(const T&) {} + NO_OP_VISIT(caffe2::TypeMeta); + NO_OP_VISIT(c10::Device); + NO_OP_VISIT(c10::DeviceType); + NO_OP_VISIT(c10::Layout); + NO_OP_VISIT(c10::MemoryFormat); + NO_OP_VISIT(c10::ScalarType); + NO_OP_VISIT(c10::Scalar); + NO_OP_VISIT(c10::TensorOptions); + NO_OP_VISIT(std::string); + NO_OP_VISIT(int64_t); + NO_OP_VISIT(bool); + NO_OP_VISIT(double); +#undef NO_OP_VISIT + + SwapSavedVariables( + AutogradCompilerCall& c, + TraceState& s, + PyObject* p, + const NodeCall& n) + : compiler(c), state(s), py_compiler(p), curr_node_call(n) {} + + PyObject* get_py_compiler() { + return py_compiler; + } + + const NodeCall& get_curr_node_call() { + return curr_node_call; + } + + void debug_asserts() { + stashed_variables.debug_assert(); + stashed_tensors.debug_assert(); + stashed_symints.debug_assert(); + } + + private: + template + struct Stashed { + Stashed(T&& v) : 
prior_value(std::move(v)) {}
+    T prior_value;
+    // Note: we need count here to support duplicate calls to before()
+    // which happen when we have multiple autograd::Edge objects pointing
+    // to the same autograd::Node
+    int count = 1;
+  };
+
+  template
+  struct StashedVars : public std::unordered_map> {
+    void save(const T* key, T&& value) {
+      auto it = this->find(key);
+      if (it == this->end()) {
+        this->emplace(key, std::move(value));
+      } else {
+        // keep the value from the prior save()
+        it->second.count++;
+      }
+    }
+    void restore(T* var) {
+      auto it = this->find(var);
+      TORCH_INTERNAL_ASSERT(it != this->end(), "missing before()");
+      if (--it->second.count == 0) {
+        // restore the value on the last restore()
+        *var = std::move(it->second.prior_value);
+        this->erase(it);
+      }
+    }
+    void debug_assert() {
+      TORCH_INTERNAL_ASSERT(this->empty(), "missing call to after()");
+    }
+  };
+
+  AutogradCompilerCall& compiler;
+  TraceState& state;
+  // This is a borrowed reference; we do not increment or decrement its
+  // refcount. Its lifetime is strictly longer than this object's.
+  PyObject* py_compiler;
+  const NodeCall& curr_node_call;
+
+  // These mappings are used to save the prior values when we overwrite things
+  // in before(). In after(), we use these to clean up after ourselves.
+  StashedVars stashed_variables;
+  StashedVars stashed_tensors;
+  StashedVars stashed_symints;
+};
+
+} // namespace torch::dynamo::autograd
+
+template <>
+struct std::hash {
+  size_t operator()(const torch::dynamo::autograd::CacheKey& k) const {
+    return k.hash();
+  }
+};
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpp_shim.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpp_shim.h
new file mode 100644
index 0000000000000000000000000000000000000000..5baf67805b06c3ef71670379e401820c505e144c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpp_shim.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct _PytorchRecordFunctionState;
+typedef struct _PytorchRecordFunctionState _PytorchRecordFunctionState;
+
+_PytorchRecordFunctionState* _pytorch_record_function_enter(const char* name);
+void _pytorch_record_function_exit(_PytorchRecordFunctionState* state);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpython_defs.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpython_defs.h
new file mode 100644
index 0000000000000000000000000000000000000000..d9d3efa24a28d73cbc7fbc24a49738f22c492ffd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/cpython_defs.h
@@ -0,0 +1,21 @@
+#pragma once
+
+#include
+
+// Functions that need to be copied from the CPython source
+// should go in cpython_defs.c. Copying is required when, e.g.,
+// we need to call internal CPython functions that are not exposed.
+
+#if IS_PYTHON_3_11_PLUS
+
+#include
+
+int THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame* frame);
+
+PyFunctionObject* _PyFunction_CopyWithNewCode(
+    PyFunctionObject* o,
+    PyCodeObject* code);
+
+void THP_PyFrame_Clear(_PyInterpreterFrame* frame);
+
+#endif
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/debug_macros.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/debug_macros.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a05938cee6d8e832937e0540726e6ac792cc95f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/debug_macros.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#include
+
+#ifdef _WIN32
+#define unlikely(x) (x)
+#else
+#define unlikely(x) __builtin_expect((x), 0)
+#endif
+
+#define NULL_CHECK(val)                                          \
+  if (unlikely((val) == NULL)) {                                 \
+    fprintf(stderr, "NULL ERROR: %s:%d\n", __FILE__, __LINE__);  \
+    PyErr_Print();                                               \
+    abort();                                                     \
+  } else {                                                       \
+  }
+
+// CHECK might be previously declared
+#undef CHECK
+#define CHECK(cond)                                                     \
+  if (unlikely(!(cond))) {                                              \
+    fprintf(stderr, "DEBUG CHECK FAILED: %s:%d\n", __FILE__, __LINE__); \
+    abort();                                                            \
+  } else {                                                              \
+  }
+
+// Uncomment the next line to print debug messages
+// #define TORCHDYNAMO_DEBUG 1
+#ifdef TORCHDYNAMO_DEBUG
+
+#define DEBUG_CHECK(cond) CHECK(cond)
+#define DEBUG_NULL_CHECK(val) NULL_CHECK(val)
+#define DEBUG_TRACE(msg, ...) \
+  fprintf(stderr, "TRACE[%s:%d] " msg "\n", __func__, __LINE__, __VA_ARGS__)
+#define DEBUG_TRACE0(msg) \
+  fprintf(stderr, "TRACE[%s:%d] " msg "\n", __func__, __LINE__)
+
+#else
+
+#define DEBUG_CHECK(cond)
+#define DEBUG_NULL_CHECK(val)
+#define DEBUG_TRACE(msg, ...)
+#define DEBUG_TRACE0(msg)
+
+#endif
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/eval_frame.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/eval_frame.h
new file mode 100644
index 0000000000000000000000000000000000000000..99b16f3198c80f86fe5b872e4fdf0d4f1e7d0e22
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/eval_frame.h
@@ -0,0 +1,6 @@
+#pragma once
+#include
+
+extern "C" {
+PyObject* torch_c_dynamo_eval_frame_init(void);
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/extra_state.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/extra_state.h
new file mode 100644
index 0000000000000000000000000000000000000000..a8bfb331b6bb855a076c85a9caeb421f2785ed66
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/extra_state.h
@@ -0,0 +1,146 @@
+#pragma once
+
+#include
+
+#ifdef __cplusplus
+
+#include
+#include
+#include
+
+namespace py = pybind11;
+
+extern "C" {
+
+#endif
+
+// Flag to just run a frame normally
+#define SKIP_CODE ((void*)0x1)
+
+// Points to the extra scratch space on the code object
+extern Py_ssize_t extra_index;
+
+// function to call when a cache lookup error occurs
+extern PyObject* guard_error_hook;
+
+typedef PyObject FrameState;
+typedef struct CacheEntry CacheEntry;
+
+// ExtraState encapsulates CacheEntry and FrameState. ExtraState is the highest
+// level of abstraction of what is stored on the extra code object. Previously,
+// we saved different parts on different extra indexes. We prefer this way
+// because of cleaner abstraction and faster SetExtra access.
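+
+// Illustrative flow (comment only; a simplified sketch of how the helpers
+// declared below are typically combined, error handling omitted; `code` and
+// `f_locals` are hypothetical borrowed references):
+//
+//   ExtraState* extra = get_extra_state(code);
+//   if (extra == NULL) {
+//     extra = init_and_set_extra_state(code);
+//   }
+//   PyObject* cached = lookup(extra, f_locals);  // Py_None on a cache miss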
+
+#ifdef __cplusplus
+
+typedef struct VISIBILITY_HIDDEN ExtraState {
+  // List of cache entries for compiled code objects
+  std::list cache_entry_list;
+  // Frame state to detect dynamic shape dims
+  py::dict frame_state;
+
+  CacheEntry* get_first_entry();
+  void move_to_front(CacheEntry* cache_entry);
+  void invalidate(CacheEntry* cache_entry);
+} ExtraState;
+
+#else
+
+typedef struct ExtraState ExtraState;
+
+#endif
+
+// Helper to extract the cache_entry from the extra state.
+// Ownership contract
+// args
+//  - extra_state: Borrowed
+// return
+//  - CacheEntry: Borrowed.
+CacheEntry* extract_cache_entry(ExtraState* extra_state);
+
+// Returns either the previously stored frame state or an empty dict.
+// Ownership contract
+// args
+//  - extra_state: Borrowed
+// return
+//  - extra_state->frame_state: Borrowed.
+FrameState* extract_frame_state(ExtraState* extra_state);
+
+// Ownership contract
+// args
+//  - code: Borrowed
+// return
+//  - extra_state: Borrowed.
+ExtraState* get_extra_state(PyCodeObject* code);
+
+// This is passed as freefunc to _PyEval_RequestCodeExtraIndex. This acts as a
+// deleter for the object on extra scratch space. This function is called
+// internally in _PyCode_SetExtra and also during the code deallocation.
+
+// Destroys the extra state by deleting cache_entry, frame state and finally
+// freeing the constructed extra state.
+
+// Developer note - You should not call this function directly. This is called
+// directly inside set_extra_state. If you are in a situation trying to call
+// this function, consider if set_extra_state should be called.
+void destroy_extra_state(void* obj);
+
+// Clears the existing object sitting on the extra scratch space and sets it
+// up with the new state. Note that _PyCode_SetExtra calls the
+// destroy_extra_state deleter internally, and therefore we don't call it
+// explicitly here.
+
+// Ownership contract
+// args
+//  - extra_state: Stolen
+// return
+//  - there is no return, but the extra_state is stolen, so it becomes
+//  set_extra_state's responsibility to clean it up. It will be deleted during
+//  the reset_code/skip, when set_extra_state is called with
+//  NULL/SKIP_CODE.
+
+// Invariant - Don't set the extra state for the extra state that is already on
+// the code object. Otherwise, we will first free up the old extra state
+// (which is also the new extra state) and write something invalid on the
+// scratch space.
+void set_extra_state(PyCodeObject* code, ExtraState* extra_state);
+
+// Creates a new extra state and puts it on the extra scratch space of the code
+// object.
+
+// Ownership contract
+// args
+//  - code: Borrowed
+// return:
+//  - extra_state: New reference.
+//  These references are then further passed to set_extra_state which becomes
+//  the final owner of these references.
+ExtraState* init_and_set_extra_state(PyCodeObject* code);
+
+// Lookup the cache held by extra_state.
+// Ownership contract
+// args
+//  - extra_state: Borrowed
+//  - f_locals: Borrowed
+// return:
+//  - Py_None or PyCodeObject: Borrowed reference.
+PyObject* lookup(ExtraState* extra_state, PyObject* f_locals);
+
+// Create a new cache entry at extra_state holding on to guarded_code.
+// Ownership contract
+// args
+//  - extra_state: Borrowed
+//  - guarded_code: Borrowed
+// return:
+//  - cache_entry: Borrowed reference
+CacheEntry* create_cache_entry(ExtraState* extra_state, PyObject* guarded_code);
+
+#ifdef __cplusplus
+
+} // extern "C"
+
+// Returns the list of CacheEntry corresponding to code_obj.
+// Warning: returns references whose lifetimes are controlled by C++ +py::list _debug_get_cache_entry_list(const py::handle& code_obj); + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/guards.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/guards.h new file mode 100644 index 0000000000000000000000000000000000000000..78727403351b54279b375fd66b5a54ea210c691a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/guards.h @@ -0,0 +1,4 @@ +#pragma once +#include + +PyObject* torch_c_dynamo_guards_init(); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/init.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/init.h new file mode 100644 index 0000000000000000000000000000000000000000..aed80d1b579f9126e92ced5f6e49294d0759317f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/init.h @@ -0,0 +1,13 @@ +#pragma once + +// C2039 MSVC +#include +#include + +#include + +namespace torch { +namespace dynamo { +void initDynamoBindings(PyObject* torch); +} +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/python_compiled_autograd.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/python_compiled_autograd.h new file mode 100644 index 0000000000000000000000000000000000000000..9422cd0c0383186484f7e4ae9906ba2aa137dbba --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/python_compiled_autograd.h @@ -0,0 +1,7 @@ +#pragma once +#include + +// see [Note: Compiled Autograd] +namespace torch::dynamo::autograd { +PyObject* torch_c_dynamo_compiled_autograd_init(); +} // namespace torch::dynamo::autograd diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/utils.h new file mode 100644 index 0000000000000000000000000000000000000000..3fd932f0441fe0e2a765679b0f8dbff898bbdb4e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/dynamo/utils.h @@ -0,0 +1,9 @@ +#pragma once + +// The visibility attribute is to avoid a warning about storing a field in the +// struct that has a different visibility (from pybind) than the struct. 
+#ifdef _WIN32 +#define VISIBILITY_HIDDEN +#else +#define VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/byte_order.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/byte_order.h new file mode 100644 index 0000000000000000000000000000000000000000..d960b287e20fbc363dbe53bc7676bd6bf28c6759 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/byte_order.h @@ -0,0 +1,227 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __FreeBSD__ +#include +#include +#define thp_bswap16(x) bswap16(x) +#define thp_bswap32(x) bswap32(x) +#define thp_bswap64(x) bswap64(x) +#elif defined(__APPLE__) +#include +#define thp_bswap16(x) OSSwapInt16(x) +#define thp_bswap32(x) OSSwapInt32(x) +#define thp_bswap64(x) OSSwapInt64(x) +#elif defined(__GNUC__) && !defined(__MINGW32__) +#include +#define thp_bswap16(x) bswap_16(x) +#define thp_bswap32(x) bswap_32(x) +#define thp_bswap64(x) bswap_64(x) +#elif defined _WIN32 || defined _WIN64 +#define thp_bswap16(x) _byteswap_ushort(x) +#define thp_bswap32(x) _byteswap_ulong(x) +#define thp_bswap64(x) _byteswap_uint64(x) +#endif + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define to_be16(x) thp_bswap16(x) +#define from_be16(x) thp_bswap16(x) +#define to_be32(x) thp_bswap32(x) +#define from_be32(x) thp_bswap32(x) +#define to_be64(x) thp_bswap64(x) +#define from_be64(x) thp_bswap64(x) +#define to_le16(x) (x) +#define from_le16(x) (x) +#define to_le32(x) (x) +#define from_le32(x) (x) +#define to_le64(x) (x) +#define from_le64(x) (x) +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define to_be16(x) (x) +#define from_be16(x) (x) +#define to_be32(x) (x) +#define from_be32(x) (x) +#define to_be64(x) (x) +#define from_be64(x) (x) +#define to_le16(x) thp_bswap16(x) +#define from_le16(x) thp_bswap16(x) +#define to_le32(x) thp_bswap32(x) +#define from_le32(x) thp_bswap32(x) +#define to_le64(x) thp_bswap64(x) +#define from_le64(x) thp_bswap64(x) +#else +#error Unexpected or undefined __BYTE_ORDER__ +#endif + +namespace torch { +namespace utils { + +enum THPByteOrder { THP_LITTLE_ENDIAN = 0, THP_BIG_ENDIAN = 1 }; + +TORCH_API THPByteOrder THP_nativeByteOrder(); + +TORCH_API void THP_decodeInt16Buffer( + int16_t* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeInt32Buffer( + int32_t* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeInt64Buffer( + int64_t* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeHalfBuffer( + c10::Half* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeFloatBuffer( + float* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeDoubleBuffer( + double* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeBoolBuffer( + bool* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeBFloat16Buffer( + at::BFloat16* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeComplexFloatBuffer( + c10::complex* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); +TORCH_API void THP_decodeComplexDoubleBuffer( + c10::complex* dst, + const uint8_t* src, + bool do_byte_swap, + size_t len); + +TORCH_API void THP_decodeInt16Buffer( + int16_t* dst, + const uint8_t* 
src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeInt32Buffer( + int32_t* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeInt64Buffer( + int64_t* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeHalfBuffer( + c10::Half* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeFloatBuffer( + float* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeDoubleBuffer( + double* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeBoolBuffer( + bool* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeBFloat16Buffer( + at::BFloat16* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeFloat8_e5m2Buffer( + at::Float8_e5m2* dst, + const uint8_t* src, + size_t len); +TORCH_API void THP_decodeFloat8_e4m3fnBuffer( + at::Float8_e4m3fn* dst, + const uint8_t* src, + size_t len); +TORCH_API void THP_decodeFloat8_e5m2fnuzBuffer( + at::Float8_e5m2fnuz* dst, + const uint8_t* src, + size_t len); +TORCH_API void THP_decodeFloat8_e4m3fnuzBuffer( + at::Float8_e4m3fnuz* dst, + const uint8_t* src, + size_t len); +TORCH_API void THP_decodeComplexFloatBuffer( + c10::complex* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_decodeComplexDoubleBuffer( + c10::complex* dst, + const uint8_t* src, + THPByteOrder order, + size_t len); + +TORCH_API void THP_encodeInt16Buffer( + uint8_t* dst, + const int16_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeInt32Buffer( + uint8_t* dst, + const int32_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeInt64Buffer( + uint8_t* dst, + const int64_t* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeFloatBuffer( + uint8_t* dst, + const float* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeDoubleBuffer( + uint8_t* dst, + const double* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeComplexFloatBuffer( + uint8_t* dst, + const c10::complex* src, + THPByteOrder order, + size_t len); +TORCH_API void THP_encodeComplexDoubleBuffer( + uint8_t* dst, + const c10::complex* src, + THPByteOrder order, + size_t len); + +} // namespace utils +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h new file mode 100644 index 0000000000000000000000000000000000000000..30602b0c9b73183c2584111b522a3a162ab68edf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h @@ -0,0 +1,8 @@ +#pragma once + +#include + +namespace torch { +TORCH_API bool get_cpp_stacktraces_enabled(); +TORCH_API bool get_disable_addr2line(); +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h new file mode 100644 index 0000000000000000000000000000000000000000..e27c168a8ef46a5860f793e79e9be05a80f27e18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h @@ -0,0 +1,15 @@ +#pragma once + +namespace torch { +namespace utils { + +static inline bool cuda_enabled() { +#ifdef USE_CUDA + return true; +#else + return false; +#endif +} + +} // namespace 
utils
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/device_lazy_init.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/device_lazy_init.h
new file mode 100644
index 0000000000000000000000000000000000000000..9141cf3beec9542f80f85473b2813bba39f1a582
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/device_lazy_init.h
@@ -0,0 +1,48 @@
+#pragma once
+
+#include
+
+// device_lazy_init() is always compiled, even for CPU-only builds.
+
+namespace torch::utils {
+
+/**
+ * This mechanism of lazy initialization is designed for each device backend.
+ * Currently, CUDA and XPU follow this design. This function `device_lazy_init`
+ * MUST be called before you attempt to access any Type (CUDA or XPU) object
+ * from ATen, in any way. It guarantees that the device runtime status is
+ * lazily initialized when the first runtime API is requested.
+ *
+ * Here are some common ways that a device object may be retrieved:
+ *   - You call getNonVariableType or getNonVariableTypeOpt
+ *   - You call toBackend() on a Type
+ *
+ * It's important to do this correctly, because if you forget to add it you'll
+ * get an oblique error message such as "Cannot initialize CUDA without
+ * ATen_cuda library" or "Cannot initialize XPU without ATen_xpu library" if
+ * you try to use CUDA or XPU functionality from a CPU-only build, which is
+ * not good UX.
+ */
+void device_lazy_init(at::DeviceType device_type);
+void set_requires_device_init(at::DeviceType device_type, bool value);
+
+static inline void maybe_initialize_device(at::Device& device) {
+  // Add more devices here to enable lazy initialization.
+  if (device.is_cuda() || device.is_xpu()) {
+    device_lazy_init(device.type());
+  }
+}
+
+static inline void maybe_initialize_device(c10::optional& device) {
+  if (!device.has_value()) {
+    return;
+  }
+  maybe_initialize_device(device.value());
+}
+
+static inline void maybe_initialize_device(const at::TensorOptions& options) {
+  auto device = options.device();
+  maybe_initialize_device(device);
+}
+
+} // namespace torch::utils
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/disable_torch_function.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/disable_torch_function.h
new file mode 100644
index 0000000000000000000000000000000000000000..5173358ca78cb1d9b637814b468a7eb05ea322b7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/disable_torch_function.h
@@ -0,0 +1,42 @@
+#pragma once
+#include
+#include
+#include
+
+namespace torch {
+// Sometimes we don't want infinite recursion for subclasses,
+// or we want a way to achieve the old behaviour.
+
+// This is an internal utility, not exposed to users.
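+
+// Illustrative use of the RAII guard declared below (comment only; a minimal
+// sketch, assuming some caller-owned tensor `t`): while the guard is alive,
+// the Python, PreDispatch, and PythonTLSSnapshot dispatch keys are excluded,
+// so ops on Tensor subclasses skip their Python-side dispatch.
+//
+//   {
+//     torch::DisableTorchDispatch guard;
+//     auto y = t + t;  // dispatches without re-entering Python
+//   }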
+bool torch_function_enabled(); +PyObject* disabled_torch_function_impl(); +PyObject* disabled_torch_dispatch_impl(); +void set_disabled_torch_function_impl(PyObject* value); +void set_disabled_torch_dispatch_impl(PyObject* value); +// Set ignore_mode to true if you're trying to collect overloaded arguments; +// using mode here will improperly cause you to add ALL objects to the +// overloaded list even if they don't actually have __torch_function__ +bool check_has_torch_function(PyObject* obj, bool ignore_mode = false); + +struct DisableTorchDispatch { + DisableTorchDispatch() + : guard_(c10::DispatchKeySet( + {c10::DispatchKey::Python, c10::DispatchKey::PreDispatch})), + guard_tls_snapshot_(c10::DispatchKey::PythonTLSSnapshot) {} + c10::impl::ExcludeDispatchKeyGuard guard_; + c10::impl::ExcludeDispatchKeyGuard guard_tls_snapshot_; +}; + +} // namespace torch + +PyObject* THPModule_isEnabledTorchFunction(PyObject* self, PyObject* unused); +PyObject* THPModule_DisableTorchFunctionType(); +PyObject* THPModule_DisableTorchFunctionSubclassType(); +PyObject* THPModule_disable_torch_function(PyObject* self, PyObject* args); +PyObject* THPModule_disable_torch_dispatch(PyObject* self, PyObject* args); +PyObject* THPModule_has_torch_function(PyObject*, PyObject* arg); +PyObject* THPModule_has_torch_function_unary(PyObject*, PyObject* obj); +PyObject* THPModule_has_torch_function_variadic( + PyObject*, + PyObject* const* args, + Py_ssize_t nargs); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/init.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/init.h new file mode 100644 index 0000000000000000000000000000000000000000..bf6dd216bbcc9283662157f662fbed810896e66a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/init.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace throughput_benchmark { + +void initThroughputBenchmarkBindings(PyObject* module); + +} // namespace throughput_benchmark +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/invalid_arguments.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/invalid_arguments.h new file mode 100644 index 0000000000000000000000000000000000000000..fecf93225e76b9a67f4022da81708ffdbff94097 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/invalid_arguments.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include +#include + +namespace torch { + +std::string format_invalid_args( + PyObject* given_args, + PyObject* given_kwargs, + const std::string& function_name, + const std::vector& options); + +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/nested.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/nested.h new file mode 100644 index 0000000000000000000000000000000000000000..f3a1061e4712361372de79e1832e4b7950c62788 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/nested.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace utils { + +at::Tensor nested_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); + +} // namespace utils +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/numpy_stub.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/numpy_stub.h new file mode 100644 index 
0000000000000000000000000000000000000000..ecdfa61500ecf00f1523090a486a17c776415ff9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/numpy_stub.h
@@ -0,0 +1,21 @@
+#pragma once
+
+#include
+
+#ifdef USE_NUMPY
+
+#if !defined(NO_IMPORT_ARRAY) && !defined(WITH_NUMPY_IMPORT_ARRAY)
+#define NO_IMPORT_ARRAY
+#endif
+
+#ifndef PY_ARRAY_UNIQUE_SYMBOL
+#define PY_ARRAY_UNIQUE_SYMBOL __numpy_array_api
+#endif
+
+#ifndef NPY_NO_DEPRECATED_API
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#endif
+
+#include
+
+#endif // USE_NUMPY
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h
new file mode 100644
index 0000000000000000000000000000000000000000..81ad207306844a4b8e0b57efe72a6079d4a74e26
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h
@@ -0,0 +1,67 @@
+#pragma once
+
+#include
+#include
+#include
+
+template
+class TORCH_PYTHON_API THPPointer {
+ public:
+  THPPointer() : ptr(nullptr){};
+  explicit THPPointer(T* ptr) noexcept : ptr(ptr){};
+  THPPointer(THPPointer&& p) noexcept : ptr(std::exchange(p.ptr, nullptr)) {}
+
+  ~THPPointer() {
+    free();
+  };
+  T* get() {
+    return ptr;
+  }
+  const T* get() const {
+    return ptr;
+  }
+  T* release() {
+    T* tmp = ptr;
+    ptr = nullptr;
+    return tmp;
+  }
+  operator T*() {
+    return ptr;
+  }
+  THPPointer& operator=(T* new_ptr) noexcept {
+    free();
+    ptr = new_ptr;
+    return *this;
+  }
+  THPPointer& operator=(THPPointer&& p) noexcept {
+    free();
+    ptr = p.ptr;
+    p.ptr = nullptr;
+    return *this;
+  }
+  T* operator->() {
+    return ptr;
+  }
+  explicit operator bool() const {
+    return ptr != nullptr;
+  }
+
+ private:
+  void free();
+  T* ptr = nullptr;
+};
+
+/**
+ * An RAII-style, owning pointer to a PyObject. You must protect
+ * destruction of this object with the GIL.
+ *
+ * WARNING: Think twice before putting this as a field in a C++
+ * struct. This class does NOT take out the GIL on destruction,
+ * so you will need to ensure that the destructor of your struct
+ * either (a) is always invoked while the GIL is held or (b) takes
+ * out the GIL itself. The easiest way to avoid this problem is to
+ * not use THPPointer in this situation.
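+ *
+ * Illustrative use (a minimal sketch, assuming the GIL is held;
+ * PyLong_FromLong is only an example object source):
+ *
+ *   THPObjectPtr obj(PyLong_FromLong(42)); // owns the new reference
+ *   if (!obj) return nullptr;              // allocation failed
+ *   // destructor calls Py_DECREF automatically at scope exit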
+ */ +using THPObjectPtr = THPPointer; +using THPCodeObjectPtr = THPPointer; +using THPFrameObjectPtr = THPPointer; diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/out_types.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/out_types.h new file mode 100644 index 0000000000000000000000000000000000000000..1cab00bc270f2e3dff532cc715327c030698c190 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/out_types.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace utils { + +TORCH_API void check_out_type_matches( + const at::Tensor& result, + c10::optional scalarType, + bool scalarType_is_none, + c10::optional layout, + c10::optional device, + bool device_is_none); + +} +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pybind.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pybind.h new file mode 100644 index 0000000000000000000000000000000000000000..36cb83659aa212fb86f72a44cfbecce79bcc5294 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pybind.h @@ -0,0 +1,381 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace py = pybind11; + +// This makes intrusive_ptr to be available as a custom pybind11 holder type, +// see +// https://pybind11.readthedocs.io/en/stable/advanced/smart_ptrs.html#custom-smart-pointers +PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr, true); + +PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr); +PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr, true); + +namespace pybind11::detail { + +// torch.Tensor <-> at::Tensor conversions (without unwrapping) +template <> +struct TORCH_PYTHON_API type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::Tensor, _("torch.Tensor")); + + bool load(handle src, bool); + + static handle cast( + const at::Tensor& src, + return_value_policy /* policy */, + handle /* parent */); +}; + +// torch._StorageBase <-> at::Storage +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::Storage, _("torch.StorageBase")); + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + if (torch::isStorage(obj)) { + value = torch::createStorage(obj); + return true; + } + return false; + } + + static handle cast( + const at::Storage& src, + return_value_policy /* policy */, + handle /* parent */) { + return handle(torch::createPyObject(src)); + } +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::Generator, _("torch.Generator")); + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + if (THPGenerator_Check(obj)) { + value = reinterpret_cast(obj)->cdata; + return true; + } + return false; + } + + static handle cast( + const at::Generator& src, + return_value_policy /* policy */, + handle /* parent */) { + return handle(THPGenerator_Wrap(src)); + } +}; + +template <> +struct TORCH_PYTHON_API type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::IntArrayRef, _("Tuple[int, ...]")); + + bool load(handle src, bool); + static handle cast( + at::IntArrayRef src, + return_value_policy 
/* policy */, + handle /* parent */); + + private: + std::vector<int64_t> v_value; +}; + +template <> +struct TORCH_PYTHON_API type_caster<at::SymIntArrayRef> { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::SymIntArrayRef, _("List[int]")); + + bool load(handle src, bool); + static handle cast( + at::SymIntArrayRef src, + return_value_policy /* policy */, + handle /* parent */); + + private: + std::vector<c10::SymInt> v_value; +}; + +template <> +struct TORCH_PYTHON_API type_caster<at::ArrayRef<c10::SymNode>> { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::ArrayRef<c10::SymNode>, _("List[SymNode]")); + + bool load(handle src, bool); + static handle cast( + at::ArrayRef<c10::SymNode> src, + return_value_policy /* policy */, + handle /* parent */); + + private: + std::vector<c10::SymNode> v_value; +}; + +template <> +struct type_caster<at::MemoryFormat> { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::MemoryFormat, _("torch.memory_format")); + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + if (THPMemoryFormat_Check(obj)) { + value = reinterpret_cast<THPMemoryFormat*>(obj)->memory_format; + return true; + } + return false; + } + static handle cast( + at::MemoryFormat src, + return_value_policy /* policy */, + handle /* parent */) { + return handle(torch::utils::getTHPMemoryFormat(src)); + } +}; + +template <> +struct type_caster<at::Device> { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(at::Device, _("torch.device")); + + // PYBIND11_TYPE_CASTER defines a member field called value. Since at::Device + // cannot be default-initialized, we provide this constructor to explicitly + // initialize that field. The value doesn't matter as it will be overwritten + // after a successful call to load.
+ type_caster() : value(c10::kCPU) {} + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + if (THPDevice_Check(obj)) { + value = reinterpret_cast<THPDevice*>(obj)->device; + return true; + } + return false; + } + + static handle cast( + const at::Device& src, + return_value_policy /* policy */, + handle /* parent */) { + return handle(THPDevice_New(src)); + } +}; + +template <> +struct type_caster<c10::Stream> { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(c10::Stream, _("torch.Stream")); + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + if (THPStream_Check(obj)) { + value = c10::Stream::unpack3( + ((THPStream*)obj)->stream_id, + ((THPStream*)obj)->device_index, + static_cast<c10::DeviceType>(((THPStream*)obj)->device_type)); + return true; + } + return false; + } + + static handle cast( + const c10::Stream& src, + return_value_policy /* policy */, + handle /* parent */) { + return handle(THPStream_Wrap(src)); + } +}; + +template <> +struct type_caster<c10::DispatchKey> + : public type_caster_base<c10::DispatchKey> { + using base = type_caster_base<c10::DispatchKey>; + c10::DispatchKey tmp; + + public: + bool load(handle src, bool convert) { + if (base::load(src, convert)) { + return true; + } else if (py::isinstance( + src, py::module_::import("builtins").attr("str"))) { + tmp = c10::parseDispatchKey(py::cast<std::string>(src)); + value = &tmp; + return true; + } + return false; + } + + static handle cast( + c10::DispatchKey src, + return_value_policy policy, + handle parent) { + return base::cast(src, policy, parent); + } +}; + +template <> +struct TORCH_PYTHON_API type_caster<c10::Scalar> { + public: + PYBIND11_TYPE_CASTER( + c10::Scalar, + _("Union[Number, torch.SymInt, torch.SymFloat, torch.SymBool]")); + bool load(py::handle src, bool); + + static py::handle cast( + const c10::Scalar& si, + return_value_policy /* policy */, + handle /* parent */); +}; + +template <> +struct TORCH_PYTHON_API type_caster<c10::SymInt> { + public: + PYBIND11_TYPE_CASTER(c10::SymInt, _("Union[int, torch.SymInt]")); + bool load(py::handle src, bool); + + static py::handle cast( + const c10::SymInt& si, + return_value_policy /* policy */, + handle /* parent */); +}; + +template <> +struct TORCH_PYTHON_API type_caster<c10::SymFloat> { + public: + PYBIND11_TYPE_CASTER(c10::SymFloat, _("float")); + bool load(py::handle src, bool); + + static py::handle cast( + const c10::SymFloat& si, + return_value_policy /* policy */, + handle /* parent */); +}; + +template <> +struct TORCH_PYTHON_API type_caster<c10::SymBool> { + public: + PYBIND11_TYPE_CASTER(c10::SymBool, _("Union[bool, torch.SymBool]")); + bool load(py::handle src, bool); + + static py::handle cast( + const c10::SymBool& si, + return_value_policy /* policy */, + handle /* parent */); +}; + +template <typename T> +struct type_caster<c10::complex<T>> { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(c10::complex<T>, _("complex")); + + bool load(handle src, bool) { + PyObject* obj = src.ptr(); + + // Adapted from `THPUtils_unpackComplexDouble` + Py_complex py_complex = PyComplex_AsCComplex(obj); + if (py_complex.real == -1.0 && PyErr_Occurred()) { + return false; + } + + // Python's Complex is always double precision. + value = c10::complex<T>(py_complex.real, py_complex.imag); + return true; + } + + static handle cast( + const c10::complex<T>& complex, + return_value_policy /* policy */, + handle /* parent */) { + // Python only knows double precision complex.
+ return handle(PyComplex_FromDoubles(complex.real(), complex.imag())); + } +}; + +} // namespace pybind11::detail + +namespace torch::impl { + +// Use this function if you have a C++ object that is used from both C++ +// and Python contexts, and you need its GIL to be released when you +// destruct it in the Python context. +// +// This function is a valid shared_ptr destructor and can be used to +// conveniently allocate a shared_ptr to an object whose destructor will be run +// without the GIL. Pass it as the second argument to shared_ptr, e.g., +// +// shared_ptr<T>(new T(), destroy_without_gil<T>) +// +// Attaching the GIL release logic to the holder pointer rather than the +// actual destructor of T is helpful when T is Python-agnostic and +// shouldn't refer to the Python API. +// +// Note there are limitations to the correctness of code that makes use of this. +// In particular, if a shared_ptr is constructed from C++ code without this +// destructor and then passed to pybind11, pybind11 will happily take ownership +// of the shared_ptr (and be willing to destruct it from a context where it is +// holding the GIL). unique_ptr with a type-branded deleter is less prone to +// this problem, because a stock deleter unique_ptr is not convertible with it. +// I plan to mitigate this problem by adding DEBUG-only asserts to the true C++ +// destructors that the GIL is not held (using a virtual call to get to the +// Python interpreter); alternatively, we could use a virtual call to simply +// ensure we release the GIL in the C++ destructor; however, this is a layering +// violation (why is code that is ostensibly Python-agnostic calling into the +// GIL?). +// +// Adapted from +// https://github.com/pybind/pybind11/issues/1446#issuecomment-406341510 +template <typename T> +inline void destroy_without_gil(T* ptr) { + // Because the ownership of a shared_ptr is diffuse, it's not possible to + // necessarily predict whether or not the last reference to an object will + // be destructed from Python or C++. This means that in the destructor here, + // we don't necessarily know if we actually have the GIL or not; in fact, + // we don't even know if the Python interpreter still exists! Thus, we have + // to test for it before releasing the GIL. + // + // PyGILState_Check is hopefully self-explanatory. But Py_IsInitialized or + // _PyIsFinalizing? Both get set at the same time during the Python + // destruction process: + // https://github.com/python/cpython/blob/d92513390a1a0da781bb08c284136f4d7abea36d/Python/pylifecycle.c#L1716-L1717 + // so the operative question is whether or not you want to release the GIL after + // finalization has completed (and there is just no Python interpreter). + // Clearly there is no need to release the GIL in that state, so we want + // Py_IsInitialized.
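+ // + // Illustrative usage (a sketch, not part of the upstream comment; `Graph` + // is a hypothetical Python-agnostic type): + // + // std::shared_ptr<Graph>(new Graph(), &destroy_without_gil<Graph>); + // + // The check below then makes the deleter safe to run at any point in the + // interpreter lifecycle: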
+ if (Py_IsInitialized() && PyGILState_Check()) { + pybind11::gil_scoped_release nogil; + delete ptr; + } else { + delete ptr; + } +} + +} // namespace torch::impl diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pycfunction_helpers.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pycfunction_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..745e1842e682c8a2fb3cc9d94e77122505016571 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pycfunction_helpers.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +#include + +inline PyCFunction castPyCFunctionWithKeywords(PyCFunctionWithKeywords func) { + C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wcast-function-type") + C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wcast-function-type-strict") + return reinterpret_cast(func); + C10_DIAGNOSTIC_POP() + C10_DIAGNOSTIC_POP() +} diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pyobject_preservation.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pyobject_preservation.h new file mode 100644 index 0000000000000000000000000000000000000000..456095d7b7037d46bd59f8f173795e177ae269a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pyobject_preservation.h @@ -0,0 +1,7 @@ +#pragma once + +#include + +// This file contains utilities used for handling PyObject preservation + +void clear_slots(PyTypeObject* type, PyObject* self); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_arg_parser.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_arg_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..cec99a843301c3d0c80e3746ac5e565b5cf634d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_arg_parser.h @@ -0,0 +1,1286 @@ +#pragma once + +// Parse arguments to Python functions implemented in C++ +// This is similar to PyArg_ParseTupleAndKeywords(), but specifically handles +// the types relevant to PyTorch and distinguishes between overloaded function +// signatures. +// +// Example: +// +// static PythonArgParser parser({ +// "norm(Scalar p, int64_t dim, bool keepdim=False)", +// "norm(Scalar p=2)", +// }); +// ParsedArgs<3> parsed_args; +// auto r = parser.parse(args, kwargs, parsed_args); +// if (r.idx == 0) { +// norm(r.scalar(0), r.int64(1), r.bool(0)); +// } else { +// norm(r.scalar(0)); +// } +// +// We auto-generate most uses of PythonArgParser; the generated files +// are torch/csrc/autograd/generated/python_*.cpp +// +// Some gotchas that you should watch out for: +// +// - Note [Order of overloads matters] +// Order of overloads matters. A set of input arguments may +// bind to multiple argument specs; we will always pick the +// first one in PythonArgParser. However, when you are writing +// overloads in, e.g., native_functions.yaml, you don't have to +// worry about what order you write them, because the code +// generation logic always gives the overloads a canonical +// order, where Tensor overloads come first, before Scalar overloads. +// This logic is in sort_declarations in +// tools/autograd/gen_python_functions.py +// +// - Zero-dim tensors (e.g., torch.tensor(2)) bind to both +// Scalar and Tensor, UNLESS they require grad (in which case +// they only bind to Tensor). 
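+// +// To make the parse-and-dispatch pattern above concrete, a typical +// hand-written binding looks roughly like this (an illustrative sketch; +// `THPVariable_norm` and the surrounding error handling are assumptions, +// not part of this header): +// +// static PyObject* THPVariable_norm( +// PyObject* self, PyObject* args, PyObject* kwargs) { +// static PythonArgParser parser({ +// "norm(Scalar p, int64_t dim, bool keepdim=False)", +// "norm(Scalar p=2)", +// }); +// ParsedArgs<3> parsed_args; +// auto r = parser.parse(self, args, kwargs, parsed_args); +// if (r.has_torch_function()) { +// return handle_torch_function( +// r, self, args, kwargs, THPVariableClass, "torch.Tensor"); +// } +// // dispatch on r.idx as in the example above +// }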
+ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +inline bool THPUtils_checkScalar(PyObject* obj) { +#ifdef USE_NUMPY + if (torch::utils::is_numpy_scalar(obj)) { + return true; + } +#endif + return PyFloat_Check(obj) || PyLong_Check(obj) || PyComplex_Check(obj) || + torch::is_symint(py::handle(obj)) || + torch::is_symfloat(py::handle(obj)) || torch::is_symbool(py::handle(obj)); +} + +namespace torch { + +bool should_allow_numbers_as_tensors(const std::string& name); + +enum class ParameterType { + TENSOR, + SCALAR, + INT64, + SYM_INT, + DOUBLE, + COMPLEX, + TENSOR_LIST, + INT_LIST, + GENERATOR, + BOOL, + STORAGE, + PYOBJECT, + SCALARTYPE, + LAYOUT, + MEMORY_FORMAT, + DEVICE, + STREAM, + STRING, + DIMNAME, + DIMNAME_LIST, + QSCHEME, + FLOAT_LIST, + SCALAR_LIST, + SYM_INT_LIST, + DISPATCH_KEY_SET +}; + +struct FunctionParameter; +struct FunctionSignature; +struct PythonArgs; + +// Contains bound Python arguments in declaration order +template +struct ParsedArgs { + ParsedArgs() : args() {} + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + PyObject* args[N]; +}; + +// A PythonArgParser contains a list of valid signatures. Instances are +// typically global variables and should be immutable. +struct PYBIND11_EXPORT PythonArgParser { + explicit PythonArgParser( + const std::vector& fmts, + bool traceable = false); + + // meant only for `torch` functions. + template + inline PythonArgs parse( + PyObject* self, + PyObject* args, + PyObject* kwargs, + ParsedArgs& dst); + + template + inline PythonArgs parse(PyObject* args, PyObject* kwargs, ParsedArgs& dst); + + inline PythonArgs parse(PyObject* self, ParsedArgs<0>& dst); + + // Formatted strings of non-hidden signatures + std::vector get_signatures() const; + + private: + [[noreturn]] void print_error( + PyObject* self, + PyObject* args, + PyObject* kwargs, + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + PyObject* parsed_args[]); + void check_deprecated(const FunctionSignature& signature); + PythonArgs raw_parse( + PyObject* self, + PyObject* args, + PyObject* kwargs, + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + PyObject* parsed_args[]); + + std::vector signatures_; + std::string function_name; + size_t max_args; + bool traceable; +}; + +// FunctionSignature represents a single valid signature for a Python function. +// It is immutable once constructed. The contained data can be concurrently +// accessed by multiple calls. +struct FunctionSignature { + explicit FunctionSignature(const std::string& fmt, int index); + + bool parse( + PyObject* self, + PyObject* args, + PyObject* kwargs, + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + PyObject* dst[], + std::vector& overloaded_args, + bool raise_exception); + + std::string toString() const; + + std::string name; + std::vector params; + size_t min_args; + size_t max_args; + size_t max_pos_args; + int index; + bool hidden; + bool deprecated; +}; + +// PythonArgs contains bound Python arguments for an actual invocation +// along with references to the matched signature. 
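+// +// For example, after matching "norm(Scalar p, int64_t dim, bool keepdim=False)" +// (r.idx == 0), the bound values are read positionally through the typed +// accessors below: r.scalar(0), r.toInt64(1), r.toBool(2); the *WithDefault +// and *Optional variants handle arguments that were omitted at the call site.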
+struct PythonArgs { + PythonArgs( + bool traceable, + const FunctionSignature& signature, + PyObject** args, + std::vector overloaded_args) + : idx(signature.index), + traceable(traceable), + signature(signature), + args(args), + overloaded_args(std::move(overloaded_args)) {} + + int idx; + bool traceable; + const FunctionSignature& signature; + PyObject** args; + std::vector overloaded_args; // NOTE: borrowed references + + inline bool has_torch_function(); + inline std::string get_func_name(); + inline at::Tensor tensor(int i); + inline c10::optional optionalTensor(int i); + inline at::Scalar scalar(int i); + inline at::Scalar scalarWithDefault(int i, const at::Scalar& default_scalar); + inline std::vector scalarlist(int i); + inline std::vector tensorlist(int i); + inline torch::List> list_of_optional_tensors(int i); + template + inline std::array tensorlist_n(int i); + inline std::vector intlist(int i); + inline std::vector symintlist(int i); + inline c10::OptionalArray intlistOptional(int i); + inline c10::OptionalArray symintlistOptional(int i); + inline std::vector intlistWithDefault( + int i, + std::vector default_intlist); + inline c10::optional generator(int i); + inline at::Storage storage(int i); + inline at::Storage storage( + int i, + at::ScalarType& storage_scalar_type, + bool& is_typed_storage); + inline c10::Stream stream(int i); + inline at::ScalarType scalartype(int i); + inline at::ScalarType scalartypeWithDefault( + int i, + at::ScalarType default_scalartype); + inline c10::optional scalartypeOptional(int i); + inline c10::optional scalarOptional(int i); + inline c10::optional toInt64Optional(int i); + inline c10::optional toSymIntOptional(int i); + inline c10::optional toBoolOptional(int i); + inline c10::optional toDoubleOptional(int i); + inline c10::OptionalArray doublelistOptional(int i); + inline std::vector doublelist(int i); + inline std::vector getDoublelist(int i); + inline at::Layout layout(int i); + inline at::Layout layoutWithDefault(int i, at::Layout default_layout); + inline c10::optional layoutOptional(int i); + inline at::Device device(int i); + inline at::Device deviceWithDefault(int i, const at::Device& default_device); + inline c10::optional deviceOptional(int i); + inline at::Dimname dimname(int i); + inline std::vector dimnamelist(int i); + inline c10::optional> toDimnameListOptional(int i); + inline at::MemoryFormat memoryformat(int i); + inline c10::optional memoryformatOptional(int i); + inline at::QScheme toQScheme(int i); + inline std::string string(int i); + inline std::string stringWithDefault(int i, const std::string& default_str); + inline c10::optional stringOptional(int i); + inline c10::string_view stringView(int i); + inline c10::string_view stringViewWithDefault( + int i, + const c10::string_view default_str); + inline c10::optional stringViewOptional(int i); + inline PyObject* pyobject(int i); + inline int64_t toInt64(int i); + inline c10::SymInt toSymInt(int i); + inline c10::SymBool toSymBool(int i); + inline int64_t toInt64WithDefault(int i, int64_t default_int); + inline double toDouble(int i); + inline double toDoubleWithDefault(int i, double default_double); + inline c10::complex toComplex(int i); + inline c10::complex toComplexWithDefault( + int i, + c10::complex default_complex); + inline bool toBool(int i); + inline bool toBoolWithDefault(int i, bool default_bool); + inline bool isNone(int i); + inline c10::optional toDispatchKeySetOptional(int i); + + private: + at::Tensor tensor_slow(int i); + at::Scalar scalar_slow(int 
i); + at::Scalar scalar_slow(PyObject* arg); +}; + +// FunctionParameter is a single formal parameter of a Python function. +// It is immutable once constructed. +struct FunctionParameter { + FunctionParameter(const std::string& fmt, bool keyword_only); + + bool check( + PyObject* obj, + std::vector& overloaded_args, + int argnum, + int64_t* failed_idx = nullptr); + + void set_default_str(const std::string& str); + std::string type_name() const; + + ParameterType type_; + bool optional; + bool allow_none; + bool keyword_only; + bool allow_numbers_as_tensors = false; + int size; + std::string name; + // having this as a raw PyObject * will presumably leak it, but these are only + // held by static objects anyway, and Py_Finalize can already be called when + // this is destructed. + PyObject* python_name; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) + at::SmallVector numpy_python_names; + at::Scalar default_scalar; + std::vector default_intlist; + std::string default_string; + union { + bool default_bool; + int64_t default_int; + double default_double; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + double default_complex[2]; // see Scalar + at::ScalarType default_scalartype; + at::Layout default_layout; + }; +}; + +template +inline PythonArgs PythonArgParser::parse( + PyObject* self, + PyObject* args, + PyObject* kwargs, + ParsedArgs& dst) { + TORCH_CHECK_VALUE( + N >= max_args, + "PythonArgParser: dst ParsedArgs buffer does not have enough capacity, expected ", + max_args, + " (got ", + N, + ")"); + return raw_parse(self, args, kwargs, dst.args); +} + +template +inline PythonArgs PythonArgParser::parse( + PyObject* args, + PyObject* kwargs, + ParsedArgs& dst) { + return parse(nullptr, args, kwargs, dst); +} + +inline PythonArgs PythonArgParser::parse(PyObject* self, ParsedArgs<0>& dst) { + return parse(self, nullptr, nullptr, dst); +} + +inline bool PythonArgs::has_torch_function() { + return !overloaded_args.empty() || at::impl::torch_function_mode_enabled(); +} + +inline std::string PythonArgs::get_func_name() { + return signature.name; +} + +// TODO: this can return MaybeOwned +inline at::Tensor PythonArgs::tensor(int i) { + if (args[i] && THPVariable_CheckExact(args[i])) { + return THPVariable_Unpack(args[i]); + } + return tensor_slow(i); +} + +inline c10::optional PythonArgs::optionalTensor(int i) { + at::Tensor t = tensor(i); + // NOLINTNEXTLINE(bugprone-branch-clone) + if (t.defined()) { + return t; + } else { + return c10::nullopt; + } +} + +inline at::Scalar PythonArgs::scalar(int i) { + if (!args[i]) + return signature.params[i].default_scalar; + return scalar_slow(i); +} + +inline std::vector PythonArgs::scalarlist(int i) { + if (!args[i]) + return std::vector(); + auto tuple = six::isTuple(args[i]); + THPObjectPtr arg = six::maybeAsTuple(args[i]); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get()); + std::vector res(size); + for (const auto idx : c10::irange(size)) { + PyObject* obj = tuple ? 
PyTuple_GET_ITEM(arg.get(), idx) + : PyList_GET_ITEM(arg.get(), idx); + res[idx] = scalar_slow(obj); + } + return res; +} + +inline at::Scalar PythonArgs::scalarWithDefault( + int i, + const at::Scalar& default_scalar) { + if (!args[i]) + return default_scalar; + return scalar_slow(i); +} + +inline c10::optional PythonArgs::scalarOptional(int i) { + if (!args[i]) + return c10::nullopt; + return scalar_slow(i); +} + +inline std::vector PythonArgs::tensorlist(int i) { + if (!args[i]) + return std::vector(); + auto tuple = six::isTuple(args[i]); + THPObjectPtr arg = six::maybeAsTuple(args[i]); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get()); + std::vector res(size); + for (const auto idx : c10::irange(size)) { + PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx) + : PyList_GET_ITEM(arg.get(), idx); + // This is checked by the argument parser so it's safe to cast without + // checking if this is a tensor first + res[idx] = THPVariable_Unpack(obj); + } + return res; +} + +inline torch::List> PythonArgs:: + list_of_optional_tensors(int i) { + if (!args[i]) + return torch::List>(); + auto tuple = six::isTuple(args[i]); + THPObjectPtr arg = six::maybeAsTuple(args[i]); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get()); + torch::List> res; + res.reserve(size); + for (const auto idx : c10::irange(size)) { + PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx) + : PyList_GET_ITEM(arg.get(), idx); + // This is checked by the argument parser so it's safe to cast without + // checking if this is a tensor first + res.push_back(THPVariable_Unpack(obj)); + } + return res; +} + +template +inline std::array PythonArgs::tensorlist_n(int i) { + auto res = std::array(); + if (!args[i]) + return res; + auto tuple = six::isTuple(args[i]); + THPObjectPtr arg = six::maybeAsTuple(args[i]); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get()); + if (size != N) { + throw TypeError("expected tuple of %d elements but got %d", N, (int)size); + } + for (const auto idx : c10::irange(size)) { + PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx) + : PyList_GET_ITEM(arg.get(), idx); + // This is checked by the argument parser so it's safe to cast without + // checking if this is a tensor first + res[idx] = THPVariable_Unpack(obj); + } + return res; +} + +inline std::vector PythonArgs::intlist(int i) { + return intlistWithDefault(i, signature.params[i].default_intlist); +} + +inline PyObject* toPyObject(c10::SymInt symint) { + if (symint.is_symbolic()) { + auto r = py::cast(symint).release().ptr(); + TORCH_INTERNAL_ASSERT(r); + return r; + } else { + auto m = symint.maybe_as_int(); + return THPUtils_packInt64(*m); + } +} + +inline void throw_intlist_exception( + const torch::PythonArgs* args, + size_t i, + PyObject* obj, + size_t idx, + const std::exception& e = python_error()) { + std::string error = strlen(e.what()) + ? 
e.what() + : std::string("type must be ") + args->signature.params[i].type_name() + + ", but got " + Py_TYPE(obj)->tp_name; + throw TypeError( + "%s(): argument '%s' failed to unpack the object at pos %zu with error \"%s\"", + args->signature.name.c_str(), + args->signature.params[i].name.c_str(), + idx + 1, + error.c_str()); +} + +inline std::vector<c10::SymInt> PythonArgs::symintlist(int i) { + if (!args[i]) { + return c10::fmap(signature.params[i].default_intlist, [](int64_t di) { + return c10::SymInt(di); + }); + } + + const auto size1 = signature.params[i].size; + if (size1 > 0 && THPUtils_checkLong(args[i])) { + return std::vector<c10::SymInt>( + size1, c10::SymInt(THPUtils_unpackLong(args[i]))); + } + + if (size1 > 0 && torch::is_symint(py::handle(args[i]))) { + auto si = py::handle(args[i]).cast<c10::SymInt>(); + return std::vector<c10::SymInt>(size1, si); + } + + PyObject* arg = args[i]; + auto tuple = PyTuple_Check(arg); + // NOLINTNEXTLINE(bugprone-branch-clone) + const auto size2 = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg); + std::vector<c10::SymInt> res; + res.reserve(size2); + for (const auto idx : c10::irange(size2)) { + PyObject* obj = + tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx); + + // Elements of torch.Size are tensors during tracing, and we need to + // record extra information before they are turned into an IntArrayRef + if (traceable && jit::tracer::isTracing() && THPVariable_Check(obj)) { + auto& var = THPVariable_Unpack(obj); + jit::tracer::ArgumentStash::stashIntArrayRefElem( + signature.params[i].name, size2, idx, var); + try { + res.emplace_back(var.item<int64_t>()); + continue; + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + continue; + } else { + // convert tensor to scalar outside of try / catch, + // so that Tensor subclass exceptions will not be caught. + if (THPUtils_checkLongExact(obj)) { + // Fast path for plain numbers + try { + res.emplace_back(THPUtils_unpackLong(obj)); + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + } else if (THPVariable_Check(obj)) { + auto& var = THPVariable_Unpack(obj); + if (var.numel() != 1 || + !at::isIntegralType( + var.dtype().toScalarType(), /*include_bool*/ true)) { + throw_intlist_exception(this, i, obj, idx); + } + auto scalar = var.item(); + TORCH_CHECK(scalar.isIntegral(/*include bool*/ false)); + res.push_back(scalar.toSymInt()); + } else { + try { + if (is_symint(py::handle(obj))) { + res.push_back(py::handle(obj).cast<c10::SymInt>()); + } else { + res.emplace_back(THPUtils_unpackIndex(obj)); + } + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + } + } + } + + return res; +} + +inline std::vector<int64_t> PythonArgs::intlistWithDefault( + int i, + std::vector<int64_t> default_intlist) { + if (!args[i]) + return default_intlist; + PyObject* arg = args[i]; + const auto size1 = signature.params[i].size; + if (size1 > 0 && THPUtils_checkLong(arg)) { + return std::vector<int64_t>(size1, THPUtils_unpackLong(arg)); + } + if (size1 > 0 && torch::is_symint(py::handle(arg))) { + return std::vector<int64_t>( + size1, + py::handle(arg).cast<c10::SymInt>().guard_int(__FILE__, __LINE__)); + } + auto tuple = PyTuple_Check(arg); + // NOLINTNEXTLINE(bugprone-branch-clone) + const auto size2 = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg); + std::vector<int64_t> res(size2); + for (const auto idx : c10::irange(size2)) { + PyObject* obj = + tuple ?
PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx); + // Elements of torch.Size are tensors during tracing, and we need to + // record extra information before they are turned into an IntArrayRef + if (traceable && jit::tracer::isTracing() && THPVariable_Check(obj)) { + auto& var = THPVariable_Unpack(obj); + jit::tracer::ArgumentStash::stashIntArrayRefElem( + signature.params[i].name, size2, idx, var); + try { + res[idx] = var.item(); + continue; + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + } else { + // convert tensor to scalar outside of try / catch, + // so that Tensor subclass exceptions will not be caught. + if (THPUtils_checkLongExact(obj)) { + // Fast path for plain numbers + try { + res[idx] = THPUtils_unpackLong(obj); + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + } else if (torch::is_symint(py::handle(obj))) { + res[idx] = py::cast(py::handle(obj)) + .guard_int(__FILE__, __LINE__); + } else if (THPVariable_Check(obj)) { + auto& var = THPVariable_Unpack(obj); + if (var.numel() != 1 || + !at::isIntegralType( + var.dtype().toScalarType(), /*include_bool*/ true)) { + throw_intlist_exception(this, i, obj, idx); + } + res[idx] = var.item(); + } else { + try { + res[idx] = THPUtils_unpackIndex(obj); + } catch (std::exception& e) { + throw_intlist_exception(this, i, obj, idx, e); + } + } + } + } + return res; +} + +inline c10::OptionalArray PythonArgs::intlistOptional(int i) { + if (!args[i]) { + return {}; + } + return intlist(i); +} + +inline c10::OptionalArray PythonArgs::symintlistOptional(int i) { + if (!args[i]) { + return {}; + } + return symintlist(i); +} + +inline std::vector PythonArgs::getDoublelist(int i) { + PyObject* arg = args[i]; + auto tuple = PyTuple_Check(arg); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg); + std::vector res(size); + for (const auto idx : c10::irange(size)) { + PyObject* obj = + tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx); + try { + res[idx] = THPUtils_unpackDouble(obj); + } catch (const std::exception& e) { + throw TypeError( + "%s(): argument '%s' must be %s, but found element of type %s at pos %zu", + signature.name.c_str(), + signature.params[i].name.c_str(), + signature.params[i].type_name().c_str(), + Py_TYPE(obj)->tp_name, + idx + 1); + } + } + return res; +} + +inline c10::OptionalArray PythonArgs::doublelistOptional(int i) { + if (!args[i]) { + return {}; + } + return this->getDoublelist(i); +} + +inline std::vector PythonArgs::doublelist(int i) { + if (!args[i]) { + return {}; + } + return this->getDoublelist(i); +} + +inline c10::optional PythonArgs::toDispatchKeySetOptional( + int i) { + if (!args[i]) { + return {}; + } + return py::cast(py::handle(args[i])); +} + +inline at::ScalarType PythonArgs::scalartypeWithDefault( + int i, + at::ScalarType default_scalartype) { + if (!args[i]) + return default_scalartype; + return scalartype(i); +} + +inline at::ScalarType toScalarType(PyObject* obj) { + if (obj == (PyObject*)&PyFloat_Type) { + return at::ScalarType::Double; + } + if (obj == (PyObject*)&PyBool_Type) { + return at::ScalarType::Bool; + } + if (obj == (PyObject*)&PyLong_Type) { + return at::ScalarType::Long; + } + return reinterpret_cast(obj)->scalar_type; +} + +inline at::ScalarType PythonArgs::scalartype(int i) { + if (!args[i]) { + auto scalartype = signature.params[i].default_scalartype; + return (scalartype == at::ScalarType::Undefined) + ? 
torch::tensors::get_default_scalar_type() + : scalartype; + } + PyObject* obj = args[i]; + return toScalarType(obj); +} + +inline c10::optional PythonArgs::scalartypeOptional(int i) { + if (!args[i]) + return c10::nullopt; + return scalartype(i); +} + +inline at::Layout toLayout(PyObject* obj) { + const auto layout = reinterpret_cast(obj); + return layout->layout; +} + +inline at::Layout PythonArgs::layout(int i) { + if (!args[i]) + return signature.params[i].default_layout; + return toLayout(args[i]); +} + +inline at::Layout PythonArgs::layoutWithDefault( + int i, + at::Layout default_layout) { + if (!args[i]) + return default_layout; + return layout(i); +} + +inline c10::optional PythonArgs::layoutOptional(int i) { + if (!args[i]) + return c10::nullopt; + return layout(i); +} + +inline at::Device toDevice(PyObject* obj) { + if (THPDevice_Check(obj)) { + const auto device = reinterpret_cast(obj); + return device->device; + } + if (THPUtils_checkLong(obj)) { + const auto device_index = THPUtils_unpackLong(obj); + TORCH_CHECK(device_index >= 0, "Device index must not be negative"); + if (c10::is_privateuse1_backend_registered()) { + return at::Device( + c10::DeviceType::PrivateUse1, + static_cast(device_index)); + } + return at::Device( + c10::DeviceType::CUDA, static_cast(device_index)); + } + const std::string& device_str = THPUtils_unpackString(obj); + return at::Device(device_str); +} + +inline at::Device PythonArgs::device(int i) { + if (!args[i]) { + return torch::tensors::get_default_device(); + } + return toDevice(args[i]); +} + +inline at::Device PythonArgs::deviceWithDefault( + int i, + const at::Device& default_device) { + if (!args[i]) + return default_device; + return device(i); +} + +inline c10::optional PythonArgs::deviceOptional(int i) { + if (!args[i]) + return c10::nullopt; + return device(i); +} + +inline at::Dimname PythonArgs::dimname(int i) { + TORCH_INTERNAL_ASSERT(args[i] != nullptr); + return THPDimname_parse(args[i]); +} + +inline std::vector parseDimnameList(PyObject* arg) { + auto tuple = PyTuple_Check(arg); + // NOLINTNEXTLINE(bugprone-branch-clone) + auto size = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg); + std::vector res; + res.reserve(size); + for (const auto idx : c10::irange(size)) { + PyObject* obj = + tuple ? 
PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx); + res.push_back(THPDimname_parse(obj)); + } + return res; +} + +inline c10::optional> PythonArgs:: + toDimnameListOptional(int i) { + if (!args[i]) + return c10::nullopt; + return parseDimnameList(args[i]); +} + +inline std::vector PythonArgs::dimnamelist(int i) { + TORCH_INTERNAL_ASSERT(args[i]); + PyObject* arg = args[i]; + auto size = signature.params[i].size; + TORCH_INTERNAL_ASSERT(size == 0 || size == 1); + if (size == 1 && THPUtils_checkDimname(arg)) { + return {THPDimname_parse(arg)}; + } + return parseDimnameList(arg); +} + +inline at::MemoryFormat PythonArgs::memoryformat(int i) { + if (!args[i]) + return at::MemoryFormat::Contiguous; + TORCH_CHECK( + THPMemoryFormat_Check(args[i]), + "memory_format arg must be an instance of the torch.memory_format"); + const auto memory_format = reinterpret_cast(args[i]); + return memory_format->memory_format; +} + +inline c10::optional PythonArgs::memoryformatOptional(int i) { + if (!args[i]) + return c10::nullopt; + return memoryformat(i); +} + +inline at::QScheme PythonArgs::toQScheme(int i) { + if (!args[i]) + return at::kPerTensorAffine; + TORCH_CHECK( + THPQScheme_Check(args[i]), + "qscheme arg must be an instance of the torch.qscheme"); + const auto qscheme = reinterpret_cast(args[i]); + return qscheme->qscheme; +} + +inline std::string PythonArgs::string(int i) { + return stringWithDefault(i, signature.params[i].default_string); +} + +inline std::string PythonArgs::stringWithDefault( + int i, + const std::string& default_str) { + if (!args[i]) + return default_str; + return THPUtils_unpackString(args[i]); +} + +inline c10::optional PythonArgs::stringOptional(int i) { + if (!args[i]) + return c10::nullopt; + return THPUtils_unpackString(args[i]); +} + +inline c10::string_view PythonArgs::stringView(int i) { + return stringViewWithDefault(i, signature.params[i].default_string); +} + +inline c10::string_view PythonArgs::stringViewWithDefault( + int i, + const c10::string_view default_str) { + if (!args[i]) + return default_str; + return THPUtils_unpackStringView(args[i]); +} + +inline c10::optional PythonArgs::stringViewOptional(int i) { + if (!args[i]) + return c10::nullopt; + return THPUtils_unpackStringView(args[i]); +} + +inline int64_t PythonArgs::toInt64(int i) { + if (!args[i]) + return signature.params[i].default_int; + if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) { + auto& var = THPVariable_Unpack(args[i]); + jit::tracer::ArgumentStash::stashValue( + signature.params[i].name, idx, var, c10::IntType::get()); + } + if (torch::is_symint(py::handle(args[i]))) { + return py::cast(py::handle(args[i])) + .guard_int(__FILE__, __LINE__); + } + return THPUtils_unpackLong(args[i]); +} + +inline c10::SymInt PythonArgs::toSymInt(int i) { + if (!args[i]) { + return c10::SymInt(signature.params[i].default_int); + } + + if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) { + auto& var = THPVariable_Unpack(args[i]); + jit::tracer::ArgumentStash::stashValue( + signature.params[i].name, idx, var, c10::IntType::get()); + } + + return py::cast(py::handle(args[i])); +} + +inline c10::SymBool PythonArgs::toSymBool(int i) { + if (!args[i]) { + return c10::SymBool(signature.params[i].default_bool); + } + if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) { + auto& var = THPVariable_Unpack(args[i]); + jit::tracer::ArgumentStash::stashValue( + signature.params[i].name, idx, var, c10::BoolType::get()); + } + + return 
py::cast(py::handle(args[i])); +} + +inline int64_t PythonArgs::toInt64WithDefault(int i, int64_t default_int) { + if (!args[i]) + return default_int; + return toInt64(i); +} + +inline c10::optional PythonArgs::toInt64Optional(int i) { + if (!args[i]) + return c10::nullopt; + return toInt64(i); +} + +inline c10::optional PythonArgs::toSymIntOptional(int i) { + if (!args[i]) + return c10::nullopt; + return toSymInt(i); +} + +inline c10::optional PythonArgs::toBoolOptional(int i) { + if (!args[i]) { + return c10::nullopt; + } + return toBool(i); +} + +inline c10::optional PythonArgs::toDoubleOptional(int i) { + if (!args[i]) { + return c10::nullopt; + } + return toDouble(i); +} + +inline double PythonArgs::toDouble(int i) { + if (!args[i]) + return signature.params[i].default_double; + if (torch::is_symfloat(py::handle(args[i]))) { + return py::cast(py::handle(args[i])) + .guard_float(__FILE__, __LINE__); + } + if (torch::is_symint(py::handle(args[i]))) { + return static_cast(py::cast(py::handle(args[i])) + .guard_int(__FILE__, __LINE__)); + } + return THPUtils_unpackDouble(args[i]); +} + +inline bool PythonArgs::toBool(int i) { + if (!args[i]) + return signature.params[i].default_bool; + if (torch::is_symbool(py::handle(args[i]))) { + return py::cast(py::handle(args[i])) + .guard_bool(__FILE__, __LINE__); + } + return args[i] == Py_True; +} + +inline double PythonArgs::toDoubleWithDefault(int i, double default_double) { + if (!args[i]) + return default_double; + return toDouble(i); +} + +inline c10::complex PythonArgs::toComplex(int i) { + if (!args[i]) + return *(reinterpret_cast*>( + signature.params[i].default_complex)); + return THPUtils_unpackComplexDouble(args[i]); +} + +inline c10::complex PythonArgs::toComplexWithDefault( + int i, + c10::complex default_value) { + if (!args[i]) + return default_value; + return toComplex(i); +} + +inline bool PythonArgs::toBoolWithDefault(int i, bool default_bool) { + if (!args[i]) + return default_bool; + return toBool(i); +} + +inline bool PythonArgs::isNone(int i) { + return args[i] == nullptr; +} + +inline c10::optional PythonArgs::generator(int i) { + if (!args[i]) + return c10::nullopt; + return reinterpret_cast(args[i])->cdata; +} + +inline at::Storage PythonArgs::storage(int i) { + if (!args[i]) + return at::Storage(); + return createStorage(args[i]); +} + +inline at::Storage PythonArgs::storage( + int i, + at::ScalarType& storage_scalar_type, + bool& is_typed_storage) { + at::Storage storage; + if (!args[i]) { + storage = at::Storage(); + is_typed_storage = false; + storage_scalar_type = at::ScalarType::Undefined; + } else { + std::tie(storage, storage_scalar_type, is_typed_storage) = + createStorageGetType(args[i]); + } + return storage; +} + +inline c10::Stream PythonArgs::stream(int i) { + if (!args[i]) + return c10::Stream( + c10::Stream::Default::DEFAULT, c10::Device(c10::DeviceType::CPU, -1)); + if (!THPStream_Check(args[i])) { + throw TypeError( + "expected Stream object. Got '%s'", Py_TYPE(args[i])->tp_name); + } + return c10::Stream::unpack3( + ((THPStream*)args[i])->stream_id, + static_cast(((THPStream*)args[i])->device_index), + static_cast(((THPStream*)args[i])->device_type)); +} + +inline PyObject* PythonArgs::pyobject(int i) { + if (!args[i]) + return Py_None; + return args[i]; +} + +/* + * + * Handle __torch_function__ overrides if we know that there are overloaded + * arguments. 
All objects stored in r.overloaded_args must have a + * __torch_function__ implementation and the arguments must be ordered in order + * of precedence. Precedence goes from left to right in the order of the + * signature of the function the overloaded arguments were passed to, except + * subclasses are always considered before superclasses. + * + * If the result of calling __torch_function__ is NotImplemented, the + * next implementation in the precedence order is called. If all + * arguments return NotImplemented from their __torch_function__ + * implementation, a TypeError is raised in Python. + * + * Assumes overloaded_args has at least one entry. All entries must have + * a __torch_function__ attribute that resolves to a callable that + * accepts a torch API function, a tuple of arguments, and a dict of + * keyword arguments for the torch API function. + * + * It is sufficient to call PythonArgs::has_torch_function before + * calling this function to verify that there are valid arguments + * present. If that is not done then special care must be taken to + * ensure there are arguments that are overloaded with + * __torch_function__. + * + * See torch._overrides.handle_torch_function for the equivalent + * code in the pure-python implementation. + * + * 'r' is a parsed PythonArgs instance, returned from + * PythonArgParser::parse. + * + * 'args' is a reference to the python tuple of arguments to the torch + * API function. + * + * 'kwargs' is a reference to the python dict of keyword arguments to + * the torch API function. + * + * 'torch_api' is a reference to a python torch API namespace. + * + * 'torch_api_function' is the reference to the original torch method; usually + * we can use torch_api and func_name to get torch_api_function. In some cases, + * e.g., for a torch custom op, we create the function in C++; if we still used + * torch_api and func_name to fetch the original API, a cyclic call would + * happen. + * + * 'overloaded_args' is the args which have an overloaded __torch_function__. + * + * 'func_name' is the name of the original torch method. + * + * TODO: we could use different names for the following 'handle_torch_function' + * instead of overloading. + * + */ +// Used for Tensor methods with arguments. +auto handle_torch_function( + PythonArgs& r, + PyObject* self, + PyObject* args, + PyObject* kwargs, + PyObject* torch_api, + const char* module_name, + const char* func_name_override = nullptr) -> PyObject*; + +// Used for functions which need to parse python args. +auto handle_torch_function( + PythonArgs& r, + PyObject* args, + PyObject* kwargs, + PyObject* torch_api, + const char* module_name, + const char* func_name_override = nullptr) -> PyObject*; + +// Used for functions that have no argument parsing. +auto handle_torch_function( + PyObject* self, + const std::string& func_name, + PyObject* args = nullptr, + PyObject* kwargs = nullptr, + PyObject* torch_api = THPVariableClass, + const std::string& module_name = "torch.Tensor") -> PyObject*; + +// Used for functions created in C++, e.g., a C++ custom op, which doesn't use +// PythonArgParser to get overloaded_args.
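+// +// An illustrative call (a sketch; `my_op_py_function` and the surrounding +// custom-op plumbing are assumptions, not part of this header): +// +// return handle_torch_function_no_python_arg_parser( +// overloaded_args, args, kwargs, "my_custom_op", +// my_op_py_function, "torch.ops.my_ns");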
+ +enum class TorchFunctionName { TorchFunction, TorchDispatch }; + +auto TORCH_PYTHON_API handle_torch_function_no_python_arg_parser( + at::ArrayRef<PyObject*> overloaded_args, + PyObject* args, + PyObject* kwargs, + const char* func_name, + PyObject* torch_api_function, + const char* module_name, + TorchFunctionName torch_function_name = TorchFunctionName::TorchFunction) + -> PyObject*; + +// Used for getters of Tensor properties +auto handle_torch_function_getter( + THPVariable* self, + const std::string& property_name) -> PyObject*; + +// Used for setters of Tensor properties. +auto handle_torch_function_setter( + THPVariable* self, + const std::string& property_name, + PyObject* value) -> int; + +// Used for __getitem__ and __setitem__ +auto handle_torch_function_indexing( + PyObject* self, + PyObject* index, + PyObject* val = nullptr) -> PyObject*; + +/* + * Check if the input obj is a Tensor type, including its subclasses, or an + * overloaded type. If the type defines __torch_function__, it also returns + * true. Otherwise returns false. If the class is not torch.Tensor, and it + * defines __torch_function__, we append obj to overloaded_args. + * + * 'obj': the input argument to be checked + * 'overloaded_args': the vector to append the overloaded args. + */ +bool is_tensor_and_append_overloaded( + PyObject* obj, + std::vector<PyObject*>* overloaded_args); + +/* + * Check if the input obj is a Tensor List or Tensor Tuple type. First check + * whether obj is a Tuple or List type; if true, iterate over each element and + * check whether it is a Tensor type, including its subclass or overloaded + * type. At the same time, the overloaded arg is appended to overloaded_args. + * + * 'obj': the input argument to be checked + * 'overloaded_args': the vector to append the overloaded args. + * 'argnum': the number of total arguments of the function being checked. + * 'throw_error': whether to throw an error if any element in the list or + * tuple is not a tensor type or overloaded. + */ +bool is_tensor_list_and_append_overloaded( + PyObject* obj, + std::vector<PyObject*>* overloaded_args, + int argnum, + bool throw_error); + +/* Given an argument that is definitely a tensor and is definitely overloaded, + * append it to the overloaded arguments list. Use this instead of + * is_tensor_and_append_overloaded in situations where you have a PyObject + * and you know it definitely is a Tensor and it is definitely overloaded. + * + * 'overloaded_args': the vector to append the overloaded args + * 'obj': the input tensor that is overloaded + */ +void append_overloaded_tensor( + std::vector<PyObject*>* overloaded_args, + PyObject* obj); + +/* Given an argument that is definitely a type and is definitely overloaded, + * append it to the overloaded arguments list. Use this only with + * __torch_dispatch__, where we operate on classes that have a + * __torch_dispatch__ classmethod. + * + * 'overloaded_args': the vector to append the overloaded type + * 'obj': the input class that has a __torch_dispatch__ classmethod.
+ */ +void append_overloaded_type( + std::vector* overloaded_args, + PyObject* obj); + +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d719de730551b97d09c4d5c015b1c9908f722e1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_dispatch.h @@ -0,0 +1,17 @@ +#include +#include + +namespace torch { +namespace impl { +namespace dispatch { + +void initDispatchBindings(PyObject* module); + +void python_op_registration_trampoline_impl( + const c10::OperatorHandle& op, + c10::DispatchKey key, + torch::jit::Stack* stack); + +} // namespace dispatch +} // namespace impl +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_numbers.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_numbers.h new file mode 100644 index 0000000000000000000000000000000000000000..2a17afdf0e18f176138bed3ae9ddc9e968d02195 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_numbers.h @@ -0,0 +1,207 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// largest integer that can be represented consecutively in a double +const int64_t DOUBLE_INT_MAX = 9007199254740992; + +inline PyObject* THPUtils_packDeviceIndex(c10::DeviceIndex value) { + return PyLong_FromLong(value); +} + +inline PyObject* THPUtils_packInt32(int32_t value) { + return PyLong_FromLong(value); +} + +inline PyObject* THPUtils_packInt64(int64_t value) { + return PyLong_FromLongLong(value); +} + +inline PyObject* THPUtils_packUInt32(uint32_t value) { + return PyLong_FromUnsignedLong(value); +} + +inline PyObject* THPUtils_packUInt64(uint64_t value) { + return PyLong_FromUnsignedLongLong(value); +} + +inline PyObject* THPUtils_packDoubleAsInt(double value) { + return PyLong_FromDouble(value); +} + +inline bool THPUtils_checkLongExact(PyObject* obj) { + return PyLong_CheckExact(obj) && !PyBool_Check(obj); +} + +inline bool THPUtils_checkLong(PyObject* obj) { + // Fast path + if (THPUtils_checkLongExact(obj)) { + return true; + } + +#ifdef USE_NUMPY + if (torch::utils::is_numpy_int(obj)) { + return true; + } +#endif + + return PyLong_Check(obj) && !PyBool_Check(obj); +} + +inline int32_t THPUtils_unpackInt(PyObject* obj) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int overflow; + long value = PyLong_AsLongAndOverflow(obj, &overflow); + if (value == -1 && PyErr_Occurred()) { + throw python_error(); + } + if (overflow != 0) { + throw std::runtime_error("Overflow when unpacking long"); + } + if (value > std::numeric_limits::max() || + value < std::numeric_limits::min()) { + throw std::runtime_error("Overflow when unpacking long"); + } + return (int32_t)value; +} + +inline int64_t THPUtils_unpackLong(PyObject* obj) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int overflow; + long long value = PyLong_AsLongLongAndOverflow(obj, &overflow); + if (value == -1 && PyErr_Occurred()) { + throw python_error(); + } + if (overflow != 0) { + throw std::runtime_error("Overflow when unpacking long"); + } + return (int64_t)value; +} + +inline uint32_t THPUtils_unpackUInt32(PyObject* obj) { + unsigned long value = PyLong_AsUnsignedLong(obj); + if (PyErr_Occurred()) { + throw python_error(); + } + if (value > 
std::numeric_limits<uint32_t>::max()) { + throw std::runtime_error("Overflow when unpacking unsigned long"); + } + return (uint32_t)value; +} + +inline uint64_t THPUtils_unpackUInt64(PyObject* obj) { + unsigned long long value = PyLong_AsUnsignedLongLong(obj); + if (PyErr_Occurred()) { + throw python_error(); + } + return (uint64_t)value; +} + +bool THPUtils_checkIndex(PyObject* obj); + +inline int64_t THPUtils_unpackIndex(PyObject* obj) { + if (!THPUtils_checkLong(obj)) { + auto index = THPObjectPtr(PyNumber_Index(obj)); + if (index == nullptr) { + throw python_error(); + } + // NB: This needs to be called before `index` goes out of scope and the + // underlying object's refcount is decremented + return THPUtils_unpackLong(index.get()); + } + return THPUtils_unpackLong(obj); +} + +inline bool THPUtils_unpackBool(PyObject* obj) { + if (obj == Py_True) { + return true; + } else if (obj == Py_False) { + return false; + } else { + throw std::runtime_error("couldn't convert python object to boolean"); + } +} + +inline bool THPUtils_checkBool(PyObject* obj) { +#ifdef USE_NUMPY + if (torch::utils::is_numpy_bool(obj)) { + return true; + } +#endif + return PyBool_Check(obj); +} + +inline bool THPUtils_checkDouble(PyObject* obj) { +#ifdef USE_NUMPY + if (torch::utils::is_numpy_scalar(obj)) { + return true; + } +#endif + return PyFloat_Check(obj) || PyLong_Check(obj); +} + +inline double THPUtils_unpackDouble(PyObject* obj) { + if (PyFloat_Check(obj)) { + return PyFloat_AS_DOUBLE(obj); + } + double value = PyFloat_AsDouble(obj); + if (value == -1 && PyErr_Occurred()) { + throw python_error(); + } + return value; +} + +inline c10::complex<double> THPUtils_unpackComplexDouble(PyObject* obj) { + Py_complex value = PyComplex_AsCComplex(obj); + if (value.real == -1.0 && PyErr_Occurred()) { + throw python_error(); + } + + return c10::complex<double>(value.real, value.imag); +} + +inline bool THPUtils_unpackNumberAsBool(PyObject* obj) { + if (PyFloat_Check(obj)) { + return (bool)PyFloat_AS_DOUBLE(obj); + } + + if (PyComplex_Check(obj)) { + double real_val = PyComplex_RealAsDouble(obj); + double imag_val = PyComplex_ImagAsDouble(obj); + return !(real_val == 0 && imag_val == 0); + } + + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int overflow; + long long value = PyLong_AsLongLongAndOverflow(obj, &overflow); + if (value == -1 && PyErr_Occurred()) { + throw python_error(); + } + // No need to check overflow, because when overflow occurred, it should + // return true in order to keep the same behavior as numpy.
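+ // (For example, a Python int like 2**70 overflows int64 but still + // converts to true, matching bool() in both Python and numpy.)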
+ return (bool)value; +} + +inline c10::DeviceIndex THPUtils_unpackDeviceIndex(PyObject* obj) { + int overflow = 0; + long value = PyLong_AsLongAndOverflow(obj, &overflow); + if (value == -1 && PyErr_Occurred()) { + throw python_error(); + } + if (overflow != 0) { + throw std::runtime_error("Overflow when unpacking DeviceIndex"); + } + if (value > std::numeric_limits<c10::DeviceIndex>::max() || + value < std::numeric_limits<c10::DeviceIndex>::min()) { + throw std::runtime_error("Overflow when unpacking DeviceIndex"); + } + return (c10::DeviceIndex)value; +} diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_raii.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_raii.h new file mode 100644 index 0000000000000000000000000000000000000000..70a5ddfeb55ee441104c4e0db684bd68f42b6bdb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_raii.h @@ -0,0 +1,86 @@ +#include +#include +#include + +namespace torch { +namespace impl { + +template <typename Guard, typename... Args> +struct RAIIContextManager { + explicit RAIIContextManager(Args&&... args) + : args_(std::forward<Args>(args)...) {} + + void enter() { + auto emplace = [&](Args... args) { + guard_.emplace(std::forward<Args>(args)...); + }; + std::apply(std::move(emplace), args_); + } + + void exit() { + guard_ = c10::nullopt; + } + + private: + c10::optional<Guard> guard_; + std::tuple<Args...> args_; +}; + +// Turns a C++ RAII guard into a Python context manager. +// See _ExcludeDispatchKeyGuard in python_dispatch.cpp for an example. +template <typename Guard, typename... Args> +void py_context_manager(const py::module& m, const char* name) { + using ContextManagerT = RAIIContextManager<Guard, Args...>; + py::class_<ContextManagerT>(m, name) + .def(py::init<Args...>()) + .def("__enter__", [](ContextManagerT& guard) { guard.enter(); }) + .def( + "__exit__", + [](ContextManagerT& guard, + py::object exc_type, + py::object exc_value, + py::object traceback) { guard.exit(); }); +} + +template <typename Guard, typename... Args> +struct DeprecatedRAIIContextManager { + explicit DeprecatedRAIIContextManager(Args&&... args) { + guard_.emplace(std::forward<Args>(args)...); + } + + void enter() {} + + void exit() { + guard_ = c10::nullopt; + } + + private: + c10::optional<Guard> guard_; + std::tuple<Args...> args_; +}; + +// Definition: a "Python RAII guard" is an object in Python that acquires +// a resource on init and releases the resource on deletion. +// +// This API turns a C++ RAII guard into an object that can be used either as a +// Python context manager or as a "Python RAII guard".
+// +// Please prefer `py_context_manager` to this API if you are binding a new +// RAII guard into Python because "Python RAII guards" don't work as expected +// in Python (Python makes no guarantees about when an object gets deleted) +template +void py_context_manager_DEPRECATED(const py::module& m, const char* name) { + using ContextManagerT = DeprecatedRAIIContextManager; + py::class_(m, name) + .def(py::init()) + .def("__enter__", [](ContextManagerT& guard) { guard.enter(); }) + .def( + "__exit__", + [](ContextManagerT& guard, + py::object exc_type, + py::object exc_value, + py::object traceback) { guard.exit(); }); +} + +} // namespace impl +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h new file mode 100644 index 0000000000000000000000000000000000000000..2819f56b6bab35cb3547de06c3f0f43b90dd36f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_scalars.h @@ -0,0 +1,163 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace utils { + +template +inline T unpackIntegral(PyObject* obj, const char* type) { +#if PY_VERSION_HEX >= 0x030a00f0 + // In Python-3.10 floats can no longer be silently converted to integers + // Keep backward compatible behavior for now + if (PyFloat_Check(obj)) { + return c10::checked_convert(THPUtils_unpackDouble(obj), type); + } + return c10::checked_convert(THPUtils_unpackLong(obj), type); +#else + return static_cast(THPUtils_unpackLong(obj)); +#endif +} + +inline void store_scalar(void* data, at::ScalarType scalarType, PyObject* obj) { + switch (scalarType) { + case at::kByte: + *(uint8_t*)data = unpackIntegral(obj, "uint8"); + break; + case at::kUInt16: + *(uint16_t*)data = unpackIntegral(obj, "uint16"); + break; + case at::kUInt32: + *(uint32_t*)data = unpackIntegral(obj, "uint32"); + break; + case at::kUInt64: + // NB: This doesn't allow implicit conversion of float to int + *(uint64_t*)data = THPUtils_unpackUInt64(obj); + break; + case at::kChar: + *(int8_t*)data = unpackIntegral(obj, "int8"); + break; + case at::kShort: + *(int16_t*)data = unpackIntegral(obj, "int16"); + break; + case at::kInt: + *(int32_t*)data = unpackIntegral(obj, "int32"); + break; + case at::kLong: + *(int64_t*)data = unpackIntegral(obj, "int64"); + break; + case at::kHalf: + *(at::Half*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat: + *(float*)data = (float)THPUtils_unpackDouble(obj); + break; + case at::kDouble: + *(double*)data = THPUtils_unpackDouble(obj); + break; + case at::kComplexHalf: + *(c10::complex*)data = + (c10::complex)static_cast>( + THPUtils_unpackComplexDouble(obj)); + break; + case at::kComplexFloat: + *(c10::complex*)data = + (c10::complex)THPUtils_unpackComplexDouble(obj); + break; + case at::kComplexDouble: + *(c10::complex*)data = THPUtils_unpackComplexDouble(obj); + break; + case at::kBool: + *(bool*)data = THPUtils_unpackNumberAsBool(obj); + break; + case at::kBFloat16: + *(at::BFloat16*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e5m2: + *(at::Float8_e5m2*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e5m2fnuz: + *(at::Float8_e5m2fnuz*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case at::kFloat8_e4m3fn: + *(at::Float8_e4m3fn*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + case 
at::kFloat8_e4m3fnuz: + *(at::Float8_e4m3fnuz*)data = + at::convert(THPUtils_unpackDouble(obj)); + break; + default: + throw std::runtime_error("invalid type"); + } +} + +inline PyObject* load_scalar(const void* data, at::ScalarType scalarType) { + switch (scalarType) { + case at::kByte: + return THPUtils_packInt64(*(uint8_t*)data); + case at::kUInt16: + return THPUtils_packInt64(*(uint16_t*)data); + case at::kUInt32: + return THPUtils_packUInt32(*(uint32_t*)data); + case at::kUInt64: + return THPUtils_packUInt64(*(uint64_t*)data); + case at::kChar: + return THPUtils_packInt64(*(int8_t*)data); + case at::kShort: + return THPUtils_packInt64(*(int16_t*)data); + case at::kInt: + return THPUtils_packInt64(*(int32_t*)data); + case at::kLong: + return THPUtils_packInt64(*(int64_t*)data); + case at::kHalf: + return PyFloat_FromDouble( + at::convert(*(at::Half*)data)); + case at::kFloat: + return PyFloat_FromDouble(*(float*)data); + case at::kDouble: + return PyFloat_FromDouble(*(double*)data); + case at::kComplexHalf: { + auto data_ = reinterpret_cast*>(data); + return PyComplex_FromDoubles(data_->real(), data_->imag()); + } + case at::kComplexFloat: { + auto data_ = reinterpret_cast*>(data); + return PyComplex_FromDoubles(data_->real(), data_->imag()); + } + case at::kComplexDouble: + return PyComplex_FromCComplex( + *reinterpret_cast((c10::complex*)data)); + case at::kBool: + return PyBool_FromLong(*(bool*)data); + case at::kBFloat16: + return PyFloat_FromDouble( + at::convert(*(at::BFloat16*)data)); + case at::kFloat8_e5m2: + return PyFloat_FromDouble( + at::convert(*(at::Float8_e5m2*)data)); + case at::kFloat8_e4m3fn: + return PyFloat_FromDouble( + at::convert(*(at::Float8_e4m3fn*)data)); + case at::kFloat8_e5m2fnuz: + return PyFloat_FromDouble(at::convert( + *(at::Float8_e5m2fnuz*)data)); + case at::kFloat8_e4m3fnuz: + return PyFloat_FromDouble(at::convert( + *(at::Float8_e4m3fnuz*)data)); + default: + throw std::runtime_error("invalid type"); + } +} + +} // namespace utils +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_strings.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_strings.h new file mode 100644 index 0000000000000000000000000000000000000000..a2754ef4610b4d25bc24f418fdab10effc32bac4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_strings.h @@ -0,0 +1,129 @@ +#pragma once + +#include +#include +#include +#include +#include + +// Utilities for handling Python strings. Note that PyString, when defined, is +// the same as PyBytes. + +// Returns true if obj is a bytes/str or unicode object +// As of Python 3.6, this does not require the GIL +inline bool THPUtils_checkString(PyObject* obj) { + return PyBytes_Check(obj) || PyUnicode_Check(obj); +} + +// Unpacks PyBytes (PyString) or PyUnicode as std::string +// PyBytes are unpacked as-is. PyUnicode is unpacked as UTF-8. 
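+// For example (a sketch; `obj` is assumed to be a live bytes or str object):
+//   std::string s = THPUtils_unpackString(obj);  // bytes copied as-is,
+//                                                // str encoded as UTF-8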
+// NOTE: this method requires the GIL
+inline std::string THPUtils_unpackString(PyObject* obj) {
+  if (PyBytes_Check(obj)) {
+    size_t size = PyBytes_GET_SIZE(obj);
+    return std::string(PyBytes_AS_STRING(obj), size);
+  }
+  if (PyUnicode_Check(obj)) {
+    Py_ssize_t size = 0;
+    const char* data = PyUnicode_AsUTF8AndSize(obj, &size);
+    if (!data) {
+      throw std::runtime_error("error unpacking string as utf-8");
+    }
+    return std::string(data, (size_t)size);
+  }
+  throw std::runtime_error("unpackString: expected bytes or unicode object");
+}
+
+// Unpacks PyBytes (PyString) or PyUnicode as c10::string_view
+// PyBytes are unpacked as-is. PyUnicode is unpacked as UTF-8.
+// NOTE: If `obj` is destroyed, then the non-owning c10::string_view will
+// become invalid. If the string needs to be accessed at any point after
+// `obj` is destroyed, then the c10::string_view should be copied into
+// a std::string, or another owning object, and kept alive. For an example,
+// look at how IValue and autograd nodes handle c10::string_view arguments.
+// NOTE: this method requires the GIL
+inline c10::string_view THPUtils_unpackStringView(PyObject* obj) {
+  if (PyBytes_Check(obj)) {
+    size_t size = PyBytes_GET_SIZE(obj);
+    return c10::string_view(PyBytes_AS_STRING(obj), size);
+  }
+  if (PyUnicode_Check(obj)) {
+    Py_ssize_t size = 0;
+    const char* data = PyUnicode_AsUTF8AndSize(obj, &size);
+    if (!data) {
+      throw std::runtime_error("error unpacking string as utf-8");
+    }
+    return c10::string_view(data, (size_t)size);
+  }
+  throw std::runtime_error("unpackString: expected bytes or unicode object");
+}
+
+inline PyObject* THPUtils_packString(const char* str) {
+  return PyUnicode_FromString(str);
+}
+
+inline PyObject* THPUtils_packString(const std::string& str) {
+  return PyUnicode_FromStringAndSize(str.c_str(), str.size());
+}
+
+inline PyObject* THPUtils_internString(const std::string& str) {
+  return PyUnicode_InternFromString(str.c_str());
+}
+
+// Precondition: THPUtils_checkString(obj) must be true
+inline bool THPUtils_isInterned(PyObject* obj) {
+  return PyUnicode_CHECK_INTERNED(obj);
+}
+
+// Precondition: THPUtils_checkString(obj) must be true
+inline void THPUtils_internStringInPlace(PyObject** obj) {
+  PyUnicode_InternInPlace(obj);
+}
+
+/*
+ * Reference:
+ * https://github.com/numpy/numpy/blob/f4c497c768e0646df740b647782df463825bfd27/numpy/core/src/common/get_attr_string.h#L42
+ *
+ * Stripped down version of PyObject_GetAttrString,
+ * avoids lookups for None, tuple, and List objects,
+ * and doesn't create a PyErr since this code ignores it.
+ *
+ * This can be much faster than PyObject_GetAttrString where
+ * exceptions are not used by the caller.
+ *
+ * 'obj' is the object to search for attribute.
+ *
+ * 'name' is the attribute to search for.
+ *
+ * Returns a py::object wrapping the return value. If the attribute lookup
+ * failed the value will be NULL.
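+ *
+ * A typical call pattern (a sketch; the attribute name is illustrative):
+ *
+ *   py::object fwd = PyObject_FastGetAttrString(obj, "forward");
+ *   if (fwd.ptr() != nullptr) {
+ *     // attribute exists; no exception was set on the lookup miss path
+ *   }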
+ *
+ */
+
+// NOLINTNEXTLINE(clang-diagnostic-unused-function)
+static py::object PyObject_FastGetAttrString(PyObject* obj, const char* name) {
+  PyTypeObject* tp = Py_TYPE(obj);
+  PyObject* res = (PyObject*)nullptr;
+
+  /* Attribute referenced by (char *)name */
+  if (tp->tp_getattr != nullptr) {
+    // This is OK per https://bugs.python.org/issue39620
+    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+    res = (*tp->tp_getattr)(obj, const_cast<char*>(name));
+    if (res == nullptr) {
+      PyErr_Clear();
+    }
+  }
+  /* Attribute referenced by (PyObject *)name */
+  else if (tp->tp_getattro != nullptr) {
+    auto w = py::reinterpret_steal<py::object>(THPUtils_internString(name));
+    if (w.ptr() == nullptr) {
+      return py::object();
+    }
+    res = (*tp->tp_getattro)(obj, w.ptr());
+    if (res == nullptr) {
+      PyErr_Clear();
+    }
+  }
+  return py::reinterpret_steal<py::object>(res);
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_stub.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_stub.h
new file mode 100644
index 0000000000000000000000000000000000000000..336c530d2b1faa35d7c87399846bb834a84ad569
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_stub.h
@@ -0,0 +1,4 @@
+#pragma once
+
+struct _object;
+using PyObject = _object;
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_symnode.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_symnode.h
new file mode 100644
index 0000000000000000000000000000000000000000..c4814930507bfefc2240cf4fbda16784d2ddca96
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_symnode.h
@@ -0,0 +1,288 @@
+#pragma once
+
+#include
+#include
+
+#include
+#include
+#include
+
+namespace torch {
+
+TORCH_PYTHON_API py::handle get_symint_class();
+TORCH_PYTHON_API py::handle get_symfloat_class();
+TORCH_PYTHON_API py::handle get_symbool_class();
+
+// NB: These functions must not be called too early, otherwise torch is not
+// set up. An alternate design is to have torch "register" the object to us.
+inline bool is_symint(py::handle obj) {
+  return py::isinstance(obj, get_symint_class());
+}
+inline bool is_symfloat(py::handle obj) {
+  return py::isinstance(obj, get_symfloat_class());
+}
+inline bool is_symbool(py::handle obj) {
+  return py::isinstance(obj, get_symbool_class());
+}
+
+namespace impl {
+
+// This c10::SymNodeImpl is simply backed by a Python object that
+// implements the API. The Python object is the source of truth;
+// this is just an adapter so C++ calls can get to the object.
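+//
+// Rough shape of the round trip (a sketch; `py_sym` stands for any Python
+// object implementing the SymNode protocol):
+//
+//   c10::SymNode n = c10::make_intrusive<PythonSymNodeImpl>(py_sym);
+//   c10::SymNode m = n->add(other);   // takes the GIL, calls py_sym.add(...)
+//                                     // and wraps the result in a new
+//                                     // PythonSymNodeImpl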
+class PythonSymNodeImpl : public c10::SymNodeImpl { + public: + PythonSymNodeImpl(py::object pyobj) : c10::SymNodeImpl() { + pyobj_ = std::make_shared( + pyobj.release().ptr(), getPyInterpreter()); + }; + + c10::SymNode wrap_int(int64_t num) override { + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr("wrap_int")(num); + return c10::make_intrusive(std::move(r)); + } + + c10::SymNode wrap_float(double num) override { + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr("wrap_float")(num); + return c10::make_intrusive(std::move(r)); + } + + c10::SymNode wrap_bool(bool num) override { + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr("wrap_bool")(num); + return c10::make_intrusive(std::move(r)); + } + +#define TORCH_SYMNODE_SIZES_STRIDES(n) \ + c10::SymNode n( \ + c10::ArrayRef sizes, c10::ArrayRef strides) \ + override { \ + py::gil_scoped_acquire acquire; \ + auto r = getPyObj().attr(#n)(sizes, strides); \ + return c10::make_intrusive(std::move(r)); \ + } + + // clang-format off + TORCH_SYMNODE_SIZES_STRIDES(is_contiguous) + TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_contiguous_2d) + TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_contiguous_3d) + TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_strides_2d) + TORCH_SYMNODE_SIZES_STRIDES(is_channels_last_strides_3d) + TORCH_SYMNODE_SIZES_STRIDES(is_non_overlapping_and_dense) + // clang-format on + +#undef TORCH_SYMNODE_SIZES_STRIDES + + bool bool_() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("bool_")().is(py::handle(Py_True)); + } + + bool is_int() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("is_int")().is(py::handle(Py_True)); + } + + bool is_float() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("is_float")().is(py::handle(Py_True)); + } + + bool is_bool() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("is_bool")().is(py::handle(Py_True)); + } + + bool is_nested_int() const override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("is_nested_int")().is(py::handle(Py_True)); + } + + bool has_hint() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("has_hint")().is(py::handle(Py_True)); + } + + int64_t guard_int(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("guard_int")(file, line).cast(); + } + + double guard_float(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("guard_float")(file, line).cast(); + } + + bool guard_bool(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("guard_bool")(file, line).cast(); + } + + bool expect_true(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("expect_true")(file, line).cast(); + } + + bool expect_size(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("expect_size")(file, line).cast(); + } + + bool guard_size_oblivious(const char* file, int64_t line) override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("guard_size_oblivious")(file, line).cast(); + } + + int64_t int_() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("int_")().cast(); + } + + c10::optional maybe_as_int() override { + py::gil_scoped_acquire acquire; + const auto& r = getPyObj().attr("maybe_as_int")(); + if (r.is_none()) { + return c10::nullopt; + } else { + return r.cast(); + } + } + + 
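+  // The unary/binary operators below all funnel through dispatch_common_
+  // (or dispatch_sym_ite_ for the ternary case), defined just after str():
+  // the C++ method name, captured via __func__, is looked up as an attribute
+  // on the underlying Python object, invoked under the GIL, and the result
+  // is wrapped in a fresh PythonSymNodeImpl.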
std::string str() override { + py::gil_scoped_acquire acquire; + return getPyObj().attr("str")().cast(); + } + + c10::SymNode dispatch_sym_ite_( + const char* fname, + const c10::SymNode& other, + const c10::SymNode& third) { + auto pother = dynamic_cast(other.get()); + auto pthird = dynamic_cast(third.get()); + TORCH_CHECK(pother); + TORCH_CHECK(pthird); + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr(fname)(pother->getPyObj(), pthird->getPyObj()); + return c10::make_intrusive(r); + } + + c10::SymNode dispatch_common_(const char* fname, const c10::SymNode& other) { + auto pother = dynamic_cast(other.get()); + TORCH_CHECK(pother); + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr(fname)(pother->getPyObj()); + return c10::make_intrusive(r); + } + + c10::SymNode dispatch_common_(const char* fname) { + py::gil_scoped_acquire acquire; + auto r = getPyObj().attr(fname)(); + return c10::make_intrusive(r); + } + + c10::SymNode add(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode sub(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode mul(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode truediv(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode pow(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode floordiv(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode mod(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode eq(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode ne(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode gt(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode lt(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode le(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode ge(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode sym_min(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + c10::SymNode sym_max(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode sym_and(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode sym_or(const c10::SymNode& other) override { + return dispatch_common_(__func__, other); + } + + c10::SymNode sym_ite(const c10::SymNode& other, const c10::SymNode& third) + override { + return dispatch_sym_ite_(__func__, other, third); + } + + c10::SymNode sym_not() override { + return dispatch_common_(__func__); + } + + c10::SymNode ceil() override { + return dispatch_common_(__func__); + } + + c10::SymNode floor() override { + return dispatch_common_(__func__); + } + + c10::SymNode neg() override { + return dispatch_common_(__func__); + } + + c10::SymNode clone() override { + return dispatch_common_(__func__); + } + + c10::SymNode sym_float() override { + return dispatch_common_(__func__); + } + + py::handle getPyObj() const { + return py::handle(pyobj_->ptr(getPyInterpreter())); + } + std::shared_ptr pyobj_ = nullptr; +}; + +} // namespace impl +} // 
namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_tuples.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_tuples.h
new file mode 100644
index 0000000000000000000000000000000000000000..ab71ccbd4441180ad58e2df8f57179098b4f83eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_tuples.h
@@ -0,0 +1,27 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+inline void THPUtils_packInt64Array(
+    PyObject* tuple,
+    size_t size,
+    const int64_t* sizes) {
+  for (size_t i = 0; i != size; ++i) {
+    PyObject* i64 = THPUtils_packInt64(sizes[i]);
+    if (!i64) {
+      throw python_error();
+    }
+    PyTuple_SET_ITEM(tuple, i, i64);
+  }
+}
+
+inline PyObject* THPUtils_packInt64Array(size_t size, const int64_t* sizes) {
+  THPObjectPtr tuple(PyTuple_New(size));
+  if (!tuple)
+    throw python_error();
+  THPUtils_packInt64Array(tuple.get(), size, sizes);
+  return tuple.release();
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/six.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/six.h
new file mode 100644
index 0000000000000000000000000000000000000000..cfca55bb86ec7158e7d3de90dbaae0b0e6cde4b2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/six.h
@@ -0,0 +1,52 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+namespace six {
+
+// Usually instances of PyStructSequence are also instances of tuple, but in
+// some py2 environments they are not, so we have to manually check the name
+// of the type to determine if it is a namedtuple returned by a pytorch
+// operator.
+
+inline bool isStructSeq(pybind11::handle input) {
+  return pybind11::cast<std::string>(input.get_type().attr("__module__")) ==
+      "torch.return_types";
+}
+
+inline bool isStructSeq(PyObject* obj) {
+  return isStructSeq(pybind11::handle(obj));
+}
+
+inline bool isTuple(pybind11::handle input) {
+  if (PyTuple_Check(input.ptr())) {
+    return true;
+  }
+  return false;
+}
+
+inline bool isTuple(PyObject* obj) {
+  return isTuple(pybind11::handle(obj));
+}
+
+// maybeAsTuple: if the input is a structseq, then convert it to a tuple
+//
+// On Python 3, structseq is a subtype of tuple, so these APIs could be used
+// directly. But on Python 2, structseq is not a subtype of tuple, so we need
+// to manually create a new tuple object from structseq.
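+//
+// Intended use (a sketch; `res` is assumed to be a value returned by an
+// operator binding):
+//
+//   THPObjectPtr tup = six::maybeAsTuple(res);
+//   // tup owns a reference to an object that is safe to treat as a tuple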
+inline THPObjectPtr maybeAsTuple(PyStructSequence* obj) { + Py_INCREF(obj); + return THPObjectPtr((PyObject*)obj); +} + +inline THPObjectPtr maybeAsTuple(PyObject* obj) { + if (isStructSeq(obj)) + return maybeAsTuple((PyStructSequence*)obj); + Py_INCREF(obj); + return THPObjectPtr(obj); +} + +} // namespace six diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/structseq.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/structseq.h new file mode 100644 index 0000000000000000000000000000000000000000..0d91d39d34be617c3b3ab7351a8c33f1c46b05f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/structseq.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace torch { +namespace utils { + +PyObject* returned_structseq_repr(PyStructSequence* obj); + +} +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_apply.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_apply.h new file mode 100644 index 0000000000000000000000000000000000000000..bd06e0f3e30b482f6521c869e605790618742404 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_apply.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include + +namespace torch { +namespace utils { + +const at::Tensor& apply_(const at::Tensor& self, PyObject* fn); +const at::Tensor& map_( + const at::Tensor& self, + const at::Tensor& other_, + PyObject* fn); +const at::Tensor& map2_( + const at::Tensor& self, + const at::Tensor& x_, + const at::Tensor& y_, + PyObject* fn); + +} // namespace utils +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_dtypes.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_dtypes.h new file mode 100644 index 0000000000000000000000000000000000000000..32b769971d03fa7e5cb87031b3ae972f5cf3ffeb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_dtypes.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace utils { + +std::pair getDtypeNames(at::ScalarType scalarType); + +void initializeDtypes(); + +} // namespace utils +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_flatten.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_flatten.h new file mode 100644 index 0000000000000000000000000000000000000000..04a55ec7960e671d825524d1000d69e70ee6bf0f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_flatten.h @@ -0,0 +1,86 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace utils { + +/// Generate an ID for a combination of tensor backend + scalar type to be used +/// when ordering tensors ('like' tensors are grouped by pulling out their +/// backend + scalar type, so this function combines that into a single number) +inline size_t type_id(const at::Tensor& tensor) { + return static_cast(tensor.options().backend()) * + static_cast(at::ScalarType::NumOptions) + + static_cast(tensor.scalar_type()); +} + +inline at::Tensor flatten_dense_tensors(at::TensorList tensors) { + return at::flatten_dense_tensors(tensors); +} + +inline std::vector unflatten_dense_tensors( + const at::Tensor& flat, + at::TensorList tensors) { + return at::unflatten_dense_tensors(flat, tensors); +} + +struct TensorGroup { + std::vector tensors; + size_t 
size = 0;
+
+  size_t type_id() {
+    AT_ASSERT(!tensors.empty());
+    return ::torch::utils::type_id(tensors[0]);
+  }
+
+  const at::TensorOptions options() {
+    AT_ASSERT(!tensors.empty());
+    return tensors[0].options();
+  }
+};
+
+// Helper function that takes a list of tensors and splits them into tensor
+// groups by the size limit and outputs these tensor groups. If the input
+// tensors are of different tensor types, they will be split into different
+// groups as well.
+//
+// Two options of splitting are provided to the user:
+//
+// Imagine the size_limit is 256 and the list of input tensors are:
+// tensor_a(fp16 - 128 bytes),
+// tensor_b(fp32 - 256 bytes),
+// tensor_c(fp16 - 128 bytes),
+//
+// when fine_grained == false:
+// The function will read the list of tensors sequentially and accumulate
+// enough tensors for each data type until the size_limit, therefore:
+// it will output: {{tensor_a, tensor_c}, {tensor_b}}
+//
+// when fine_grained == true:
+// The function will read the list of tensors sequentially and accumulate
+// enough tensors for all data types until the size_limit, and then split
+// the accumulated tensors into different groups by data types, therefore:
+// it will output: {{tensor_a}, {tensor_b}, {tensor_c}}
+TORCH_API std::vector<TensorGroup> take_tensors(
+    at::TensorList tensors,
+    size_t size_limit,
+    bool fine_grained = false);
+
+TORCH_API void reorder_tensors_like(
+    std::vector<at::Tensor>& tensors,
+    at::TensorList order);
+
+TORCH_API std::pair<at::Tensor, at::Tensor> flatten_sparse_tensors(
+    at::TensorList tensors);
+
+TORCH_API std::vector<at::Tensor> unflatten_sparse_tensors(
+    const at::Tensor& flat_indices,
+    const at::Tensor& flat_values,
+    at::TensorList tensors);
+
+} // namespace utils
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_layouts.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_layouts.h
new file mode 100644
index 0000000000000000000000000000000000000000..33e32b516b1215a8e55b4261101f3ac67ce4171e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_layouts.h
@@ -0,0 +1,9 @@
+#pragma once
+
+namespace torch {
+namespace utils {
+
+void initializeLayouts();
+
+}
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_list.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_list.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ae77df4700af523a731e6ad6739ae57071421ed
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_list.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include
+
+namespace at {
+class Tensor;
+}
+
+namespace torch {
+namespace utils {
+
+PyObject* tensor_to_list(const at::Tensor& tensor);
+
+}
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_memoryformats.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_memoryformats.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b820bd02882e37c97e7b0802cac2f1235899514
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_memoryformats.h
@@ -0,0 +1,12 @@
+#pragma once
+
+#include
+#include
+#include
+
+namespace torch::utils {
+
+void initializeMemoryFormats();
+TORCH_PYTHON_API PyObject* getTHPMemoryFormat(c10::MemoryFormat);
+
+} // namespace torch::utils
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_new.h
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_new.h new file mode 100644 index 0000000000000000000000000000000000000000..7048660ec3f6a01fecf3cd610b6625b6763506da --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_new.h @@ -0,0 +1,116 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace utils { + +at::Tensor base_tensor_ctor(PyObject* args, PyObject* kwargs); +at::Tensor legacy_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +at::Tensor legacy_tensor_new( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +at::Tensor indexing_tensor_from_data( + c10::TensorOptions options, + at::ScalarType scalar_type, + c10::optional device, + PyObject* data); +at::Tensor sparse_coo_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +void _validate_sparse_coo_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); + +at::Tensor sparse_compressed_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor sparse_csr_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor sparse_csc_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor sparse_bsr_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor sparse_bsc_tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); + +void _validate_sparse_compressed_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +void _validate_sparse_csr_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +void _validate_sparse_csc_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +void _validate_sparse_bsr_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +void _validate_sparse_bsc_tensor_args( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); + +at::Tensor tensor_ctor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor as_tensor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PythonArgs& r); +at::Tensor new_tensor( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +at::Tensor new_ones( + c10::DispatchKey dispatch_key, + at::ScalarType scalar_type, + PyObject* args, + PyObject* kwargs); +at::Tensor tensor_frombuffer( + PyObject* buffer, + at::ScalarType dtype, + int64_t count, + int64_t offset, + bool requires_grad); +at::Tensor tensor_fromDLPack(PyObject* data); +at::Tensor asarray( + PyObject* obj, + c10::optional dtype, + c10::optional device, + c10::optional copy, + bool requires_grad); +} // namespace utils +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_numpy.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_numpy.h new file mode 100644 index 0000000000000000000000000000000000000000..a067af44a45271cc047ec4b2f471640f2727d3f3 --- 
/dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_numpy.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +namespace torch::utils { + +PyObject* tensor_to_numpy(const at::Tensor& tensor, bool force = false); +at::Tensor tensor_from_numpy(PyObject* obj, bool warn_if_not_writeable = true); + +int aten_to_numpy_dtype(const at::ScalarType scalar_type); +at::ScalarType numpy_dtype_to_aten(int dtype); + +bool is_numpy_available(); +bool is_numpy_int(PyObject* obj); +bool is_numpy_bool(PyObject* obj); +bool is_numpy_scalar(PyObject* obj); + +void warn_numpy_not_writeable(); +at::Tensor tensor_from_cuda_array_interface(PyObject* obj); + +void validate_numpy_for_dlpack_deleter_bug(); +bool is_numpy_dlpack_deleter_bugged(); + +} // namespace torch::utils diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_qschemes.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_qschemes.h new file mode 100644 index 0000000000000000000000000000000000000000..71e65479047b8a5513e32cfe54dbd117c786160b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/tensor_qschemes.h @@ -0,0 +1,11 @@ +#pragma once +#include + +namespace torch { +namespace utils { + +PyObject* getTHPQScheme(at::QScheme qscheme); +void initializeQSchemes(); + +} // namespace utils +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark-inl.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..4334a58683bb8a971abe117d7ce9b0edbda2db53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark-inl.h @@ -0,0 +1,161 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +namespace torch { +namespace throughput_benchmark { +namespace detail { + +template +BenchmarkExecutionStats BenchmarkHelper::benchmark( + const BenchmarkConfig& config) const { + CHECK(initialized_); + TORCH_CHECK( + config.num_worker_threads == 1, + "Only parallelization by callers is supported"); + + LOG(INFO) << at::get_parallel_info(); + + // We pre-generate inputs here for each of the threads. This allows us to + // safely move inputs out for each of the threads independently and thus avoid + // overhead from the benchmark runner itself + std::vector> thread_inputs(config.num_calling_threads); + std::vector input_iters(config.num_calling_threads); + { + std::random_device seeder; + std::mt19937 engine(seeder()); + TORCH_CHECK( + !inputs_.empty(), + "Please provide benchmark inputs." + "Did you forget to call add_input()? 
"); + std::uniform_int_distribution dist(0, inputs_.size() - 1); + + for (const auto thread_id : c10::irange(config.num_calling_threads)) { + // Just in case we generate num_iters inputs for each of the threads + // This was if one thread does all the work we will be fine + for (const auto i [[maybe_unused]] : + c10::irange(config.num_iters + config.num_warmup_iters)) { + thread_inputs[thread_id].push_back(cloneInput(inputs_[dist(engine)])); + } + input_iters[thread_id] = 0; + } + } + + std::mutex m; + std::condition_variable worker_main_cv; + std::condition_variable main_worker_cv; + // TODO: add GUARDED_BY once it is available + int64_t initialized{0}; + int64_t finished{0}; + bool start{false}; + std::atomic num_attempted_iters{0}; + std::vector callers; + + callers.reserve(config.num_calling_threads); + + bool tls_grad_enabled = c10::GradMode::is_enabled(); + c10::impl::LocalDispatchKeySet tls_key_set = + c10::impl::tls_local_dispatch_key_set(); + + for (const auto thread_id : c10::irange(config.num_calling_threads)) { + callers.emplace_back([&, thread_id]() { + // We use conditional variable as a barrier to make sure each thread + // performs required warmeup iterations before we start measuring + c10::GradMode::set_enabled(tls_grad_enabled); + c10::impl::_force_tls_local_dispatch_key_set(tls_key_set); + + for (const auto j : c10::irange(config.num_warmup_iters)) { + (void)j; + runOnce(std::move(thread_inputs[thread_id][input_iters[thread_id]])); + ++input_iters[thread_id]; + } + { + std::unique_lock lock(m); + ++initialized; + worker_main_cv.notify_one(); + // NOLINTNEXTLINE(bugprone-infinite-loop) + while (!start) { + main_worker_cv.wait(lock); + } + } + LOG(INFO) << "Starting forward thread " << thread_id; + while (num_attempted_iters.fetch_add(1) < config.num_iters) { + runOnce(std::move(thread_inputs[thread_id][input_iters[thread_id]])); + ++input_iters[thread_id]; + } + + { + std::unique_lock lock(m); + ++finished; + worker_main_cv.notify_one(); + LOG(INFO) << "Shutting down forward thread " << thread_id + << ". Total number of finished threads: " << finished; + } + }); + } + + using Clock = std::chrono::high_resolution_clock; + using RecordProfile = torch::autograd::profiler::RecordProfile; + using TimePoint = std::chrono::time_point; + TimePoint start_time; + + std::unique_ptr profiler_guard; + { + std::unique_lock lock(m); + while (initialized != config.num_calling_threads) { + worker_main_cv.wait(lock); + } + if (!config.profiler_output_path.empty()) { + LOG(INFO) << "Using Autograd profiler. Trace will be saved to " + << config.profiler_output_path; + profiler_guard = + std::make_unique(config.profiler_output_path); + } + LOG(INFO) << "Starting threads"; + start = true; + start_time = Clock::now(); + } + + main_worker_cv.notify_all(); + { + std::unique_lock lock(m); + worker_main_cv.wait( + lock, [&]() { return finished == config.num_calling_threads; }); + } + auto end_time = std::chrono::high_resolution_clock::now(); + profiler_guard.reset(); + LOG(INFO) << "Finished benchmark"; + + BenchmarkExecutionStats stats; + // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) + float total_time_ms = std::chrono::duration_cast( + end_time - start_time) + .count() / + 1000.0 / 1000.0; + // We use config.num_iters instead of num_attempted_iters as it is + // repsesatative of the real work done. Last attempted iteration on each + // calling threads doesn't represent the real work (i.e. 
running the model)
+  stats.latency_avg_ms =
+      // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
+      total_time_ms * config.num_calling_threads / config.num_iters;
+  stats.num_iters = config.num_iters;
+
+  for (auto& t : callers) {
+    t.join();
+  }
+  return stats;
+}
+
+} // namespace detail
+} // namespace throughput_benchmark
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark.h
new file mode 100644
index 0000000000000000000000000000000000000000..2fca95ca16bf79a8d7306ed260dce216fd94cac2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/throughput_benchmark.h
@@ -0,0 +1,199 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+
+namespace py = pybind11;
+
+namespace torch {
+namespace throughput_benchmark {
+
+/**
+ * The struct is used to provide results of a benchmark to the caller.
+ * In the future all additional statistics should be added here.
+ */
+struct BenchmarkExecutionStats {
+  float latency_avg_ms{-1};
+  int64_t num_iters{-1};
+};
+
+std::ostream& operator<<(
+    std::ostream& os,
+    const BenchmarkExecutionStats& value);
+
+/**
+ * Use this struct in order to configure a throughput benchmark run.
+ * This struct should include parameters related to threading, batching, number
+ * of iterations, warm-up, etc. More configs can be added as needed.
+ * The general rule here is that only things that C++ must(!) be aware of
+ * should be here. If we can keep other parts in Python, we should keep them
+ * there. This is typical for things that are not perf critical and don't
+ * affect the execution statistics the benchmark returns.
+ */
+struct BenchmarkConfig {
+ public:
+  // Calling threads are those threads that are calling into a module in
+  // parallel.
+  int num_calling_threads{1};
+  // Worker threads are not supported yet. This is just an example that we plan
+  // to support some sort of multi-threaded forward calls. We may change this
+  // setting in the future to support different intra and inter op parallelism
+  // which is not available in PyTorch yet
+  int num_worker_threads{1};
+  // Warmup iters are used to make sure we run a module a few times before
+  // actually measuring things. This way we avoid cold caches and any other
+  // similar problems
+  int num_warmup_iters{1};
+  // Number of iterations the benchmark should run with. This number is
+  // separate from the warmup iterations
+  int64_t num_iters{100};
+  // If set autograd profiler will be enabled. I.e. this variable would be
+  // created before the main benchmark loop (but after the warmup):
+  // RecordProfile guard(profiler_output_path);
+  std::string profiler_output_path{""};
+};
+
+namespace detail {
+
+/**
+ * A helper class to abstract out different models we test throughput of
+ */
+template <class Input, class Output, class Model>
+class BenchmarkHelper {
+ public:
+  BenchmarkHelper();
+  explicit BenchmarkHelper(Model model)
+      : model_(std::move(model)), initialized_(true) {}
+
+  // This method is to be used in the benchmark() method
+  // Note that there is no result. This way we don't have to call this under
+  // the GIL even when running in the nn.Module mode.
+  // Otherwise the destructor of the result would race with Python
+  void runOnce(Input&&) const;
+  // This method is to be used when calling from Python directly
+  Output runOnce(py::args&&, const py::kwargs&) const;
+  // Aggregate input in the format Model expects in order to avoid further
+  // conversions at the benchmark time
+  void addInput(py::args&&, py::kwargs&&);
+  void addInput(Input&&);
+  BenchmarkExecutionStats benchmark(const BenchmarkConfig& config) const;
+
+  bool initialized() const {
+    return initialized_;
+  }
+
+  // Destructor doesn't require the GIL because it is going to be executed on
+  // the Python thread
+  std::vector<Input> inputs_;
+  Model model_;
+  bool initialized_{false};
+};
+
+struct C10_HIDDEN ModuleInput {
+  ModuleInput(ModuleInput&& other) = default;
+
+  ModuleInput(const ModuleInput&) = delete;
+  ModuleInput& operator=(ModuleInput& other) = delete;
+  ModuleInput& operator=(ModuleInput&& other) = delete;
+
+  ModuleInput(py::args&& args, py::kwargs&& kwargs)
+      : args(std::move(args)), kwargs(std::move(kwargs)) {}
+
+  py::args args;
+  py::kwargs kwargs;
+};
+typedef py::object ModuleOutput;
+typedef std::vector<at::IValue> ScriptModuleInput;
+typedef at::IValue ScriptModuleOutput;
+
+template <class Input>
+Input cloneInput(const Input& input);
+
+typedef BenchmarkHelper<ScriptModuleInput, ScriptModuleOutput, jit::Module>
+    ScriptModuleBenchmark;
+template <>
+inline BenchmarkHelper<ScriptModuleInput, ScriptModuleOutput, jit::Module>::
+    BenchmarkHelper()
+    : model_("Module", std::make_shared<jit::CompilationUnit>()),
+      initialized_(false) {}
+typedef BenchmarkHelper<ModuleInput, ModuleOutput, py::object> ModuleBenchmark;
+template <>
+inline BenchmarkHelper<ModuleInput, ModuleOutput, py::object>::BenchmarkHelper()
+    : initialized_(false) {}
+
+template <>
+void ScriptModuleBenchmark::runOnce(ScriptModuleInput&& input) const;
+
+template <>
+ScriptModuleOutput ScriptModuleBenchmark::runOnce(
+    py::args&& args,
+    const py::kwargs& kwargs) const;
+
+template <>
+void ModuleBenchmark::runOnce(ModuleInput&& input) const;
+
+template <>
+ModuleOutput ModuleBenchmark::runOnce(py::args&& args, const py::kwargs& kwargs)
+    const;
+
+template <>
+void ScriptModuleBenchmark::addInput(py::args&& args, py::kwargs&& kwargs);
+template <>
+void ScriptModuleBenchmark::addInput(ScriptModuleInput&& input);
+
+template <>
+void ModuleBenchmark::addInput(py::args&& args, py::kwargs&& kwargs);
+
+} // namespace detail
+
+/**
+ * This class is a small C++ component responsible for executing a PyTorch
+ * module under an inference-server-like load. It can emulate multiple calling
+ * threads to a single provided module. In the future we plan to enhance this
+ * component to support inter- and intra-op parallelism as well as multiple
+ * models running in a single process.
+ *
+ * For currently available configurations refer to the BenchmarkConfig
+ * documentation.
+ *
+ * The class supports working with either nn.Module or ScriptModule.
+ * Under the hood it just dispatches to the corresponding specialization of
+ * class BenchmarkHelper.
+ */
+class C10_HIDDEN ThroughputBenchmark {
+ public:
+  explicit ThroughputBenchmark(const jit::Module& module);
+  explicit ThroughputBenchmark(py::object module);
+
+  // Add one more input example. This input example should be in the exact
+  // format the module under test expects.
+  // It is the responsibility of the module to perform any such format
+  // checks; the benchmark doesn't perform any validation of its own.
+  void addInput(py::args args, py::kwargs kwargs);
+
+  // Equivalent to just running the model directly on the given input
+  py::object runOnce(py::args&& args, const py::kwargs& kwargs);
+
+  // The main method of the class. It performs a multi-threaded benchmark and
+  // returns a BenchmarkExecutionStats object with a lot of useful statistics
+  // about runtime execution. We can enhance this class in the future to
+  // provide more information to the user.
+  BenchmarkExecutionStats benchmark(const BenchmarkConfig& config) const;
+
+ private:
+  detail::ScriptModuleBenchmark script_module_;
+  detail::ModuleBenchmark module_;
+};
+} // namespace throughput_benchmark
+} // namespace torch
+
+#include
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/torch_dispatch_mode.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/torch_dispatch_mode.h
new file mode 100644
index 0000000000000000000000000000000000000000..81729f27df84fa2142abdb25774e5606c1f7b365
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/torch_dispatch_mode.h
@@ -0,0 +1,58 @@
+#pragma once
+
+#include
+
+namespace torch {
+namespace torch_dispatch_mode {
+
+struct StashTorchDispatchModeGuard {
+ public:
+  StashTorchDispatchModeGuard() {
+    if (c10::impl::TorchDispatchModeTLS::any_modes_set(
+            /*skip_infra_modes=*/true)) {
+      saved_mode_ = c10::impl::TorchDispatchModeTLS::pop_stack();
+    } else {
+      auto mode_and_key =
+          c10::impl::TorchDispatchModeTLS::pop_highest_infra_mode();
+      saved_mode_ = std::move(std::get<0>(mode_and_key));
+      saved_mode_key_ = std::get<1>(mode_and_key);
+    }
+  }
+
+  ~StashTorchDispatchModeGuard() {
+    if (saved_mode_key_ != c10::nullopt) {
+      c10::impl::TorchDispatchModeTLS::set_mode(
+          saved_mode_, saved_mode_key_.value());
+    } else {
+      c10::impl::TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
+          std::move(saved_mode_));
+    }
+  }
+
+  const std::shared_ptr& get_cur_mode() {
+    return saved_mode_;
+  }
+
+ private:
+  std::shared_ptr saved_mode_;
+  c10::optional saved_mode_key_;
+};
+
+struct StashTorchDispatchStackGuard {
+ public:
+  StashTorchDispatchStackGuard() {
+    auto old = c10::impl::TorchDispatchModeTLS::get_state();
+    c10::impl::TorchDispatchModeTLS::set_state(std::move(saved_state_));
+    saved_state_ = std::move(old);
+  }
+
+  ~StashTorchDispatchStackGuard() {
+    c10::impl::TorchDispatchModeTLS::set_state(std::move(saved_state_));
+  }
+
+ private:
+  c10::impl::TorchDispatchModeTLS saved_state_;
+};
+
+} // namespace torch_dispatch_mode
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c021d9f5cd3d0ea70a55fd8ec11b465b067c7ea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/variadic.h
@@ -0,0 +1,152 @@
+#pragma once
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+namespace torch {
+
+using at::IterArgs;
+
+struct CountTensors : IterArgs<CountTensors> {
+  size_t out = 0;
+  void operator()(const at::Tensor& x) {
+    out += 1;
+  }
+  void operator()(const c10::optional<at::Tensor>& x) {
+    out += x.has_value();
+  }
+  void operator()(at::ArrayRef<at::Tensor> xs) {
+    out += xs.size();
+  }
+};
+
+template <typename... Args>
+size_t count_tensors(Args&&...
args) { + return CountTensors().apply(std::forward(args)...).out; +} + +struct CountVariables : IterArgs { + size_t out = 0; + void operator()(const autograd::Variable& x) { + out += 1; + } + void operator()(at::ArrayRef xs) { + out += xs.size(); + } +}; + +template +inline size_t count_variables(Args&&... args) { + return CountVariables().apply(std::forward(args)...).out; +} + +//===----------------------------------------------------------------------===// +// std::index_sequence shim for C++11 +//===----------------------------------------------------------------------===// + +// A container of type-template parameter indices. +template +struct Indices {}; + +// Decrements the index N, adds N-1 to the list of indices and forwards +// whatever we already have. +template +struct MakeIndices : MakeIndices {}; + +// Partial specialization that forms our base case. When N is zero, we stop +// and define a typedef that will be visible to earlier classes due to +// inheritance. The typedef we define is an index list containing the numbers +// 0 through N-1. +template +struct MakeIndices<0, Is...> { + using indices = Indices; +}; + +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// + +template +using enable_if_t = std::enable_if_t; + +template +using disable_if_t = enable_if_t; + +template +using decay_t = std::decay_t; + +namespace detail { +template +struct pack; +} // namespace detail + +template +struct all_of : std::is_same< + detail::pack, + detail::pack> {}; + +template +struct any_of; + +template <> +struct any_of<> : std::false_type {}; + +template +struct any_of { + static constexpr bool value = head || any_of::value; +}; + +template +struct none_of { + static constexpr bool value = !any_of::value; +}; + +template +using enable_if_all_of_t = enable_if_t::value>; + +template +using disable_if_contains_t = + enable_if_all_of_t<(!std::is_same_v>)...>; + +template +void apply(Function function, Ts&&... ts) { + // https://stackoverflow.com/questions/13978916/inserting-a-variadic-argument-list-into-a-vector + // Creates a dummy array, so that each function call is evaluated in order. + // `(function(), 0)` is because `function` should (!) return `void`, so + // according to the comma operator, it is evaluated and its result (`void`) + // is discarded. Then the zero is evaluated and used as an element in the + // array. The first zero ensures the array is not empty. + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + int _[]{0, (function(std::forward(ts)), 0)...}; + (void)_; +} + +template < + typename ReturnType, + typename... Ts, + typename Function, + typename Accessor> +ReturnType unpack(Function function, Accessor accessor) { + return ReturnType(unpack( + std::move(function), + std::move(accessor), + typename MakeIndices::indices())); +} + +template < + typename ReturnType, + typename... Ts, + typename Function, + typename Accessor, + size_t... 
Is>
+ReturnType unpack(Function function, Accessor accessor, Indices<Is...>) {
+  return ReturnType(function(accessor.template operator()<Ts>(Is)...));
+}
+
+} // namespace torch
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/verbose.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/verbose.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6c5eae461bcd6d8fe52a9b9700e85dd29c7765f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/utils/verbose.h
@@ -0,0 +1,8 @@
+#pragma once
+#include
+
+namespace torch {
+
+void initVerboseBindings(PyObject* module);
+
+} // namespace torch