diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h new file mode 100644 index 0000000000000000000000000000000000000000..9b787a2163e87c903ce0bd034b424eb1773c644d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATenGeneral.h @@ -0,0 +1,3 @@ +#pragma once + +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h new file mode 100644 index 0000000000000000000000000000000000000000..1419376a9017db4c7ca788816bd0c9a6d65a82fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATenOpList.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +namespace c10 { +struct OperatorName; +} + +namespace at { + +// check if an op is a custom op (i.e. did not come from native_functions.yaml) +TORCH_API bool is_custom_op(const c10::OperatorName& opName); +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..a66523ab183923e1c15862268b1b0260ccfb9e14 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATen_fwd.h @@ -0,0 +1,46 @@ +#pragma once +#include + +// Forward declarations of core ATen types used in dispatch functions +namespace c10 { + +template +class List; +template +class IListRef; +class Stream; +class Scalar; +class SymInt; +class SymIntList; +struct Storage; +struct TensorOptions; +template +class ArrayRef; +template +class OptionalArrayRef; + +} // namespace c10 + +namespace at { + +class Tensor; +class OptionalTensorRef; +struct Dimname; +struct Generator; +using TensorList = c10::ArrayRef; +using ITensorListRef = c10::IListRef; +using IOptTensorListRef = c10::IListRef; +using DimnameList = c10::ArrayRef; +using IntArrayRef = c10::ArrayRef; +using OptionalIntArrayRef = c10::OptionalArrayRef; +using OptionalSymIntArrayRef = c10::OptionalArrayRef; + +using c10::Stream; +using c10::Storage; +using c10::QScheme; +using c10::Scalar; +using c10::SymInt; +using c10::SymIntList; +using c10::TensorOptions; + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h new file mode 100644 index 0000000000000000000000000000000000000000..bf984f05ed2bbd0cb4c87fb1dfa591c672e1f5a4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ATen_pch.h @@ -0,0 +1,164 @@ +// This global header must not depend on native_functions.yaml or +// incremental builds will be next to useless +#pragma push_macro("TORCH_ASSERT_NO_OPERATORS") +#define TORCH_ASSERT_NO_OPERATORS + +// This macro doesn't work if defined after the first time inttypes.h +// is included, so won't work anywhere if not defined here. +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif +#include + +// This list of headers was generated using a script that finds +// high-impact headers and then manually tweaked to remove OS specific +// or duplicate headers (e.g. and ) and to remove +// "impl" headers (e.g BFloat16-inl.h or complex_math.h in c10). + +// To generate the initial list: +// 1. Build pytorch from scratch with all build caching disabled +// 2. 
Generate a build trace with ninjatracing (https://github.com/nico/ninjatracing) +// $ ninjatracing /path/to/pytorch/build/.ninja_log > trace_all.json +// 3. Run pch_gen.py from https://github.com/peterbell10/build_analysis/ +// $ python pch_gen.py --threshold .80 --target torch_cpu --build_dir /path/to/pytorch/build --trace trace_all.json +// Where the threshold can be tweaked until c10 and some of ATen +// core are included but TORCH_ASSERT_NO_OPERATORS still passes. + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#pragma pop_macro("TORCH_ASSERT_NO_OPERATORS") diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Array.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Array.h new file mode 100644 index 0000000000000000000000000000000000000000..300ae51cef6b9044b1060376b19f7278fcf18a94 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Array.h @@ -0,0 +1,39 @@ +#pragma once + +// A fixed-size array type usable from both host and +// device code. + +#include +#include + +namespace at { namespace detail { + +template +struct Array { + T data[size_]; + + C10_HOST_DEVICE T operator[](int i) const { + return data[i]; + } + C10_HOST_DEVICE T& operator[](int i) { + return data[i]; + } +#if defined(USE_ROCM) + C10_HOST_DEVICE Array() = default; + C10_HOST_DEVICE Array(const Array&) = default; + C10_HOST_DEVICE Array& operator=(const Array&) = default; +#else + Array() = default; + Array(const Array&) = default; + Array& operator=(const Array&) = default; +#endif + static constexpr int size(){return size_;} + // Fill the array with x. 
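  // For example (an illustrative sketch of the fill constructor below):
  //   at::detail::Array<int, 3> a(7);  // a[0] == a[1] == a[2] == 7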
+ C10_HOST_DEVICE Array(T x) { + for (int i = 0; i < size_; i++) { + data[i] = x; + } + } +}; + +}} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h new file mode 100644 index 0000000000000000000000000000000000000000..ac728968750297227c1be4aa3e444557c1899b03 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h @@ -0,0 +1,2 @@ +#include +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h new file mode 100644 index 0000000000000000000000000000000000000000..3d1712a2ff19b159beaba48064bfd37b1587673f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/CheckMemoryFormat.h @@ -0,0 +1,25 @@ +#include + +namespace c10 { namespace impl { + +inline c10::optional +check_tensor_options_and_extract_memory_format( + const TensorOptions& options, + c10::optional memory_format) { + TORCH_CHECK( + options.requires_grad_opt() == c10::nullopt || + options.requires_grad_opt().value() == false, + "Operators taking TensorOptions cannot take a TensorOptions with " + "options.requires_grad set as true. This isn't implemented yet."); + TORCH_CHECK( + !(options.has_memory_format() && memory_format.has_value()), + "Cannot set memory_format both in TensorOptions and explicit argument; please delete " + "the redundant setter."); + if (memory_format.has_value()) { + return memory_format; + } else { + return options.memory_format_opt(); + } +} + +}} // namespace impl namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h new file mode 100644 index 0000000000000000000000000000000000000000..b77b09d595d3c33242359ee841a1f9ec0133ef13 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypeProperties.h @@ -0,0 +1,135 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + + +namespace at { + +class Tensor; + +// This class specifies a Backend and a ScalarType. Currently, it primarily +// serves as a replacement return value for Tensor::type(). Previously, +// Tensor::type() returned Type&, but we are changing Type to not be +// dtype-specific. 
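// A hedged usage sketch (illustrative, not part of this header):
//   at::Tensor t = at::ones({2, 2});
//   const at::DeprecatedTypeProperties& props = t.type();
//   if (props.is_cuda()) { /* backend check */ }
//   at::ScalarType dtype = props.scalarType();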
+class TORCH_API DeprecatedTypeProperties { + public: + DeprecatedTypeProperties(Backend backend, ScalarType scalar_type) + : backend_(backend), scalar_type_(scalar_type) {} + + Backend backend() const { + return backend_; + } + + Layout layout() const { + return layout_from_backend(backend_); + } + + bool is_sparse() const { + return layout_from_backend(backend()) == kSparse; + } + + bool is_sparse_csr() const { + return layout_from_backend(backend()) == kSparseCsr; + } + + c10::DeviceType device_type() const { + return backendToDeviceType(backend_); + } + + bool is_cuda() const { + return backendToDeviceType(backend_) == kCUDA; + } + + ScalarType scalarType() const { + return scalar_type_; + } + + caffe2::TypeMeta typeMeta() const { + return scalarTypeToTypeMeta(scalar_type_); + } + + bool operator==(const DeprecatedTypeProperties& other) const { + return backend_ == other.backend() && scalar_type_ == other.scalarType(); + } + + bool operator!=(const DeprecatedTypeProperties& other) const { + return !(*this == other); + } + + std::string toString() const { + std::string base_str; + if (backend_ == Backend::Undefined || scalar_type_ == ScalarType::Undefined) { + base_str = "UndefinedType"; + } else { + base_str = std::string(at::toString(backend_)) + at::toString(scalar_type_) + "Type"; + } + return base_str; + } + + DeprecatedTypeProperties & toBackend(Backend b) const { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + b, scalar_type_); + } + + DeprecatedTypeProperties & toScalarType(ScalarType s) const { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + backend_, s); + } + + DeprecatedTypeProperties & cpu() const { + return toBackend(Backend::CPU); + } + + DeprecatedTypeProperties & cuda() const { + return toBackend(Backend::CUDA); + } + + DeprecatedTypeProperties & hip() const { + return toBackend(Backend::HIP); + } + + /// Constructs the `TensorOptions` from a type and a `device_index`. + TensorOptions options(int16_t device_index = -1) const { + return TensorOptions().dtype(typeMeta()) + .device(device_type(), static_cast(device_index)) + .layout(layout()); + } + + /// Constructs the `TensorOptions` from a type and a Device. Asserts that + /// the device type matches the device type of the type. 
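  /// For example (illustrative): given a CPU tensor `t`,
  /// `t.type().options(at::Device(at::kCPU))` yields a TensorOptions
  /// carrying this type's dtype, layout and device.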
+ TensorOptions options(c10::optional device_opt) const { + if (!device_opt.has_value()) { + return options(-1); + } else { + Device device = device_opt.value(); + AT_ASSERT(device.type() == device_type()); + return options(device.index()); + } + } + + operator TensorOptions() const { + return options(); + } + + int64_t id() const { + return static_cast(backend()) * + static_cast(ScalarType::NumOptions) + + static_cast(scalarType()); + } + + Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const; + Storage unsafeStorageFromTH(void * th_pointer, bool retain) const; + Tensor copy(const Tensor & src, bool non_blocking=false, c10::optional to_device={}) const; + + private: + Backend backend_; + ScalarType scalar_type_; +}; + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h new file mode 100644 index 0000000000000000000000000000000000000000..a21f1abbe97f4327005d10be3358076858c3d09d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DeprecatedTypePropertiesRegistry.h @@ -0,0 +1,31 @@ +#pragma once + +// In order to preserve bc, we make DeprecatedTypeProperties instances unique +// just like they are for Type. + +#include +#include + +namespace at { + +class DeprecatedTypeProperties; + +struct TORCH_API DeprecatedTypePropertiesDeleter { + void operator()(DeprecatedTypeProperties * ptr); +}; + +class TORCH_API DeprecatedTypePropertiesRegistry { + public: + DeprecatedTypePropertiesRegistry(); + + DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) const; + +private: + std::unique_ptr registry + [static_cast(Backend::NumOptions)] + [static_cast(ScalarType::NumOptions)]; +}; + +TORCH_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry(); + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h new file mode 100644 index 0000000000000000000000000000000000000000..c4fb44ce0c636be15176e5ee6da82fc93e71f9aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dict.h @@ -0,0 +1,397 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { +struct IValue; +template class Dict; +struct Type; + +namespace impl { + +using valid_dict_key_types = guts::typelist::typelist< + int64_t, + std::string, + double, + c10::complex, + bool, + at::Tensor +>; +} + +namespace detail { + +struct DictKeyHash { + size_t operator()(const IValue& ivalue) const; +}; + +struct DictKeyEqualTo { + bool operator()(const IValue& lhs, const IValue& rhs) const; +}; + +struct DictImpl final : public c10::intrusive_ptr_target { + using dict_map_type = ska_ordered::order_preserving_flat_hash_map; + struct DictElementTypes final { + TypePtr keyType; + TypePtr valueType; + }; + + explicit DictImpl(dict_map_type dict_, DictElementTypes elementTypes_) + : dict(std::move(dict_)) + , elementTypes(std::move(elementTypes_)) {} + dict_map_type dict; + + DictElementTypes elementTypes; + + intrusive_ptr copy() const; + friend TORCH_API bool operator==(const DictImpl& lhs, const DictImpl& rhs); +}; + +} + +namespace impl { +template class DictIterator; + +/** + * A reference to an entry in the Dict. 
+ * Use the `key()` and `value()` methods to read the element. + */ +template +class DictEntryRef final { +public: + explicit DictEntryRef(Iterator iterator) + : iterator_(std::move(iterator)) {} + + decltype(auto) key() const { + return iterator_->first.template to(); + } + + decltype(auto) value() const { + return iterator_->second.template to(); + } + + template + void setValue(Value_&& value) const { + static_assert(std::is_constructible::value, "Wrong type for the value argument of setValue()"); + iterator_->second = Value(std::forward(value)); + } + +private: + // allow copying and moving, but only our friends (i.e. the Dict class) can do + // it. Copying/moving this reference wrapper would be too ambiguous to allow it + // in the public API. + DictEntryRef(const DictEntryRef&) = default; + DictEntryRef& operator=(const DictEntryRef&) = default; + DictEntryRef(DictEntryRef&&) noexcept = default; + DictEntryRef& operator=(DictEntryRef&& rhs) & noexcept = default; + + Iterator iterator_; + friend class DictIterator; + friend class Dict; +}; + +// this wraps map_type::iterator to make sure user code can't rely +// on it being the type of the underlying map. +template +class DictIterator final { +public: + // C++17 friendly std::iterator implementation + using iterator_category = std::forward_iterator_tag; + using value_type = DictEntryRef; + using difference_type = std::ptrdiff_t; + using pointer = value_type*; + using reference = value_type&; + + explicit DictIterator() = default; + ~DictIterator() = default; + + DictIterator(const DictIterator& rhs): entryRef_(rhs.entryRef_) {} + DictIterator(DictIterator&& rhs) noexcept: entryRef_(std::move(rhs.entryRef_)) {} + DictIterator& operator=(const DictIterator& rhs) { + entryRef_ = rhs.entryRef_; + return *this; + } + DictIterator& operator=(DictIterator&& rhs) noexcept { + entryRef_ = std::move(rhs.entryRef_); + return *this; + } + + DictIterator& operator++() { + ++entryRef_.iterator_; + return *this; + } + + DictIterator operator++(int) { + DictIterator copy(*this); + ++*this; + return copy; + } + + const DictEntryRef& operator*() const { + return entryRef_; + } + + const DictEntryRef* operator->() const { + return &entryRef_; + } + + friend difference_type operator-(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.entryRef_.iterator_ - rhs.entryRef_.iterator_; + } + +private: + explicit DictIterator(Iterator iterator): entryRef_(std::move(iterator)) {} + + const Iterator& get_iterator_() const { + return entryRef_.iterator_; + } + + friend bool operator==(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() == rhs.get_iterator_(); + } + + friend bool operator!=(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() != rhs.get_iterator_(); + } + + friend bool operator<(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() < rhs.get_iterator_(); + } + + friend bool operator<=(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() <= rhs.get_iterator_(); + } + + friend bool operator>(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() > rhs.get_iterator_(); + } + + friend bool operator>=(const DictIterator& lhs, const DictIterator& rhs) { + return lhs.get_iterator_() >= rhs.get_iterator_(); + } + + DictEntryRef entryRef_; + + friend class DictIterator; + friend class Dict; +}; + +template Dict toTypedDict(Dict dict); +template Dict toGenericDict(Dict dict); +} + +/** + * An object of this 
class stores a map from Key to Value. + * + * This is a pointer type. After a copy, both Dicts + * will share the same storage: + * + * > Dict a; + * > Dict b = a; + * > b.insert(3, "three"); + * > ASSERT("three" == a.at(3)); + * + * We use this class in the PyTorch kernel API because that + * allows us to do optimizations and switch out the underlying + * map implementation without breaking backwards compatibility + * for the kernel API. + */ +template +class Dict final { +private: + static_assert((std::is_same::value && std::is_same::value) || guts::typelist::contains::value, "Invalid Key type for Dict. We only support int64_t, double, bool, and string."); + + // impl_ stores the underlying map as a ska_ordered::order_preserving_flat_hash_map. + // We intentionally don't offer conversion from/to + // order_preserving_flat_hash_map, return references to it or something like that, + // because such operations would get expensive if we switch out + // the actual map implementation. + // This is an intrusive_ptr because Dict is a pointer type. + // Invariant: This will never be a nullptr, there will always be a valid + // DictImpl. + c10::intrusive_ptr impl_; + + explicit Dict(c10::intrusive_ptr&& impl); + friend struct IValue; + template friend Dict impl::toTypedDict(Dict); + template friend Dict impl::toGenericDict(Dict); + +public: + using key_type = Key; + using mapped_type = Value; + using size_type = typename detail::DictImpl::dict_map_type::size_type; + using iterator = impl::DictIterator; + + /** + * Creates an empty dict. + */ + explicit Dict(); + + /** + * Create a generic dict with runtime type information. + * This only works for c10::impl::GenericDict and is not part of the public API + * but only supposed to be used internally by PyTorch. + */ + explicit Dict(TypePtr keyType, TypePtr valueType); + + ~Dict() = default; + + Dict(const Dict&) = default; + Dict& operator=(const Dict&) = default; + + /** + * Create a new Dict pointing to a deep copy of the same data. + * The Dict returned is a new dict with separate storage. + * Changes in it are not reflected in the original dict or vice versa. + */ + Dict copy() const; + + /** + * Returns an iterator to the first element of the container. + * If the container is empty, the returned iterator will be equal to end(). + */ + iterator begin() const; + + /** + * Returns an iterator to the element following the last element of the container. + * This element acts as a placeholder; attempting to access it results in undefined behavior. + */ + iterator end() const; + + /** + * Checks if the container has no elements. + */ + bool empty() const; + + /** + * Returns the number of elements in the container. + */ + size_type size() const; + + /** + * Erases all elements from the container. After this call, size() returns zero. + * Invalidates any references, pointers, or iterators referring to contained elements. May also invalidate past-the-end iterators. + */ + void clear() const; + + /** + * Inserts element(s) into the container, if the container doesn't already contain an element with an equivalent key. + * May invalidate any references, pointers, or iterators referring to contained elements. + * + * @return A pair consisting of an iterator to the inserted element (or to the element that prevented the insertion) and a bool denoting whether the insertion took place. + */ + template + std::pair insert(Key_&& key, Value_&& value) const; + + /** + * If an element with the given key already exists, it is overwritten with the given value. 
+ * Otherwise, a new element with the given key and value are inserted. + * May invalidate any references, pointers, or iterators referring to contained elements. + * + * @return The bool component is true if the insertion took place and false if the assignment took place. The iterator component is pointing at the element that was inserted or updated. + */ + template + std::pair insert_or_assign(Key_&& key, Value_&& value) const; + + /** + * Removes the element pointed to by iter. + * May invalidate any references, pointers, or iterators referring to contained elements. + * The iterator iter must be valid and dereferenceable. Thus the end() iterator (which is valid, but is not dereferenceable) cannot be used as a value for iter. + */ + void erase(iterator iter) const; + + /** + * Removes the element with the given key, if it exists. + * May invalidate any references, pointers, or iterators referring to contained elements. + * + * @return The number of elements removed. This is either '1' if an element with the key existed, or '0' if it didn't. + */ + C10_NODISCARD size_t erase(const Key& key) const; + + /** + * Returns the mapped value of the element with key equivalent to key. + * If no such element exists, an exception of type std::out_of_range is thrown. + */ + Value at(const Key& key) const; + + /** + * Finds an element with key equivalent to key. + * + * @return Iterator to an element with key equivalent to key. + * If no such element is found, past-the-end (see end()) iterator is returned. + */ + iterator find(const Key& key) const; + + /** + * Checks if there is an element with key equivalent to key in the container. + * + * @return true if there is such an element, otherwise false. + */ + bool contains(const Key& key) const; + + /** + * Increase the capacity so that at least count elements can be stored without + * having to reallocate or rehash. + */ + void reserve(size_type count) const; + + /** + * Value equality comparison. This function implements Python-like semantics for + * equality: two dicts with the same identity (e.g. same pointer) trivially + * compare equal, otherwise each element is compared for equality. + */ + template + friend bool operator==( + const Dict& lhs, + const Dict& rhs); + template + friend bool operator!=( + const Dict& lhs, + const Dict& rhs); + + /** + * Identity comparison. Returns true if and only if `rhs` represents the same + * Dict object as `this`. + */ + bool is(const Dict& rhs) const; + + // private API for now because the return type will change to TypePtr + // instead of optional once types are mandatory. + TypePtr keyType() const; + TypePtr valueType() const; + + // [unsafe set type] + // These functions mutate the tagged type of this dictionary in place. + // There is no checking that the members of the dictionary are instances + // of the new types, nor is there a check that other IValues which + // hold references to this dictionary have the right static type. + // This functionality is used only in the unpickler, where at + // creation type the real type of the dictionary is unknown, but + // then later recovered from the static type information of the + // unpickled object. + void unsafeSetKeyType(TypePtr t); + void unsafeSetValueType(TypePtr t); +}; + +namespace impl { +// GenericDict is how IValue stores dicts. It is, however, not part of the +// public API. Kernels should use Dicts with concrete Key, Value types instead +// (maybe except for some internal prim ops). 
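// A hedged sketch of the typed, user-facing Dict API documented above:
//   c10::Dict<std::string, at::Tensor> dict;
//   dict.insert("weight", at::ones({2, 2}));
//   at::Tensor w = dict.at("weight");
//   for (const auto& entry : dict) { /* entry.key(), entry.value() */ }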
+using GenericDict = Dict; + +} +} + +namespace torch { + template using Dict = c10::Dict; +} + +#include // IWYU pragma: keep diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..69f6791d91cace6637d772b7d65dda769a7c9f4b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Dict_inl.h @@ -0,0 +1,209 @@ +#pragma once + +#include +#include + +namespace c10 { +namespace detail { +inline bool DictKeyEqualTo::operator()(const IValue& lhs, const IValue& rhs) const { + if (lhs.isTensor() && rhs.isTensor()) { + // for tensors, we compare only by identity (following how it's done in Python). + return lhs.is(rhs); + } + // Otherwise, we first compare by identity for efficiency, then by value (see: + // [container equality]) + return _fastEqualsForContainer(lhs, rhs); +} +} + +template decltype(auto) getTypePtr(); +std::string toString(const Type& type); + +namespace impl { + +template +Dict toTypedDict(GenericDict dict) { + TORCH_INTERNAL_ASSERT(*getTypePtr() == *dict.impl_->elementTypes.keyType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr()), ", ", toString(*getTypePtr()), ">. Key types mismatch."); + TORCH_INTERNAL_ASSERT(*getTypePtr() == *dict.impl_->elementTypes.valueType, "Tried to cast a Dict<", toString(*dict.impl_->elementTypes.keyType), ", ", toString(*dict.impl_->elementTypes.valueType) ,"> to a Dict<", toString(*getTypePtr()), ", ", toString(*getTypePtr()), ">. Value types mismatch."); + + return Dict(std::move(dict.impl_)); +} + +template +GenericDict toGenericDict(Dict dict) { + return GenericDict(std::move(dict.impl_)); +} +} + +namespace detail { + +inline size_t DictKeyHash::operator()(const IValue& ivalue) const { + if (ivalue.isInt()) { + return std::hash()(ivalue.toInt()); + } else if (ivalue.isString()) { + return std::hash()(ivalue.toStringView()); + } else if (ivalue.isDouble()) { + return std::hash()(ivalue.toDouble()); + } else if (ivalue.isComplexDouble()) { + return c10::hash>()(ivalue.toComplexDouble()); + } else if (ivalue.isBool()) { + return std::hash()(ivalue.toBool()); + } else if (ivalue.isTensor()) { + return std::hash()(ivalue.toTensor().unsafeGetTensorImpl()); + } else if (ivalue.isDevice()) { + return std::hash()(ivalue.toDevice()); + } else { + throw std::runtime_error( + "Can't hash IValues with tag '" + ivalue.tagKind() + "'"); + } +} + +inline intrusive_ptr DictImpl::copy() const { + return make_intrusive(dict, elementTypes); +} + +} + +template +Dict::Dict() + :Dict(make_intrusive( + detail::DictImpl::dict_map_type(), + detail::DictImpl::DictElementTypes{getTypePtr(), getTypePtr()})) { + static_assert(!std::is_same::value, "This constructor is not valid for Dict. Please use c10::impl::GenericDict(keyType, valueType) instead."); + static_assert(!std::is_same::value, "This constructor is not valid for Dict<_, IValue>. 
Please use c10::impl::GenericDict(keyType, valueType) instead."); +} + +template +Dict::Dict(TypePtr keyType, TypePtr valueType) +: Dict(make_intrusive( + detail::DictImpl::dict_map_type(), + detail::DictImpl::DictElementTypes {std::move(keyType), std::move(valueType)})) { + static_assert(std::is_same::value, "This constructor is only valid for c10::impl::GenericDict."); + static_assert(std::is_same::value, "This constructor is only valid for c10::impl::GenericDict."); +} + +template +Dict::Dict(c10::intrusive_ptr&& impl): impl_(std::move(impl)) {} + +template +Dict Dict::copy() const { + return Dict(impl_->copy()); +} + +template +typename Dict::iterator Dict::begin() const { + return iterator{impl_->dict.begin()}; +} + +template +typename Dict::iterator Dict::end() const { + return iterator{impl_->dict.end()}; +} + +template +bool Dict::empty() const { + return impl_->dict.empty(); +} + +template +typename Dict::size_type Dict::size() const { + return impl_->dict.size(); +} + +template +void Dict::clear() const { + impl_->dict.clear(); +} + +template +template +std::pair::iterator, bool> Dict::insert(Key_&& key, Value_&& value) const { + static_assert(std::is_constructible::value, "Wrong type for the key argument of Dict::insert"); + static_assert(std::is_constructible::value, "Wrong type for the value argument of Dict::insert"); + auto inserted = impl_->dict.insert(std::pair{ + Key(std::forward(key)), + Value(std::forward(value))}); + return {iterator{inserted.first}, inserted.second}; +} + +template +template +std::pair::iterator, bool> Dict::insert_or_assign(Key_&& key, Value_&& value) const { + static_assert(std::is_constructible::value, "Wrong type for the key argument of Dict::insert_or_assign"); + static_assert(std::is_constructible::value, "Wrong type for the value argument of Dict::insert_or_assign"); + auto inserted = impl_->dict.insert_or_assign( + Key(std::forward(key)), + Value(std::forward(value))); + return {iterator{inserted.first}, inserted.second}; +} + +template +void Dict::erase(iterator iter) const { + impl_->dict.erase(iter.entryRef_.iterator_); +} + +template +C10_NODISCARD size_t Dict::erase(const Key& key) const { + return impl_->dict.erase(key); +} + +template +Value Dict::at(const Key& key) const { + return impl_->dict.at(key).template to(); +} + +template +typename Dict::iterator Dict::find(const Key& key) const { + return iterator{impl_->dict.find(key)}; +} + +template +bool Dict::contains(const Key& key) const { + return end() != find(key); +} + +template +void Dict::reserve(size_type count) const { + impl_->dict.reserve(count); +} + +template +TypePtr Dict::keyType() const { + return impl_->elementTypes.keyType; +} + +template +TypePtr Dict::valueType() const { + return impl_->elementTypes.valueType; +} +template +void Dict::unsafeSetKeyType(TypePtr t) { + impl_->elementTypes.keyType = std::move(t); +} + +template +void Dict::unsafeSetValueType(TypePtr t) { + impl_->elementTypes.valueType = std::move(t); +} + +template +bool operator==(const Dict& lhs, const Dict& rhs) { + // Dicts with the same identity trivially compare equal. 
+ if (lhs.impl_ == rhs.impl_) { + return true; + } + + // Otherwise compare the values + return *lhs.impl_ == *rhs.impl_; +} + +template +bool operator!=(const Dict& lhs, const Dict& rhs) { + return !(lhs == rhs); +} + +template +bool Dict::is(const Dict& rhs) const { + return this->impl_ == rhs.impl_; +} +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h new file mode 100644 index 0000000000000000000000000000000000000000..576b9e142ebf17d048262763c888845a7d0386e8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DimVector.h @@ -0,0 +1,13 @@ +#pragma once +#include + +namespace at { + +// Re-declaring 'DimVector' type and size inside 'at' namespace. +// This is done to avoid modifying every use into their 'c10' +// equivalent. + +using c10::kDimVectorStaticSize; +using c10::DimVector; + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..8b399510e94aac8dcbd4aee404fcf333f84db21d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/DistributionsHelper.h @@ -0,0 +1,337 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/** + * Distributions kernel adapted from THRandom.cpp + * The kernels try to follow std::random distributions signature + * For instance: in ATen + * auto gen = at::detail::createCPUGenerator(); + * at::uniform_real_distribution uniform(0, 1); + * auto sample = uniform(gen.get()); + * + * vs std::random + * + * std::mt19937 gen; + * std::uniform_real_distribution uniform(0, 1); + * auto sample = uniform(gen); + */ + + +namespace at { +namespace { + +/** + * Samples a discrete uniform distribution in the range [base, base+range) of type T + */ +template +struct uniform_int_from_to_distribution { + + C10_HOST_DEVICE inline uniform_int_from_to_distribution(uint64_t range, int64_t base) : range_(range), base_(base) {} + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + if (( + std::is_same::value || + std::is_same::value || + std::is_same::value || + std::is_same::value) && range_ >= 1ULL << 32) + { + return transformation::uniform_int_from_to(generator->random64(), range_, base_); + } else { + return transformation::uniform_int_from_to(generator->random(), range_, base_); + } + } + + private: + uint64_t range_; + int64_t base_; +}; + +/** + * Samples a discrete uniform distribution in the range [min_value(int64_t), max_value(int64_t)] + */ +template +struct uniform_int_full_range_distribution { + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + return transformation::uniform_int_full_range(generator->random64()); + } + +}; + +/** + * Samples a discrete uniform distribution in the range [0, max_value(T)] for integral types + * and [0, 2^mantissa] for floating-point types. 
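 *
 * A hedged example, mirroring the illustration at the top of this file:
 *   auto gen = at::detail::createCPUGenerator();
 *   at::uniform_int_distribution<int64_t> dist;
 *   auto sample = dist(gen.get());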
+ */ +template +struct uniform_int_distribution { + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + if constexpr (std::is_same_v || std::is_same_v) { + return transformation::uniform_int(generator->random64()); + } else { + return transformation::uniform_int(generator->random()); + } + } + +}; + +/** + * Samples a uniform distribution in the range [from, to) of type T + */ +template +struct uniform_real_distribution { + + C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) { + TORCH_CHECK_IF_NOT_ON_CUDA(from <= to); + TORCH_CHECK_IF_NOT_ON_CUDA(to - from <= std::numeric_limits::max()); + from_ = from; + to_ = to; + } + + template + C10_HOST_DEVICE inline dist_acctype operator()(RNG generator){ + if constexpr (std::is_same_v) { + return transformation::uniform_real(generator->random64(), from_, to_); + } else { + return transformation::uniform_real(generator->random(), from_, to_); + } + } + + private: + T from_; + T to_; +}; + +// The SFINAE checks introduced in #39816 looks overcomplicated and must revisited +// https://github.com/pytorch/pytorch/issues/40052 +#define DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(member) \ +template \ +struct has_member_##member \ +{ \ + typedef char yes; \ + typedef long no; \ + template static yes test(decltype(&U::member)); \ + template static no test(...); \ + static constexpr bool value = sizeof(test(0)) == sizeof(yes); \ +} + +DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_double_normal_sample); +DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_double_normal_sample); +DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_float_normal_sample); +DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_float_normal_sample); + +#define DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(TYPE) \ + \ +template ::value && \ + has_member_set_next_##TYPE##_normal_sample::value \ + ), int> = 0> \ +C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) { \ + if (generator->next_##TYPE##_normal_sample()) { \ + *ret = *(generator->next_##TYPE##_normal_sample()); \ + generator->set_next_##TYPE##_normal_sample(c10::optional()); \ + return true; \ + } \ + return false; \ +} \ + \ +template ::value || \ + !has_member_set_next_##TYPE##_normal_sample::value \ + ), int> = 0> \ +C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type* /*ret*/) { \ + return false; \ +} \ + \ +template ::value \ + ), int> = 0> \ +C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* generator, ret_type cache) { \ + generator->set_next_##TYPE##_normal_sample(cache); \ +} \ + \ +template ::value \ + ), int> = 0> \ +C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type /*cache*/) { \ +} + +DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(double); +DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float); + +/** + * Samples a normal distribution using the Box-Muller method + * Takes mean and standard deviation as inputs + * Note that Box-muller method returns two samples at a time. + * Hence, we cache the "next" sample in the CPUGeneratorImpl class. 
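 *
 * A hedged sketch (reusing `gen` from the illustration at the top of this file):
 *   at::normal_distribution<double> normal(0.0, 1.0);  // mean 0, stdv 1
 *   auto sample = normal(gen.get());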
+ */ +template +struct normal_distribution { + + C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) { + TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in >= 0, "stdv_in must be positive: ", stdv_in); + mean = mean_in; + stdv = stdv_in; + } + + template + C10_HOST_DEVICE inline dist_acctype operator()(RNG generator){ + dist_acctype ret; + // return cached values if available + if constexpr (std::is_same_v) { + if (maybe_get_next_double_normal_sample(generator, &ret)) { + return transformation::normal(ret, mean, stdv); + } + } else { + if (maybe_get_next_float_normal_sample(generator, &ret)) { + return transformation::normal(ret, mean, stdv); + } + } + // otherwise generate new normal values + uniform_real_distribution uniform(0.0, 1.0); + const dist_acctype u1 = uniform(generator); + const dist_acctype u2 = uniform(generator); + const dist_acctype r = ::sqrt(static_cast(-2.0) * ::log1p(-u2)); + const dist_acctype theta = static_cast(2.0) * c10::pi * u1; + if constexpr (std::is_same_v) { + maybe_set_next_double_normal_sample(generator, r * ::sin(theta)); + } else { + maybe_set_next_float_normal_sample(generator, r * ::sin(theta)); + } + ret = r * ::cos(theta); + return transformation::normal(ret, mean, stdv); + } + + private: + T mean; + T stdv; +}; + +template +struct DiscreteDistributionType { using type = float; }; + +template <> struct DiscreteDistributionType { using type = double; }; + +/** + * Samples a bernoulli distribution given a probability input + */ +template +struct bernoulli_distribution { + + C10_HOST_DEVICE inline bernoulli_distribution(T p_in) { + TORCH_CHECK_IF_NOT_ON_CUDA(p_in >= 0 && p_in <= 1); + p = p_in; + } + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + uniform_real_distribution uniform(0.0, 1.0); + return transformation::bernoulli(uniform(generator), p); + } + + private: + T p; +}; + +/** + * Samples a geometric distribution given a probability input + */ +template +struct geometric_distribution { + + C10_HOST_DEVICE inline geometric_distribution(T p_in) { + TORCH_CHECK_IF_NOT_ON_CUDA(p_in > 0 && p_in < 1); + p = p_in; + } + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + uniform_real_distribution uniform(0.0, 1.0); + return transformation::geometric(uniform(generator), p); + } + + private: + T p; +}; + +/** + * Samples an exponential distribution given a lambda input + */ +template +struct exponential_distribution { + + C10_HOST_DEVICE inline exponential_distribution(T lambda_in) : lambda(lambda_in) {} + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + uniform_real_distribution uniform(0.0, 1.0); + return transformation::exponential(uniform(generator), lambda); + } + + private: + T lambda; +}; + +/** + * Samples a cauchy distribution given median and sigma as inputs + */ +template +struct cauchy_distribution { + + C10_HOST_DEVICE inline cauchy_distribution(T median_in, T sigma_in) : median(median_in), sigma(sigma_in) {} + + template + C10_HOST_DEVICE inline T operator()(RNG generator) { + uniform_real_distribution uniform(0.0, 1.0); + return transformation::cauchy(uniform(generator), median, sigma); + } + + private: + T median; + T sigma; +}; + +/** + * Samples a lognormal distribution + * Takes mean and standard deviation as inputs + * Outputs two samples at a time + */ +template +struct lognormal_distribution { + + C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) { + TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in > 0); + mean = mean_in; + stdv = stdv_in; + } + + template + C10_HOST_DEVICE inline 
T operator()(RNG generator){ + normal_distribution normal(mean, stdv); + return transformation::log_normal(normal(generator)); + } + + private: + T mean; + T stdv; +}; +} +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h new file mode 100644 index 0000000000000000000000000000000000000000..9dcd14e1902ee93a8380378741507e29b95d880f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Formatting.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +#include +#include + +namespace c10 { +TORCH_API std::ostream& operator<<(std::ostream& out, Backend b); +TORCH_API std::ostream& operator<<(std::ostream & out, const Scalar& s); +TORCH_API std::string toString(const Scalar& s); +} +namespace at { + +TORCH_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t); +TORCH_API std::ostream& print( + std::ostream& stream, + const Tensor& tensor, + int64_t linesize); +static inline std::ostream& operator<<(std::ostream & out, const Tensor & t) { + return print(out,t,80); +} +TORCH_API void print(const Tensor & t, int64_t linesize=80); +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h new file mode 100644 index 0000000000000000000000000000000000000000..36f2eac9667fc392ded1d557aa6e6da307658ff3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Generator.h @@ -0,0 +1,191 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +// For the record I don't think this is a correct pimpl idiom. +// Including Impl header in interface header defeats the purpose +// because you can't change Impl private members without forcing +// everything that included the interface to rebuild. +// Impl should be forward-declared in the interface header instead. +#include + +/** + * Note [Generator] + * ~~~~~~~~~~~~~~~~ + * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm to + * generate a seemingly random sequence of numbers, that may be later be used in creating + * a random distribution. Such an engine almost always maintains a state and requires a + * seed to start off the creation of random numbers. Often times, users have + * found it beneficial to be able to explicitly create, retain, and destroy + * PRNG states and also be able to have control over the seed value. + * + * A Generator in ATen gives users the ability to read, write and modify a PRNG engine. + * For instance, it does so by letting users seed a PRNG engine, fork the state of the + * engine, etc. + * + * By default, there is one generator per device, and a device's generator is + * lazily created. A user can use the torch.Generator() api to create their own generator. + */ + +/** + * Note [Acquire lock when using random generators] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Generator and its derived classes are NOT thread-safe. Please note that most of the + * places where we have inserted locking for generators are historically based, and we + * haven't actually checked that everything is truly thread safe (and it probably isn't). + * Please use the public mutex_ when using any methods from these classes, except for the + * read-only methods. 
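 *
 * A minimal illustrative pattern (assuming the default CPU generator):
 * ```
 * auto gen = at::detail::getDefaultCPUGenerator();
 * {
 *   std::lock_guard<std::mutex> lock(gen.mutex());
 *   gen.set_current_seed(42);
 * }
 * ```
 *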
You can learn about the usage by looking into the unittests + * (aten/src/ATen/cpu_generator_test.cpp) and other places where we have used lock_guard. + * + * TODO: Look into changing the threading semantics of Generators in ATen (e.g., making + * them non-thread safe and instead making the generator state splittable, to accommodate + * forks into other threads). + */ + +namespace at { + +class Tensor; + +struct TORCH_API Generator { + Generator() = default; + + explicit Generator(c10::intrusive_ptr gen_impl) + : impl_(std::move(gen_impl)) { + if (impl_.get() == nullptr) { + throw std::runtime_error("GeneratorImpl with nullptr is not supported"); + } + } + + bool operator==(const Generator& rhs) const { + return this->impl_ == rhs.impl_; + } + + bool operator!=(const Generator& rhs) const { + return !((*this) == rhs); + } + + bool defined() const { + return static_cast(impl_); + } + + c10::GeneratorImpl* unsafeGetGeneratorImpl() const { + return impl_.get(); + } + + c10::GeneratorImpl* unsafeReleaseGeneratorImpl() { + return impl_.release(); + } + + const c10::intrusive_ptr& getIntrusivePtr() const { + return impl_; + } + + void set_current_seed(uint64_t seed) { impl_->set_current_seed(seed); } + // Sets the offset of Generator state to the desired offset. This is currently + // supported for only Philox based Generators, i.e., CUDA and MPS. + void set_offset(uint64_t offset) { impl_->set_offset(offset); } + + // Returns the offset of Generator state. This is currently supported for only + // Philox based Generators, i.e., CUDA and MPS. + uint64_t get_offset() const { return impl_->get_offset(); } + + uint64_t current_seed() const { return impl_->current_seed(); } + + uint64_t seed() { return impl_->seed(); } + + // Implementation not inlined to prevent cycle reference between + // `ATen/core/Generator.h` and `ATen/core/Tensor.h` + void set_state(const at::Tensor& new_state); + + at::Tensor get_state() const; + + std::mutex& mutex() { + return impl_->mutex_; + } + + DispatchKeySet key_set() const { + return impl_->key_set(); + } + + Device device() const { return impl_->device(); } + + inline void set_pyobj(PyObject* pyobj) const noexcept { + impl_->set_pyobj(pyobj); + } + + inline PyObject* pyobj() const noexcept { + return impl_->pyobj(); + } + + template + T* get() const { return static_cast(impl_.get()); } + + Generator clone() const { + return Generator(impl_->clone()); + } + + private: + c10::intrusive_ptr impl_; +}; + +template +Generator make_generator(Args&&... args) { + return Generator(c10::make_intrusive(std::forward(args)...)); +} + +/** + * Utility function to static cast input Generator* to + * the backend generator type (CPU/CUDAGeneratorImpl etc.) + */ +template +static inline T * check_generator(c10::optional gen) { + TORCH_CHECK(gen.has_value(), "Expected Generator but received nullopt"); + TORCH_CHECK(gen->defined(), "Generator with undefined implementation is not allowed"); + TORCH_CHECK(T::device_type() == gen->device().type(), "Expected a '", T::device_type(), "' device type for generator but found '", gen->device().type(), "'"); + return gen->get(); +} + +/** + * Utility function used in tensor implementations, which + * supplies the default generator to tensors, if an input generator + * is not supplied. The input Generator* is also static casted to + * the backend generator type (CPU/CUDAGeneratorImpl etc.) 
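 *
 * A hedged sketch of the typical kernel-side call (`gen_opt` is a hypothetical
 * c10::optional<Generator> argument):
 *   auto* cpu_gen = at::get_generator_or_default<at::CPUGeneratorImpl>(
 *       gen_opt, at::detail::getDefaultCPUGenerator());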
+ */ +template +static inline T* get_generator_or_default(const c10::optional& gen, const Generator& default_gen) { + return gen.has_value() && gen->defined() ? check_generator(gen) : check_generator(default_gen); +} + +namespace detail { + +/** + * Helper function for checking the validity of new random generator + * state. Right now following conditions are checked: + * + * - The new state tensor must be a torch.ByteTensor + * - Data of the new state tensor must be contiguous + */ +static inline void check_rng_state(const c10::TensorImpl& new_state) { + TORCH_CHECK_TYPE( + new_state.layout() == kStrided && new_state.device().type() == kCPU && new_state.dtype() == kByte, + "RNG state must be a torch.ByteTensor" + ); + + TORCH_CHECK(new_state.is_contiguous(), "RNG state must be contiguous"); +} + +} // namespace detail + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h new file mode 100644 index 0000000000000000000000000000000000000000..9b84f162a765236c0d362c5b2ddd3fa8cc2495d1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/GeneratorForPrivateuseone.h @@ -0,0 +1,39 @@ +#pragma once + +#include +#include + +namespace at { + +using GeneratorFuncType = std::function; + +c10::optional& GetGeneratorPrivate(); + +class TORCH_API _GeneratorRegister { + public: + explicit _GeneratorRegister(const GeneratorFuncType& func); +}; + +TORCH_API at::Generator GetGeneratorForPrivateuse1( + c10::DeviceIndex device_index); + +/** + * This is used to register Generator to PyTorch for `privateuse1` key. + * + * Usage: REGISTER_GENERATOR_PRIVATEUSE1(MakeGeneratorForPrivateuse1) + * + * class CustomGeneratorImpl : public c10::GeneratorImpl { + * CustomGeneratorImpl(DeviceIndex device_index = -1); + * explicit ~CustomGeneratorImpl() override = default; + * ... + * }; + * + * at::Generator MakeGeneratorForPrivateuse1(c10::DeviceIndex id) { + * return at::make_generator(id); + * } + */ + +#define REGISTER_GENERATOR_PRIVATEUSE1(GeneratorPrivate) \ + static auto temp##GeneratorPrivate = at::_GeneratorRegister(GeneratorPrivate); + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..534272f69b64fd6ecb5d6a562ee2a5fda5691e12 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/IListRef_inl.h @@ -0,0 +1,201 @@ +#pragma once + +#include +#include + +namespace at { +class Tensor; +class OptionalTensorRef; +} + +namespace c10 { +namespace detail { + +/* + * Specializations of `IListRefTagImplBase` that implement the default + * implementation for `IListRefTag::Unboxed`. + */ +template +class IListRefTagImplBase { + public: + using elem_type = ListElemT; + using list_type = ArrayRef; + + /* + * These `unwrap` static methods unwraps the inner containers out + * of `IListRef` (and `IListRefIterator`). They are required when + * the macro `TORCH_ILISTREF_UNWRAP` is called. 
+ */ + static const list_type& unwrap(const IListRef& ilist) { + return ilist.payload_.unboxed; + } + + static typename list_type::const_iterator& unwrap(IListRefIterator& it) { + return it.payload_.unboxed_iterator; + } + + static const typename list_type::const_iterator& unwrap( + const IListRefIterator& it) { + return it.payload_.unboxed_iterator; + } + + /* + * We have these function (besides the `unwrap`s above) because the + * implementation for both `IListRef::operator[]` and `IListRefIterator::operator*` + * weren't syntatically equal for the existing tags at the time + * (`Unboxed` and `Boxed`). + */ + static IListRefConstRef front(const list_type& lst) { + return lst.front(); + } + + static IListRefConstRef iterator_get( + const typename list_type::const_iterator& it) { + return *it; + } +}; + +/* + * Specializations of `IListRefTagImplBase` that implement the default + * implementation for `IListRefTag::Boxed`. + */ +template +class IListRefTagImplBase { + public: + using elem_type = ListElemT; + using list_type = List; + + static const list_type& unwrap(const IListRef& ilist) { + return *ilist.payload_.boxed; + } + + static typename list_type::const_iterator& unwrap(IListRefIterator& it) { + return it.payload_.boxed_iterator; + } + + static const typename list_type::const_iterator& unwrap( + const IListRefIterator& it) { + return it.payload_.boxed_iterator; + } + + static IListRefConstRef front(const list_type& lst) { + return lst[0]; + } + + static IListRefConstRef iterator_get( + const typename list_type::const_iterator& it) { + return (*it).get().toTensor(); + } +}; + +/* + * Specializations of `IListRefTagImplBase` that implement the default + * implementation for `IListRefTag::Materialized`. + */ +template +class IListRefTagImplBase> { + public: + using elem_type = MaterializedIListRefElem; + using list_type = MaterializedIListRef; + + static const list_type& unwrap(const IListRef& ilist) { + return *ilist.payload_.materialized; + } + + static typename list_type::const_iterator& unwrap(IListRefIterator& it) { + return it.payload_.materialized_iterator; + } + + static const typename list_type::const_iterator& unwrap( + const IListRefIterator& it) { + return it.payload_.materialized_iterator; + } + + static IListRefConstRef front(const list_type& lst) { + return lst[0]; + } + + static IListRefConstRef iterator_get( + const typename list_type::const_iterator& it) { + return *it; + } +}; + +/* + * [Note: ITensorListRef] + * Specializations necessary for `IListRef` type. + * + * Since the default implementations are usually done with supporting + * `Tensor` in mind, we only have to inherit from the base implementations. + */ +template <> +class IListRefTagImpl + : public IListRefTagImplBase {}; + +template <> +class IListRefTagImpl + : public IListRefTagImplBase {}; + +template <> +class IListRefTagImpl + : public IListRefTagImplBase< + IListRefTag::Materialized, + at::Tensor, + MaterializedIListRefElem> {}; + +/* + * [Note: IOptTensorListRef] + * Specializations necessary for `IListRef` type. + * + * We can't get an `at::OptionalTensorRef` directly from an instance of + * `List>` (the type that corresponds to the boxed world). + * + * So, the default implementation won't help us. Thus, we have to implement + * this method ourselves. 
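 *
 * From the consumer side (a hedged sketch; `opt_list` is a hypothetical
 * at::IOptTensorListRef), iteration yields at::OptionalTensorRef elements
 * regardless of the underlying tag:
 *   for (at::OptionalTensorRef opt : opt_list) {
 *     if (opt.has_value()) {
 *       const at::Tensor& t = *opt;
 *     }
 *   }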
+ */ +template <> +class IListRefTagImpl + : public IListRefTagImplBase {}; + +template <> +class IListRefTagImpl + : public IListRefTagImplBase> { + + public: + /* + * Given an instance of the types corresponding to the `Boxed` tag, we override + * the default implementation, so that we can return a `at::OptionalTensorRef`. + */ + static IListRefConstRef iterator_get( + const typename list_type::const_iterator& it) { + const auto& ivalue = (*it).get(); + if (!ivalue.isNone()) { + const auto& tensor = ivalue.toTensor(); + return (tensor.defined()) ? tensor : at::OptionalTensorRef{}; + } + return {}; + } +}; + +template <> +class IListRefTagImpl + : public IListRefTagImplBase< + IListRefTag::Materialized, + at::OptionalTensorRef, + MaterializedIListRefElem> {}; + +} // namespace detail +} // namespace c10 + +namespace at { + +// [Note: ITensorListRef] +using ITensorListRef = c10::IListRef; +using ITensorListRefIterator = c10::IListRefIterator; +using MaterializedITensorListRef = c10::detail::MaterializedIListRef; +// [Note: IOptTensorListRef] +using IOptTensorListRef = c10::IListRef; +using IOptTensorListRefIterator = c10::IListRefIterator; +using MaterializedIOptTensorListRef = c10::detail::MaterializedIListRef; + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3efe5e0f7b87b66a1cf22ef1d5fb0f93e2f78494 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h @@ -0,0 +1,111 @@ +#pragma once + +// The legacy mechanism for dispatching operators in ATen is a Type +// object, which is essentially a giant virtual dispatch table +// for every operation we support dynamically dispatching over. +// +// This has been deprecated in favor of ATenDispatch, and in the future, +// c10 dispatcher. +// TODO: Clean up what remains here + +#include + +namespace at { + +// A RAII, thread local (!) guard that will disable dispatch to variable +// handler. +// +// NOTE [ Treating Variables as non-Variables in type dispatch ] +// +// What exactly does AutoDispatchBelowAutograd do? The short answer is, it causes +// dispatches on ATen functions to go to the non-variable implementation, +// bypassing autograd handling (and also profiling and tracing). +// +// To understand why this guard exists, it's helpful to understand the history +// behind how Variable was implemented. Previously, Variables were implemented +// as a wrapper on Tensors; so the act of processing a Variable involved +// unwrapping the underlying Tensor, and then calling the underlying base +// operation on /that/ operation +// +// However, after the Variable/Tensor merge, there is no concept of unwrapping +// a tensor anymore. If you just call the operation on the same variable +// again inside your VariableType handler, you'll dispatch back to +// VariableType, which is not what we want. +// +// The solution to the above problem is to add `at::AutoDispatchBelowAutograd`, which +// when enabled will cause `legacyTensorType()` and `getType()` to always return +// non-Variable type, even if the tensor being called on is a variable. + +/* Note [AutoDispatchBelowAutograd] + * AutoDispatchBelowAutograd is **INTERNAL ONLY** that it should be used + * for kernel implementations and customized C++ kernels. 
+ * If you are looking for a guard to run workload in inference mode, please use + * c10::InferenceMode RAII which is user facing API. + * In the past AutoDispatchBelowAutograd(or its old version AutoNonVariableTypeMode) + * was used in the user code for inference-only workload, this was under risk of + * producing wrong results silently in some edge cases. For example: + * ``` + * torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true); + * torch::Tensor out = s * s; + * { + * at::AutoDispatchBelowAutograd guard; + * s.add_(1); // Skips version bump on `s`. + * } + * // WRONG GRADIENT! s.grad() are now computed using `s` value after the + * // inplace update. + * out.backward(torch::ones_like(out)); + * ``` + * Users should use `c10::InferenceMode` here so that it'll properly throw an + * error saying "one of the variables needed for gradient computation has be modified." + */ +struct TORCH_API AutoDispatchBelowAutograd { + AutoDispatchBelowAutograd() : + autograd_guard_(c10::autograd_dispatch_keyset) { + } + + // disable all autograd dispatch keys + c10::impl::ExcludeDispatchKeyGuard autograd_guard_; +}; + +// TODO: AutoNonVariableTypeMode should be removed in release 1.10. +struct TORCH_API AutoNonVariableTypeMode { + AutoNonVariableTypeMode(bool enabled = true) : + autograd_guard_(c10::autograd_dispatch_keyset) { + TORCH_WARN_ONCE("AutoNonVariableTypeMode is deprecated and will be removed in 1.10 release. " + "For kernel implementations please use AutoDispatchBelowADInplaceOrView instead, " + "If you are looking for a user facing API to enable running your inference-only " + "workload, please use c10::InferenceMode. Using AutoDispatchBelowADInplaceOrView in user code " + "is under risk of producing silent wrong result in some edge cases. " + "See Note [AutoDispatchBelowAutograd] for more details."); + TORCH_INTERNAL_ASSERT(enabled); + } + + // disable all autograd dispatch keys + c10::impl::ExcludeDispatchKeyGuard autograd_guard_; +}; + +struct TORCH_API AutoDispatchSkipFunctionalize { + AutoDispatchSkipFunctionalize() : + dispatch_key_guard_(c10::DispatchKeySet(c10::DispatchKey::Functionalize)) { + } + c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_; +}; + +/* Note [AutoDispatchBelowADInplaceOrView] + * AutoDispatchBelowADInplaceOrView is equivalent to AutoNonVariableTypeMode + * before we split inplace & view ops out of VariableType kernel. + * Note this guard is used in VariableType kernels for functional ops + * as well as ADInplaceOrView kernels for inplace/view ops to enforce the + * Invariant: + * Once you are in VariableType/ADInplaceOrView kernel for an op, + * you never go back to a kernel on same dispatch key until + * you finish the current op. 
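+ *
+ * In practice a VariableType kernel typically instantiates
+ * AutoDispatchBelowADInplaceOrView (defined below) before redispatching, so
+ * the nested call skips the Autograd and ADInplaceOrView keys and goes
+ * straight to the backend kernel for the current op.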
+ */ +struct TORCH_API AutoDispatchBelowADInplaceOrView { + AutoDispatchBelowADInplaceOrView() : + dispatch_key_guard_(c10::autograd_dispatch_keyset_with_ADInplaceOrView) { + } + // disable Autograd & ADInplaceOrView dispatch keys + c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_; +}; +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/List.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/List.h new file mode 100644 index 0000000000000000000000000000000000000000..d1271dadec2ac203b5ebae91d33d8ad7e8da101f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/List.h @@ -0,0 +1,490 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +class Tensor; +} +namespace c10 { +struct IValue; +template class List; +struct Type; + +namespace detail { + +struct ListImpl final : public c10::intrusive_ptr_target { + using list_type = std::vector; + + explicit TORCH_API ListImpl(list_type list_, TypePtr elementType_); + + list_type list; + + TypePtr elementType; + + intrusive_ptr copy() const { + return make_intrusive(list, elementType); + } + friend TORCH_API bool operator==(const ListImpl& lhs, const ListImpl& rhs); +}; +} + +namespace impl { + +template class ListIterator; + +template class ListElementReference; + +template +void swap(ListElementReference&& lhs, ListElementReference&& rhs); + +template +bool operator==(const ListElementReference& lhs, const T& rhs); + +template +bool operator==(const T& lhs, const ListElementReference& rhs); + +template +struct ListElementConstReferenceTraits { + // In the general case, we use IValue::to(). + using const_reference = typename c10::detail::ivalue_to_const_ref_overload_return::type; +}; + +// There is no to() overload for c10::optional. +template<> +struct ListElementConstReferenceTraits> { + using const_reference = c10::optional>; +}; + +template +class ListElementReference final { +public: + operator std::conditional_t< + std::is_reference::type>::value, + const T&, + T>() const; + + ListElementReference& operator=(T&& new_value) &&; + + ListElementReference& operator=(const T& new_value) &&; + + // assigning another ref to this assigns the underlying value + ListElementReference& operator=(ListElementReference&& rhs) && noexcept; + + const IValue& get() const& { + return *iterator_; + } + + friend void swap(ListElementReference&& lhs, ListElementReference&& rhs); + + ListElementReference(const ListElementReference&) = delete; + ListElementReference& operator=(const ListElementReference&) = delete; + +private: + ListElementReference(Iterator iter) + : iterator_(iter) {} + + // allow moving, but only our friends (i.e. the List class) can move us + ListElementReference(ListElementReference&&) noexcept = default; + ListElementReference& operator=(ListElementReference&& rhs) & noexcept { + iterator_ = std::move(rhs.iterator_); + return *this; + } + + friend class List; + friend class ListIterator; + + Iterator iterator_; +}; + +// this wraps vector::iterator to make sure user code can't rely +// on it being the type of the underlying vector. 
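+// Dereferencing a ListIterator yields a ListElementReference proxy (see above)
+// rather than a raw reference into the vector, so element reads and writes go
+// through the List element conversion machinery.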
+template +class ListIterator final { + public: + // C++17 friendly std::iterator implementation + using iterator_category = std::random_access_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using reference = ListElementReference; + + explicit ListIterator() = default; + ~ListIterator() = default; + + ListIterator(const ListIterator&) = default; + ListIterator(ListIterator&&) noexcept = default; + ListIterator& operator=(const ListIterator&) = default; + ListIterator& operator=(ListIterator&&) noexcept = default; + + ListIterator& operator++() { + ++iterator_; + return *this; + } + + ListIterator operator++(int) { + ListIterator copy(*this); + ++*this; + return copy; + } + + ListIterator& operator--() { + --iterator_; + return *this; + } + + ListIterator operator--(int) { + ListIterator copy(*this); + --*this; + return copy; + } + + ListIterator& operator+=(typename List::size_type offset) { + iterator_ += offset; + return *this; + } + + ListIterator& operator-=(typename List::size_type offset) { + iterator_ -= offset; + return *this; + } + + ListIterator operator+(typename List::size_type offset) const { + return ListIterator{iterator_ + offset}; + } + + ListIterator operator-(typename List::size_type offset) const { + return ListIterator{iterator_ - offset}; + } + + friend difference_type operator-(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ - rhs.iterator_; + } + + ListElementReference operator*() const { + return {iterator_}; + } + + ListElementReference operator[](typename List::size_type offset) const { + return {iterator_ + offset}; + } + +private: + explicit ListIterator(Iterator iterator): iterator_(std::move(iterator)) {} + + Iterator iterator_; + + friend bool operator==(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ == rhs.iterator_; + } + + friend bool operator!=(const ListIterator& lhs, const ListIterator& rhs) { + return !(lhs == rhs); + } + + friend bool operator<(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ < rhs.iterator_; + } + + friend bool operator<=(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ <= rhs.iterator_; + } + + friend bool operator>(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ > rhs.iterator_; + } + + friend bool operator>=(const ListIterator& lhs, const ListIterator& rhs) { + return lhs.iterator_ >= rhs.iterator_; + } + + friend class ListIterator; + friend class List; +}; + +template List toTypedList(List list); +template List toList(List&& list); +template List toList(const List& list); +const IValue* ptr_to_first_element(const List& list); +} + +/** + * An object of this class stores a list of values of type T. + * + * This is a pointer type. After a copy, both Lists + * will share the same storage: + * + * > List a; + * > List b = a; + * > b.push_back("three"); + * > ASSERT("three" == a.get(0)); + * + * We use this class in the PyTorch kernel API instead of + * std::vector, because that allows us to do optimizations + * and switch out the underlying list implementation without + * breaking backwards compatibility for the kernel API. + */ +template +class List final { +private: + // This is an intrusive_ptr because List is a pointer type. + // Invariant: This will never be a nullptr, there will always be a valid + // ListImpl. 
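+  // Because copies of a List share this same ListImpl, mutations made through
+  // one copy are observable through every other copy (see the class comment
+  // above); List::copy() is the way to get independent storage.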
+ c10::intrusive_ptr impl_; + + using internal_reference_type = impl::ListElementReference; + using internal_const_reference_type = typename impl::ListElementConstReferenceTraits::const_reference; + +public: + using value_type = T; + using size_type = typename c10::detail::ListImpl::list_type::size_type; + using iterator = impl::ListIterator; + using const_iterator = impl::ListIterator; + using reverse_iterator = impl::ListIterator; + + /** + * Constructs an empty list. + */ + explicit List(); + + /** + * Constructs a list with some initial values. + * Example: + * List a({2, 3, 4}); + */ + List(std::initializer_list initial_values); + explicit List(ArrayRef initial_values); + + /** + * Create a generic list with runtime type information. + * This only works for c10::impl::GenericList and is not part of the public API + * but only supposed to be used internally by PyTorch. + */ + explicit List(TypePtr elementType); + + List(const List&) = default; + List& operator=(const List&) = default; + + /** + * Create a new List pointing to a deep copy of the same data. + * The List returned is a new list with separate storage. + * Changes in it are not reflected in the original list or vice versa. + */ + List copy() const; + + /** + * Returns the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + */ + value_type get(size_type pos) const; + + /** + * Moves out the element at the specified location pos and returns it, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * The list contains an invalid element at position pos afterwards. Any operations + * on it before re-setting it are invalid. + */ + value_type extract(size_type pos) const; + + /** + * Returns a reference to the element at specified location pos, with bounds checking. + * If pos is not within the range of the container, an exception of type std::out_of_range is thrown. + * + * You cannot store the reference, but you can read it and assign new values to it: + * + * List list = ...; + * list[2] = 5; + * int64_t v = list[1]; + */ + internal_const_reference_type operator[](size_type pos) const; + + internal_reference_type operator[](size_type pos); + + /** + * Assigns a new value to the element at location pos. + */ + void set(size_type pos, const value_type& value) const; + + /** + * Assigns a new value to the element at location pos. + */ + void set(size_type pos, value_type&& value) const; + + /** + * Returns an iterator to the first element of the container. + * If the container is empty, the returned iterator will be equal to end(). + */ + iterator begin() const; + + /** + * Returns an iterator to the element following the last element of the container. + * This element acts as a placeholder; attempting to access it results in undefined behavior. + */ + iterator end() const; + + /** + * Checks if the container has no elements. + */ + bool empty() const; + + /** + * Returns the number of elements in the container + */ + size_type size() const; + + /** + * Increase the capacity of the vector to a value that's greater or equal to new_cap. + */ + void reserve(size_type new_cap) const; + + /** + * Erases all elements from the container. After this call, size() returns zero. + * Invalidates any references, pointers, or iterators referring to contained elements. Any past-the-end iterators are also invalidated. 
+ */ + void clear() const; + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + iterator insert(iterator pos, const T& value) const; + + /** + * Inserts value before pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + iterator insert(iterator pos, T&& value) const; + + /** + * Inserts a new element into the container directly before pos. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + template + iterator emplace(iterator pos, Args&&... value) const; + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void push_back(const T& value) const; + + /** + * Appends the given element value to the end of the container. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void push_back(T&& value) const; + + /** + * Appends the given list to the end of the container. Uses at most one memory allocation. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void append(List lst) const; + + /** + * Appends the given element value to the end of the container. + * The new element is constructed with the given arguments. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + template + void emplace_back(Args&&... args) const; + + /** + * Removes the element at pos. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + iterator erase(iterator pos) const; + + /** + * Removes the elements in the range [first, last). + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + iterator erase(iterator first, iterator last) const; + + /** + * Removes the last element of the container. + * Calling pop_back on an empty container is undefined. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void pop_back() const; + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional default-inserted elements are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void resize(size_type count) const; + + /** + * Resizes the container to contain count elements. + * If the current size is less than count, additional copies of value are appended. + * May invalidate any references, pointers, or iterators referring to contained elements. Any past-the-end iterators may also be invalidated. + */ + void resize(size_type count, const T& value) const; + + /** + * Value equality comparison. 
This function implements Python-like semantics for + * equality: two lists with the same identity (e.g. same pointer) trivially + * compare equal, otherwise each element is compared for equality. + */ + template + friend bool operator==(const List& lhs, const List& rhs); + + template + friend bool operator!=(const List& lhs, const List& rhs); + + /** + * Identity comparison. Returns true if and only if `rhs` represents the same + * List object as `this`. + */ + bool is(const List& rhs) const; + + std::vector vec() const; + + /** + * Returns the number of Lists currently pointing to this same list. + * If this is the only instance pointing to this list, returns 1. + */ + // TODO Test use_count + size_t use_count() const; + + TypePtr elementType() const; + + // See [unsafe set type] for why this exists. + void unsafeSetElementType(TypePtr t); + +private: + explicit List(c10::intrusive_ptr&& elements); + explicit List(const c10::intrusive_ptr& elements); + friend struct IValue; + template friend List impl::toTypedList(List); + template friend List impl::toList(List&&); + template friend List impl::toList(const List&); + friend const IValue* impl::ptr_to_first_element(const List& list); +}; + +namespace impl { +// GenericList is how IValue stores lists. It is, however, not part of the +// public API. Kernels should use Lists with concrete types instead +// (maybe except for some internal prim ops). +using GenericList = List; + +const IValue* ptr_to_first_element(const GenericList& list); + +} +} + +namespace torch { + template using List = c10::List; +} + +#include // IWYU pragma: keep diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/List_inl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/List_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..0fb911278a919c65aea95253e5384fe843fe1022 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/List_inl.h @@ -0,0 +1,360 @@ +#pragma once + +#include +#include + +namespace c10 { + +template decltype(auto) getTypePtr(); +std::string toString(const Type& type); + +template +List::List(c10::intrusive_ptr&& elements) +: impl_(std::move(elements)) {} + +template +List::List(const c10::intrusive_ptr& elements) +: impl_(elements) {} + +template +List::List() +: List(make_intrusive( + typename c10::detail::ListImpl::list_type(), + getTypePtr())) { + static_assert(!std::is_same::value, "This constructor is not valid for List. Please use c10::impl::GenericList(elementType) instead."); +} + +template +List::List(ArrayRef values) +: List(make_intrusive( + typename c10::detail::ListImpl::list_type(), + getTypePtr())) { + static_assert(!std::is_same::value, "This constructor is not valid for List. Please use c10::impl::GenericList(elementType)."); + impl_->list.reserve(values.size()); + for (const T& element : values) { + impl_->list.push_back(element); + } +} + +template +List::List(std::initializer_list initial_values) +: List(ArrayRef(initial_values)) { + static_assert(!std::is_same::value, "This constructor is not valid for List. 
Please use c10::impl::GenericList(elementType)."); +} + +template +List::List(TypePtr elementType) +: List(make_intrusive( + typename c10::detail::ListImpl::list_type(), + std::move(elementType))) { + static_assert(std::is_same::value || std::is_same>::value, + "This constructor is only valid for c10::impl::GenericList or List."); +} + +namespace impl { +template +List toTypedList(impl::GenericList list) { + // If there's other instances of the list (i.e. list.use_count() > 1), then we have to be invariant + // because upcasting would allow people to add types into the new list that would break the old list. + // However, if there aren't any other instances of this list (i.e. list.use_count() == 1), then we can + // allow upcasting. This can be a perf improvement since we can cast List to List> + // without having to copy it. This is also used to provide backwards compatibility with some old models + // that serialized the index arguments to aten::index, aten::index_put, aten::index_put_ and aten::index_put_impl_ + // as List before we changed that argument to be List>. When deserializing, we + // have list.use_count() == 1 and can deserialize the List directly as List>. + TORCH_CHECK(*list.impl_->elementType == *getTypePtr() + || (list.use_count() == 1 && list.impl_->elementType->isSubtypeOf(*getTypePtr())) + , "Tried to cast a List<", toString(*list.impl_->elementType), "> to a List<", toString(*getTypePtr()), ">. Types mismatch."); + return List(std::move(list.impl_)); +} + +template +impl::GenericList toList(List&& list) { + return GenericList(std::move(list.impl_)); +} +template +impl::GenericList toList(const List& list) { + return GenericList(list.impl_); +} +} + +template +List List::copy() const { + return List(impl_->copy()); +} + +namespace detail { + template + T list_element_to(T element) { + return element; + } + template + T list_element_to(const IValue& element) { + return element.template to(); + } + template + T list_element_to(IValue&& element) { + return std::move(element).template to(); + } + template + struct ListElementFrom { + static IValue from(const T& element) { + return element; + } + static IValue from(T&& element) { + return std::move(element); + } + }; + template<> + struct ListElementFrom { + static const IValue& from(const IValue& element) { + return element; + } + static IValue&& from(IValue&& element) { + return std::move(element); + } + }; +} + +namespace impl { + +template +ListElementReference::operator std::conditional_t< + std::is_reference::type>::value, + const T&, + T>() const { + return iterator_->template to(); +} + +template +ListElementReference& ListElementReference::operator=(T&& new_value) && { + *iterator_ = c10::detail::ListElementFrom::from(std::move(new_value)); + return *this; +} + +template +ListElementReference& ListElementReference::operator=(const T& new_value) && { + *iterator_ = c10::detail::ListElementFrom::from(new_value); + return *this; +} + +template +ListElementReference& ListElementReference::operator=(ListElementReference&& rhs) && noexcept { + *iterator_ = *rhs.iterator_; + return *this; +} + +template +void swap(ListElementReference&& lhs, ListElementReference&& rhs) { + std::swap(*lhs.iterator_, *rhs.iterator_); +} + +template +bool operator==(const ListElementReference& lhs, const T& rhs) { + const T& lhs_tmp = lhs; + return lhs_tmp == rhs; +} + +template +inline bool operator==(const T& lhs, const ListElementReference& rhs) { + return rhs == lhs; +} + +template +inline typename 
ListElementConstReferenceTraits::const_reference +list_element_to_const_ref(const IValue& element) { + return element.template to(); +} + +template<> +inline typename ListElementConstReferenceTraits>::const_reference +list_element_to_const_ref>(const IValue& element) { + return element.toOptionalStringRef(); +} + +} // namespace impl + +template +void List::set(size_type pos, const value_type& value) const { + impl_->list.at(pos) = c10::detail::ListElementFrom::from(value); +} + +template +void List::set(size_type pos, value_type&& value) const { + impl_->list.at(pos) = c10::detail::ListElementFrom::from(std::move(value)); +} + +template +typename List::value_type List::get(size_type pos) const { + return c10::detail::list_element_to(impl_->list.at(pos)); +} + +template +typename List::internal_const_reference_type List::operator[](size_type pos) const { + return c10::impl::list_element_to_const_ref(impl_->list.at(pos)); +} + +template +typename List::internal_reference_type List::operator[](size_type pos) { + static_cast(impl_->list.at(pos)); // Throw the exception if it is out of range. + return {impl_->list.begin() + static_castlist)::difference_type>(pos)}; +} + +template +typename List::value_type List::extract(size_type pos) const { + auto& elem = impl_->list.at(pos); + auto result = c10::detail::list_element_to(std::move(elem)); + // Reset the list element to a T() instead of None to keep it correctly typed + elem = c10::detail::ListElementFrom::from(T{}); + return result; +} + +template +typename List::iterator List::begin() const { + return iterator(impl_->list.begin()); +} + +template +typename List::iterator List::end() const { + return iterator(impl_->list.end()); +} + +template +bool List::empty() const { + return impl_->list.empty(); +} + +template +typename List::size_type List::size() const { + return impl_->list.size(); +} + +template +void List::reserve(size_type new_cap) const { + impl_->list.reserve(new_cap); +} + +template +void List::clear() const { + impl_->list.clear(); +} + +template +typename List::iterator List::insert(iterator pos, const T& value) const { + return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom::from(value)) }; +} + +template +typename List::iterator List::insert(iterator pos, T&& value) const { + return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom::from(std::move(value))) }; +} + +template +template +typename List::iterator List::emplace(iterator pos, Args&&... value) const { + // TODO Use list_element_from? + return iterator { impl_->list.emplace(pos.iterator_, std::forward(value)...) }; +} + +template +void List::push_back(const T& value) const { + impl_->list.push_back(c10::detail::ListElementFrom::from(value)); +} + +template +void List::push_back(T&& value) const { + impl_->list.push_back(c10::detail::ListElementFrom::from(std::move(value))); +} + +template +void List::append(List b) const { + if (b.use_count() == 1) { + impl_->list.insert(impl_->list.end(), make_move_iterator(b.impl_->list.begin()), make_move_iterator(b.impl_->list.end())); + } else { + impl_->list.insert(impl_->list.end(), b.impl_->list.begin(), b.impl_->list.end()); + } +} + +template +template +void List::emplace_back(Args&&... args) const { + // TODO Use list_element_from? 
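+  // For now this constructs a T from the arguments and pushes it back directly,
+  // relying on the implicit conversion to IValue that the underlying storage
+  // expects (push_back above routes through ListElementFrom for the same purpose).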
+ impl_->list.push_back(T(std::forward(args)...)); +} + +template +typename List::iterator List::erase(iterator pos) const { + return iterator { impl_->list.erase(pos.iterator_) }; +} + +template +typename List::iterator List::erase(iterator first, iterator last) const { + return iterator { impl_->list.erase(first.iterator_, last.iterator_) }; +} + +template +void List::pop_back() const { + impl_->list.pop_back(); +} + +template +void List::resize(size_type count) const { + impl_->list.resize(count, T{}); +} + +template +void List::resize(size_type count, const T& value) const { + impl_->list.resize(count, value); +} + +template +bool operator==(const List& lhs, const List& rhs) { + // Lists with the same identity trivially compare equal. + if (lhs.impl_ == rhs.impl_) { + return true; + } + + // Otherwise, just compare values directly. + return *lhs.impl_ == *rhs.impl_; +} + +template +bool operator!=(const List& lhs, const List& rhs) { + return !(lhs == rhs); +} + +template +bool List::is(const List& rhs) const { + return this->impl_ == rhs.impl_; +} + +template +std::vector List::vec() const { + std::vector result(begin(), end()); + return result; +} + +template +size_t List::use_count() const { + return impl_.use_count(); +} + +template +TypePtr List::elementType() const { + return impl_->elementType; +} + +template +void List::unsafeSetElementType(TypePtr t) { + impl_->elementType = std::move(t); +} + +namespace impl { + +inline const IValue* ptr_to_first_element(const GenericList& list) { + return &list.impl_->list[0]; +} + +} +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h new file mode 100644 index 0000000000000000000000000000000000000000..5bcc4c0c15d967aedc4841052c89d0a18abe6fcd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/MT19937RNGEngine.h @@ -0,0 +1,194 @@ +#pragma once + +#include + +// define constants like M_PI and C keywords for MSVC +#ifdef _MSC_VER +#ifndef _USE_MATH_DEFINES +#define _USE_MATH_DEFINES +#endif +#include +#endif + +#include +#include +#include + +namespace at { + +constexpr int MERSENNE_STATE_N = 624; +constexpr int MERSENNE_STATE_M = 397; +constexpr uint32_t MATRIX_A = 0x9908b0df; +constexpr uint32_t UMASK = 0x80000000; +constexpr uint32_t LMASK = 0x7fffffff; + +/** + * Note [Mt19937 Engine implementation] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Originally implemented in: + * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/MTARCOK/mt19937ar-cok.c + * and modified with C++ constructs. Moreover the state array of the engine + * has been modified to hold 32 bit uints instead of 64 bits. + * + * Note that we reimplemented mt19937 instead of using std::mt19937 because, + * at::mt19937 turns out to be faster in the pytorch codebase. 
PyTorch builds with -O2 + * by default and following are the benchmark numbers (benchmark code can be found at + * https://github.com/syed-ahmed/benchmark-rngs): + * + * with -O2 + * Time to get 100000000 philox randoms with at::uniform_real_distribution = 0.462759s + * Time to get 100000000 at::mt19937 randoms with at::uniform_real_distribution = 0.39628s + * Time to get 100000000 std::mt19937 randoms with std::uniform_real_distribution = 0.352087s + * Time to get 100000000 std::mt19937 randoms with at::uniform_real_distribution = 0.419454s + * + * std::mt19937 is faster when used in conjunction with std::uniform_real_distribution, + * however we can't use std::uniform_real_distribution because of this bug: + * http://open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#2524. Plus, even if we used + * std::uniform_real_distribution and filtered out the 1's, it is a different algorithm + * than what's in pytorch currently and that messes up the tests in tests_distributions.py. + * The other option, using std::mt19937 with at::uniform_real_distribution is a tad bit slower + * than at::mt19937 with at::uniform_real_distribution and hence, we went with the latter. + * + * Copyright notice: + * A C-program for MT19937, with initialization improved 2002/2/10. + * Coded by Takuji Nishimura and Makoto Matsumoto. + * This is a faster version by taking Shawn Cokus's optimization, + * Matthe Bellew's simplification, Isaku Wada's real version. + * + * Before using, initialize the state by using init_genrand(seed) + * or init_by_array(init_key, key_length). + * + * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Any feedback is very welcome. + * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html + * email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space) + */ + +/** + * mt19937_data_pod is used to get POD data in and out + * of mt19937_engine. Used in torch.get_rng_state and + * torch.set_rng_state functions. 
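+ *
+ * Its fields mirror the engine state below: the original seed, the number of
+ * values left before the next state twist (left_), whether the engine has been
+ * seeded, the index of the next value to hand out (next_), and the
+ * MERSENNE_STATE_N (624) element state array itself.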
+ */ +struct mt19937_data_pod { + uint64_t seed_; + int left_; + bool seeded_; + uint32_t next_; + std::array state_; +}; + +class mt19937_engine { +public: + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + inline explicit mt19937_engine(uint64_t seed = 5489) { + init_with_uint32(seed); + } + + inline mt19937_data_pod data() const { + return data_; + } + + inline void set_data(const mt19937_data_pod& data) { + data_ = data; + } + + inline uint64_t seed() const { + return data_.seed_; + } + + inline bool is_valid() { + if ((data_.seeded_ == true) + && (data_.left_ > 0 && data_.left_ <= MERSENNE_STATE_N) + && (data_.next_ <= MERSENNE_STATE_N)) { + return true; + } + return false; + } + + inline uint32_t operator()() { + if (--(data_.left_) == 0) { + next_state(); + } + uint32_t y = *(data_.state_.data() + data_.next_++); + y ^= (y >> 11); + y ^= (y << 7) & 0x9d2c5680; + y ^= (y << 15) & 0xefc60000; + y ^= (y >> 18); + + return y; + } + +private: + mt19937_data_pod data_; + + inline void init_with_uint32(uint64_t seed) { + data_.seed_ = seed; + data_.seeded_ = true; + data_.state_[0] = seed & 0xffffffff; + for (const auto j : c10::irange(1, MERSENNE_STATE_N)) { + data_.state_[j] = (1812433253 * (data_.state_[j-1] ^ (data_.state_[j-1] >> 30)) + j); + } + data_.left_ = 1; + data_.next_ = 0; + } + + inline uint32_t mix_bits(uint32_t u, uint32_t v) { + return (u & UMASK) | (v & LMASK); + } + + inline uint32_t twist(uint32_t u, uint32_t v) { + return (mix_bits(u,v) >> 1) ^ (v & 1 ? MATRIX_A : 0); + } + + inline void next_state() { + uint32_t* p = data_.state_.data(); + data_.left_ = MERSENNE_STATE_N; + data_.next_ = 0; + + for(int j = MERSENNE_STATE_N - MERSENNE_STATE_M + 1; --j; p++) { + *p = p[MERSENNE_STATE_M] ^ twist(p[0], p[1]); + } + + for(int j = MERSENNE_STATE_M; --j; p++) { + *p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], p[1]); + } + + *p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], data_.state_[0]); + } + +}; + +typedef mt19937_engine mt19937; + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h new file mode 100644 index 0000000000000000000000000000000000000000..73a0d7d02551b40b37a4632034c9d555e3dac088 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/NamedTensor.h @@ -0,0 +1,140 @@ +#pragma once + +#include +#include +#include + +namespace at { + +class TensorBase; + +// XXX: This file exists because TensorImpl is in c10, but Dimname is in ATen. +// Due to the c10/ATen library split, TensorImpl cannot depend on Dimname, +// so we have a couple of workarounds. +// +// In the long term, we'll move Dimname to c10 and everything in this file +// can be refactored out. The main blocker for that is that "c10::Symbol" +// actually exists outside of c10 and needs to be moved in. + +// TensorImpl has a unique_ptr field. +// XXX: Ideally we would just put optional> into TensorImpl. 
+// +// This class has an important invariant: there must be at least ONE +// non-wildcard +struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface { + // This enum is to remind people that the invariant on constructors is that + // the list of dimnames must have at least one non-wildcard + enum HAS_NON_WILDCARD { + HasNonWildcard + }; + + explicit NamedTensorMeta(HAS_NON_WILDCARD, DimnameList names) + : names_(names.vec()) { + check_invariants(); + } + explicit NamedTensorMeta(HAS_NON_WILDCARD, std::vector&& names) + : names_(std::move(names)) { + check_invariants(); + } + + std::unique_ptr clone() const override { + return std::make_unique(HasNonWildcard, names_); + } + + DimnameList names() const { return names_; } + + // Used for an assertion in TensorImpl.h + int64_t slow_dim() const override { + return names_.size(); + } + + void check_invariants() const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + std::any_of(names_.begin(), names_.end(), [](const Dimname& n) { return !n.isWildcard(); })); + } + + void set_names(HAS_NON_WILDCARD, DimnameList new_names) { + TORCH_INTERNAL_ASSERT(new_names.size() == names_.size()); + std::copy(new_names.begin(), new_names.end(), names_.begin()); + check_invariants(); + } + + void set_names(HAS_NON_WILDCARD, std::vector&& new_names) { + TORCH_INTERNAL_ASSERT(new_names.size() == names_.size()); + names_ = std::move(new_names); + check_invariants(); + } + + // INVARIANT: at least one Dimname is non-WILDCARD + std::vector names_; +}; + +// When NamesMode is disabled, then all operations ignore tensors' names fields. +// Concretely speaking, all tensors are treated as having nullopt names. +struct TORCH_API NamesMode { + static bool is_enabled(); + static void set_enabled(bool enabled); +}; + + +// A RAII, thread local (!) guard that enables or disables names upon +// construction, and sets it back to the original value upon destruction. +struct TORCH_API NoNamesGuard { + NoNamesGuard() : prev_mode(NamesMode::is_enabled()), initialized(true) { + NamesMode::set_enabled(false); + } + ~NoNamesGuard() { + if (initialized) { + reset(); + } + } + void reset() { + TORCH_INTERNAL_ASSERT(initialized); + NamesMode::set_enabled(prev_mode); + } + private: + bool prev_mode; + bool initialized; +}; + +void check_names_valid_for(const TensorBase& tensor, DimnameList names); +void check_names_valid_for(size_t tensor_dim, DimnameList names); + +// Sets the names of `tensor` to be `names`. +TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, c10::optional names); +TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector&& names, bool validate_names); + +constexpr size_t kMaxNamedTensorDim = 64; + +DimnameList default_names(size_t len); + +namespace impl { + +// Some helper functions on TensorImpl. Useful for working with names in TH. +// XXX: Ideally these would exist as methods on TensorImpl +TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional names, bool validate_names); +TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector&& names, bool validate_names); + +void check_names_valid_for(TensorImpl* impl, DimnameList names); + +// Returns true if the tensor's names exist and are not all 'None'. +// Returns false if the tensor's names don't exist (were not allocated), +// or if all names are 'None'. +// We treat not-allocated-names the same as allocated names that are all 'None'. 
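+// For example, a freshly created unnamed tensor and a tensor whose names were
+// reset to all-wildcard both report has_names == false; only a tensor with at
+// least one real dimension name reports true.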
+TORCH_API bool has_names(const TensorImpl* impl); + +// Returns the names of the tensor's dimensions. +// Unnamed tensors are treated as having 'None' in all dimension; this method +// would return a DimnameList of all 'None's for an unnamed tensor. +TORCH_API DimnameList get_names(const TensorImpl* impl); + +// This is more of an implementation detail; one should use impl::get_names / +// Tensor::names() whenever possible because it provides a cleaner API. +// Returns the names of the tensor if they have been allocated; returns nullopt +// instead if the haven't been. The names of a tensor are not allocated if a +// tensor is constructed with names=None. +TORCH_API c10::optional get_opt_names(const TensorImpl* impl); + +} // namespace impl + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PhiloxRNGEngine.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PhiloxRNGEngine.h new file mode 100644 index 0000000000000000000000000000000000000000..ebcd1228061e27368eb6acc98b45afb16ac8c567 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PhiloxRNGEngine.h @@ -0,0 +1,242 @@ +#pragma once + +// define constants like M_PI and C keywords for MSVC +#ifdef _MSC_VER +#define _USE_MATH_DEFINES +#include +#endif + + +#ifdef __CUDACC__ +#include +#endif + +#include +#include +#include +#include +#include +#include + +namespace at { + +// typedefs for holding vector data +namespace detail { + +typedef at::detail::Array UINT4; +typedef at::detail::Array UINT2; +typedef at::detail::Array DOUBLE2; +typedef at::detail::Array FLOAT2; + +} // namespace detail + +/** + * Note [Philox Engine implementation] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Originally implemented in PyTorch's fusion compiler + * Refer to: http://www.thesalmons.org/john/random123/papers/random123sc11.pdf + * for details regarding the engine. + * + * Note that currently this implementation of the philox engine is not used + * anywhere except for tests in cpu_generator_test.cpp. However, this engine + * will replace curandStatePhilox4_32_10_t in the future. + * + * The philox engine takes a seed value, a subsequeunce + * for starting the generation and an offset for the subsequence. + * Think of this engine as an algorithm producing a huge array. We are + * parallelizing this array by partitioning the huge array and assigning + * a thread index to each partition. In other words, each seed value + * (there are 2^64 possible seed values) gives a sub array of size + * 2^128 (each element in that array is a 128 bit number). Reasoning + * behind the array being of size 2^128 is, there are 2^64 possible + * thread index value and there is an array of size 2^64 for each of + * those thread index. Hence 2^64 * 2^64 = 2^128 for each seed value. + * + * In short, this generator can produce 2^64 (seed values) * 2^128 (number + * of elements in an array given by a seed value) = 2^192 values. + * + * Arguments: + * seed: Seed values could be any number from 0 to 2^64-1. + * subsequence: Subsequence is just the cuda thread indexing with: + * - blockIdx.x * blockDim.x + threadIdx.x + * offset: The offset variable in PhiloxEngine decides how many 128-bit + * random numbers to skip (i.e. how many groups of 4, 32-bit numbers to skip) + * and hence really decides the total number of randoms that can be achieved + * for the given subsequence. 
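+ *
+ * Internally this state is a 128-bit counter (counter_[0..3]) plus a 64-bit
+ * key (key_[0..1]): reset_state() below stores the seed in the key and the
+ * subsequence in counter_[2..3], and the offset advances counter_[0..1] via
+ * incr_n().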
+ */ + +class philox_engine { +public: + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + C10_HOST_DEVICE inline explicit philox_engine(uint64_t seed = 67280421310721, + uint64_t subsequence = 0, + uint64_t offset = 0) { + + reset_state(seed, subsequence); + incr_n(offset); + } + + C10_HOST_DEVICE inline void reset_state(uint64_t seed = 67280421310721, + uint64_t subsequence = 0) { + key_[0] = static_cast(seed); + key_[1] = static_cast(seed >> 32); + counter_ = detail::UINT4(0); + counter_[2] = static_cast(subsequence); + counter_[3] = static_cast(subsequence >> 32); + STATE = 0; + } + + /** + * Set the offset field of Philox Generator to the desired offset. + */ + C10_HOST_DEVICE inline void set_offset(uint64_t offset) { + counter_[0] = static_cast(offset); + counter_[1] = static_cast(offset >> 32); + } + + /** + * Gets the current offset of the Philox Generator. + */ + C10_HOST_DEVICE uint64_t get_offset() const { + uint64_t lo = static_cast(counter_[0]); + uint64_t hi = static_cast(counter_[1]) << 32; + return lo | hi; + } + + /** + * Produces a unique 32-bit pseudo random number on every invocation. Bookeeps state to avoid waste. + */ + C10_HOST_DEVICE inline uint32_t operator()(int32_t n_rounds = 10) { // 10 here to preserve back-compat behavior + if(STATE == 0) { + detail::UINT4 counter = counter_; + detail::UINT2 key = key_; + output_ = rand(counter, key, n_rounds); + incr(); + } + uint32_t ret = output_[static_cast(STATE)]; + STATE = (STATE + 1) & 3; + return ret; + } + + inline float randn(uint32_t n_rounds) { + #ifdef __CUDA_ARCH__ + AT_ASSERT(false, "Unsupported invocation of randn on CUDA"); + #endif + if(STATE == 0) { + detail::UINT4 counter = counter_; + detail::UINT2 key = key_; + output_ = rand(counter, key, n_rounds); + incr(); + } + // TODO(min-jean-cho) change to Polar method, a more efficient version of Box-Muller method + // TODO(voz) We use std:: below, and thus need a separate impl for CUDA. + float u1 = 1 - uint32_to_uniform_float(output_[0]); // uint32_to_uniform_float returns [0,1), we need (0,1] to avoid passing 0 to log. 
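+    // Box-Muller transform: with u1, u2 uniform on (0,1],
+    // sqrt(-2*ln(u1)) * cos(2*pi*u2) is a standard normal sample; the paired
+    // sample sqrt(-2*ln(u1)) * sin(2*pi*u2) is simply not used here.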
+ float u2 = 1 - uint32_to_uniform_float(output_[1]); + return static_cast(std::sqrt(-2.0 * std::log(u1)) * std::cos(2.0 * M_PI * u2)); + } + + /** + * Function that Skips N 128 bit numbers in a subsequence + */ + C10_HOST_DEVICE inline void incr_n(uint64_t n) { + uint32_t nlo = static_cast(n); + uint32_t nhi = static_cast(n >> 32); + counter_[0] += nlo; + // if overflow in x has occurred, carry over to nhi + if (counter_[0] < nlo) { + nhi++; + // if overflow in nhi has occurred during carry over, + // propagate that overflow to y and exit to increment z + // otherwise return + counter_[1] += nhi; + if(nhi != 0) { + if (nhi <= counter_[1]) { + return; + } + } + } else { + // if overflow in y has occurred during addition, + // exit to increment z + // otherwise return + counter_[1] += nhi; + if (nhi <= counter_[1]) { + return; + } + } + if (++counter_[2]) + return; + ++counter_[3]; + } + + /** + * Function that Skips one 128 bit number in a subsequence + */ + C10_HOST_DEVICE inline void incr() { + if (++counter_[0]) + return; + if (++counter_[1]) + return; + if (++counter_[2]) { + return; + } + ++counter_[3]; + } + +private: + detail::UINT4 counter_; + detail::UINT4 output_; + detail::UINT2 key_; + uint32_t STATE; + + C10_HOST_DEVICE inline uint32_t mulhilo32(uint32_t a, uint32_t b, + uint32_t *result_high) { + #ifdef __CUDA_ARCH__ + *result_high = __umulhi(a, b); + return a*b; + #else + const uint64_t product = static_cast(a) * b; + *result_high = static_cast(product >> 32); + return static_cast(product); + #endif + } + + C10_HOST_DEVICE inline detail::UINT4 single_round(detail::UINT4 ctr, detail::UINT2 in_key) { + uint32_t hi0 = 0; + uint32_t hi1 = 0; + uint32_t lo0 = mulhilo32(kPhiloxSA, ctr[0], &hi0); + uint32_t lo1 = mulhilo32(kPhiloxSB, ctr[2], &hi1); + detail::UINT4 ret; + ret[0] = hi1 ^ ctr[1] ^ in_key[0]; + ret[1] = lo1; + ret[2] = hi0 ^ ctr[3] ^ in_key[1]; + ret[3] = lo0; + return ret; + } + + C10_HOST_DEVICE constexpr float uint32_to_uniform_float(uint32_t value) { + // maximum value such that `MAX_INT * scale < 1.0` (with float rounding) + constexpr float scale = 4.6566127342e-10; + return static_cast(value & 0x7FFFFFFF) * scale; + } + + + + C10_HOST_DEVICE inline detail::UINT4 rand(detail::UINT4& counter, detail::UINT2& key, uint32_t n_rounds) { + for (uint32_t round = 0; round < (n_rounds - 1); round++) { + counter = single_round(counter, key); + key[0] += (kPhilox10A); key[1] += (kPhilox10B); + } + return single_round(counter, key); + } + + + static const uint32_t kPhilox10A = 0x9E3779B9; + static const uint32_t kPhilox10B = 0xBB67AE85; + static const uint32_t kPhiloxSA = 0xD2511F53; + static const uint32_t kPhiloxSB = 0xCD9E8D57; +}; + +typedef philox_engine Philox4_32; + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h new file mode 100644 index 0000000000000000000000000000000000000000..f38bdd2ada90a99a231df4193a72c8bd44e699d9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PythonFallbackKernel.h @@ -0,0 +1,28 @@ +#pragma once +#include + +namespace at { +namespace impl { + +struct TORCH_API RestorePythonTLSSnapshot { + RestorePythonTLSSnapshot(); + ~RestorePythonTLSSnapshot(); + +private: + c10::impl::LocalDispatchKeySet saved_; + c10::impl::ForceDispatchKeyGuard guard_; +}; + + +// RAII guard to make working with the above TLS safer. 
+struct TORCH_API MaybeSetTLSOnEntryGuard { +public: + MaybeSetTLSOnEntryGuard(); + ~MaybeSetTLSOnEntryGuard(); + +private: + bool value_set_; +}; + +} // namespace impl +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h new file mode 100644 index 0000000000000000000000000000000000000000..b1a2b30685f30ee67b8ed5ac4b64e1ef6c838a3b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/PythonOpRegistrationTrampoline.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +// TODO: this can probably live in c10 + +namespace at { +namespace impl { + +class TORCH_API PythonOpRegistrationTrampoline final { + static std::atomic interpreter_; + +public: + // Returns true if you successfully registered yourself (that means + // you are in the hot seat for doing the operator registrations!) + static bool registerInterpreter(c10::impl::PyInterpreter*); + + // Returns nullptr if no interpreter has been registered yet. + static c10::impl::PyInterpreter* getInterpreter(); +}; + +} // namespace impl +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h new file mode 100644 index 0000000000000000000000000000000000000000..b6031f0d7798300f36798e6357f278056326f3a0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h @@ -0,0 +1,83 @@ +#pragma once + +#include +#include +#include + +namespace at { + +class Tensor; +struct QTensorImpl; +struct Quantizer; +using ConstQuantizerPtr = const c10::intrusive_ptr&; +using QuantizerPtr = c10::intrusive_ptr; + +/** + * Quantizer is the class for storing all the information + * that's necessary to perform quantize and dequantize + * operation. + * + * We might have different types of quantization schemes and this is + * the base class for all quantizers. + * + * QTensorImpl will hold a pointer to Quantizer so that we can support + * different quantization schemes on Tensor. + * + * For example, the most common quantization scheme, Affine Quantization, + * requires scale and zero_point as parameters, we'll store scale and zero_point + * inside the instance and we can use it to quantize a float Tensor or + * dequantize a quantized Tensor. + * + * When you add new types of leaf Quantizer class, please also + * make sure to add a corresponding QScheme enum since + * they should have one to one mapping. + * + * Note about intrusive_ptr: + * Quantized Tensor holds an intrusive_ptr to Quantizer, and multiple Tensor can + * share the same Quantizer. Quantizer should be immutable. + */ +struct TORCH_API Quantizer : public c10::intrusive_ptr_target { + const ScalarType scalar_type_; + explicit Quantizer(ScalarType scalar_type) : scalar_type_(scalar_type) {} + ~Quantizer() override; + + // Copied from torch/csrc/jit/ir/scope.h + QuantizerPtr intrusive_from_this() { + c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer + // from a raw `this` pointer + // so we need to bump the refcount + // to account for this ownership + return c10::intrusive_ptr::reclaim(this); + } + + /** + * Each concrete Quantizer type should have a unique QScheme type. 
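+   * For example, a per-tensor affine quantizer would report kPerTensorAffine
+   * here and a per-channel affine quantizer kPerChannelAffine.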
+ */ + virtual QScheme qscheme() const = 0; + + ScalarType scalar_type() const { + return scalar_type_; + } + + /** + * quantize a float Tensor into a quantized Tensor. + */ + virtual Tensor quantize(const Tensor& t) = 0; + + /** + * dequantize a quantized Tensor into a float Tensor. + */ + virtual Tensor dequantize(const Tensor& t) = 0; + + /** + * dequantize a quantized Tensor into a float Tensor, out= variant + */ + virtual Tensor& dequantize_out(Tensor& out, const Tensor& t) = 0; + + /** + * Compare against `other` for equality. + */ + virtual bool equalTo(QuantizerPtr other) const = 0; +}; + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Range.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Range.h new file mode 100644 index 0000000000000000000000000000000000000000..2bf6b2b73ac4d4c178ac0388e9b45e262e506b86 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Range.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include + +namespace at { + +struct Range { + Range(int64_t begin, int64_t end) + : begin(begin) + , end(end) {} + + int64_t size() const { return end - begin; } + + Range operator/(int64_t divisor) { + return Range(begin / divisor, end / divisor); + } + + int64_t begin; + int64_t end; +}; + +std::ostream& operator<<(std::ostream& out, const Range& range); + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..23c6ea3cabefb2ee72f4aea61af4b10f4e769975 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Reduction.h @@ -0,0 +1,16 @@ +#pragma once + +namespace at { +namespace Reduction { + +// NB: Keep this in sync with Reduction class in torch/nn/_reduction.py +// These constants control the reduction behavior of loss functions. 
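+// Loss functions take the reduction as a plain int64_t argument that is
+// compared against these values (Reduction::Mean is the usual default).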
+// Ideally, this would be a scoped enum, but jit doesn't support that +enum Reduction { + None, // Do not reduce + Mean, // (Possibly weighted) mean of losses + Sum, // Sum losses + END +}; +} // namespace Reduction +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h new file mode 100644 index 0000000000000000000000000000000000000000..a14b48f0120cbfc23c45db14dd363b0c88c59a2c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Scalar.h @@ -0,0 +1 @@ +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h new file mode 100644 index 0000000000000000000000000000000000000000..eb30ee86f737a3544e727c42f72147fdb64a5e3b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ScalarType.h @@ -0,0 +1 @@ +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..d20ab49d42783a9a3d61808d35b2e7820f10256b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Tensor.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include + +namespace at { +class TORCH_API OptionalTensorRef { + public: + OptionalTensorRef() = default; + + ~OptionalTensorRef() { + ref_.unsafeReleaseTensorImpl(); + } + + OptionalTensorRef(const TensorBase& src) + : ref_(Tensor::unsafe_borrow_t{}, src) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(src.defined()); + } + + OptionalTensorRef(const OptionalTensorRef& rhs) + : ref_(Tensor::unsafe_borrow_t{}, rhs.ref_) {} + + OptionalTensorRef& operator=(OptionalTensorRef rhs) { + std::swap(ref_, rhs.ref_); + return *this; + } + + bool has_value() const { + return ref_.defined(); + } + + const Tensor& getTensorRef() const & { + return ref_; + } + + const Tensor& operator*() const & { + return ref_; + } + + const Tensor* operator->() const & { + return &ref_; + } + + operator bool() const { + return ref_.defined(); + } + + private: + Tensor ref_; +}; + +// Use to convert a TensorBase (that may be undefined) to an at::Tensor +// without bumping refcount. 
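+// Tensor::register_hook below uses this to hand the incoming TensorBase
+// gradient to user hooks that expect a Tensor, without any refcount traffic.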
+class TORCH_API TensorRef { + public: + ~TensorRef() { + ref_.unsafeReleaseTensorImpl(); + } + + TensorRef(const TensorBase& src) + : ref_(Tensor::unsafe_borrow_t{}, src) {} + + const Tensor& operator*() const & { + return ref_; + } + private: + Tensor ref_; +}; + +template +auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_void_t { + // Return the grad argument in case of a hook with void return type to have an + // std::function with Tensor return type + static_assert(std::is_same::value, + "Expected hook to return void"); + return _register_hook([fn=std::forward(hook)](const TensorBase& grad_base) { + TensorRef grad(grad_base); + fn(*grad); + return Tensor(); + }); +} + +template +auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_var_t { + return _register_hook([fn=std::forward(hook)](const TensorBase& grad_base) { + TensorRef grad(grad_base); + Tensor ret = fn(*grad); + return TensorBase(std::move(ret)); + }); +} + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h new file mode 100644 index 0000000000000000000000000000000000000000..7440376d0e976f3442f1068411c476dbc451cc33 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorAccessor.h @@ -0,0 +1,276 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace at { + +// The PtrTraits argument to the TensorAccessor/GenericPackedTensorAccessor +// is used to enable the __restrict__ keyword/modifier for the data +// passed to cuda. +template +struct DefaultPtrTraits { + typedef T* PtrType; +}; + +#if defined(__CUDACC__) || defined(__HIPCC__) +template +struct RestrictPtrTraits { + typedef T* __restrict__ PtrType; +}; +#endif + +// TensorAccessorBase and TensorAccessor are used for both CPU and CUDA tensors. +// For CUDA tensors it is used in device code (only). This means that we restrict ourselves +// to functions and types available there (e.g. IntArrayRef isn't). + +// The PtrTraits argument is only relevant to cuda to support `__restrict__` pointers. +template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +class TensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + + C10_HOST_DEVICE TensorAccessorBase( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : data_(data_), sizes_(sizes_), strides_(strides_) {} + C10_HOST IntArrayRef sizes() const { + return IntArrayRef(sizes_,N); + } + C10_HOST IntArrayRef strides() const { + return IntArrayRef(strides_,N); + } + C10_HOST_DEVICE index_t stride(index_t i) const { + return strides_[i]; + } + C10_HOST_DEVICE index_t size(index_t i) const { + return sizes_[i]; + } + C10_HOST_DEVICE PtrType data() { + return data_; + } + C10_HOST_DEVICE const PtrType data() const { + return data_; + } +protected: + PtrType data_; + const index_t* sizes_; + const index_t* strides_; +}; + +// The `TensorAccessor` is typically instantiated for CPU `Tensor`s using +// `Tensor.accessor()`. +// For CUDA `Tensor`s, `GenericPackedTensorAccessor` is used on the host and only +// indexing on the device uses `TensorAccessor`s. 
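+// Illustrative host-side usage (a sketch; `t` stands for any 2-D CPU float tensor):
+//   auto a = t.accessor<float, 2>();  // dtype and dimensionality checked once here
+//   float v = a[1][2];                // afterwards, plain strided indexing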
+template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +class TensorAccessor : public TensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + + C10_HOST_DEVICE TensorAccessor( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : TensorAccessorBase(data_,sizes_,strides_) {} + + C10_HOST_DEVICE TensorAccessor operator[](index_t i) { + return TensorAccessor(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1); + } + + C10_HOST_DEVICE const TensorAccessor operator[](index_t i) const { + return TensorAccessor(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1); + } +}; + +template class PtrTraits, typename index_t> +class TensorAccessor : public TensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + + C10_HOST_DEVICE TensorAccessor( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : TensorAccessorBase(data_,sizes_,strides_) {} + C10_HOST_DEVICE T & operator[](index_t i) { + // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) + return this->data_[this->strides_[0]*i]; + } + C10_HOST_DEVICE const T & operator[](index_t i) const { + return this->data_[this->strides_[0]*i]; + } +}; + + +// GenericPackedTensorAccessorBase and GenericPackedTensorAccessor are used on for CUDA `Tensor`s on the host +// and as +// In contrast to `TensorAccessor`s, they copy the strides and sizes on instantiation (on the host) +// in order to transfer them on the device when calling kernels. +// On the device, indexing of multidimensional tensors gives to `TensorAccessor`s. +// Use RestrictPtrTraits as PtrTraits if you want the tensor's data pointer to be marked as __restrict__. +// Instantiation from data, sizes, strides is only needed on the host and std::copy isn't available +// on the device, so those functions are host only. 
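+//
+// Illustrative sketch (editor's addition; assumes a CUDA build, a 2-D float
+// GPU tensor `t`, and a made-up kernel name `scale_kernel`):
+//
+//   __global__ void scale_kernel(at::PackedTensorAccessor32<float, 2> acc, float s) {
+//     const int i = blockIdx.x;
+//     const int j = threadIdx.x;
+//     if (i < acc.size(0) && j < acc.size(1)) {
+//       acc[i][j] *= s;                     // device-side indexing
+//     }
+//   }
+//
+//   // Host side: the accessor is passed by value, carrying sizes/strides along.
+//   // scale_kernel<<<t.size(0), t.size(1)>>>(t.packed_accessor32<float, 2>(), 2.0f);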
+template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +class GenericPackedTensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + C10_HOST GenericPackedTensorAccessorBase( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : data_(data_) { + std::copy(sizes_, sizes_ + N, std::begin(this->sizes_)); + std::copy(strides_, strides_ + N, std::begin(this->strides_)); + } + + // if index_t is not int64_t, we want to have an int64_t constructor + template ::value>::type> + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + C10_HOST GenericPackedTensorAccessorBase( + PtrType data_, + const source_index_t* sizes_, + const source_index_t* strides_) + : data_(data_) { + for (const auto i : c10::irange(N)) { + this->sizes_[i] = sizes_[i]; + this->strides_[i] = strides_[i]; + } + } + + C10_HOST_DEVICE index_t stride(index_t i) const { + return strides_[i]; + } + C10_HOST_DEVICE index_t size(index_t i) const { + return sizes_[i]; + } + C10_HOST_DEVICE PtrType data() { + return data_; + } + C10_HOST_DEVICE const PtrType data() const { + return data_; + } +protected: + PtrType data_; + // NOLINTNEXTLINE(*c-arrays*) + index_t sizes_[N]; + // NOLINTNEXTLINE(*c-arrays*) + index_t strides_[N]; + C10_HOST void bounds_check_(index_t i) const { + TORCH_CHECK_INDEX( + 0 <= i && i < index_t{N}, + "Index ", + i, + " is not within bounds of a tensor of dimension ", + N); + } +}; + +template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +class GenericPackedTensorAccessor : public GenericPackedTensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + + C10_HOST GenericPackedTensorAccessor( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : GenericPackedTensorAccessorBase(data_, sizes_, strides_) {} + + // if index_t is not int64_t, we want to have an int64_t constructor + template ::value>::type> + C10_HOST GenericPackedTensorAccessor( + PtrType data_, + const source_index_t* sizes_, + const source_index_t* strides_) + : GenericPackedTensorAccessorBase(data_, sizes_, strides_) {} + + C10_DEVICE TensorAccessor operator[](index_t i) { + index_t* new_sizes = this->sizes_ + 1; + index_t* new_strides = this->strides_ + 1; + return TensorAccessor(this->data_ + this->strides_[0]*i, new_sizes, new_strides); + } + + C10_DEVICE const TensorAccessor operator[](index_t i) const { + const index_t* new_sizes = this->sizes_ + 1; + const index_t* new_strides = this->strides_ + 1; + return TensorAccessor(this->data_ + this->strides_[0]*i, new_sizes, new_strides); + } + + /// Returns a PackedTensorAccessor of the same dimension after transposing the + /// two dimensions given. Does not actually move elements; transposition is + /// made by permuting the size/stride arrays. If the dimensions are not valid, + /// asserts. 
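+  ///
+  /// Editor's illustration: for a 2-D accessor `acc`, indexing the result of
+  /// `acc.transpose(0, 1)` with `[j][i]` (on the device) addresses the same
+  /// element as `acc[i][j]`; only the locally stored size/stride arrays are
+  /// swapped, the data pointer is unchanged.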
+ C10_HOST GenericPackedTensorAccessor transpose( + index_t dim1, + index_t dim2) const { + this->bounds_check_(dim1); + this->bounds_check_(dim2); + GenericPackedTensorAccessor result( + this->data_, this->sizes_, this->strides_); + std::swap(result.strides_[dim1], result.strides_[dim2]); + std::swap(result.sizes_[dim1], result.sizes_[dim2]); + return result; + } +}; + +template class PtrTraits, typename index_t> +class GenericPackedTensorAccessor : public GenericPackedTensorAccessorBase { +public: + typedef typename PtrTraits::PtrType PtrType; + C10_HOST GenericPackedTensorAccessor( + PtrType data_, + const index_t* sizes_, + const index_t* strides_) + : GenericPackedTensorAccessorBase(data_, sizes_, strides_) {} + + // if index_t is not int64_t, we want to have an int64_t constructor + template ::value>::type> + C10_HOST GenericPackedTensorAccessor( + PtrType data_, + const source_index_t* sizes_, + const source_index_t* strides_) + : GenericPackedTensorAccessorBase(data_, sizes_, strides_) {} + + C10_DEVICE T & operator[](index_t i) { + return this->data_[this->strides_[0] * i]; + } + C10_DEVICE const T& operator[](index_t i) const { + return this->data_[this->strides_[0]*i]; + } + + // Same as in the general N-dimensional case, but note that in the + // 1-dimensional case the returned PackedTensorAccessor will always be an + // identical copy of the original + C10_HOST GenericPackedTensorAccessor transpose( + index_t dim1, + index_t dim2) const { + this->bounds_check_(dim1); + this->bounds_check_(dim2); + return GenericPackedTensorAccessor( + this->data_, this->sizes_, this->strides_); + } +}; + + +// Can't put this directly into the macro function args because of commas +#define AT_X GenericPackedTensorAccessor + +// Old name for `GenericPackedTensorAccessor` +template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> +C10_DEFINE_DEPRECATED_USING(PackedTensorAccessor, AT_X) + +#undef AT_X + +template class PtrTraits = DefaultPtrTraits> +using PackedTensorAccessor32 = GenericPackedTensorAccessor; + +template class PtrTraits = DefaultPtrTraits> +using PackedTensorAccessor64 = GenericPackedTensorAccessor; +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorBase.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorBase.h new file mode 100644 index 0000000000000000000000000000000000000000..90a90e60c08ec6cea7bb05cb8d4249edbaab2dd9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorBase.h @@ -0,0 +1,1039 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace c10 { +class Scalar; +} + +namespace torch { namespace autograd { + +struct Node; + +}} // namespace torch::autograd + +namespace at { + +class Tensor; +class TensorBase; + +// Convert Tensor to TensorBase without any need to include Tensor.h +TORCH_API const TensorBase& get_tensor_base(const Tensor& t); + +namespace impl { +inline bool variable_excluded_from_dispatch() { +#ifdef C10_MOBILE + // Please read the comment in `VariableFallbackKernel.cpp` about the background of this change. + return true; +#else + return c10::impl::tls_local_dispatch_key_set().excluded_.isSupersetOf(c10::autograd_dispatch_keyset); +#endif +} + +} + +// NOTE: [Tensor vs. 
TensorBase] +// +// Tensor, being the central data structure in PyTorch, gets used and +// it's header included almost everywhere. Unfortunately this means +// every time an operator signature is updated or changed in +// native_functions.yaml, you (and every other PyTorch developer) need +// to recompile all of ATen and it's dependencies. +// +// TensorBase aims to break up these header dependencies, and improve +// incremental build times for all PyTorch developers. TensorBase +// represents a reference counted handle to TensorImpl, exactly the +// same as Tensor. However, TensorBase doesn't have code generated +// methods in it's API and thus no dependence on native_functions.yaml. +// +// Usage tips +// ---------- +// - You can `#define TORCH_ASSERT_NO_OPERATORS` at the top of a .cpp +// or .cu file to ensure it has no header dependencies on +// native_functions.yaml (direct or indirect). +// - Tensor inherits from TensorBase, so functions taking +// `const TensorBase &` are callable with Tensor as well. +// - TensorBase can be converted to tensor with `Tensor(tensor_base)`, +// but this requires a reference-count bump. OptionalTensorRef on +// the other hand can materialize a `const Tensor &` without +// touching the reference-count. +class TORCH_API TensorBase { + public: + struct unsafe_borrow_t { explicit unsafe_borrow_t() = default; }; + + protected: + // Create a Tensor with a +0 reference count. Special care must be + // taken to avoid decrementing this reference count at destruction + // time. Intended to support MaybeOwnedTraits. + explicit TensorBase(unsafe_borrow_t, const TensorBase& rhs) + : impl_(c10::intrusive_ptr::reclaim(rhs.impl_.get())) {} + friend MaybeOwnedTraits; + + public: + TensorBase() = default; + // This constructor should not be used by end users and is an implementation + // detail invoked by autogenerated code. + explicit TensorBase( + c10::intrusive_ptr tensor_impl) + : impl_(std::move(tensor_impl)) { + if (impl_.get() == nullptr) { + throw std::runtime_error("TensorImpl with nullptr is not supported"); + } + } + TensorBase(const TensorBase&) = default; + TensorBase(TensorBase&&) noexcept = default; + + public: + // Creates a new wrapper from TensorImpl. Intentionally a free method because + // it should be used with care. Checks necessary invariants + static TensorBase wrap_tensor_impl( + c10::intrusive_ptr tensor_impl) { + TensorBase r(std::move(tensor_impl)); + r.enforce_invariants(); + return r; + } + + int64_t dim() const { + return impl_->dim(); + } + int64_t storage_offset() const { + return impl_->storage_offset(); + } + + TensorBase contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const { + if (is_contiguous(memory_format)) { + return *this; + } else { + return __dispatch_contiguous(memory_format); + } + } + + /// Should be used if *this can reasonably be expected to be contiguous and + /// performance is important. + /// Compared to contiguous, it saves a reference count + /// increment/decrement if *this is already contiguous, at the cost + /// in all cases of an extra pointer of stack usage, an extra branch + /// to access, and an extra branch at destruction time. + c10::MaybeOwned expect_contiguous( + MemoryFormat memory_format=MemoryFormat::Contiguous) const &; + + // Use .contiguous() instead. Trying to borrow from a prvalue + // will only lead to trouble and dangling references. 
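+  //
+  // Editor's sketch of the intended lvalue-only usage (the function name
+  // `use` is made up):
+  //
+  //   void use(const at::TensorBase& self) {
+  //     c10::MaybeOwned<at::TensorBase> c = self.expect_contiguous();
+  //     const at::TensorBase& view = *c;    // borrowed if already contiguous,
+  //                                         // owned copy otherwise
+  //   }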
+ c10::MaybeOwned expect_contiguous( + MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete; + + const TensorBase& fill_(const c10::Scalar& scalar) const; + const TensorBase& zero_() const; + + TensorBase to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + + bool is_complex() const { + return at::isComplexType(this->scalar_type()); + } + + bool is_floating_point() const { + return at::isFloatingType(this->scalar_type()); + } + + bool is_signed() const { + return at::isSignedType(this->scalar_type()); + } + + c10::SymInt sym_size(int64_t dim) const { + return impl_->sym_size(dim); + } + + c10::SymInt sym_stride(int64_t dim) const { + const auto sizes = this->sym_strides(); + const auto ndim = static_cast(sizes.size()); + // false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping) + return sizes[c10::maybe_wrap_dim(dim, ndim, /*wrap_scalar=*/false)]; + + } + + int64_t size(int64_t dim) const { + return impl_->size(dim); + } + + int64_t stride(int64_t dim) const { + const auto strides = this->strides(); + const auto ndim = static_cast(strides.size()); + // false is passed to maybe_wrap_dim so behavior is identical to array access (but with wrapping) + return strides[c10::maybe_wrap_dim(dim, ndim, /*wrap_scalar=*/false)]; + } + + TensorImpl * unsafeGetTensorImpl() const { + return impl_.get(); + } + TensorImpl * unsafeReleaseTensorImpl() { + return impl_.release(); + } + const c10::intrusive_ptr& getIntrusivePtr() const { + return impl_; + } + + c10::intrusive_ptr unsafeReleaseIntrusivePtr() { + return std::move(impl_); + } + + bool defined() const { + return impl_; + } + + void reset() { + impl_.reset(); + } + +#if defined (_MSC_VER) + TensorBase& operator=(const TensorBase& x) & { + impl_ = x.impl_; + return *this; + }; + TensorBase& operator=(TensorBase&& x) & noexcept { + impl_ = std::move(x.impl_); + return *this; + } +#else + TensorBase& operator=(const TensorBase& x) & = default; + TensorBase& operator=(TensorBase&& x) & noexcept = default; +#endif + + // Ban assignment to rvalues, since at::Tensor (weirdly) performs a deep copy here + TensorBase& operator=(const TensorBase&) && = delete; + TensorBase& operator=(TensorBase&&) && noexcept = delete; + + bool is_same(const TensorBase& other) const noexcept { + return impl_ == other.impl_; + } + size_t use_count() const noexcept { + return impl_.use_count(); + } + size_t weak_use_count() const noexcept { + return impl_.weak_use_count(); + } + + std::string toString() const; + + IntArrayRef sizes() const { + return impl_->sizes(); + } + c10::SymIntArrayRef sym_sizes() const { + return impl_->sym_sizes(); + } + c10::SymIntArrayRef sym_strides() const { + return impl_->sym_strides(); + } + IntArrayRef strides() const { + return impl_->strides(); + } + // See impl::get_opt_names in ATen/NamedTensor.h for docs. + c10::optional opt_names() const { + return impl::get_opt_names(unsafeGetTensorImpl()); + } + // See impl::get_names in ATen/NamedTensor.h for docs. 
+ DimnameList names() const { + return impl::get_names(unsafeGetTensorImpl()); + } + int64_t ndimension() const { + return dim(); + } + + bool is_contiguous(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const { + return impl_->is_contiguous(memory_format); + } + + bool is_non_overlapping_and_dense() const { + return impl_->is_non_overlapping_and_dense(); + } + + at::MemoryFormat suggest_memory_format( + bool channels_last_strides_exact_match = false) const { + // Setting channels_last_strides_exact_match to true forces function to + // check 0,1 - sized dimension strides. + if (layout() == at::kStrided) { + if (impl_->is_strides_like_channels_last()) { + if (!channels_last_strides_exact_match || + get_channels_last_strides_2d(sizes()) == strides()) { + return at::MemoryFormat::ChannelsLast; + } + } + else if (impl_->is_strides_like_channels_last_3d()) { + if (!channels_last_strides_exact_match || + get_channels_last_strides_3d(sizes()) == strides()) { + return at::MemoryFormat::ChannelsLast3d; + } + } + } + return at::MemoryFormat::Contiguous; + } + + // Total bytes consumed by the "view" of elements of the array. Does not + // include size of metadata. The number reported here does not necessarily + // correspond to the true physical memory consumed by a tensor; instead, + // it reports the memory the tensor would take *if* it were contiguous. + // Defined to be numel() * itemsize() + size_t nbytes() const { + TORCH_CHECK(layout () != at::kSparse, + "nbytes is not defined for sparse tensors. If you want the size of the constituent " \ + "tensors, add the nbytes of the indices and values. If you want the size of the " \ + "equivalent dense tensor, multiply numel() by element_size()"); + return impl_->numel() * impl_->itemsize(); + } + + c10::SymInt sym_nbytes() const { + TORCH_CHECK(layout () != at::kSparse, + "nbytes is not defined for sparse tensors. If you want the size of the constituent " \ + "tensors, add the nbytes of the indices and values. If you want the size of the " \ + "equivalent dense tensor, multiply numel() by element_size()"); + return impl_->sym_numel() * impl_->itemsize(); + } + + int64_t numel() const { + return impl_->numel(); + } + + c10::SymInt sym_numel() const { + return impl_->sym_numel(); + } + + c10::SymInt sym_storage_offset() const { + return impl_->sym_storage_offset(); + } + + // Length of one array element in bytes. This is the traditional + // Numpy naming. + size_t itemsize() const { + return impl_->itemsize(); + } + + // Same as itemsize(). This is the PyTorch naming. + int64_t element_size() const { + return static_cast(impl_->itemsize()); + } + + DispatchKeySet key_set() const { + return impl_->key_set(); + } + ScalarType scalar_type() const { + return typeMetaToScalarType(impl_->dtype()); + } + bool has_storage() const { + return defined() && impl_->has_storage(); + } + const Storage& storage() const { + return impl_->storage(); + } + bool is_alias_of(const at::TensorBase& other) const{ + return impl_->storage().is_alias_of(other.storage()); + } + + // Move the storage backend to shm based + // to enable memory sharing across processes. + // + // NB1: the ideal behavior of this API still requires further discussion + // but for now we are inclined to keep it consistent with existing THP behavior + // https://github.com/pytorch/pytorch/blob/4dca9bde0552afc67b5b74f4a0696fe6055709c4/torch/storage.py#L196-L212 + // so we don't assert on anything here and rely on caller knowing + // what it's doing. 
+ // + // NB2: this currently provides Linux fd based shm support only + // to simplify the storage lifetime management logic in ATen + // and similarly for now we are not adding support for file system based + // shm support like in THP due to additional GC manager support needed + // to prevent leaks. + // As such, calling this from non supported systems (e.g. Windows) would fail. + void share_memory_() { + at::share_memory_(*this); + } + + inline bool _is_zerotensor() const { + return impl_->_is_zerotensor(); + } + + inline void _set_zero(bool zero) const { + impl_->_set_zero(zero); + } + + inline bool is_conj() const { + return impl_->is_conj(); + } + + // sets the conjugate bit of a tensor. + // NOTE: Conjugate bit is supposed to be a read-only field. Only change this, if you are sure + // that's what you want. Changing this might lead to incorrect behavior since conjugation is + // a lazy operation and we rely on this bit to determine if a conjugation needs to be materialized. + inline void _set_conj(bool conjugate) const { + impl_->_set_conj(conjugate); + } + + inline bool is_neg() const { + return impl_->is_neg(); + } + + // sets the negative bit of a tensor. + // NOTE: Negative bit is supposed to be a read-only field. Only change this, if you are sure + // that's what you want. Changing this might lead to incorrect behavior since we rely on this + // bit to determine if a negation needs to be materialized. + inline void _set_neg(bool negative) const { + impl_->_set_neg(negative); + } + + /// Returns a `Tensor`'s layout. + Layout layout() const { + return impl_->layout(); + } + + /// Returns a `Tensor`'s dtype (`TypeMeta`). + caffe2::TypeMeta dtype() const { + return impl_->dtype(); + } + + /// Returns a `Tensor`'s device. + inline Device device() const { + return impl_->device(); + } + + /// Returns a `Tensor`'s device index. + int64_t get_device() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->get_device(); + } + + /// Returns if a `Tensor` has CPU backend. + bool is_cpu() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_cpu(); + } + + /// Returns if a `Tensor` has CUDA backend. + bool is_cuda() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_cuda(); + } + + /// Returns if a `Tensor` has IPU backend. + bool is_ipu() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_ipu(); + } + + /// Returns if a `Tensor` has XPU backend. + bool is_xpu() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_xpu(); + } + + /// Returns if a `Tensor` has XLA backend. + bool is_xla() const { + return impl_->is_xla(); + } + + /// Returns if a `Tensor` has MTIA backend. + bool is_mtia() const { + return impl_->is_mtia(); + } + + /// Returns if a `Tensor` has HPU backend. + bool is_hpu() const { + return impl_->is_hpu(); + } + + /// Returns if a `Tensor` has Lazy backend. + bool is_lazy() const { + return impl_->is_lazy(); + } + + /// Returns if a `Tensor` has HIP backend. + bool is_hip() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_hip(); + } + + /// Returns if a `Tensor` has VE backend. + bool is_ve() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_ve(); + } + + /// Returns if a `Tensor` has PrivateUse1 backend. 
+ bool is_privateuseone() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_privateuseone(); + } + + /// Returns if a `Tensor` has sparse backend. + bool is_sparse() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_sparse(); + } + + /// Returns is a `Tensor` has a sparse CSR backend. + bool is_sparse_csr() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_sparse_csr(); + } + + /// Returns if a `Tensor` is mkldnn tensor. + bool is_mkldnn() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_mkldnn(); + } + + /// Returns if a `Tensor` is mps tensor. + bool is_mps() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_mps(); + } + + /// Returns if a `Tensor` is ort tensor. + bool is_ort() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_ort(); + } + + /// Returns if a `Tensor` is vulkan tensor. + bool is_vulkan() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_vulkan(); + } + + /// Returns if a `Tensor` is metal tensor. + bool is_metal() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_metal(); + } + + /// Returns if a `Tensor` has quantized backend. + bool is_quantized() const { + // NB: this is not a native function to avoid dispatching overhead. + return impl_->is_quantized(); + } + + /// Returns if a `Tensor` is a meta tensor. Meta tensors can + /// also have other designations. + bool is_meta() const { + return impl_->is_meta(); + } + + /// Returns if a `Tensor` is an inference tensor. + bool is_inference() const { + return impl_->is_inference(); + } + + // Returns if a `Tensor` is a NestedTensor. + bool is_nested() const { + return impl_->is_nested(); + } + + /// If a tensor is a quantized tensor, returns its quantizer + /// TODO: it's not in native_functions.yaml yet as it's not exposed to python + QuantizerPtr quantizer() const; + + /// Returns if a `Tensor` has any dimension names + bool has_names() const { + // If a user is using unnamed tensors, then we can short-circuit right here. + // Otherwise, impl::has_names attempts to retrieve names. + if (!impl_->has_named_tensor_meta()) { + return false; + } + return impl::has_names(unsafeGetTensorImpl()); + } + + /// Returns a `Tensor`'s dimension names data structure + const NamedTensorMeta* get_named_tensor_meta() const { + return static_cast(impl_->named_tensor_meta()); + } + + NamedTensorMeta* get_named_tensor_meta() { + return static_cast(impl_->named_tensor_meta()); + } + + /// Returns the `TensorOptions` corresponding to this `Tensor`. Defined in + /// TensorOptions.h. + TensorOptions options() const { + return TensorOptions().dtype(dtype()) + .device(device()) + .layout(layout()); + } + + const void* const_data_ptr() const { + return this->unsafeGetTensorImpl()->data(); + } + + void* mutable_data_ptr() const { + return this->unsafeGetTensorImpl()->mutable_data(); + } + + // TODO(#97856) Make this return a const pointer. This currently + // returns a non-const pointer because of the large + // number of clients that we still want to audit before + // migrating to mutable_data_ptr(). 
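+  //
+  // Editor's illustration (tensor names `src`/`dst` are hypothetical): new
+  // code can state intent through the typed accessors declared further below.
+  //
+  //   const float* in  = src.const_data_ptr<float>();    // read-only access
+  //   float*       out = dst.mutable_data_ptr<float>();  // declared mutation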
+ void* data_ptr() const { + return mutable_data_ptr(); + } + + template + const T* const_data_ptr() const; + + template + T* mutable_data_ptr() const; + + // Legacy interface during the migration to indicate that a callsite + // has not been audited for mutability. + // + // Do not add new uses of this, use const_data_ptr() if possible, + // mutable_data_ptr() otherwise. + // + // TODO(#97856) Make this return a const pointer. This is currently + // const because of the vast number of clients that + // rely on this. + template + T* data_ptr() const; + + // Purposely not defined here to avoid inlining + void print() const; + + // Return a `TensorAccessor` for CPU `Tensor`s. You have to specify scalar type and + // dimension. + template + TensorAccessor accessor() const& { + static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr()"); + TORCH_CHECK(dim() == N, "TensorAccessor expected ", N, " dims but tensor has ", dim()); + return TensorAccessor(data_ptr(),sizes().data(),strides().data()); + } + template + TensorAccessor accessor() && = delete; + + // Return a `GenericPackedTensorAccessor` for CUDA `Tensor`s. You have to specify scalar type and + // dimension. You can optionally specify RestrictPtrTraits as a template parameter to + // cast the data pointer to a __restrict__ pointer. + // In order to use this, your CUDA kernel has to take a corresponding GenericPackedTensorAccessor + // as an argument. + template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> + GenericPackedTensorAccessor generic_packed_accessor() const& { + static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr()"); + TORCH_CHECK(dim() == N, "TensorAccessor expected ", N, " dims but tensor has ", dim()); + return GenericPackedTensorAccessor(static_cast::PtrType>(data_ptr()),sizes().data(),strides().data()); + } + template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> + GenericPackedTensorAccessor generic_packed_accessor() && = delete; + + template class PtrTraits = DefaultPtrTraits> + PackedTensorAccessor32 packed_accessor32() const& { + TORCH_CHECK( + impl_->numel() <= + static_cast(std::numeric_limits::max()), + "numel needs to be smaller than int32_t max; otherwise, please use packed_accessor64"); + return generic_packed_accessor(); + } + template class PtrTraits = DefaultPtrTraits> + PackedTensorAccessor32 packed_accessor32() && = delete; + + template class PtrTraits = DefaultPtrTraits> + PackedTensorAccessor64 packed_accessor64() const& { + return generic_packed_accessor(); + } + template class PtrTraits = DefaultPtrTraits> + PackedTensorAccessor64 packed_accessor64() && = delete; + + // ~~~~~ Autograd API ~~~~~ + + /// \fn bool is_leaf() const; + /// + /// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention. + /// + /// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were + /// created by the user. This means that they are not the result of an operation and so + /// `grad_fn()` is `nullptr`. + /// + /// Only leaf Tensors will have their `grad()` populated during a call to `backward()`. + /// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`. 
+ /// + /// Example: + /// @code + /// auto a = torch::rand(10, torch::requires_grad()); + /// std::cout << a.is_leaf() << std::endl; // prints `true` + /// + /// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA); + /// std::cout << b.is_leaf() << std::endl; // prints `false` + /// // b was created by the operation that cast a cpu Tensor into a cuda Tensor + /// + /// auto c = torch::rand(10, torch::requires_grad()) + 2; + /// std::cout << c.is_leaf() << std::endl; // prints `false` + /// // c was created by the addition operation + /// + /// auto d = torch::rand(10).cuda(); + /// std::cout << d.is_leaf() << std::endl; // prints `true` + /// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine) + /// + /// auto e = torch::rand(10).cuda().requires_grad_(); + /// std::cout << e.is_leaf() << std::endl; // prints `true` + /// // e requires gradients and has no operations creating it + /// + /// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true)); + /// std::cout << f.is_leaf() << std::endl; // prints `true` + /// // f requires grad, has no operation creating it + /// @endcode + + /// \fn void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const; + /// + /// Computes the gradient of current tensor with respect to graph leaves. + /// + /// The graph is differentiated using the chain rule. If the tensor is + /// non-scalar (i.e. its data has more than one element) and requires + /// gradient, the function additionally requires specifying ``gradient``. + /// It should be a tensor of matching type and location, that contains + /// the gradient of the differentiated function w.r.t. this Tensor. + /// + /// This function accumulates gradients in the leaves - you might need to + /// zero them before calling it. + /// + /// \param gradient Gradient w.r.t. the + /// tensor. If it is a tensor, it will be automatically converted + /// to a Tensor that does not require grad unless ``create_graph`` is True. + /// None values can be specified for scalar Tensors or ones that + /// don't require grad. If a None value would be acceptable then + /// this argument is optional. + /// \param retain_graph If ``false``, the graph used to compute + /// the grads will be freed. Note that in nearly all cases setting + /// this option to True is not needed and often can be worked around + /// in a much more efficient way. Defaults to the value of + /// ``create_graph``. + /// \param create_graph If ``true``, graph of the derivative will + /// be constructed, allowing to compute higher order derivative + /// products. Defaults to ``false``. + /// \param inputs Inputs w.r.t. which the gradient will be accumulated into + /// ``at::Tensor::grad``. All other Tensors will be ignored. If not + /// provided, the gradient is accumulated into all the leaf Tensors + /// that were used to compute the current tensor. + /// When inputs are provided and a given input is not a leaf, + /// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients). + /// It is an implementation detail on which the user should not rely. + /// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details. + + /// \fn Tensor detach() const; + /// + /// Returns a new Tensor, detached from the current graph. + /// The result will never require gradient. 
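+  ///
+  /// Example (editor's sketch):
+  /// @code
+  /// auto x = torch::rand({2, 2}, torch::requires_grad());
+  /// auto y = x.detach();
+  /// // y shares x's storage but is cut from the autograd graph;
+  /// // y.requires_grad() is false and in-place changes to y are visible in x.
+  /// @endcode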
+ + /// \fn Tensor & detach_() const; + /// + /// Detaches the Tensor from the graph that created it, making it a leaf. + /// Views cannot be detached in-place. + + /// \fn void retain_grad() const; + /// + /// Enables this Tensor to have their :attr:`grad` populated during + /// :func:`backward`. This is a no-op for leaf tensors. + + /// \fn bool retains_grad() const; + /// + /// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be + /// populated during :func:`backward`, ``false`` otherwise. + + const TensorBase& set_requires_grad(bool requires_grad) const { + impl_->set_requires_grad(requires_grad); + return *this; + } + bool requires_grad() const { + return impl_->requires_grad(); + } + + // The Forward AD API functions below are low level and are not to be used by end + // users who should use the API provided in torch/csrc/autograd.h + + /// This function returns the forward gradient for this Tensor at the given level. + const Tensor& _fw_grad(uint64_t level) const { + return impl_->_fw_grad(level, *this); + } + + /// This function can be used to set the value of the forward grad. + /// Note that the given new_grad might not be used directly if it has different + /// metadata (size/stride/storage offset) compared to this Tensor. In that case, + /// new_grad content will be copied into a new Tensor + void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const { + impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op); + } + + /// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended + /// to be used from functions that need to access the `Variable`'s equivalent `Tensor` + /// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`). + /// + /// One notable difference with the legacy `.data()` function is that changes to the + /// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset) + /// will not update the original `Variable`, due to the fact that this function + /// shallow-copies the `Variable`'s underlying TensorImpl. + at::TensorBase tensor_data() const; + + /// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data` + /// in Python, which create a new `Variable` that shares the same storage and + /// tensor metadata with the original `Variable`, but with a completely new + /// autograd history. + /// + /// NOTE: If we change the tensor metadata (e.g. sizes / strides / + /// storage / storage_offset) of a variable created from `var.variable_data()`, those + /// changes will not update the original variable `var`. In `.variable_data()`, we set + /// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal, + /// in order to prevent users from changing metadata of `var.variable_data()` + /// and expecting the original variable `var` to also be updated. + at::TensorBase variable_data() const; + + // Gradient Node and Edges + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + /// Gets the gradient function of the `Variable`. If this is a leaf variable, + /// the pointer returned will be null. + /// + /// For View Variables: + /// Gets the up-to-date grad_fn. If the shared data or base was modified, we + /// re-create the grad_fn to express the up-to-date view relationship between + /// this and the base Variable. 
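+  ///
+  /// Example (editor's sketch):
+  /// @code
+  /// auto a = torch::rand({3}, torch::requires_grad());
+  /// auto b = a * 2;
+  /// // a.grad_fn() is null (a is a leaf created by the user), while
+  /// // b.grad_fn() points to the backward node recorded for the multiplication.
+  /// @endcode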
+ const std::shared_ptr& grad_fn() const; + + // Hooks + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + template + using hook_return_void_t = std::enable_if_t>::value, unsigned>; + template + using hook_return_var_t = std::enable_if_t, TensorBase>::value, unsigned>; + + /// Registers a backward hook. + /// + /// The hook will be called every time a gradient with respect to the Tensor is computed. + /// The hook should have one of the following signature: + /// ``` + /// hook(TensorBase grad) -> TensorBase + /// ``` + /// ``` + /// hook(TensorBase grad) -> void + /// ``` + /// The hook should not modify its argument, but it can optionally return a new gradient + /// which will be used in place of `grad`. + /// + /// This function returns the index of the hook in the list which can be used to remove hook. + /// + /// Example: + /// @code + /// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad()); + /// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient + /// v.backward(torch::tensor({1., 2., 3.})); + /// // This prints: + /// // ``` + /// // 2 + /// // 4 + /// // 6 + /// // [ CPUFloatType{3} ] + /// // ``` + /// std::cout << v.grad() << std::endl; + /// v.remove_hook(h); // removes the hook + /// @endcode + template + hook_return_void_t register_hook(T&& hook) const; + template + hook_return_var_t register_hook(T&& hook) const; + +protected: + unsigned _register_hook(std::function hook) const; + +public: + + /// Remove hook at given position + void remove_hook(unsigned pos) const; + + // Variable methods + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + bool is_leaf() const; + + int64_t output_nr() const; + + void set_data(const TensorBase & new_data) const; + + TensorBase data() const; + + int64_t _version() const; + + void retain_grad() const; + + bool retains_grad() const; + + const TensorBase& requires_grad_(bool _requires_grad=true) const; + + // View Variables + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + /// Returns true if this `Variable` is a view of another `Variable`. + bool is_view() const; + + /// Returns the `Variable` that this `Variable` is a view of. If this + /// `Variable` is not a view, throw a `std::runtime_error`. + const TensorBase& _base() const; + + // Miscellaneous + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + const std::string& name() const; + +protected: + void enforce_invariants(); + c10::intrusive_ptr impl_; + +private: + TensorBase __dispatch_contiguous(c10::MemoryFormat) const; +}; + +inline int64_t get_device(const TensorBase& self) { + return self.get_device(); +} + +template +auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_void_t { + // Return the grad argument in case of a hook with void return type to have an + // std::function with Tensor return type + static_assert(std::is_same::value, + "Expected hook to return void"); + return _register_hook([fn=std::forward(hook)](const TensorBase& grad) { + fn(grad); + return TensorBase(); + }); +} + +template +auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_var_t { + return _register_hook(std::forward(hook)); +} + +namespace detail { +// Helper creator for Tensor class which doesn't requires the users to pass +// in an intrusive_ptr instead it just converts the argument passed to +// requested intrusive_ptr type. +template +TensorBase make_tensor_base(Args&&... 
args) { + return TensorBase(c10::make_intrusive(std::forward(args)...)); +} + +} // namespace detail + +static inline DispatchKey legacyExtractDispatchKey(const TensorBase& t) { + return legacyExtractDispatchKey(t.key_set()); +} + +} // namespace at + +namespace c10 { +template <> +struct MaybeOwnedTraits { + using owned_type = at::TensorBase; + using borrow_type = at::TensorBase; + + static borrow_type createBorrow(const owned_type& from) { + // NOTE: this can be implemented without the special + // unsafe_borrow_t Tensor constructor as + // + // return borrow_type(c10::intrusive_ptr::reclaim(from.unsafeGetTensorImpl())); + // + // but that hurts inlining due to the nullptr check in the + // Tensor(c10::intrusive_ptr<...>) constructor. We already know + // that from.impl_ isn't null because from is a valid Tensor, so + // we needn't do the check again. (using __builtin_assume can + // avoid this, but wouldn't be portable to MSVC.) + return borrow_type(borrow_type::unsafe_borrow_t{}, from); + } + + static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) { + lhs.unsafeReleaseTensorImpl(); + // See above note: this can be implemented with public API + // similarly to createBorrow(), but that would hurt inlining. + lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs); + } + + static void destroyBorrow(borrow_type& toDestroy) { + toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0. + } + + static const owned_type& referenceFromBorrow(const borrow_type& borrow) { + return borrow; + } + + static const owned_type* pointerFromBorrow(const borrow_type& borrow) { + return &borrow; + } + + static bool debugBorrowIsValid(const borrow_type& /*borrow*/) { + return true; + } +}; + +template <> +struct ExclusivelyOwnedTraits : public c10::ExclusivelyOwnedTensorTraits {}; +} // namespace c10 + +namespace at { + +inline c10::MaybeOwned borrow_from_optional_tensor( + const c10::optional& opt) { + return opt.has_value() + ? 
c10::MaybeOwned::borrowed(*opt) + : c10::MaybeOwned::owned(c10::in_place); +} + +inline c10::MaybeOwned TensorBase::expect_contiguous(MemoryFormat memory_format) const & { + if (is_contiguous(memory_format)) { + return c10::MaybeOwned::borrowed(*this); + } else { + return c10::MaybeOwned::owned(__dispatch_contiguous(memory_format)); + } +} + +namespace symint { + +template +using enable_if_symint = std::enable_if_t::value>; +template +using enable_if_int = std::enable_if_t::value>; + +template > +c10::SymIntArrayRef sizes(const TensorBase& t) { return t.sym_sizes(); } +template > +IntArrayRef sizes(const TensorBase& t) { return t.sizes(); } + +template > +c10::SymInt size(const TensorBase& t, int64_t dim) { return t.sym_size(dim); } +template > +int64_t size(const TensorBase& t, int64_t dim) { return t.size(dim); } + +template > +c10::SymIntArrayRef strides(const TensorBase& t) { return t.sym_strides(); } +template > +IntArrayRef strides(const TensorBase& t) { return t.strides(); } + +template > +c10::SymInt numel(const TensorBase& t) { return t.sym_numel(); } +template > +int64_t numel(const TensorBase& t) { return t.numel(); } + +} // namespace symint + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h new file mode 100644 index 0000000000000000000000000000000000000000..0694342fda978676575c24202c337ab43ace9d2f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TensorBody.h @@ -0,0 +1,5713 @@ +#pragma once + +#ifdef TORCH_ASSERT_NO_OPERATORS +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if your change would be better placed in \ + another file, or if a more specific header might achieve the same goal. \ + See NOTE: [Tensor vs. TensorBase] +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include + +namespace c10{ +template class List; +template class IListRef; +} +namespace at { +struct Generator; +struct Type; +class DeprecatedTypeProperties; +class Tensor; +} // namespace at +namespace at { +namespace indexing { +struct TensorIndex; +} // namespace indexing +} // namespace at + +namespace torch { namespace autograd { + +struct Node; + +}} // namespace torch::autograd + +namespace at { + +class OptionalTensorRef; +class TensorRef; +class Tensor; +using TensorList = ArrayRef; +using ITensorList = c10::IListRef; + +using Stream = c10::Stream; + +// Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which +// has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr. +// +// For example: +// +// void func(Tensor a) { +// Tensor b = a; +// ... +// } +// +// In this example, when we say Tensor b = a, we are creating a new object that points to the +// same underlying TensorImpl, and bumps its reference count. When b goes out of scope, the +// destructor decrements the reference count by calling release() on the TensorImpl it points to. +// The existing constructors, operator overloads, etc. take care to implement the correct semantics. +// +// Note that Tensor can also be NULL, i.e. 
it is not associated with any underlying TensorImpl, and +// special care must be taken to handle this. +class TORCH_API Tensor: public TensorBase { + protected: + // Create a Tensor with a +0 reference count. Special care must be + // taken to avoid decrementing this reference count at destruction + // time. Intended to support MaybeOwnedTraits. + explicit Tensor(unsafe_borrow_t, const TensorBase& rhs): TensorBase(unsafe_borrow_t{}, rhs) {} + friend MaybeOwnedTraits; + friend OptionalTensorRef; + friend TensorRef; + + public: + Tensor() = default; + // This constructor should not be used by end users and is an implementation + // detail invoked by autogenerated code. + explicit Tensor( + c10::intrusive_ptr tensor_impl) + : TensorBase(std::move(tensor_impl)) {} + Tensor(const Tensor &tensor) = default; + Tensor(Tensor &&tensor) = default; + + // Implicitly move-constructible from TensorBase, but must be explicit to increase refcount + explicit Tensor(const TensorBase &base): TensorBase(base) {} + /*implicit*/ Tensor(TensorBase &&base): TensorBase(std::move(base)) {} + + // Creates a new wrapper from TensorImpl. Intentionally a free method because + // it should be used with care. Checks necessary invariants + static Tensor wrap_tensor_impl( + c10::intrusive_ptr tensor_impl) { + return TensorBase::wrap_tensor_impl(std::move(tensor_impl)); + } + + Tensor contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const { + return TensorBase::contiguous(memory_format); + } + + Tensor conj() const { + if (!this->is_complex()) { + return *this; + } + + switch (this->layout()) { + case at::kSparse: + case at::kSparseCsr: + case at::kSparseCsc: + case at::kSparseBsr: + case at::kSparseBsc: + return this->conj_physical(); + default: + return this->_conj(); + } + } + + // Aliased by Dimname overloads, so need explicit using + using TensorBase::size; + using TensorBase::sym_size; + using TensorBase::stride; + + /// Should be used if *this can reasonably be expected to be contiguous and + /// performance is important. + /// Compared to contiguous, it saves a reference count + /// increment/decrement if *this is already contiguous, at the cost + /// in all cases of an extra pointer of stack usage, an extra branch + /// to access, and an extra branch at destruction time. + c10::MaybeOwned expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const &; + + // Use .contiguous() instead. Trying to borrow from a prvalue Tensor + // will only lead to trouble and dangling references. + c10::MaybeOwned expect_contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) && = delete; + + // The following overloads are very intruiging. Consider the following + // program: + // + // x[1] = 3; + // + // We would expect that the first entry of x is written to 3. But how can we + // actually achieve this? x[1] evaluates to a tensor... + // + // The answer is, using a ref-qualifier. x[1] is an rvalue, which cannot be + // (profitably) assigned to in the traditional sense, so we overload + // assignment to mean, "Actually, copy 3 into the tensor data." This is done + // with an rvalue-reference ref-qualified overload (the methods with && at the + // end of their type.) + // + // There's one more fly in the ointment: We also want + // + // Tensor x = y; + // + // to work, and we want it NOT to copy. So we need a traditional operator= + // overload. 
But we MUST specify a mutable lvalue ref-qualifier, to + // disambiguate the traditional overload from the rvalue-reference + // ref-qualified overload. Otherwise, it will be ambiguous, because + // a non ref-qualified method is eligible for all situations. + + // Unfortunately, we have to write these constructors out manually + // to work around an MSVC bug: + // error C2580: 'at::Tensor &at::Tensor::operator =(const at::Tensor &) &': + // multiple versions of a defaulted special member functions are not allowed + // Tensor& operator=(const Tensor&) & = default; + // Tensor& operator=(Tensor&&) & = default; + + // Also MSVC will wrongly issue the following warning with the aforementioned fix + // warning C4522: 'at::Tensor': multiple assignment operators specified + // Let's just skip the warning. + // + // TODO: temporarily disabled + + Tensor& operator=(const TensorBase& x) & { + impl_ = x.getIntrusivePtr(); + return *this; + } + Tensor& operator=(TensorBase&& x) & noexcept { + impl_ = x.unsafeReleaseIntrusivePtr(); + return *this; + } + + Tensor& operator=(const Tensor &x) & { + return operator=(static_cast(x)); + } + Tensor& operator=(Tensor &&x) & noexcept { + return operator=(static_cast(x)); + } + + Tensor& operator=(const Scalar &v) && { + return fill_(v); + } + Tensor& operator=(const Tensor &rhs) && { + return copy_(rhs); + } + Tensor& operator=(Tensor&& rhs) && { + return copy_(rhs); + } + + C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().") + DeprecatedTypeProperties & type() const { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + dispatchKeyToBackend(legacyExtractDispatchKey(key_set())), + scalar_type()); + } + + Tensor toType(ScalarType t) const { + return to(options().dtype(t), /*non_blocking*/ false, /*copy*/ false); + } + + // TODO: Deprecate me + Tensor toBackend(Backend b) const { + return to(options().device(backendToDeviceType(b)).layout(layout_from_backend(b)), /*non_blocking*/ false, /*copy*/ false); + } + + C10_DEPRECATED_MESSAGE("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())") + bool is_variable() const noexcept { + return !at::impl::variable_excluded_from_dispatch(); + } + + template + C10_DEPRECATED_MESSAGE("Tensor.data() is deprecated. 
Please use Tensor.data_ptr() instead.") + T * data() const { + return data_ptr(); + } + + template + T item() const; + + template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> + C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead") + GenericPackedTensorAccessor packed_accessor() const & { + return generic_packed_accessor(); + } + template class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> + C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead") + GenericPackedTensorAccessor packed_accessor() && = delete; + + Tensor operator~() const { + return bitwise_not(); + } + Tensor operator-() const { + return neg(); + } + Tensor& operator+=(const Tensor & other) { + return add_(other); + } + Tensor& operator+=(const Scalar & other) { + return add_(other); + } + Tensor& operator-=(const Tensor & other) { + return sub_(other); + } + Tensor& operator-=(const Scalar & other) { + return sub_(other); + } + Tensor& operator*=(const Tensor & other) { + return mul_(other); + } + Tensor& operator*=(const Scalar & other) { + return mul_(other); + } + Tensor& operator/=(const Tensor & other) { + return div_(other); + } + Tensor& operator/=(const Scalar & other) { + return div_(other); + } + Tensor& operator&=(const Tensor & other) { + return bitwise_and_(other); + } + Tensor& operator|=(const Tensor & other) { + return bitwise_or_(other); + } + Tensor& operator^=(const Tensor & other) { + return bitwise_xor_(other); + } + Tensor operator[](const Scalar & index) const { + if (!index.isIntegral(false)) { + TORCH_CHECK_INDEX(false, "Can only index tensors with integral scalars"); + } + return this->operator[](index.toLong()); + } + Tensor operator[](const Tensor & index) const { + // These properties are checked in the Scalar constructor, but we already + // check them here to provide more useful diagnostics for the user. + if (!index.defined()) { + TORCH_CHECK_INDEX(false, "Can only index with tensors that are defined"); + } + if (index.dim() != 0) { + TORCH_CHECK_INDEX(false, + "Can only index with tensors that are scalars (zero-dim)"); + } + // The Scalar(Tensor) constructor is explicit, so we need to call it. 
+ return this->operator[](index.item()); + } + Tensor operator[](int64_t index) const { + return select(0, index); + } + + Tensor index(ArrayRef indices) const; + Tensor index(std::initializer_list indices) const; + + Tensor & index_put_(ArrayRef indices, Tensor const & rhs); + Tensor & index_put_(ArrayRef indices, const Scalar& v); + Tensor & index_put_(std::initializer_list indices, Tensor const & rhs); + Tensor & index_put_(std::initializer_list indices, const Scalar& v); + + Tensor cpu() const { + return to(options().device(c10::DeviceType::CPU), /*non_blocking*/ false, /*copy*/ false); + } + + // TODO: The Python version also accepts arguments + Tensor cuda() const { + return to(options().device(c10::DeviceType::CUDA), /*non_blocking*/ false, /*copy*/ false); + } + + Tensor hip() const { + return to(options().device(c10::DeviceType::HIP), /*non_blocking*/ false, /*copy*/ false); + } + + Tensor ve() const { + return to(options().device(c10::DeviceType::VE), /*non_blocking*/ false, /*copy*/ false); + } + + Tensor vulkan() const { + return to(options().device(c10::DeviceType::Vulkan), /*non_blocking*/ false, /*copy*/ false); + } + + Tensor metal() const { + return to(options().device(c10::DeviceType::Metal), /*non_blocking*/ false, /*copy*/ false); + } + + Tensor meta() const { + return to(options().device(c10::DeviceType::Meta), /*non_blocking*/ false, /*copy*/ false); + } + + // ~~~~~ Autograd API ~~~~~ + + /// \fn bool is_leaf() const; + /// + /// All Tensors that have `requires_grad()` which is ``false`` will be leaf Tensors by convention. + /// + /// For Tensors that have `requires_grad()` which is ``true``, they will be leaf Tensors if they were + /// created by the user. This means that they are not the result of an operation and so + /// `grad_fn()` is `nullptr`. + /// + /// Only leaf Tensors will have their `grad()` populated during a call to `backward()`. + /// To get `grad()` populated for non-leaf Tensors, you can use `retain_grad()`. + /// + /// Example: + /// @code + /// auto a = torch::rand(10, torch::requires_grad()); + /// std::cout << a.is_leaf() << std::endl; // prints `true` + /// + /// auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA); + /// std::cout << b.is_leaf() << std::endl; // prints `false` + /// // b was created by the operation that cast a cpu Tensor into a cuda Tensor + /// + /// auto c = torch::rand(10, torch::requires_grad()) + 2; + /// std::cout << c.is_leaf() << std::endl; // prints `false` + /// // c was created by the addition operation + /// + /// auto d = torch::rand(10).cuda(); + /// std::cout << d.is_leaf() << std::endl; // prints `true` + /// // d does not require gradients and so has no operation creating it (that is tracked by the autograd engine) + /// + /// auto e = torch::rand(10).cuda().requires_grad_(); + /// std::cout << e.is_leaf() << std::endl; // prints `true` + /// // e requires gradients and has no operations creating it + /// + /// auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true)); + /// std::cout << f.is_leaf() << std::endl; // prints `true` + /// // f requires grad, has no operation creating it + /// @endcode + + /// \fn void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const; + /// + /// Computes the gradient of current tensor with respect to graph leaves. + /// + /// The graph is differentiated using the chain rule. If the tensor is + /// non-scalar (i.e. 
its data has more than one element) and requires + /// gradient, the function additionally requires specifying ``gradient``. + /// It should be a tensor of matching type and location, that contains + /// the gradient of the differentiated function w.r.t. this Tensor. + /// + /// This function accumulates gradients in the leaves - you might need to + /// zero them before calling it. + /// + /// \param gradient Gradient w.r.t. the + /// tensor. If it is a tensor, it will be automatically converted + /// to a Tensor that does not require grad unless ``create_graph`` is True. + /// None values can be specified for scalar Tensors or ones that + /// don't require grad. If a None value would be acceptable then + /// this argument is optional. + /// \param retain_graph If ``false``, the graph used to compute + /// the grads will be freed. Note that in nearly all cases setting + /// this option to True is not needed and often can be worked around + /// in a much more efficient way. Defaults to the value of + /// ``create_graph``. + /// \param create_graph If ``true``, graph of the derivative will + /// be constructed, allowing to compute higher order derivative + /// products. Defaults to ``false``. + /// \param inputs Inputs w.r.t. which the gradient will be accumulated into + /// ``at::Tensor::grad``. All other Tensors will be ignored. If not + /// provided, the gradient is accumulated into all the leaf Tensors + /// that were used to compute the current tensor. + /// When inputs are provided and a given input is not a leaf, + /// the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients). + /// It is an implementation detail on which the user should not rely. + /// See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details. + void backward(const Tensor & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false, c10::optional inputs=c10::nullopt) const { + // NB: Adding this wrapper to _backward here because we'd like our + // 'backwards' api to accept the 'inputs' argument optionally. Since code gen + // currently does not support optional of TensorList our approach is to replace + // backward in native_functions.yaml with _backward and call it here instead. + if (inputs.has_value()) { + TORCH_CHECK(inputs.value().size() > 0, "'inputs' argument to backward cannot be empty") + this->_backward(inputs.value(), gradient, retain_graph, create_graph); + } else { + this->_backward({}, gradient, retain_graph, create_graph); + } + } + + /// \fn Tensor detach() const; + /// + /// Returns a new Tensor, detached from the current graph. + /// The result will never require gradient. + + /// \fn Tensor & detach_() const; + /// + /// Detaches the Tensor from the graph that created it, making it a leaf. + /// Views cannot be detached in-place. + + /// \fn void retain_grad() const; + /// + /// Enables this Tensor to have their :attr:`grad` populated during + /// :func:`backward`. This is a no-op for leaf tensors. + + /// \fn bool retains_grad() const; + /// + /// Is ``true`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be + /// populated during :func:`backward`, ``false`` otherwise. + + const Tensor& set_requires_grad(bool requires_grad) const { + TensorBase::set_requires_grad(requires_grad); + return *this; + } + + /// Return a mutable reference to the gradient. This is conventionally + /// used as `t.grad() = x` to set a gradient to a completely new tensor. 
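+  /// For example (editor's sketch), `w.mutable_grad() = torch::zeros_like(w);`
+  /// replaces the accumulated gradient of a parameter `w` with fresh zeros.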
+ /// Note that this function works with a non-const Tensor and is not + /// thread safe. + Tensor& mutable_grad() const { + return impl_->mutable_grad(); + } + + /// This function returns an undefined tensor by default and returns a defined tensor + /// the first time a call to `backward()` computes gradients for this Tensor. + /// The attribute will then contain the gradients computed and future calls + /// to `backward()` will accumulate (add) gradients into it. + const Tensor& grad() const { + const Tensor& maybe_grad = impl_->grad(); + if (!is_leaf() && !retains_grad() && !maybe_grad.defined()) { + TORCH_WARN( + "The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad " + "attribute won't be populated during autograd.backward(). If you indeed want the .grad " + "field to be populated for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. " + "If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor " + "instead. See github.com/pytorch/pytorch/pull/30531 for more information."); + } + return maybe_grad; + } + + // The Forward AD API functions below are low level and are not to be used by end + // users, who should use the API provided in torch/csrc/autograd.h + + /// This function returns the forward gradient for this Tensor at the given level. + const Tensor& _fw_grad(uint64_t level) const { + return impl_->_fw_grad(level, *this); + } + + /// This function can be used to set the value of the forward grad. + /// Note that the given new_grad might not be used directly if it has different + /// metadata (size/stride/storage offset) compared to this Tensor. In that case, + /// new_grad content will be copied into a new Tensor. + void _set_fw_grad(const TensorBase& new_grad, uint64_t level, bool is_inplace_op) const { + impl_->_set_fw_grad(new_grad, *this, level, is_inplace_op); + } + + + // STOP. Thinking of adding a method here, which only makes use + // of other ATen methods? Define it in native_functions.yaml.
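+ + // Example (sketch, not part of the generated API surface): using backward(), + // grad() and retain_grad() together as documented above. Assumes the torch:: + // factory functions from the C++ frontend (torch/torch.h), as in the + // `is_leaf()` example above. + // @code + // auto x = torch::ones({2, 2}, torch::requires_grad()); + // auto y = x * x + 2;                  // y is a non-leaf Tensor + // y.retain_grad();                     // also populate y.grad() during backward() + // auto z = y.sum(); + // z.backward();                        // accumulates into x.grad() and y.grad() + // std::cout << x.grad() << std::endl;  // dz/dx == 2 * x + // std::cout << y.grad() << std::endl;  // dz/dy == all ones + // @endcode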
+ + //example + //Tensor * add(Tensor & b); + void __dispatch__backward(at::TensorList inputs, const c10::optional & gradient={}, c10::optional retain_graph=c10::nullopt, bool create_graph=false) const; + void __dispatch_set_data(const at::Tensor & new_data) const; + at::Tensor __dispatch_data() const; + bool __dispatch_is_leaf() const; + int64_t __dispatch_output_nr() const; + int64_t __dispatch__version() const; + at::Tensor & __dispatch_requires_grad_(bool requires_grad=true) const; + void __dispatch_retain_grad() const; + bool __dispatch_retains_grad() const; + at::Tensor _fw_primal(int64_t level) const; + at::Tensor & rename_(c10::optional names) const; + at::Tensor rename(c10::optional names) const; + at::Tensor align_to(at::DimnameList names) const; + at::Tensor align_to(at::DimnameList order, int64_t ellipsis_idx) const; + at::Tensor align_as(const at::Tensor & other) const; + at::Tensor refine_names(at::DimnameList names) const; + at::Tensor abs() const; + at::Tensor & abs_() const; + at::Tensor absolute() const; + at::Tensor & absolute_() const; + at::Tensor angle() const; + at::Tensor sgn() const; + at::Tensor & sgn_() const; + at::Tensor chalf(c10::optional memory_format=c10::nullopt) const; + at::Tensor _conj() const; + at::Tensor __dispatch_conj() const; + at::Tensor _conj_physical() const; + at::Tensor conj_physical() const; + at::Tensor & conj_physical_() const; + at::Tensor resolve_conj() const; + at::Tensor resolve_neg() const; + at::Tensor _neg_view() const; + at::Tensor acos() const; + at::Tensor & acos_() const; + at::Tensor arccos() const; + at::Tensor & arccos_() const; + at::Tensor add(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor & add_(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor add(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor & add_(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor addmv(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor & addmv_(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor addr(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor & addr_(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor _is_all_true() const; + at::Tensor _is_any_true() const; + at::Tensor all(int64_t dim, bool keepdim=false) const; + at::Tensor all(at::OptionalIntArrayRef dim, bool keepdim=false) const; + at::Tensor all(at::Dimname dim, bool keepdim=false) const; + bool allclose(const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const; + at::Tensor any(int64_t dim, bool keepdim=false) const; + at::Tensor any(at::OptionalIntArrayRef dim, bool keepdim=false) const; + at::Tensor any(at::Dimname dim, bool keepdim=false) const; + at::Tensor argmax(c10::optional dim=c10::nullopt, bool keepdim=false) const; + at::Tensor argmin(c10::optional dim=c10::nullopt, bool keepdim=false) const; + at::Tensor acosh() const; + at::Tensor & acosh_() const; + at::Tensor arccosh() const; + at::Tensor & arccosh_() const; + at::Tensor asinh() const; + at::Tensor & asinh_() const; + at::Tensor arcsinh() const; + at::Tensor & arcsinh_() const; + at::Tensor atanh() const; + at::Tensor & atanh_() const; + at::Tensor arctanh() const; + at::Tensor & arctanh_() const; + 
at::Tensor as_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + at::Tensor as_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + const at::Tensor & as_strided_(at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + const at::Tensor & as_strided__symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + at::Tensor asin() const; + at::Tensor & asin_() const; + at::Tensor arcsin() const; + at::Tensor & arcsin_() const; + at::Tensor atan() const; + at::Tensor & atan_() const; + at::Tensor arctan() const; + at::Tensor & arctan_() const; + at::Tensor baddbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor & baddbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor bernoulli(c10::optional generator=c10::nullopt) const; + at::Tensor & bernoulli_(const at::Tensor & p, c10::optional generator=c10::nullopt) const; + at::Tensor & bernoulli_(double p=0.5, c10::optional generator=c10::nullopt) const; + at::Tensor bernoulli(double p, c10::optional generator=c10::nullopt) const; + at::Tensor bincount(const c10::optional & weights={}, int64_t minlength=0) const; + at::Tensor bitwise_not() const; + at::Tensor & bitwise_not_() const; + at::Tensor copysign(const at::Tensor & other) const; + at::Tensor & copysign_(const at::Tensor & other) const; + at::Tensor copysign(const at::Scalar & other) const; + at::Tensor & copysign_(const at::Scalar & other) const; + at::Tensor logical_not() const; + at::Tensor & logical_not_() const; + at::Tensor logical_xor(const at::Tensor & other) const; + at::Tensor & logical_xor_(const at::Tensor & other) const; + at::Tensor logical_and(const at::Tensor & other) const; + at::Tensor & logical_and_(const at::Tensor & other) const; + at::Tensor logical_or(const at::Tensor & other) const; + at::Tensor & logical_or_(const at::Tensor & other) const; + at::Tensor bmm(const at::Tensor & mat2) const; + at::Tensor broadcast_to(at::IntArrayRef size) const; + at::Tensor broadcast_to_symint(c10::SymIntArrayRef size) const; + at::Tensor ceil() const; + at::Tensor & ceil_() const; + ::std::vector unsafe_chunk(int64_t chunks, int64_t dim=0) const; + ::std::vector chunk(int64_t chunks, int64_t dim=0) const; + ::std::vector tensor_split(int64_t sections, int64_t dim=0) const; + ::std::vector tensor_split_symint(c10::SymInt sections, int64_t dim=0) const; + ::std::vector tensor_split(at::IntArrayRef indices, int64_t dim=0) const; + ::std::vector tensor_split_symint(c10::SymIntArrayRef indices, int64_t dim=0) const; + ::std::vector tensor_split(const at::Tensor & tensor_indices_or_sections, int64_t dim=0) const; + at::Tensor clamp(const c10::optional & min, const c10::optional & max=c10::nullopt) const; + at::Tensor clamp(const c10::optional & min={}, const c10::optional & max={}) const; + at::Tensor & clamp_(const c10::optional & min, const c10::optional & max=c10::nullopt) const; + at::Tensor & clamp_(const c10::optional & min={}, const c10::optional & max={}) const; + at::Tensor clamp_max(const at::Scalar & max) const; + at::Tensor clamp_max(const at::Tensor & max) const; + at::Tensor & clamp_max_(const at::Scalar & max) const; + at::Tensor & clamp_max_(const at::Tensor & max) const; + at::Tensor clamp_min(const 
at::Scalar & min) const; + at::Tensor clamp_min(const at::Tensor & min) const; + at::Tensor & clamp_min_(const at::Scalar & min) const; + at::Tensor & clamp_min_(const at::Tensor & min) const; + at::Tensor clip(const c10::optional & min, const c10::optional & max=c10::nullopt) const; + at::Tensor clip(const c10::optional & min={}, const c10::optional & max={}) const; + at::Tensor & clip_(const c10::optional & min, const c10::optional & max=c10::nullopt) const; + at::Tensor & clip_(const c10::optional & min={}, const c10::optional & max={}) const; + at::Tensor __dispatch_contiguous(at::MemoryFormat memory_format=MemoryFormat::Contiguous) const; + at::Tensor & copy_(const at::Tensor & src, bool non_blocking=false) const; + at::Tensor cos() const; + at::Tensor & cos_() const; + at::Tensor cosh() const; + at::Tensor & cosh_() const; + at::Tensor count_nonzero(at::IntArrayRef dim) const; + at::Tensor count_nonzero(c10::optional dim=c10::nullopt) const; + at::Tensor cov(int64_t correction=1, const c10::optional & fweights={}, const c10::optional & aweights={}) const; + at::Tensor corrcoef() const; + ::std::tuple cummax(int64_t dim) const; + ::std::tuple cummax(at::Dimname dim) const; + ::std::tuple cummin(int64_t dim) const; + ::std::tuple cummin(at::Dimname dim) const; + at::Tensor cumprod(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor & cumprod_(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor cumprod(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + at::Tensor & cumprod_(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + at::Tensor cumsum(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor & cumsum_(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor cumsum(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + at::Tensor & cumsum_(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + at::Tensor diag_embed(int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) const; + at::Tensor diagflat(int64_t offset=0) const; + at::Tensor diagonal(int64_t offset=0, int64_t dim1=0, int64_t dim2=1) const; + at::Tensor diagonal(at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset=0) const; + at::Tensor & fill_diagonal_(const at::Scalar & fill_value, bool wrap=false) const; + at::Tensor diff(int64_t n=1, int64_t dim=-1, const c10::optional & prepend={}, const c10::optional & append={}) const; + at::Tensor div(const at::Tensor & other) const; + at::Tensor & div_(const at::Tensor & other) const; + at::Tensor div(const at::Tensor & other, c10::optional rounding_mode) const; + at::Tensor & div_(const at::Tensor & other, c10::optional rounding_mode) const; + at::Tensor div(const at::Scalar & other) const; + at::Tensor & div_(const at::Scalar & other) const; + at::Tensor div(const at::Scalar & other, c10::optional rounding_mode) const; + at::Tensor & div_(const at::Scalar & other, c10::optional rounding_mode) const; + at::Tensor divide(const at::Tensor & other) const; + at::Tensor & divide_(const at::Tensor & other) const; + at::Tensor divide(const at::Scalar & other) const; + at::Tensor & divide_(const at::Scalar & other) const; + at::Tensor divide(const at::Tensor & other, c10::optional rounding_mode) const; + at::Tensor & divide_(const at::Tensor & other, c10::optional rounding_mode) const; + at::Tensor divide(const at::Scalar & other, c10::optional rounding_mode) const; + at::Tensor & divide_(const at::Scalar & other, c10::optional rounding_mode) const; + at::Tensor true_divide(const at::Tensor & 
other) const; + at::Tensor & true_divide_(const at::Tensor & other) const; + at::Tensor true_divide(const at::Scalar & other) const; + at::Tensor & true_divide_(const at::Scalar & other) const; + at::Tensor dot(const at::Tensor & tensor) const; + at::Tensor vdot(const at::Tensor & other) const; + at::Tensor new_empty(at::IntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_empty(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_empty_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_empty_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options={}) const; + at::Tensor new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options={}) const; + at::Tensor new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) const; + at::Tensor new_full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) const; + at::Tensor new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_zeros(at::IntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_zeros(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_zeros_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_ones(at::IntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_ones(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + at::Tensor new_ones_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) const; + at::Tensor new_ones_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const; + const at::Tensor & resize_(at::IntArrayRef size, c10::optional memory_format=c10::nullopt) const; + const at::Tensor & resize__symint(c10::SymIntArrayRef size, c10::optional memory_format=c10::nullopt) const; + at::Tensor erf() const; + at::Tensor & erf_() const; + at::Tensor erfc() const; + at::Tensor & erfc_() const; + at::Tensor exp() const; + at::Tensor & exp_() const; + at::Tensor exp2() const; + at::Tensor & exp2_() const; + at::Tensor expm1() const; + at::Tensor & expm1_() const; + at::Tensor expand(at::IntArrayRef size, bool implicit=false) const; + at::Tensor expand_symint(c10::SymIntArrayRef size, bool implicit=false) const; + 
at::Tensor expand_as(const at::Tensor & other) const; + at::Tensor flatten(int64_t start_dim=0, int64_t end_dim=-1) const; + at::Tensor flatten(int64_t start_dim, int64_t end_dim, at::Dimname out_dim) const; + at::Tensor flatten(at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) const; + at::Tensor flatten(at::DimnameList dims, at::Dimname out_dim) const; + at::Tensor unflatten(int64_t dim, at::IntArrayRef sizes) const; + at::Tensor unflatten_symint(int64_t dim, c10::SymIntArrayRef sizes) const; + at::Tensor unflatten(at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) const; + at::Tensor unflatten_symint(at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) const; + at::Tensor & fill_(const at::Scalar & value) const; + at::Tensor & fill_(const at::Tensor & value) const; + at::Tensor floor() const; + at::Tensor & floor_() const; + at::Tensor floor_divide(const at::Tensor & other) const; + at::Tensor & floor_divide_(const at::Tensor & other) const; + at::Tensor floor_divide(const at::Scalar & other) const; + at::Tensor & floor_divide_(const at::Scalar & other) const; + at::Tensor frac() const; + at::Tensor & frac_() const; + at::Tensor gcd(const at::Tensor & other) const; + at::Tensor & gcd_(const at::Tensor & other) const; + at::Tensor lcm(const at::Tensor & other) const; + at::Tensor & lcm_(const at::Tensor & other) const; + at::Tensor index(const c10::List> & indices) const; + at::Tensor & index_copy_(int64_t dim, const at::Tensor & index, const at::Tensor & source) const; + at::Tensor index_copy(int64_t dim, const at::Tensor & index, const at::Tensor & source) const; + at::Tensor & index_copy_(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const; + at::Tensor index_copy(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const; + at::Tensor & index_put_(const c10::List> & indices, const at::Tensor & values, bool accumulate=false) const; + at::Tensor index_put(const c10::List> & indices, const at::Tensor & values, bool accumulate=false) const; + at::Tensor isclose(const at::Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const; + at::Tensor isnan() const; + bool is_distributed() const; + bool __dispatch_is_floating_point() const; + bool __dispatch_is_complex() const; + bool __dispatch_is_conj() const; + bool __dispatch__is_zerotensor() const; + bool __dispatch_is_neg() const; + at::Tensor isreal() const; + bool is_nonzero() const; + bool is_same_size(const at::Tensor & other) const; + bool __dispatch_is_signed() const; + bool __dispatch_is_inference() const; + at::Tensor kron(const at::Tensor & other) const; + ::std::tuple kthvalue(int64_t k, int64_t dim=-1, bool keepdim=false) const; + ::std::tuple kthvalue(int64_t k, at::Dimname dim, bool keepdim=false) const; + at::Tensor nan_to_num(c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt) const; + at::Tensor & nan_to_num_(c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt) const; + at::Tensor ldexp(const at::Tensor & other) const; + at::Tensor & ldexp_(const at::Tensor & other) const; + at::Tensor log() const; + at::Tensor & log_() const; + at::Tensor log10() const; + at::Tensor & log10_() const; + at::Tensor log1p() const; + at::Tensor & log1p_() const; + at::Tensor log2() const; + at::Tensor & log2_() const; + at::Tensor logaddexp(const at::Tensor & other) const; + at::Tensor logaddexp2(const at::Tensor & other) const; + at::Tensor 
xlogy(const at::Tensor & other) const; + at::Tensor xlogy(const at::Scalar & other) const; + at::Tensor & xlogy_(const at::Tensor & other) const; + at::Tensor & xlogy_(const at::Scalar & other) const; + at::Tensor log_softmax(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor log_softmax(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + at::Tensor logcumsumexp(int64_t dim) const; + at::Tensor logcumsumexp(at::Dimname dim) const; + at::Tensor logsumexp(at::IntArrayRef dim, bool keepdim=false) const; + at::Tensor logsumexp(at::DimnameList dim, bool keepdim=false) const; + at::Tensor matmul(const at::Tensor & other) const; + at::Tensor matrix_power(int64_t n) const; + at::Tensor matrix_exp() const; + ::std::tuple aminmax(c10::optional dim=c10::nullopt, bool keepdim=false) const; + ::std::tuple max(int64_t dim, bool keepdim=false) const; + ::std::tuple max(at::Dimname dim, bool keepdim=false) const; + at::Tensor amax(at::IntArrayRef dim={}, bool keepdim=false) const; + at::Tensor mean(c10::optional dtype=c10::nullopt) const; + at::Tensor mean(at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor mean(at::DimnameList dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor nanmean(at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor median() const; + ::std::tuple median(int64_t dim, bool keepdim=false) const; + ::std::tuple median(at::Dimname dim, bool keepdim=false) const; + at::Tensor nanmedian() const; + ::std::tuple nanmedian(int64_t dim, bool keepdim=false) const; + ::std::tuple nanmedian(at::Dimname dim, bool keepdim=false) const; + ::std::tuple min(int64_t dim, bool keepdim=false) const; + ::std::tuple min(at::Dimname dim, bool keepdim=false) const; + at::Tensor amin(at::IntArrayRef dim={}, bool keepdim=false) const; + at::Tensor mm(const at::Tensor & mat2) const; + ::std::tuple mode(int64_t dim=-1, bool keepdim=false) const; + ::std::tuple mode(at::Dimname dim, bool keepdim=false) const; + at::Tensor mul(const at::Tensor & other) const; + at::Tensor & mul_(const at::Tensor & other) const; + at::Tensor mul(const at::Scalar & other) const; + at::Tensor & mul_(const at::Scalar & other) const; + at::Tensor multiply(const at::Tensor & other) const; + at::Tensor & multiply_(const at::Tensor & other) const; + at::Tensor multiply(const at::Scalar & other) const; + at::Tensor & multiply_(const at::Scalar & other) const; + at::Tensor mv(const at::Tensor & vec) const; + at::Tensor mvlgamma(int64_t p) const; + at::Tensor & mvlgamma_(int64_t p) const; + at::Tensor narrow_copy(int64_t dim, int64_t start, int64_t length) const; + at::Tensor narrow_copy_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const; + at::Tensor narrow(int64_t dim, int64_t start, int64_t length) const; + at::Tensor narrow_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const; + at::Tensor narrow(int64_t dim, const at::Tensor & start, int64_t length) const; + at::Tensor narrow_symint(int64_t dim, const at::Tensor & start, c10::SymInt length) const; + at::Tensor permute(at::IntArrayRef dims) const; + at::Tensor movedim(at::IntArrayRef source, at::IntArrayRef destination) const; + at::Tensor movedim(int64_t source, int64_t destination) const; + at::Tensor moveaxis(at::IntArrayRef source, at::IntArrayRef destination) const; + at::Tensor moveaxis(int64_t source, int64_t destination) const; + at::Tensor numpy_T() const; + at::Tensor matrix_H() const; + 
at::Tensor mT() const; + at::Tensor mH() const; + at::Tensor adjoint() const; + bool is_pinned(c10::optional device=c10::nullopt) const; + at::Tensor pin_memory(c10::optional device=c10::nullopt) const; + at::Tensor pinverse(double rcond=1e-15) const; + at::Tensor rad2deg() const; + at::Tensor & rad2deg_() const; + at::Tensor deg2rad() const; + at::Tensor & deg2rad_() const; + at::Tensor ravel() const; + at::Tensor reciprocal() const; + at::Tensor & reciprocal_() const; + at::Tensor neg() const; + at::Tensor & neg_() const; + at::Tensor negative() const; + at::Tensor & negative_() const; + at::Tensor repeat(at::IntArrayRef repeats) const; + at::Tensor repeat_symint(c10::SymIntArrayRef repeats) const; + at::Tensor repeat_interleave(const at::Tensor & repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) const; + at::Tensor repeat_interleave_symint(const at::Tensor & repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) const; + at::Tensor repeat_interleave(int64_t repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) const; + at::Tensor repeat_interleave_symint(c10::SymInt repeats, c10::optional dim=c10::nullopt, c10::optional output_size=c10::nullopt) const; + at::Tensor reshape(at::IntArrayRef shape) const; + at::Tensor reshape_symint(c10::SymIntArrayRef shape) const; + at::Tensor _reshape_alias(at::IntArrayRef size, at::IntArrayRef stride) const; + at::Tensor _reshape_alias_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const; + at::Tensor reshape_as(const at::Tensor & other) const; + at::Tensor round() const; + at::Tensor & round_() const; + at::Tensor round(int64_t decimals) const; + at::Tensor & round_(int64_t decimals) const; + at::Tensor relu() const; + at::Tensor & relu_() const; + at::Tensor prelu(const at::Tensor & weight) const; + at::Tensor hardshrink(const at::Scalar & lambd=0.5) const; + at::Tensor hardshrink_backward(const at::Tensor & grad_out, const at::Scalar & lambd) const; + at::Tensor rsqrt() const; + at::Tensor & rsqrt_() const; + at::Tensor select(at::Dimname dim, int64_t index) const; + at::Tensor select(int64_t dim, int64_t index) const; + at::Tensor select_symint(int64_t dim, c10::SymInt index) const; + at::Tensor sigmoid() const; + at::Tensor & sigmoid_() const; + at::Tensor logit(c10::optional eps=c10::nullopt) const; + at::Tensor & logit_(c10::optional eps=c10::nullopt) const; + at::Tensor sin() const; + at::Tensor & sin_() const; + at::Tensor sinc() const; + at::Tensor & sinc_() const; + at::Tensor sinh() const; + at::Tensor & sinh_() const; + at::Tensor detach() const; + at::Tensor & detach_() const; + int64_t size(at::Dimname dim) const; + at::Tensor slice(int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) const; + at::Tensor slice_symint(int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, c10::SymInt step=1) const; + at::Tensor slice_scatter(const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1) const; + at::Tensor slice_scatter_symint(const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, c10::SymInt step=1) const; + at::Tensor select_scatter(const at::Tensor & src, int64_t dim, int64_t index) const; + at::Tensor select_scatter_symint(const at::Tensor & src, int64_t dim, c10::SymInt index) const; + at::Tensor diagonal_scatter(const at::Tensor & src, int64_t offset=0, int64_t 
dim1=0, int64_t dim2=1) const; + at::Tensor as_strided_scatter(const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + at::Tensor as_strided_scatter_symint(const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset=c10::nullopt) const; + at::Tensor smm(const at::Tensor & mat2) const; + at::Tensor softmax(int64_t dim, c10::optional dtype=c10::nullopt) const; + at::Tensor softmax(at::Dimname dim, c10::optional dtype=c10::nullopt) const; + ::std::vector unsafe_split(int64_t split_size, int64_t dim=0) const; + ::std::vector unsafe_split_symint(c10::SymInt split_size, int64_t dim=0) const; + ::std::vector split(int64_t split_size, int64_t dim=0) const; + ::std::vector split_symint(c10::SymInt split_size, int64_t dim=0) const; + ::std::vector split(at::IntArrayRef split_size, int64_t dim=0) const; + ::std::vector split_symint(c10::SymIntArrayRef split_size, int64_t dim=0) const; + ::std::vector unsafe_split_with_sizes(at::IntArrayRef split_sizes, int64_t dim=0) const; + ::std::vector unsafe_split_with_sizes_symint(c10::SymIntArrayRef split_sizes, int64_t dim=0) const; + ::std::vector split_with_sizes(at::IntArrayRef split_sizes, int64_t dim=0) const; + ::std::vector split_with_sizes_symint(c10::SymIntArrayRef split_sizes, int64_t dim=0) const; + ::std::vector hsplit(int64_t sections) const; + ::std::vector hsplit(at::IntArrayRef indices) const; + ::std::vector vsplit(int64_t sections) const; + ::std::vector vsplit(at::IntArrayRef indices) const; + ::std::vector dsplit(int64_t sections) const; + ::std::vector dsplit(at::IntArrayRef indices) const; + at::Tensor squeeze() const; + at::Tensor squeeze(int64_t dim) const; + at::Tensor squeeze(at::Dimname dim) const; + at::Tensor squeeze(at::IntArrayRef dim) const; + at::Tensor & squeeze_() const; + at::Tensor & squeeze_(int64_t dim) const; + at::Tensor & squeeze_(at::IntArrayRef dim) const; + at::Tensor & squeeze_(at::Dimname dim) const; + at::Tensor sspaddmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor stft(int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool normalized, c10::optional onesided=c10::nullopt, c10::optional return_complex=c10::nullopt) const; + at::Tensor stft(int64_t n_fft, c10::optional hop_length=c10::nullopt, c10::optional win_length=c10::nullopt, const c10::optional & window={}, bool center=true, c10::string_view pad_mode="reflect", bool normalized=false, c10::optional onesided=c10::nullopt, c10::optional return_complex=c10::nullopt) const; + at::Tensor istft(int64_t n_fft, c10::optional hop_length=c10::nullopt, c10::optional win_length=c10::nullopt, const c10::optional & window={}, bool center=true, bool normalized=false, c10::optional onesided=c10::nullopt, c10::optional length=c10::nullopt, bool return_complex=false) const; + int64_t stride(at::Dimname dim) const; + at::Tensor sum(c10::optional dtype=c10::nullopt) const; + at::Tensor sum(at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor sum(at::DimnameList dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor nansum(at::OptionalIntArrayRef dim=c10::nullopt, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor sum_to_size(at::IntArrayRef size) const; + at::Tensor sum_to_size_symint(c10::SymIntArrayRef size) const; + at::Tensor sqrt() const; + 
at::Tensor & sqrt_() const; + at::Tensor square() const; + at::Tensor & square_() const; + at::Tensor std(bool unbiased) const; + at::Tensor std(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) const; + at::Tensor std(at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false) const; + at::Tensor std(at::DimnameList dim, bool unbiased, bool keepdim=false) const; + at::Tensor std(at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false) const; + at::Tensor prod(c10::optional dtype=c10::nullopt) const; + at::Tensor prod(int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor prod(at::Dimname dim, bool keepdim=false, c10::optional dtype=c10::nullopt) const; + at::Tensor t() const; + at::Tensor & t_() const; + at::Tensor tan() const; + at::Tensor & tan_() const; + at::Tensor tanh() const; + at::Tensor & tanh_() const; + at::Tensor tile(at::IntArrayRef dims) const; + at::Tensor tile_symint(c10::SymIntArrayRef dims) const; + at::Tensor transpose(int64_t dim0, int64_t dim1) const; + at::Tensor transpose(at::Dimname dim0, at::Dimname dim1) const; + at::Tensor & transpose_(int64_t dim0, int64_t dim1) const; + at::Tensor flip(at::IntArrayRef dims) const; + at::Tensor fliplr() const; + at::Tensor flipud() const; + at::Tensor roll(at::IntArrayRef shifts, at::IntArrayRef dims={}) const; + at::Tensor roll_symint(c10::SymIntArrayRef shifts, at::IntArrayRef dims={}) const; + at::Tensor rot90(int64_t k=1, at::IntArrayRef dims={0,1}) const; + at::Tensor _nested_tensor_size() const; + at::Tensor _nested_tensor_strides() const; + at::Tensor _nested_tensor_storage_offsets() const; + at::Tensor trunc() const; + at::Tensor & trunc_() const; + at::Tensor fix() const; + at::Tensor & fix_() const; + at::Tensor type_as(const at::Tensor & other) const; + at::Tensor unsqueeze(int64_t dim) const; + at::Tensor & unsqueeze_(int64_t dim) const; + at::Tensor var(bool unbiased) const; + at::Tensor var(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim=false) const; + at::Tensor var(at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional & correction=c10::nullopt, bool keepdim=false) const; + at::Tensor var(at::DimnameList dim, bool unbiased, bool keepdim=false) const; + at::Tensor var(at::DimnameList dim, const c10::optional & correction=c10::nullopt, bool keepdim=false) const; + at::Tensor view_as(const at::Tensor & other) const; + at::Tensor where(const at::Tensor & condition, const at::Tensor & other) const; + at::Tensor where(const at::Tensor & condition, const at::Scalar & other) const; + at::Tensor norm(const c10::optional & p, at::ScalarType dtype) const; + at::Tensor norm(const at::Scalar & p=2) const; + at::Tensor norm(const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) const; + at::Tensor norm(const c10::optional & p, at::IntArrayRef dim, bool keepdim=false) const; + at::Tensor norm(const c10::optional & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) const; + at::Tensor norm(const c10::optional & p, at::DimnameList dim, bool keepdim=false) const; + ::std::tuple frexp() const; + at::Tensor clone(c10::optional memory_format=c10::nullopt) const; + at::Tensor positive() const; + const at::Tensor & resize_as_(const at::Tensor & the_template, c10::optional memory_format=c10::nullopt) const; + const at::Tensor & resize_as_sparse_(const at::Tensor & the_template) const; + at::Tensor & zero_() const; + at::Tensor sub(const at::Tensor & other, 
const at::Scalar & alpha=1) const; + at::Tensor & sub_(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor sub(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor & sub_(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor subtract(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor & subtract_(const at::Tensor & other, const at::Scalar & alpha=1) const; + at::Tensor subtract(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor & subtract_(const at::Scalar & other, const at::Scalar & alpha=1) const; + at::Tensor heaviside(const at::Tensor & values) const; + at::Tensor & heaviside_(const at::Tensor & values) const; + at::Tensor addmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor & addmm_(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor _addmm_activation(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false) const; + const at::Tensor & sparse_resize_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const; + const at::Tensor & sparse_resize_and_clear_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const; + at::Tensor sparse_mask(const at::Tensor & mask) const; + at::Tensor _sparse_mask_projection(const at::Tensor & mask, bool accumulate_matches=false) const; + at::Tensor to_dense(c10::optional dtype=c10::nullopt, c10::optional masked_grad=c10::nullopt) const; + at::Tensor _to_dense(c10::optional dtype=c10::nullopt, c10::optional masked_grad=c10::nullopt) const; + int64_t sparse_dim() const; + int64_t _dimI() const; + int64_t dense_dim() const; + int64_t _dimV() const; + int64_t _nnz() const; + at::Tensor coalesce() const; + bool is_coalesced() const; + at::Tensor _indices() const; + at::Tensor _values() const; + at::Tensor & _coalesced_(bool coalesced) const; + at::Tensor indices() const; + at::Tensor values() const; + at::Tensor crow_indices() const; + at::Tensor col_indices() const; + at::Tensor ccol_indices() const; + at::Tensor row_indices() const; + ::std::vector unbind(int64_t dim=0) const; + ::std::vector unbind(at::Dimname dim) const; + at::Tensor to_sparse(int64_t sparse_dim) const; + at::Tensor _to_sparse(int64_t sparse_dim) const; + at::Tensor to_sparse(c10::optional layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional dense_dim=c10::nullopt) const; + at::Tensor _to_sparse(c10::optional layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional dense_dim=c10::nullopt) const; + at::Tensor to_sparse_csr(c10::optional dense_dim=c10::nullopt) const; + at::Tensor _to_sparse_csr(c10::optional dense_dim=c10::nullopt) const; + at::Tensor to_sparse_csc(c10::optional dense_dim=c10::nullopt) const; + at::Tensor _to_sparse_csc(c10::optional dense_dim=c10::nullopt) const; + at::Tensor to_sparse_bsr(at::IntArrayRef blocksize, c10::optional dense_dim=c10::nullopt) const; + at::Tensor _to_sparse_bsr(at::IntArrayRef blocksize, c10::optional dense_dim=c10::nullopt) const; + at::Tensor to_sparse_bsc(at::IntArrayRef blocksize, c10::optional dense_dim=c10::nullopt) const; + at::Tensor _to_sparse_bsc(at::IntArrayRef blocksize, c10::optional dense_dim=c10::nullopt) const; + at::Tensor to_mkldnn(c10::optional dtype=c10::nullopt) const; + at::Tensor dequantize() const; + double q_scale() const; + 
int64_t q_zero_point() const; + at::Tensor q_per_channel_scales() const; + at::Tensor q_per_channel_zero_points() const; + int64_t q_per_channel_axis() const; + at::Tensor int_repr() const; + at::QScheme qscheme() const; + at::Tensor _autocast_to_reduced_precision(bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) const; + at::Tensor _autocast_to_full_precision(bool cuda_enabled, bool cpu_enabled) const; + at::Tensor to(at::TensorOptions options={}, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + at::Tensor to(c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, bool copy, c10::optional memory_format) const; + at::Tensor to(at::Device device, at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + at::Tensor to(at::ScalarType dtype, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + at::Tensor to(const at::Tensor & other, bool non_blocking=false, bool copy=false, c10::optional memory_format=c10::nullopt) const; + at::Scalar item() const; + at::Tensor & set_(at::Storage source) const; + at::Tensor & set_(at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) const; + at::Tensor & set__symint(at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) const; + at::Tensor & set_(const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}) const; + at::Tensor & set__symint(const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}) const; + at::Tensor & set_(const at::Tensor & source) const; + at::Tensor & set_() const; + bool is_set_to(const at::Tensor & tensor) const; + at::Tensor & masked_fill_(const at::Tensor & mask, const at::Scalar & value) const; + at::Tensor masked_fill(const at::Tensor & mask, const at::Scalar & value) const; + at::Tensor & masked_fill_(const at::Tensor & mask, const at::Tensor & value) const; + at::Tensor masked_fill(const at::Tensor & mask, const at::Tensor & value) const; + at::Tensor & masked_scatter_(const at::Tensor & mask, const at::Tensor & source) const; + at::Tensor masked_scatter(const at::Tensor & mask, const at::Tensor & source) const; + at::Tensor view(at::IntArrayRef size) const; + at::Tensor view_symint(c10::SymIntArrayRef size) const; + at::Tensor view(at::ScalarType dtype) const; + at::Tensor & put_(const at::Tensor & index, const at::Tensor & source, bool accumulate=false) const; + at::Tensor put(const at::Tensor & index, const at::Tensor & source, bool accumulate=false) const; + at::Tensor & index_add_(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) const; + at::Tensor index_add(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) const; + at::Tensor index_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1) const; + at::Tensor & index_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) const; + at::Tensor index_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true) const; + at::Tensor & index_fill_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const; 
+ at::Tensor index_fill(int64_t dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor & index_fill_(int64_t dim, const at::Tensor & index, const at::Tensor & value) const; + at::Tensor index_fill(int64_t dim, const at::Tensor & index, const at::Tensor & value) const; + at::Tensor & index_fill_(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor & index_fill_(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const; + at::Tensor index_fill(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor index_fill(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const; + at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const; + at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const; + at::Tensor scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const; + at::Tensor & scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const; + at::Tensor scatter(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor scatter(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const; + at::Tensor scatter_add(int64_t dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor & scatter_add_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor scatter_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const; + at::Tensor scatter_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) const; + at::Tensor & scatter_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self=true) const; + at::Tensor & eq_(const at::Scalar & other) const; + at::Tensor & eq_(const at::Tensor & other) const; + at::Tensor bitwise_and(const at::Scalar & other) const; + at::Tensor bitwise_and(const at::Tensor & other) const; + at::Tensor & bitwise_and_(const at::Scalar & other) const; + at::Tensor & bitwise_and_(const at::Tensor & other) const; + at::Tensor __and__(const at::Scalar & other) const; + at::Tensor __and__(const at::Tensor & other) const; + at::Tensor & __iand__(const at::Scalar & other) const; + at::Tensor & __iand__(const at::Tensor & other) const; + at::Tensor bitwise_or(const at::Scalar & other) const; + at::Tensor bitwise_or(const at::Tensor & other) const; + at::Tensor & bitwise_or_(const at::Scalar & other) const; + at::Tensor & bitwise_or_(const at::Tensor & other) const; + at::Tensor __or__(const at::Scalar & other) const; + at::Tensor __or__(const at::Tensor & other) const; + at::Tensor & __ior__(const at::Scalar & other) const; + at::Tensor & __ior__(const at::Tensor & other) const; + at::Tensor bitwise_xor(const at::Scalar & other) const; + at::Tensor bitwise_xor(const at::Tensor & other) const; + at::Tensor & bitwise_xor_(const at::Scalar & other) const; + at::Tensor & bitwise_xor_(const 
at::Tensor & other) const; + at::Tensor __xor__(const at::Scalar & other) const; + at::Tensor __xor__(const at::Tensor & other) const; + at::Tensor & __ixor__(const at::Scalar & other) const; + at::Tensor & __ixor__(const at::Tensor & other) const; + at::Tensor __lshift__(const at::Scalar & other) const; + at::Tensor __lshift__(const at::Tensor & other) const; + at::Tensor & __ilshift__(const at::Scalar & other) const; + at::Tensor & __ilshift__(const at::Tensor & other) const; + at::Tensor bitwise_left_shift(const at::Tensor & other) const; + at::Tensor & bitwise_left_shift_(const at::Tensor & other) const; + at::Tensor bitwise_left_shift(const at::Scalar & other) const; + at::Tensor & bitwise_left_shift_(const at::Scalar & other) const; + at::Tensor __rshift__(const at::Scalar & other) const; + at::Tensor __rshift__(const at::Tensor & other) const; + at::Tensor & __irshift__(const at::Scalar & other) const; + at::Tensor & __irshift__(const at::Tensor & other) const; + at::Tensor bitwise_right_shift(const at::Tensor & other) const; + at::Tensor & bitwise_right_shift_(const at::Tensor & other) const; + at::Tensor bitwise_right_shift(const at::Scalar & other) const; + at::Tensor & bitwise_right_shift_(const at::Scalar & other) const; + at::Tensor & tril_(int64_t diagonal=0) const; + at::Tensor & triu_(int64_t diagonal=0) const; + at::Tensor & digamma_() const; + at::Tensor & lerp_(const at::Tensor & end, const at::Scalar & weight) const; + at::Tensor & lerp_(const at::Tensor & end, const at::Tensor & weight) const; + at::Tensor & addbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor addbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) const; + at::Tensor & random_(int64_t from, c10::optional to, c10::optional generator=c10::nullopt) const; + at::Tensor & random_(int64_t to, c10::optional generator=c10::nullopt) const; + at::Tensor & random_(c10::optional generator=c10::nullopt) const; + at::Tensor & uniform_(double from=0, double to=1, c10::optional generator=c10::nullopt) const; + at::Tensor & cauchy_(double median=0, double sigma=1, c10::optional generator=c10::nullopt) const; + at::Tensor & log_normal_(double mean=1, double std=2, c10::optional generator=c10::nullopt) const; + at::Tensor & exponential_(double lambd=1, c10::optional generator=c10::nullopt) const; + at::Tensor & geometric_(double p, c10::optional generator=c10::nullopt) const; + at::Tensor diag(int64_t diagonal=0) const; + at::Tensor cross(const at::Tensor & other, c10::optional dim=c10::nullopt) const; + at::Tensor triu(int64_t diagonal=0) const; + at::Tensor tril(int64_t diagonal=0) const; + at::Tensor trace() const; + at::Tensor ne(const at::Scalar & other) const; + at::Tensor ne(const at::Tensor & other) const; + at::Tensor & ne_(const at::Scalar & other) const; + at::Tensor & ne_(const at::Tensor & other) const; + at::Tensor not_equal(const at::Scalar & other) const; + at::Tensor not_equal(const at::Tensor & other) const; + at::Tensor & not_equal_(const at::Scalar & other) const; + at::Tensor & not_equal_(const at::Tensor & other) const; + at::Tensor eq(const at::Scalar & other) const; + at::Tensor eq(const at::Tensor & other) const; + at::Tensor ge(const at::Scalar & other) const; + at::Tensor ge(const at::Tensor & other) const; + at::Tensor & ge_(const at::Scalar & other) const; + at::Tensor & ge_(const at::Tensor & other) const; + at::Tensor greater_equal(const at::Scalar 
& other) const; + at::Tensor greater_equal(const at::Tensor & other) const; + at::Tensor & greater_equal_(const at::Scalar & other) const; + at::Tensor & greater_equal_(const at::Tensor & other) const; + at::Tensor le(const at::Scalar & other) const; + at::Tensor le(const at::Tensor & other) const; + at::Tensor & le_(const at::Scalar & other) const; + at::Tensor & le_(const at::Tensor & other) const; + at::Tensor less_equal(const at::Scalar & other) const; + at::Tensor less_equal(const at::Tensor & other) const; + at::Tensor & less_equal_(const at::Scalar & other) const; + at::Tensor & less_equal_(const at::Tensor & other) const; + at::Tensor gt(const at::Scalar & other) const; + at::Tensor gt(const at::Tensor & other) const; + at::Tensor & gt_(const at::Scalar & other) const; + at::Tensor & gt_(const at::Tensor & other) const; + at::Tensor greater(const at::Scalar & other) const; + at::Tensor greater(const at::Tensor & other) const; + at::Tensor & greater_(const at::Scalar & other) const; + at::Tensor & greater_(const at::Tensor & other) const; + at::Tensor lt(const at::Scalar & other) const; + at::Tensor lt(const at::Tensor & other) const; + at::Tensor & lt_(const at::Scalar & other) const; + at::Tensor & lt_(const at::Tensor & other) const; + at::Tensor less(const at::Scalar & other) const; + at::Tensor less(const at::Tensor & other) const; + at::Tensor & less_(const at::Scalar & other) const; + at::Tensor & less_(const at::Tensor & other) const; + at::Tensor take(const at::Tensor & index) const; + at::Tensor take_along_dim(const at::Tensor & indices, c10::optional dim=c10::nullopt) const; + at::Tensor index_select(int64_t dim, const at::Tensor & index) const; + at::Tensor index_select(at::Dimname dim, const at::Tensor & index) const; + at::Tensor masked_select(const at::Tensor & mask) const; + at::Tensor nonzero() const; + at::Tensor nonzero_static(int64_t size, int64_t fill_value=-1) const; + ::std::vector nonzero_numpy() const; + at::Tensor argwhere() const; + at::Tensor gather(int64_t dim, const at::Tensor & index, bool sparse_grad=false) const; + at::Tensor gather(at::Dimname dim, const at::Tensor & index, bool sparse_grad=false) const; + at::Tensor addcmul(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const; + at::Tensor & addcmul_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const; + at::Tensor addcdiv(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const; + at::Tensor & addcdiv_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) const; + ::std::tuple triangular_solve(const at::Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) const; + ::std::tuple svd(bool some=true, bool compute_uv=true) const; + at::Tensor swapaxes(int64_t axis0, int64_t axis1) const; + at::Tensor & swapaxes_(int64_t axis0, int64_t axis1) const; + at::Tensor swapdims(int64_t dim0, int64_t dim1) const; + at::Tensor & swapdims_(int64_t dim0, int64_t dim1) const; + at::Tensor cholesky(bool upper=false) const; + at::Tensor cholesky_solve(const at::Tensor & input2, bool upper=false) const; + at::Tensor cholesky_inverse(bool upper=false) const; + ::std::tuple qr(bool some=true) const; + ::std::tuple geqrf() const; + at::Tensor orgqr(const at::Tensor & input2) const; + at::Tensor ormqr(const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false) const; + at::Tensor lu_solve(const at::Tensor & LU_data, const 
at::Tensor & LU_pivots) const; + at::Tensor multinomial(int64_t num_samples, bool replacement=false, c10::optional generator=c10::nullopt) const; + at::Tensor & lgamma_() const; + at::Tensor lgamma() const; + at::Tensor digamma() const; + at::Tensor polygamma(int64_t n) const; + at::Tensor & polygamma_(int64_t n) const; + at::Tensor erfinv() const; + at::Tensor & erfinv_() const; + at::Tensor i0() const; + at::Tensor & i0_() const; + at::Tensor sign() const; + at::Tensor & sign_() const; + at::Tensor signbit() const; + at::Tensor dist(const at::Tensor & other, const at::Scalar & p=2) const; + at::Tensor & atan2_(const at::Tensor & other) const; + at::Tensor atan2(const at::Tensor & other) const; + at::Tensor arctan2(const at::Tensor & other) const; + at::Tensor & arctan2_(const at::Tensor & other) const; + at::Tensor lerp(const at::Tensor & end, const at::Scalar & weight) const; + at::Tensor lerp(const at::Tensor & end, const at::Tensor & weight) const; + at::Tensor histc(int64_t bins=100, const at::Scalar & min=0, const at::Scalar & max=0) const; + ::std::tuple histogram(const at::Tensor & bins, const c10::optional & weight={}, bool density=false) const; + ::std::tuple histogram(int64_t bins=100, c10::optional> range=c10::nullopt, const c10::optional & weight={}, bool density=false) const; + at::Tensor fmod(const at::Scalar & other) const; + at::Tensor & fmod_(const at::Scalar & other) const; + at::Tensor fmod(const at::Tensor & other) const; + at::Tensor & fmod_(const at::Tensor & other) const; + at::Tensor hypot(const at::Tensor & other) const; + at::Tensor & hypot_(const at::Tensor & other) const; + at::Tensor igamma(const at::Tensor & other) const; + at::Tensor & igamma_(const at::Tensor & other) const; + at::Tensor igammac(const at::Tensor & other) const; + at::Tensor & igammac_(const at::Tensor & other) const; + at::Tensor nextafter(const at::Tensor & other) const; + at::Tensor & nextafter_(const at::Tensor & other) const; + at::Tensor remainder(const at::Scalar & other) const; + at::Tensor & remainder_(const at::Scalar & other) const; + at::Tensor remainder(const at::Tensor & other) const; + at::Tensor & remainder_(const at::Tensor & other) const; + at::Tensor min() const; + at::Tensor fmin(const at::Tensor & other) const; + at::Tensor max() const; + at::Tensor fmax(const at::Tensor & other) const; + at::Tensor maximum(const at::Tensor & other) const; + at::Tensor max(const at::Tensor & other) const; + at::Tensor minimum(const at::Tensor & other) const; + at::Tensor min(const at::Tensor & other) const; + at::Tensor quantile(const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const; + at::Tensor quantile(double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const; + at::Tensor nanquantile(const at::Tensor & q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const; + at::Tensor nanquantile(double q, c10::optional dim=c10::nullopt, bool keepdim=false, c10::string_view interpolation="linear") const; + ::std::tuple sort(int64_t dim=-1, bool descending=false) const; + ::std::tuple sort(c10::optional stable, int64_t dim=-1, bool descending=false) const; + ::std::tuple sort(at::Dimname dim, bool descending=false) const; + ::std::tuple sort(c10::optional stable, at::Dimname dim, bool descending=false) const; + at::Tensor msort() const; + at::Tensor argsort(int64_t dim=-1, bool descending=false) const; + at::Tensor argsort(bool stable, 
int64_t dim=-1, bool descending=false) const; + at::Tensor argsort(at::Dimname dim, bool descending=false) const; + ::std::tuple topk(int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) const; + ::std::tuple topk_symint(c10::SymInt k, int64_t dim=-1, bool largest=true, bool sorted=true) const; + at::Tensor all() const; + at::Tensor any() const; + at::Tensor renorm(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const; + at::Tensor & renorm_(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const; + at::Tensor unfold(int64_t dimension, int64_t size, int64_t step) const; + bool equal(const at::Tensor & other) const; + at::Tensor pow(const at::Tensor & exponent) const; + at::Tensor pow(const at::Scalar & exponent) const; + at::Tensor & pow_(const at::Scalar & exponent) const; + at::Tensor & pow_(const at::Tensor & exponent) const; + at::Tensor float_power(const at::Tensor & exponent) const; + at::Tensor float_power(const at::Scalar & exponent) const; + at::Tensor & float_power_(const at::Scalar & exponent) const; + at::Tensor & float_power_(const at::Tensor & exponent) const; + at::Tensor & normal_(double mean=0, double std=1, c10::optional generator=c10::nullopt) const; + at::Tensor alias() const; + at::Tensor isfinite() const; + at::Tensor isinf() const; + void record_stream(at::Stream s) const; + at::Tensor isposinf() const; + at::Tensor isneginf() const; + at::Tensor det() const; + ::std::tuple slogdet() const; + at::Tensor logdet() const; + at::Tensor inverse() const; + at::Tensor inner(const at::Tensor & other) const; + at::Tensor outer(const at::Tensor & vec2) const; + at::Tensor ger(const at::Tensor & vec2) const; + at::Tensor to_padded_tensor(double padding, at::OptionalIntArrayRef output_size=c10::nullopt) const; + at::Tensor to_padded_tensor_symint(double padding, at::OptionalSymIntArrayRef output_size=c10::nullopt) const; + + // Special C++ only overloads for std()-like functions (See gh-40287) + // These are needed because int -> bool conversion takes precedence over int -> IntArrayRef + // So, for example std(0) would select the std(unbiased=False) overload + + Tensor var(int dim) const { + return var(IntArrayRef{dim}); + } + + Tensor std(int dim) const { + return std(IntArrayRef{dim}); + } + + // We changed .dtype() to return a TypeMeta in #12766. Ideally, we want the + // at::kDouble and its friends to be TypeMeta's, but that hasn't happened yet. + // Before that change, we make this method to maintain BC for C++ usage like + // `x.to(y.dtype)`. + // TODO: remove following two after at::kDouble and its friends are TypeMeta's. + inline Tensor to(caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const { + return this->to(/*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy); + } + inline Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const { + return this->to(device, /*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy); + } + + template + decltype(auto) m(F func, Args&&... params) const { + return func(*this, std::forward(params)...); + } + + /// NOTE: This is similar to the legacy `.data()` function on `Variable`, and is intended + /// to be used from functions that need to access the `Variable`'s equivalent `Tensor` + /// (i.e. `Tensor` that shares the same storage and tensor metadata with the `Variable`). 
+ /// + /// One notable difference with the legacy `.data()` function is that changes to the + /// returned `Tensor`'s tensor metadata (e.g. sizes / strides / storage / storage_offset) + /// will not update the original `Variable`, due to the fact that this function + /// shallow-copies the `Variable`'s underlying TensorImpl. + at::Tensor tensor_data() const { + return TensorBase::tensor_data(); + } + + /// NOTE: `var.variable_data()` in C++ has the same semantics as `tensor.data` + /// in Python, which create a new `Variable` that shares the same storage and + /// tensor metadata with the original `Variable`, but with a completely new + /// autograd history. + /// + /// NOTE: If we change the tensor metadata (e.g. sizes / strides / + /// storage / storage_offset) of a variable created from `var.variable_data()`, those + /// changes will not update the original variable `var`. In `.variable_data()`, we set + /// `allow_tensor_metadata_change_` to false to make such changes explicitly illegal, + /// in order to prevent users from changing metadata of `var.variable_data()` + /// and expecting the original variable `var` to also be updated. + at::Tensor variable_data() const { + return TensorBase::variable_data(); + } + + // Hooks + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + template + using hook_return_void_t = std::enable_if_t>::value, unsigned>; + template + using hook_return_var_t = std::enable_if_t, Tensor>::value, unsigned>; + + /// Registers a backward hook. + /// + /// The hook will be called every time a gradient with respect to the Tensor is computed. + /// The hook should have one of the following signature: + /// ``` + /// hook(Tensor grad) -> Tensor + /// ``` + /// ``` + /// hook(Tensor grad) -> void + /// ``` + /// The hook should not modify its argument, but it can optionally return a new gradient + /// which will be used in place of `grad`. + /// + /// This function returns the index of the hook in the list which can be used to remove hook. + /// + /// Example: + /// @code + /// auto v = torch::tensor({0., 0., 0.}, torch::requires_grad()); + /// auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient + /// v.backward(torch::tensor({1., 2., 3.})); + /// // This prints: + /// // ``` + /// // 2 + /// // 4 + /// // 6 + /// // [ CPUFloatType{3} ] + /// // ``` + /// std::cout << v.grad() << std::endl; + /// v.remove_hook(h); // removes the hook + /// @endcode + template + hook_return_void_t register_hook(T&& hook) const; + template + hook_return_var_t register_hook(T&& hook) const; + + // Variable methods + //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Tensor data() const { + return TensorBase::data(); + } + + void _backward(TensorList inputs, const c10::optional& gradient, c10::optional keep_graph, bool create_graph) const; + + const Tensor& requires_grad_(bool _requires_grad=true) const { + TensorBase::requires_grad_(_requires_grad); + return *this; + } +}; + +namespace detail { +// Helper creator for Tensor class which doesn't requires the users to pass +// in an intrusive_ptr instead it just converts the argument passed to +// requested intrusive_ptr type. +template +Tensor make_tensor(Args&&... args) { + return Tensor(c10::make_intrusive(std::forward(args)...)); +} + +} // namespace detail + +} // namespace at + + +namespace at { + +// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? 
retain_graph=None, bool create_graph=False) -> () +inline void Tensor::__dispatch__backward(at::TensorList inputs, const c10::optional & gradient, c10::optional retain_graph, bool create_graph) const { + return at::_ops::_backward::call(const_cast(*this), inputs, gradient, retain_graph, create_graph); +} + +// aten::set_data(Tensor(a!) self, Tensor new_data) -> () +inline void Tensor::__dispatch_set_data(const at::Tensor & new_data) const { + return at::_ops::set_data::call(const_cast(*this), new_data); +} + +// aten::data(Tensor self) -> Tensor +inline at::Tensor Tensor::__dispatch_data() const { + return at::_ops::data::call(const_cast(*this)); +} + +// aten::is_leaf(Tensor self) -> bool +inline bool Tensor::__dispatch_is_leaf() const { + return at::_ops::is_leaf::call(const_cast(*this)); +} + +// aten::output_nr(Tensor self) -> int +inline int64_t Tensor::__dispatch_output_nr() const { + return at::_ops::output_nr::call(const_cast(*this)); +} + +// aten::_version(Tensor self) -> int +inline int64_t Tensor::__dispatch__version() const { + return at::_ops::_version::call(const_cast(*this)); +} + +// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!) +inline at::Tensor & Tensor::__dispatch_requires_grad_(bool requires_grad) const { + return at::_ops::requires_grad_::call(const_cast(*this), requires_grad); +} + +// aten::retain_grad(Tensor(a!) self) -> () +inline void Tensor::__dispatch_retain_grad() const { + return at::_ops::retain_grad::call(const_cast(*this)); +} + +// aten::retains_grad(Tensor self) -> bool +inline bool Tensor::__dispatch_retains_grad() const { + return at::_ops::retains_grad::call(const_cast(*this)); +} + +// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a) +inline at::Tensor Tensor::_fw_primal(int64_t level) const { + return at::_ops::_fw_primal::call(const_cast(*this), level); +} + +// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!) +inline at::Tensor & Tensor::rename_(c10::optional names) const { + return at::_ops::rename_::call(const_cast(*this), names); +} + +// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a) +inline at::Tensor Tensor::rename(c10::optional names) const { + return at::_ops::rename::call(const_cast(*this), names); +} + +// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a) +inline at::Tensor Tensor::align_to(at::DimnameList names) const { + return at::_ops::align_to::call(const_cast(*this), names); +} + +// aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a) +inline at::Tensor Tensor::align_to(at::DimnameList order, int64_t ellipsis_idx) const { + return at::_ops::align_to_ellipsis_idx::call(const_cast(*this), order, ellipsis_idx); +} + +// aten::align_as(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::align_as(const at::Tensor & other) const { + return at::_ops::align_as::call(const_cast(*this), other); +} + +// aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a) +inline at::Tensor Tensor::refine_names(at::DimnameList names) const { + return at::_ops::refine_names::call(const_cast(*this), names); +} + +// aten::abs(Tensor self) -> Tensor +inline at::Tensor Tensor::abs() const { + return at::_ops::abs::call(const_cast(*this)); +} + +// aten::abs_(Tensor(a!) self) -> Tensor(a!) 
+inline at::Tensor & Tensor::abs_() const { + return at::_ops::abs_::call(const_cast(*this)); +} + +// aten::absolute(Tensor self) -> Tensor +inline at::Tensor Tensor::absolute() const { + return at::_ops::absolute::call(const_cast(*this)); +} + +// aten::absolute_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::absolute_() const { + return at::_ops::absolute_::call(const_cast(*this)); +} + +// aten::angle(Tensor self) -> Tensor +inline at::Tensor Tensor::angle() const { + return at::_ops::angle::call(const_cast(*this)); +} + +// aten::sgn(Tensor self) -> Tensor +inline at::Tensor Tensor::sgn() const { + return at::_ops::sgn::call(const_cast(*this)); +} + +// aten::sgn_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::sgn_() const { + return at::_ops::sgn_::call(const_cast(*this)); +} + +// aten::chalf(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor Tensor::chalf(c10::optional memory_format) const { + return at::_ops::chalf::call(const_cast(*this), memory_format); +} + +// aten::_conj(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::_conj() const { + return at::_ops::_conj::call(const_cast(*this)); +} + +// aten::conj(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::__dispatch_conj() const { + return at::_ops::conj::call(const_cast(*this)); +} + +// aten::_conj_physical(Tensor self) -> Tensor +inline at::Tensor Tensor::_conj_physical() const { + return at::_ops::_conj_physical::call(const_cast(*this)); +} + +// aten::conj_physical(Tensor self) -> Tensor +inline at::Tensor Tensor::conj_physical() const { + return at::_ops::conj_physical::call(const_cast(*this)); +} + +// aten::conj_physical_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::conj_physical_() const { + return at::_ops::conj_physical_::call(const_cast(*this)); +} + +// aten::resolve_conj(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::resolve_conj() const { + return at::_ops::resolve_conj::call(const_cast(*this)); +} + +// aten::resolve_neg(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::resolve_neg() const { + return at::_ops::resolve_neg::call(const_cast(*this)); +} + +// aten::_neg_view(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::_neg_view() const { + return at::_ops::_neg_view::call(const_cast(*this)); +} + +// aten::acos(Tensor self) -> Tensor +inline at::Tensor Tensor::acos() const { + return at::_ops::acos::call(const_cast(*this)); +} + +// aten::acos_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::acos_() const { + return at::_ops::acos_::call(const_cast(*this)); +} + +// aten::arccos(Tensor self) -> Tensor +inline at::Tensor Tensor::arccos() const { + return at::_ops::arccos::call(const_cast(*this)); +} + +// aten::arccos_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arccos_() const { + return at::_ops::arccos_::call(const_cast(*this)); +} + +// aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::add(const at::Tensor & other, const at::Scalar & alpha) const { + return at::_ops::add_Tensor::call(const_cast(*this), other, alpha); +} + +// aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) 
+inline at::Tensor & Tensor::add_(const at::Tensor & other, const at::Scalar & alpha) const { + return at::_ops::add__Tensor::call(const_cast(*this), other, alpha); +} + +// aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::add(const at::Scalar & other, const at::Scalar & alpha) const { + return at::_ops::add_Scalar::call(const_cast(*this), other, alpha); +} + +// aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::add_(const at::Scalar & other, const at::Scalar & alpha) const { + return at::_ops::add__Scalar::call(const_cast(*this), other, alpha); +} + +// aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::addmv(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addmv::call(const_cast(*this), mat, vec, beta, alpha); +} + +// aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::addmv_(const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addmv_::call(const_cast(*this), mat, vec, beta, alpha); +} + +// aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::addr(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addr::call(const_cast(*this), vec1, vec2, beta, alpha); +} + +// aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::addr_(const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addr_::call(const_cast(*this), vec1, vec2, beta, alpha); +} + +// aten::_is_all_true(Tensor self) -> Tensor +inline at::Tensor Tensor::_is_all_true() const { + return at::_ops::_is_all_true::call(const_cast(*this)); +} + +// aten::_is_any_true(Tensor self) -> Tensor +inline at::Tensor Tensor::_is_any_true() const { + return at::_ops::_is_any_true::call(const_cast(*this)); +} + +// aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::all(int64_t dim, bool keepdim) const { + return at::_ops::all_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::all(at::OptionalIntArrayRef dim, bool keepdim) const { + return at::_ops::all_dims::call(const_cast(*this), dim, keepdim); +} + +// aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::all(at::Dimname dim, bool keepdim) const { + return at::_ops::all_dimname::call(const_cast(*this), dim, keepdim); +} + +// aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool +inline bool Tensor::allclose(const at::Tensor & other, double rtol, double atol, bool equal_nan) const { + return at::_ops::allclose::call(const_cast(*this), other, rtol, atol, equal_nan); +} + +// aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::any(int64_t dim, bool keepdim) const { + return at::_ops::any_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::any.dims(Tensor self, int[]? 
dim=None, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::any(at::OptionalIntArrayRef dim, bool keepdim) const { + return at::_ops::any_dims::call(const_cast(*this), dim, keepdim); +} + +// aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::any(at::Dimname dim, bool keepdim) const { + return at::_ops::any_dimname::call(const_cast(*this), dim, keepdim); +} + +// aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::argmax(c10::optional dim, bool keepdim) const { + return at::_ops::argmax::call(const_cast(*this), dim, keepdim); +} + +// aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::argmin(c10::optional dim, bool keepdim) const { + return at::_ops::argmin::call(const_cast(*this), dim, keepdim); +} + +// aten::acosh(Tensor self) -> Tensor +inline at::Tensor Tensor::acosh() const { + return at::_ops::acosh::call(const_cast(*this)); +} + +// aten::acosh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::acosh_() const { + return at::_ops::acosh_::call(const_cast(*this)); +} + +// aten::arccosh(Tensor self) -> Tensor +inline at::Tensor Tensor::arccosh() const { + return at::_ops::arccosh::call(const_cast(*this)); +} + +// aten::arccosh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arccosh_() const { + return at::_ops::arccosh_::call(const_cast(*this)); +} + +// aten::asinh(Tensor self) -> Tensor +inline at::Tensor Tensor::asinh() const { + return at::_ops::asinh::call(const_cast(*this)); +} + +// aten::asinh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::asinh_() const { + return at::_ops::asinh_::call(const_cast(*this)); +} + +// aten::arcsinh(Tensor self) -> Tensor +inline at::Tensor Tensor::arcsinh() const { + return at::_ops::arcsinh::call(const_cast(*this)); +} + +// aten::arcsinh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arcsinh_() const { + return at::_ops::arcsinh_::call(const_cast(*this)); +} + +// aten::atanh(Tensor self) -> Tensor +inline at::Tensor Tensor::atanh() const { + return at::_ops::atanh::call(const_cast(*this)); +} + +// aten::atanh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::atanh_() const { + return at::_ops::atanh_::call(const_cast(*this)); +} + +// aten::arctanh(Tensor self) -> Tensor +inline at::Tensor Tensor::arctanh() const { + return at::_ops::arctanh::call(const_cast(*this)); +} + +// aten::arctanh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arctanh_() const { + return at::_ops::arctanh_::call(const_cast(*this)); +} + +// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +inline at::Tensor Tensor::as_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset) const { + return at::_ops::as_strided::call(const_cast(*this), c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); +} + +// aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) +inline at::Tensor Tensor::as_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset) const { + return at::_ops::as_strided::call(const_cast(*this), size, stride, storage_offset); +} + +// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) 
+inline const at::Tensor & Tensor::as_strided_(at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset) const { + return at::_ops::as_strided_::call(const_cast(*this), c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); +} + +// aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!) +inline const at::Tensor & Tensor::as_strided__symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset) const { + return at::_ops::as_strided_::call(const_cast(*this), size, stride, storage_offset); +} + +// aten::asin(Tensor self) -> Tensor +inline at::Tensor Tensor::asin() const { + return at::_ops::asin::call(const_cast(*this)); +} + +// aten::asin_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::asin_() const { + return at::_ops::asin_::call(const_cast(*this)); +} + +// aten::arcsin(Tensor self) -> Tensor +inline at::Tensor Tensor::arcsin() const { + return at::_ops::arcsin::call(const_cast(*this)); +} + +// aten::arcsin_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arcsin_() const { + return at::_ops::arcsin_::call(const_cast(*this)); +} + +// aten::atan(Tensor self) -> Tensor +inline at::Tensor Tensor::atan() const { + return at::_ops::atan::call(const_cast(*this)); +} + +// aten::atan_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::atan_() const { + return at::_ops::atan_::call(const_cast(*this)); +} + +// aten::arctan(Tensor self) -> Tensor +inline at::Tensor Tensor::arctan() const { + return at::_ops::arctan::call(const_cast(*this)); +} + +// aten::arctan_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::arctan_() const { + return at::_ops::arctan_::call(const_cast(*this)); +} + +// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::baddbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::baddbmm::call(const_cast(*this), batch1, batch2, beta, alpha); +} + +// aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::baddbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::baddbmm_::call(const_cast(*this), batch1, batch2, beta, alpha); +} + +// aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor +inline at::Tensor Tensor::bernoulli(c10::optional generator) const { + return at::_ops::bernoulli::call(const_cast(*this), generator); +} + +// aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::bernoulli_(const at::Tensor & p, c10::optional generator) const { + return at::_ops::bernoulli__Tensor::call(const_cast(*this), p, generator); +} + +// aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::bernoulli_(double p, c10::optional generator) const { + return at::_ops::bernoulli__float::call(const_cast(*this), p, generator); +} + +// aten::bernoulli.p(Tensor self, float p, *, Generator? 
generator=None) -> Tensor +inline at::Tensor Tensor::bernoulli(double p, c10::optional generator) const { + return at::_ops::bernoulli_p::call(const_cast(*this), p, generator); +} + +// aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor +inline at::Tensor Tensor::bincount(const c10::optional & weights, int64_t minlength) const { + return at::_ops::bincount::call(const_cast(*this), weights, minlength); +} + +// aten::bitwise_not(Tensor self) -> Tensor +inline at::Tensor Tensor::bitwise_not() const { + return at::_ops::bitwise_not::call(const_cast(*this)); +} + +// aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_not_() const { + return at::_ops::bitwise_not_::call(const_cast(*this)); +} + +// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::copysign(const at::Tensor & other) const { + return at::_ops::copysign_Tensor::call(const_cast(*this), other); +} + +// aten::copysign_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::copysign_(const at::Tensor & other) const { + return at::_ops::copysign__Tensor::call(const_cast(*this), other); +} + +// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::copysign(const at::Scalar & other) const { + return at::_ops::copysign_Scalar::call(const_cast(*this), other); +} + +// aten::copysign_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::copysign_(const at::Scalar & other) const { + return at::_ops::copysign__Scalar::call(const_cast(*this), other); +} + +// aten::logical_not(Tensor self) -> Tensor +inline at::Tensor Tensor::logical_not() const { + return at::_ops::logical_not::call(const_cast(*this)); +} + +// aten::logical_not_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::logical_not_() const { + return at::_ops::logical_not_::call(const_cast(*this)); +} + +// aten::logical_xor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::logical_xor(const at::Tensor & other) const { + return at::_ops::logical_xor::call(const_cast(*this), other); +} + +// aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::logical_xor_(const at::Tensor & other) const { + return at::_ops::logical_xor_::call(const_cast(*this), other); +} + +// aten::logical_and(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::logical_and(const at::Tensor & other) const { + return at::_ops::logical_and::call(const_cast(*this), other); +} + +// aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::logical_and_(const at::Tensor & other) const { + return at::_ops::logical_and_::call(const_cast(*this), other); +} + +// aten::logical_or(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::logical_or(const at::Tensor & other) const { + return at::_ops::logical_or::call(const_cast(*this), other); +} + +// aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+inline at::Tensor & Tensor::logical_or_(const at::Tensor & other) const { + return at::_ops::logical_or_::call(const_cast(*this), other); +} + +// aten::bmm(Tensor self, Tensor mat2) -> Tensor +inline at::Tensor Tensor::bmm(const at::Tensor & mat2) const { + return at::_ops::bmm::call(const_cast(*this), mat2); +} + +// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) +inline at::Tensor Tensor::broadcast_to(at::IntArrayRef size) const { + return at::_ops::broadcast_to::call(const_cast(*this), c10::fromIntArrayRefSlow(size)); +} + +// aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a) +inline at::Tensor Tensor::broadcast_to_symint(c10::SymIntArrayRef size) const { + return at::_ops::broadcast_to::call(const_cast(*this), size); +} + +// aten::ceil(Tensor self) -> Tensor +inline at::Tensor Tensor::ceil() const { + return at::_ops::ceil::call(const_cast(*this)); +} + +// aten::ceil_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::ceil_() const { + return at::_ops::ceil_::call(const_cast(*this)); +} + +// aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[] +inline ::std::vector Tensor::unsafe_chunk(int64_t chunks, int64_t dim) const { + return at::_ops::unsafe_chunk::call(const_cast(*this), chunks, dim); +} + +// aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::chunk(int64_t chunks, int64_t dim) const { + return at::_ops::chunk::call(const_cast(*this), chunks, dim); +} + +// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::tensor_split(int64_t sections, int64_t dim) const { + return at::_ops::tensor_split_sections::call(const_cast(*this), sections, dim); +} + +// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::tensor_split_symint(c10::SymInt sections, int64_t dim) const { + return at::_ops::tensor_split_sections::call(const_cast(*this), sections, dim); +} + +// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::tensor_split(at::IntArrayRef indices, int64_t dim) const { + return at::_ops::tensor_split_indices::call(const_cast(*this), c10::fromIntArrayRefSlow(indices), dim); +} + +// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::tensor_split_symint(c10::SymIntArrayRef indices, int64_t dim) const { + return at::_ops::tensor_split_indices::call(const_cast(*this), indices, dim); +} + +// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::tensor_split(const at::Tensor & tensor_indices_or_sections, int64_t dim) const { + return at::_ops::tensor_split_tensor_indices_or_sections::call(const_cast(*this), tensor_indices_or_sections, dim); +} + +// aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor +inline at::Tensor Tensor::clamp(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clamp::call(const_cast(*this), min, max); +} + +// aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor +inline at::Tensor Tensor::clamp(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clamp_Tensor::call(const_cast(*this), min, max); +} + +// aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) 
+inline at::Tensor & Tensor::clamp_(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clamp_::call(const_cast(*this), min, max); +} + +// aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) +inline at::Tensor & Tensor::clamp_(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clamp__Tensor::call(const_cast(*this), min, max); +} + +// aten::clamp_max(Tensor self, Scalar max) -> Tensor +inline at::Tensor Tensor::clamp_max(const at::Scalar & max) const { + return at::_ops::clamp_max::call(const_cast(*this), max); +} + +// aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor +inline at::Tensor Tensor::clamp_max(const at::Tensor & max) const { + return at::_ops::clamp_max_Tensor::call(const_cast(*this), max); +} + +// aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) +inline at::Tensor & Tensor::clamp_max_(const at::Scalar & max) const { + return at::_ops::clamp_max_::call(const_cast(*this), max); +} + +// aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!) +inline at::Tensor & Tensor::clamp_max_(const at::Tensor & max) const { + return at::_ops::clamp_max__Tensor::call(const_cast(*this), max); +} + +// aten::clamp_min(Tensor self, Scalar min) -> Tensor +inline at::Tensor Tensor::clamp_min(const at::Scalar & min) const { + return at::_ops::clamp_min::call(const_cast(*this), min); +} + +// aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor +inline at::Tensor Tensor::clamp_min(const at::Tensor & min) const { + return at::_ops::clamp_min_Tensor::call(const_cast(*this), min); +} + +// aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) +inline at::Tensor & Tensor::clamp_min_(const at::Scalar & min) const { + return at::_ops::clamp_min_::call(const_cast(*this), min); +} + +// aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!) +inline at::Tensor & Tensor::clamp_min_(const at::Tensor & min) const { + return at::_ops::clamp_min__Tensor::call(const_cast(*this), min); +} + +// aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor +inline at::Tensor Tensor::clip(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clip::call(const_cast(*this), min, max); +} + +// aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor +inline at::Tensor Tensor::clip(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clip_Tensor::call(const_cast(*this), min, max); +} + +// aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) +inline at::Tensor & Tensor::clip_(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clip_::call(const_cast(*this), min, max); +} + +// aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!) +inline at::Tensor & Tensor::clip_(const c10::optional & min, const c10::optional & max) const { + return at::_ops::clip__Tensor::call(const_cast(*this), min, max); +} + +// aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a) +inline at::Tensor Tensor::__dispatch_contiguous(at::MemoryFormat memory_format) const { + return at::_ops::contiguous::call(const_cast(*this), memory_format); +} + +// aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) 
+inline at::Tensor & Tensor::copy_(const at::Tensor & src, bool non_blocking) const { + return at::_ops::copy_::call(const_cast(*this), src, non_blocking); +} + +// aten::cos(Tensor self) -> Tensor +inline at::Tensor Tensor::cos() const { + return at::_ops::cos::call(const_cast(*this)); +} + +// aten::cos_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::cos_() const { + return at::_ops::cos_::call(const_cast(*this)); +} + +// aten::cosh(Tensor self) -> Tensor +inline at::Tensor Tensor::cosh() const { + return at::_ops::cosh::call(const_cast(*this)); +} + +// aten::cosh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::cosh_() const { + return at::_ops::cosh_::call(const_cast(*this)); +} + +// aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor +inline at::Tensor Tensor::count_nonzero(at::IntArrayRef dim) const { + return at::_ops::count_nonzero_dim_IntList::call(const_cast(*this), dim); +} + +// aten::count_nonzero(Tensor self, int? dim=None) -> Tensor +inline at::Tensor Tensor::count_nonzero(c10::optional dim) const { + return at::_ops::count_nonzero::call(const_cast(*this), dim); +} + +// aten::cov(Tensor self, *, int correction=1, Tensor? fweights=None, Tensor? aweights=None) -> Tensor +inline at::Tensor Tensor::cov(int64_t correction, const c10::optional & fweights, const c10::optional & aweights) const { + return at::_ops::cov::call(const_cast(*this), correction, fweights, aweights); +} + +// aten::corrcoef(Tensor self) -> Tensor +inline at::Tensor Tensor::corrcoef() const { + return at::_ops::corrcoef::call(const_cast(*this)); +} + +// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::cummax(int64_t dim) const { + return at::_ops::cummax::call(const_cast(*this), dim); +} + +// aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::cummax(at::Dimname dim) const { + return at::_ops::cummax_dimname::call(const_cast(*this), dim); +} + +// aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::cummin(int64_t dim) const { + return at::_ops::cummin::call(const_cast(*this), dim); +} + +// aten::cummin.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::cummin(at::Dimname dim) const { + return at::_ops::cummin_dimname::call(const_cast(*this), dim); +} + +// aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::cumprod(int64_t dim, c10::optional dtype) const { + return at::_ops::cumprod::call(const_cast(*this), dim, dtype); +} + +// aten::cumprod_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) +inline at::Tensor & Tensor::cumprod_(int64_t dim, c10::optional dtype) const { + return at::_ops::cumprod_::call(const_cast(*this), dim, dtype); +} + +// aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::cumprod(at::Dimname dim, c10::optional dtype) const { + return at::_ops::cumprod_dimname::call(const_cast(*this), dim, dtype); +} + +// aten::cumprod_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) +inline at::Tensor & Tensor::cumprod_(at::Dimname dim, c10::optional dtype) const { + return at::_ops::cumprod__dimname::call(const_cast(*this), dim, dtype); +} + +// aten::cumsum(Tensor self, int dim, *, ScalarType? 
dtype=None) -> Tensor +inline at::Tensor Tensor::cumsum(int64_t dim, c10::optional dtype) const { + return at::_ops::cumsum::call(const_cast(*this), dim, dtype); +} + +// aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!) +inline at::Tensor & Tensor::cumsum_(int64_t dim, c10::optional dtype) const { + return at::_ops::cumsum_::call(const_cast(*this), dim, dtype); +} + +// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::cumsum(at::Dimname dim, c10::optional dtype) const { + return at::_ops::cumsum_dimname::call(const_cast(*this), dim, dtype); +} + +// aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!) +inline at::Tensor & Tensor::cumsum_(at::Dimname dim, c10::optional dtype) const { + return at::_ops::cumsum__dimname::call(const_cast(*this), dim, dtype); +} + +// aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor +inline at::Tensor Tensor::diag_embed(int64_t offset, int64_t dim1, int64_t dim2) const { + return at::_ops::diag_embed::call(const_cast(*this), offset, dim1, dim2); +} + +// aten::diagflat(Tensor self, int offset=0) -> Tensor +inline at::Tensor Tensor::diagflat(int64_t offset) const { + return at::_ops::diagflat::call(const_cast(*this), offset); +} + +// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) +inline at::Tensor Tensor::diagonal(int64_t offset, int64_t dim1, int64_t dim2) const { + return at::_ops::diagonal::call(const_cast(*this), offset, dim1, dim2); +} + +// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a) +inline at::Tensor Tensor::diagonal(at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) const { + return at::_ops::diagonal_Dimname::call(const_cast(*this), outdim, dim1, dim2, offset); +} + +// aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!) +inline at::Tensor & Tensor::fill_diagonal_(const at::Scalar & fill_value, bool wrap) const { + return at::_ops::fill_diagonal_::call(const_cast(*this), fill_value, wrap); +} + +// aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor +inline at::Tensor Tensor::diff(int64_t n, int64_t dim, const c10::optional & prepend, const c10::optional & append) const { + return at::_ops::diff::call(const_cast(*this), n, dim, prepend, append); +} + +// aten::div.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::div(const at::Tensor & other) const { + return at::_ops::div_Tensor::call(const_cast(*this), other); +} + +// aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::div_(const at::Tensor & other) const { + return at::_ops::div__Tensor::call(const_cast(*this), other); +} + +// aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor +inline at::Tensor Tensor::div(const at::Tensor & other, c10::optional rounding_mode) const { + return at::_ops::div_Tensor_mode::call(const_cast(*this), other, rounding_mode); +} + +// aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) 
+inline at::Tensor & Tensor::div_(const at::Tensor & other, c10::optional rounding_mode) const { + return at::_ops::div__Tensor_mode::call(const_cast(*this), other, rounding_mode); +} + +// aten::div.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::div(const at::Scalar & other) const { + return at::_ops::div_Scalar::call(const_cast(*this), other); +} + +// aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::div_(const at::Scalar & other) const { + return at::_ops::div__Scalar::call(const_cast(*this), other); +} + +// aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor +inline at::Tensor Tensor::div(const at::Scalar & other, c10::optional rounding_mode) const { + return at::_ops::div_Scalar_mode::call(const_cast(*this), other, rounding_mode); +} + +// aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) +inline at::Tensor & Tensor::div_(const at::Scalar & other, c10::optional rounding_mode) const { + return at::_ops::div__Scalar_mode::call(const_cast(*this), other, rounding_mode); +} + +// aten::divide.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::divide(const at::Tensor & other) const { + return at::_ops::divide_Tensor::call(const_cast(*this), other); +} + +// aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::divide_(const at::Tensor & other) const { + return at::_ops::divide__Tensor::call(const_cast(*this), other); +} + +// aten::divide.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::divide(const at::Scalar & other) const { + return at::_ops::divide_Scalar::call(const_cast(*this), other); +} + +// aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::divide_(const at::Scalar & other) const { + return at::_ops::divide__Scalar::call(const_cast(*this), other); +} + +// aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor +inline at::Tensor Tensor::divide(const at::Tensor & other, c10::optional rounding_mode) const { + return at::_ops::divide_Tensor_mode::call(const_cast(*this), other, rounding_mode); +} + +// aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!) +inline at::Tensor & Tensor::divide_(const at::Tensor & other, c10::optional rounding_mode) const { + return at::_ops::divide__Tensor_mode::call(const_cast(*this), other, rounding_mode); +} + +// aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor +inline at::Tensor Tensor::divide(const at::Scalar & other, c10::optional rounding_mode) const { + return at::_ops::divide_Scalar_mode::call(const_cast(*this), other, rounding_mode); +} + +// aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!) +inline at::Tensor & Tensor::divide_(const at::Scalar & other, c10::optional rounding_mode) const { + return at::_ops::divide__Scalar_mode::call(const_cast(*this), other, rounding_mode); +} + +// aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::true_divide(const at::Tensor & other) const { + return at::_ops::true_divide_Tensor::call(const_cast(*this), other); +} + +// aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+inline at::Tensor & Tensor::true_divide_(const at::Tensor & other) const { + return at::_ops::true_divide__Tensor::call(const_cast(*this), other); +} + +// aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::true_divide(const at::Scalar & other) const { + return at::_ops::true_divide_Scalar::call(const_cast(*this), other); +} + +// aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::true_divide_(const at::Scalar & other) const { + return at::_ops::true_divide__Scalar::call(const_cast(*this), other); +} + +// aten::dot(Tensor self, Tensor tensor) -> Tensor +inline at::Tensor Tensor::dot(const at::Tensor & tensor) const { + return at::_ops::dot::call(const_cast(*this), tensor); +} + +// aten::vdot(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::vdot(const at::Tensor & other) const { + return at::_ops::vdot::call(const_cast(*this), other); +} + +// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty(at::IntArrayRef size, at::TensorOptions options) const { + return at::_ops::new_empty::call(const_cast(*this), c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_empty::call(const_cast(*this), c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); +} + +// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_symint(c10::SymIntArrayRef size, at::TensorOptions options) const { + return at::_ops::new_empty::call(const_cast(*this), size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_empty::call(const_cast(*this), size, dtype, layout, device, pin_memory); +} + +// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options) const { + return at::_ops::new_empty_strided::call(const_cast(*this), c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_empty_strided::call(const_cast(*this), c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), dtype, layout, device, pin_memory); +} + +// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options) const { + return at::_ops::new_empty_strided::call(const_cast(*this), size, stride, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_empty_strided::call(const_cast(*this), size, stride, dtype, layout, device, pin_memory); +} + +// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) const { + return at::_ops::new_full::call(const_cast(*this), c10::fromIntArrayRefSlow(size), fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_full::call(const_cast(*this), c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory); +} + +// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options) const { + return at::_ops::new_full::call(const_cast(*this), size, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_full::call(const_cast(*this), size, fill_value, dtype, layout, device, pin_memory); +} + +// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_zeros(at::IntArrayRef size, at::TensorOptions options) const { + return at::_ops::new_zeros::call(const_cast(*this), c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_zeros(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_zeros::call(const_cast(*this), c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); +} + +// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options) const { + return at::_ops::new_zeros::call(const_cast(*this), size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_zeros_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_zeros::call(const_cast(*this), size, dtype, layout, device, pin_memory); +} + +// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_ones(at::IntArrayRef size, at::TensorOptions options) const { + return at::_ops::new_ones::call(const_cast(*this), c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_ones(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_ones::call(const_cast(*this), c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); +} + +// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_ones_symint(c10::SymIntArrayRef size, at::TensorOptions options) const { + return at::_ops::new_ones::call(const_cast(*this), size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} + +// aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor Tensor::new_ones_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) const { + return at::_ops::new_ones::call(const_cast(*this), size, dtype, layout, device, pin_memory); +} + +// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) 
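// A minimal usage sketch for the new_empty/new_full/new_zeros/new_ones factory
// overloads above: with no TensorOptions argument the result inherits self's
// dtype/layout/device (the unpacked optionals stay nullopt), while a
// TensorOptions argument overrides exactly the fields it sets. Illustrative
// only, assuming a libtorch program with <torch/torch.h>; `factories_sketch`
// is a hypothetical function, not part of this header.

#include <torch/torch.h>

torch::Tensor factories_sketch(const torch::Tensor& self) {
  // Same dtype/device as `self`, new shape {2, 3}.
  torch::Tensor z = self.new_zeros({2, 3});

  // self.options() carries self's dtype/layout/device; .dtype(...) overrides
  // only the scalar type, which the overload unpacks via dtype_opt().
  torch::Tensor f = self.new_full({4}, /*fill_value=*/1.5,
                                  self.options().dtype(torch::kFloat64));
  return f + z.sum();
}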
+inline const at::Tensor & Tensor::resize_(at::IntArrayRef size, c10::optional memory_format) const { + return at::_ops::resize_::call(const_cast(*this), c10::fromIntArrayRefSlow(size), memory_format); +} + +// aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) +inline const at::Tensor & Tensor::resize__symint(c10::SymIntArrayRef size, c10::optional memory_format) const { + return at::_ops::resize_::call(const_cast(*this), size, memory_format); +} + +// aten::erf(Tensor self) -> Tensor +inline at::Tensor Tensor::erf() const { + return at::_ops::erf::call(const_cast(*this)); +} + +// aten::erf_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::erf_() const { + return at::_ops::erf_::call(const_cast(*this)); +} + +// aten::erfc(Tensor self) -> Tensor +inline at::Tensor Tensor::erfc() const { + return at::_ops::erfc::call(const_cast(*this)); +} + +// aten::erfc_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::erfc_() const { + return at::_ops::erfc_::call(const_cast(*this)); +} + +// aten::exp(Tensor self) -> Tensor +inline at::Tensor Tensor::exp() const { + return at::_ops::exp::call(const_cast(*this)); +} + +// aten::exp_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::exp_() const { + return at::_ops::exp_::call(const_cast(*this)); +} + +// aten::exp2(Tensor self) -> Tensor +inline at::Tensor Tensor::exp2() const { + return at::_ops::exp2::call(const_cast(*this)); +} + +// aten::exp2_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::exp2_() const { + return at::_ops::exp2_::call(const_cast(*this)); +} + +// aten::expm1(Tensor self) -> Tensor +inline at::Tensor Tensor::expm1() const { + return at::_ops::expm1::call(const_cast(*this)); +} + +// aten::expm1_(Tensor(a!) self) -> Tensor(a!) 
+inline at::Tensor & Tensor::expm1_() const { + return at::_ops::expm1_::call(const_cast(*this)); +} + +// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a) +inline at::Tensor Tensor::expand(at::IntArrayRef size, bool implicit) const { + return at::_ops::expand::call(const_cast(*this), c10::fromIntArrayRefSlow(size), implicit); +} + +// aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a) +inline at::Tensor Tensor::expand_symint(c10::SymIntArrayRef size, bool implicit) const { + return at::_ops::expand::call(const_cast(*this), size, implicit); +} + +// aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a) +inline at::Tensor Tensor::expand_as(const at::Tensor & other) const { + return at::_ops::expand_as::call(const_cast(*this), other); +} + +// aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a) +inline at::Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim) const { + return at::_ops::flatten_using_ints::call(const_cast(*this), start_dim, end_dim); +} + +// aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a) +inline at::Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim, at::Dimname out_dim) const { + return at::_ops::flatten_named_out_dim::call(const_cast(*this), start_dim, end_dim, out_dim); +} + +// aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a) +inline at::Tensor Tensor::flatten(at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) const { + return at::_ops::flatten_using_names::call(const_cast(*this), start_dim, end_dim, out_dim); +} + +// aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) +inline at::Tensor Tensor::flatten(at::DimnameList dims, at::Dimname out_dim) const { + return at::_ops::flatten_DimnameList::call(const_cast(*this), dims, out_dim); +} + +// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) +inline at::Tensor Tensor::unflatten(int64_t dim, at::IntArrayRef sizes) const { + return at::_ops::unflatten_int::call(const_cast(*this), dim, c10::fromIntArrayRefSlow(sizes)); +} + +// aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a) +inline at::Tensor Tensor::unflatten_symint(int64_t dim, c10::SymIntArrayRef sizes) const { + return at::_ops::unflatten_int::call(const_cast(*this), dim, sizes); +} + +// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) +inline at::Tensor Tensor::unflatten(at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) const { + return at::_ops::unflatten_Dimname::call(const_cast(*this), dim, c10::fromIntArrayRefSlow(sizes), names); +} + +// aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a) +inline at::Tensor Tensor::unflatten_symint(at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) const { + return at::_ops::unflatten_Dimname::call(const_cast(*this), dim, sizes, names); +} + +// aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) +inline at::Tensor & Tensor::fill_(const at::Scalar & value) const { + return at::_ops::fill__Scalar::call(const_cast(*this), value); +} + +// aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) 
+inline at::Tensor & Tensor::fill_(const at::Tensor & value) const { + return at::_ops::fill__Tensor::call(const_cast(*this), value); +} + +// aten::floor(Tensor self) -> Tensor +inline at::Tensor Tensor::floor() const { + return at::_ops::floor::call(const_cast(*this)); +} + +// aten::floor_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::floor_() const { + return at::_ops::floor_::call(const_cast(*this)); +} + +// aten::floor_divide(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::floor_divide(const at::Tensor & other) const { + return at::_ops::floor_divide::call(const_cast(*this), other); +} + +// aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::floor_divide_(const at::Tensor & other) const { + return at::_ops::floor_divide__Tensor::call(const_cast(*this), other); +} + +// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::floor_divide(const at::Scalar & other) const { + return at::_ops::floor_divide_Scalar::call(const_cast(*this), other); +} + +// aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::floor_divide_(const at::Scalar & other) const { + return at::_ops::floor_divide__Scalar::call(const_cast(*this), other); +} + +// aten::frac(Tensor self) -> Tensor +inline at::Tensor Tensor::frac() const { + return at::_ops::frac::call(const_cast(*this)); +} + +// aten::frac_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::frac_() const { + return at::_ops::frac_::call(const_cast(*this)); +} + +// aten::gcd(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::gcd(const at::Tensor & other) const { + return at::_ops::gcd::call(const_cast(*this), other); +} + +// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::gcd_(const at::Tensor & other) const { + return at::_ops::gcd_::call(const_cast(*this), other); +} + +// aten::lcm(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::lcm(const at::Tensor & other) const { + return at::_ops::lcm::call(const_cast(*this), other); +} + +// aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::lcm_(const at::Tensor & other) const { + return at::_ops::lcm_::call(const_cast(*this), other); +} + +// aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor +inline at::Tensor Tensor::index(const c10::List> & indices) const { + return at::_ops::index_Tensor::call(const_cast(*this), indices); +} + +// aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) +inline at::Tensor & Tensor::index_copy_(int64_t dim, const at::Tensor & index, const at::Tensor & source) const { + return at::_ops::index_copy_::call(const_cast(*this), dim, index, source); +} + +// aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor +inline at::Tensor Tensor::index_copy(int64_t dim, const at::Tensor & index, const at::Tensor & source) const { + return at::_ops::index_copy::call(const_cast(*this), dim, index, source); +} + +// aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!) 
+inline at::Tensor & Tensor::index_copy_(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const { + return at::_ops::index_copy__dimname::call(const_cast(*this), dim, index, source); +} + +// aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor +inline at::Tensor Tensor::index_copy(at::Dimname dim, const at::Tensor & index, const at::Tensor & source) const { + return at::_ops::index_copy_dimname::call(const_cast(*this), dim, index, source); +} + +// aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) +inline at::Tensor & Tensor::index_put_(const c10::List> & indices, const at::Tensor & values, bool accumulate) const { + return at::_ops::index_put_::call(const_cast(*this), indices, values, accumulate); +} + +// aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor +inline at::Tensor Tensor::index_put(const c10::List> & indices, const at::Tensor & values, bool accumulate) const { + return at::_ops::index_put::call(const_cast(*this), indices, values, accumulate); +} + +// aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor +inline at::Tensor Tensor::isclose(const at::Tensor & other, double rtol, double atol, bool equal_nan) const { + return at::_ops::isclose::call(const_cast(*this), other, rtol, atol, equal_nan); +} + +// aten::isnan(Tensor self) -> Tensor +inline at::Tensor Tensor::isnan() const { + return at::_ops::isnan::call(const_cast(*this)); +} + +// aten::is_distributed(Tensor self) -> bool +inline bool Tensor::is_distributed() const { + return at::_ops::is_distributed::call(const_cast(*this)); +} + +// aten::is_floating_point(Tensor self) -> bool +inline bool Tensor::__dispatch_is_floating_point() const { + return at::_ops::is_floating_point::call(const_cast(*this)); +} + +// aten::is_complex(Tensor self) -> bool +inline bool Tensor::__dispatch_is_complex() const { + return at::_ops::is_complex::call(const_cast(*this)); +} + +// aten::is_conj(Tensor self) -> bool +inline bool Tensor::__dispatch_is_conj() const { + return at::_ops::is_conj::call(const_cast(*this)); +} + +// aten::_is_zerotensor(Tensor self) -> bool +inline bool Tensor::__dispatch__is_zerotensor() const { + return at::_ops::_is_zerotensor::call(const_cast(*this)); +} + +// aten::is_neg(Tensor self) -> bool +inline bool Tensor::__dispatch_is_neg() const { + return at::_ops::is_neg::call(const_cast(*this)); +} + +// aten::isreal(Tensor self) -> Tensor +inline at::Tensor Tensor::isreal() const { + return at::_ops::isreal::call(const_cast(*this)); +} + +// aten::is_nonzero(Tensor self) -> bool +inline bool Tensor::is_nonzero() const { + return at::_ops::is_nonzero::call(const_cast(*this)); +} + +// aten::is_same_size(Tensor self, Tensor other) -> bool +inline bool Tensor::is_same_size(const at::Tensor & other) const { + return at::_ops::is_same_size::call(const_cast(*this), other); +} + +// aten::is_signed(Tensor self) -> bool +inline bool Tensor::__dispatch_is_signed() const { + return at::_ops::is_signed::call(const_cast(*this)); +} + +// aten::is_inference(Tensor self) -> bool +inline bool Tensor::__dispatch_is_inference() const { + return at::_ops::is_inference::call(const_cast(*this)); +} + +// aten::kron(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::kron(const at::Tensor & other) const { + return at::_ops::kron::call(const_cast(*this), other); +} + +// aten::kthvalue(Tensor self, int k, int 
dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::kthvalue(int64_t k, int64_t dim, bool keepdim) const { + return at::_ops::kthvalue::call(const_cast(*this), k, dim, keepdim); +} + +// aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::kthvalue(int64_t k, at::Dimname dim, bool keepdim) const { + return at::_ops::kthvalue_dimname::call(const_cast(*this), k, dim, keepdim); +} + +// aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor +inline at::Tensor Tensor::nan_to_num(c10::optional nan, c10::optional posinf, c10::optional neginf) const { + return at::_ops::nan_to_num::call(const_cast(*this), nan, posinf, neginf); +} + +// aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!) +inline at::Tensor & Tensor::nan_to_num_(c10::optional nan, c10::optional posinf, c10::optional neginf) const { + return at::_ops::nan_to_num_::call(const_cast(*this), nan, posinf, neginf); +} + +// aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::ldexp(const at::Tensor & other) const { + return at::_ops::ldexp_Tensor::call(const_cast(*this), other); +} + +// aten::ldexp_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::ldexp_(const at::Tensor & other) const { + return at::_ops::ldexp_::call(const_cast(*this), other); +} + +// aten::log(Tensor self) -> Tensor +inline at::Tensor Tensor::log() const { + return at::_ops::log::call(const_cast(*this)); +} + +// aten::log_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::log_() const { + return at::_ops::log_::call(const_cast(*this)); +} + +// aten::log10(Tensor self) -> Tensor +inline at::Tensor Tensor::log10() const { + return at::_ops::log10::call(const_cast(*this)); +} + +// aten::log10_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::log10_() const { + return at::_ops::log10_::call(const_cast(*this)); +} + +// aten::log1p(Tensor self) -> Tensor +inline at::Tensor Tensor::log1p() const { + return at::_ops::log1p::call(const_cast(*this)); +} + +// aten::log1p_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::log1p_() const { + return at::_ops::log1p_::call(const_cast(*this)); +} + +// aten::log2(Tensor self) -> Tensor +inline at::Tensor Tensor::log2() const { + return at::_ops::log2::call(const_cast(*this)); +} + +// aten::log2_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::log2_() const { + return at::_ops::log2_::call(const_cast(*this)); +} + +// aten::logaddexp(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::logaddexp(const at::Tensor & other) const { + return at::_ops::logaddexp::call(const_cast(*this), other); +} + +// aten::logaddexp2(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::logaddexp2(const at::Tensor & other) const { + return at::_ops::logaddexp2::call(const_cast(*this), other); +} + +// aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::xlogy(const at::Tensor & other) const { + return at::_ops::xlogy_Tensor::call(const_cast(*this), other); +} + +// aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::xlogy(const at::Scalar & other) const { + return at::_ops::xlogy_Scalar_Other::call(const_cast(*this), other); +} + +// aten::xlogy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
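// A minimal usage sketch for the pointwise/log-style ops above (assuming a
// hypothetical float tensor t = at::rand({4, 5})):
//
//   at::Tensor cleaned = (t - 0.5).log().nan_to_num(/*nan=*/0.0);  // NaNs from log of negatives -> 0
//   auto [values, indices] = t.kthvalue(/*k=*/2, /*dim=*/1);       // 2nd smallest along dim 1
//   at::Tensor la = t.logaddexp(t);                                // log(exp(t) + exp(t)), computed stably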
+inline at::Tensor & Tensor::xlogy_(const at::Tensor & other) const { + return at::_ops::xlogy__Tensor::call(const_cast(*this), other); +} + +// aten::xlogy_.Scalar_Other(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::xlogy_(const at::Scalar & other) const { + return at::_ops::xlogy__Scalar_Other::call(const_cast(*this), other); +} + +// aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::log_softmax(int64_t dim, c10::optional dtype) const { + return at::_ops::log_softmax_int::call(const_cast(*this), dim, dtype); +} + +// aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::log_softmax(at::Dimname dim, c10::optional dtype) const { + return at::_ops::log_softmax_Dimname::call(const_cast(*this), dim, dtype); +} + +// aten::logcumsumexp(Tensor self, int dim) -> Tensor +inline at::Tensor Tensor::logcumsumexp(int64_t dim) const { + return at::_ops::logcumsumexp::call(const_cast(*this), dim); +} + +// aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor +inline at::Tensor Tensor::logcumsumexp(at::Dimname dim) const { + return at::_ops::logcumsumexp_dimname::call(const_cast(*this), dim); +} + +// aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::logsumexp(at::IntArrayRef dim, bool keepdim) const { + return at::_ops::logsumexp::call(const_cast(*this), dim, keepdim); +} + +// aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::logsumexp(at::DimnameList dim, bool keepdim) const { + return at::_ops::logsumexp_names::call(const_cast(*this), dim, keepdim); +} + +// aten::matmul(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::matmul(const at::Tensor & other) const { + return at::_ops::matmul::call(const_cast(*this), other); +} + +// aten::matrix_power(Tensor self, int n) -> Tensor +inline at::Tensor Tensor::matrix_power(int64_t n) const { + return at::_ops::matrix_power::call(const_cast(*this), n); +} + +// aten::matrix_exp(Tensor self) -> Tensor +inline at::Tensor Tensor::matrix_exp() const { + return at::_ops::matrix_exp::call(const_cast(*this)); +} + +// aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max) +inline ::std::tuple Tensor::aminmax(c10::optional dim, bool keepdim) const { + return at::_ops::aminmax::call(const_cast(*this), dim, keepdim); +} + +// aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::max(int64_t dim, bool keepdim) const { + return at::_ops::max_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::max(at::Dimname dim, bool keepdim) const { + return at::_ops::max_names_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor +inline at::Tensor Tensor::amax(at::IntArrayRef dim, bool keepdim) const { + return at::_ops::amax::call(const_cast(*this), dim, keepdim); +} + +// aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::mean(c10::optional dtype) const { + return at::_ops::mean::call(const_cast(*this), dtype); +} + +// aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor +inline at::Tensor Tensor::mean(at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) const { + return at::_ops::mean_dim::call(const_cast(*this), dim, keepdim, dtype); +} + +// aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::mean(at::DimnameList dim, bool keepdim, c10::optional dtype) const { + return at::_ops::mean_names_dim::call(const_cast(*this), dim, keepdim, dtype); +} + +// aten::nanmean(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::nanmean(at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) const { + return at::_ops::nanmean::call(const_cast(*this), dim, keepdim, dtype); +} + +// aten::median(Tensor self) -> Tensor +inline at::Tensor Tensor::median() const { + return at::_ops::median::call(const_cast(*this)); +} + +// aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::median(int64_t dim, bool keepdim) const { + return at::_ops::median_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::median(at::Dimname dim, bool keepdim) const { + return at::_ops::median_names_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::nanmedian(Tensor self) -> Tensor +inline at::Tensor Tensor::nanmedian() const { + return at::_ops::nanmedian::call(const_cast(*this)); +} + +// aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::nanmedian(int64_t dim, bool keepdim) const { + return at::_ops::nanmedian_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::nanmedian.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::nanmedian(at::Dimname dim, bool keepdim) const { + return at::_ops::nanmedian_names_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::min(int64_t dim, bool keepdim) const { + return at::_ops::min_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::min(at::Dimname dim, bool keepdim) const { + return at::_ops::min_names_dim::call(const_cast(*this), dim, keepdim); +} + +// aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor +inline at::Tensor Tensor::amin(at::IntArrayRef dim, bool keepdim) const { + return at::_ops::amin::call(const_cast(*this), dim, keepdim); +} + +// aten::mm(Tensor self, Tensor mat2) -> Tensor +inline at::Tensor Tensor::mm(const at::Tensor & mat2) const { + return at::_ops::mm::call(const_cast(*this), mat2); +} + +// aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::mode(int64_t dim, bool keepdim) const { + return at::_ops::mode::call(const_cast(*this), dim, keepdim); +} + +// aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::mode(at::Dimname dim, bool keepdim) const { + return at::_ops::mode_dimname::call(const_cast(*this), dim, keepdim); +} + +// aten::mul.Tensor(Tensor self, Tensor other) -> Tensor +inline 
at::Tensor Tensor::mul(const at::Tensor & other) const {
+    return at::_ops::mul_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::mul_(const at::Tensor & other) const {
+    return at::_ops::mul__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::mul.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::mul(const at::Scalar & other) const {
+    return at::_ops::mul_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::mul_(const at::Scalar & other) const {
+    return at::_ops::mul__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor Tensor::multiply(const at::Tensor & other) const {
+    return at::_ops::multiply_Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+inline at::Tensor & Tensor::multiply_(const at::Tensor & other) const {
+    return at::_ops::multiply__Tensor::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor Tensor::multiply(const at::Scalar & other) const {
+    return at::_ops::multiply_Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+inline at::Tensor & Tensor::multiply_(const at::Scalar & other) const {
+    return at::_ops::multiply__Scalar::call(const_cast<Tensor&>(*this), other);
+}
+
+// aten::mv(Tensor self, Tensor vec) -> Tensor
+inline at::Tensor Tensor::mv(const at::Tensor & vec) const {
+    return at::_ops::mv::call(const_cast<Tensor&>(*this), vec);
+}
+
+// aten::mvlgamma(Tensor self, int p) -> Tensor
+inline at::Tensor Tensor::mvlgamma(int64_t p) const {
+    return at::_ops::mvlgamma::call(const_cast<Tensor&>(*this), p);
+}
+
+// aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)
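// A minimal usage sketch for the product ops above (assuming hypothetical
// tensors m = at::rand({3, 4}) and v = at::rand({4})):
//
//   at::Tensor mv_out = m.mv(v);            // matrix-vector product, shape [3]
//   at::Tensor scaled = mv_out.mul(2.0);    // mul.Scalar overload
//   scaled.multiply_(mv_out);               // in-place alias of mul_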
+inline at::Tensor & Tensor::mvlgamma_(int64_t p) const { + return at::_ops::mvlgamma_::call(const_cast(*this), p); +} + +// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor +inline at::Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) const { + return at::_ops::narrow_copy::call(const_cast(*this), dim, start, length); +} + +// aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor +inline at::Tensor Tensor::narrow_copy_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const { + return at::_ops::narrow_copy::call(const_cast(*this), dim, start, length); +} + +// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) +inline at::Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const { + return at::_ops::narrow::call(const_cast(*this), dim, start, length); +} + +// aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a) +inline at::Tensor Tensor::narrow_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const { + return at::_ops::narrow::call(const_cast(*this), dim, start, length); +} + +// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) +inline at::Tensor Tensor::narrow(int64_t dim, const at::Tensor & start, int64_t length) const { + return at::_ops::narrow_Tensor::call(const_cast(*this), dim, start, length); +} + +// aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a) +inline at::Tensor Tensor::narrow_symint(int64_t dim, const at::Tensor & start, c10::SymInt length) const { + return at::_ops::narrow_Tensor::call(const_cast(*this), dim, start, length); +} + +// aten::permute(Tensor(a) self, int[] dims) -> Tensor(a) +inline at::Tensor Tensor::permute(at::IntArrayRef dims) const { + return at::_ops::permute::call(const_cast(*this), dims); +} + +// aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) +inline at::Tensor Tensor::movedim(at::IntArrayRef source, at::IntArrayRef destination) const { + return at::_ops::movedim_intlist::call(const_cast(*this), source, destination); +} + +// aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a) +inline at::Tensor Tensor::movedim(int64_t source, int64_t destination) const { + return at::_ops::movedim_int::call(const_cast(*this), source, destination); +} + +// aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) +inline at::Tensor Tensor::moveaxis(at::IntArrayRef source, at::IntArrayRef destination) const { + return at::_ops::moveaxis_intlist::call(const_cast(*this), source, destination); +} + +// aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a) +inline at::Tensor Tensor::moveaxis(int64_t source, int64_t destination) const { + return at::_ops::moveaxis_int::call(const_cast(*this), source, destination); +} + +// aten::numpy_T(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::numpy_T() const { + return at::_ops::numpy_T::call(const_cast(*this)); +} + +// aten::matrix_H(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::matrix_H() const { + return at::_ops::matrix_H::call(const_cast(*this)); +} + +// aten::mT(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::mT() const { + return at::_ops::mT::call(const_cast(*this)); +} + +// aten::mH(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::mH() const { + return at::_ops::mH::call(const_cast(*this)); +} + +// aten::adjoint(Tensor(a) self) -> Tensor(a) 
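// A minimal usage sketch for the view/layout ops above (assuming a
// hypothetical tensor x = at::rand({2, 3, 4})):
//
//   at::Tensor p  = x.permute({2, 0, 1});                           // shape [4, 2, 3], a view
//   at::Tensor n  = x.narrow(/*dim=*/1, /*start=*/0, /*length=*/2); // shape [2, 2, 4]
//   at::Tensor md = x.movedim(0, -1);                               // shape [3, 4, 2]
//   at::Tensor mt = x.mT();                                         // swap last two dims: [2, 4, 3]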
+inline at::Tensor Tensor::adjoint() const { + return at::_ops::adjoint::call(const_cast(*this)); +} + +// aten::is_pinned(Tensor self, Device? device=None) -> bool +inline bool Tensor::is_pinned(c10::optional device) const { + return at::_ops::is_pinned::call(const_cast(*this), device); +} + +// aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a) +inline at::Tensor Tensor::pin_memory(c10::optional device) const { + return at::_ops::pin_memory::call(const_cast(*this), device); +} + +// aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor +inline at::Tensor Tensor::pinverse(double rcond) const { + return at::_ops::pinverse::call(const_cast(*this), rcond); +} + +// aten::rad2deg(Tensor self) -> Tensor +inline at::Tensor Tensor::rad2deg() const { + return at::_ops::rad2deg::call(const_cast(*this)); +} + +// aten::rad2deg_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::rad2deg_() const { + return at::_ops::rad2deg_::call(const_cast(*this)); +} + +// aten::deg2rad(Tensor self) -> Tensor +inline at::Tensor Tensor::deg2rad() const { + return at::_ops::deg2rad::call(const_cast(*this)); +} + +// aten::deg2rad_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::deg2rad_() const { + return at::_ops::deg2rad_::call(const_cast(*this)); +} + +// aten::ravel(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::ravel() const { + return at::_ops::ravel::call(const_cast(*this)); +} + +// aten::reciprocal(Tensor self) -> Tensor +inline at::Tensor Tensor::reciprocal() const { + return at::_ops::reciprocal::call(const_cast(*this)); +} + +// aten::reciprocal_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::reciprocal_() const { + return at::_ops::reciprocal_::call(const_cast(*this)); +} + +// aten::neg(Tensor self) -> Tensor +inline at::Tensor Tensor::neg() const { + return at::_ops::neg::call(const_cast(*this)); +} + +// aten::neg_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::neg_() const { + return at::_ops::neg_::call(const_cast(*this)); +} + +// aten::negative(Tensor self) -> Tensor +inline at::Tensor Tensor::negative() const { + return at::_ops::negative::call(const_cast(*this)); +} + +// aten::negative_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::negative_() const { + return at::_ops::negative_::call(const_cast(*this)); +} + +// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor +inline at::Tensor Tensor::repeat(at::IntArrayRef repeats) const { + return at::_ops::repeat::call(const_cast(*this), c10::fromIntArrayRefSlow(repeats)); +} + +// aten::repeat(Tensor self, SymInt[] repeats) -> Tensor +inline at::Tensor Tensor::repeat_symint(c10::SymIntArrayRef repeats) const { + return at::_ops::repeat::call(const_cast(*this), repeats); +} + +// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor +inline at::Tensor Tensor::repeat_interleave(const at::Tensor & repeats, c10::optional dim, c10::optional output_size) const { + return at::_ops::repeat_interleave_self_Tensor::call(const_cast(*this), repeats, dim, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); +} + +// aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? 
output_size=None) -> Tensor +inline at::Tensor Tensor::repeat_interleave_symint(const at::Tensor & repeats, c10::optional dim, c10::optional output_size) const { + return at::_ops::repeat_interleave_self_Tensor::call(const_cast(*this), repeats, dim, output_size); +} + +// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor +inline at::Tensor Tensor::repeat_interleave(int64_t repeats, c10::optional dim, c10::optional output_size) const { + return at::_ops::repeat_interleave_self_int::call(const_cast(*this), repeats, dim, output_size.has_value() ? c10::make_optional(c10::SymInt(*output_size)) : c10::nullopt); +} + +// aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor +inline at::Tensor Tensor::repeat_interleave_symint(c10::SymInt repeats, c10::optional dim, c10::optional output_size) const { + return at::_ops::repeat_interleave_self_int::call(const_cast(*this), repeats, dim, output_size); +} + +// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a) +inline at::Tensor Tensor::reshape(at::IntArrayRef shape) const { + return at::_ops::reshape::call(const_cast(*this), c10::fromIntArrayRefSlow(shape)); +} + +// aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a) +inline at::Tensor Tensor::reshape_symint(c10::SymIntArrayRef shape) const { + return at::_ops::reshape::call(const_cast(*this), shape); +} + +// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a) +inline at::Tensor Tensor::_reshape_alias(at::IntArrayRef size, at::IntArrayRef stride) const { + return at::_ops::_reshape_alias::call(const_cast(*this), c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride)); +} + +// aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a) +inline at::Tensor Tensor::_reshape_alias_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const { + return at::_ops::_reshape_alias::call(const_cast(*this), size, stride); +} + +// aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a) +inline at::Tensor Tensor::reshape_as(const at::Tensor & other) const { + return at::_ops::reshape_as::call(const_cast(*this), other); +} + +// aten::round(Tensor self) -> Tensor +inline at::Tensor Tensor::round() const { + return at::_ops::round::call(const_cast(*this)); +} + +// aten::round_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::round_() const { + return at::_ops::round_::call(const_cast(*this)); +} + +// aten::round.decimals(Tensor self, *, int decimals) -> Tensor +inline at::Tensor Tensor::round(int64_t decimals) const { + return at::_ops::round_decimals::call(const_cast(*this), decimals); +} + +// aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!) +inline at::Tensor & Tensor::round_(int64_t decimals) const { + return at::_ops::round__decimals::call(const_cast(*this), decimals); +} + +// aten::relu(Tensor self) -> Tensor +inline at::Tensor Tensor::relu() const { + return at::_ops::relu::call(const_cast(*this)); +} + +// aten::relu_(Tensor(a!) self) -> Tensor(a!) 
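// A minimal usage sketch for the reshape/repeat ops above (assuming a
// hypothetical tensor x = at::rand({2, 3, 4})):
//
//   at::Tensor r   = x.reshape({3, -1});                            // shape [3, 8]
//   at::Tensor rep = x.repeat({2, 1, 1});                           // shape [4, 3, 4]
//   at::Tensor ri  = x.repeat_interleave(/*repeats=*/2, /*dim=*/0); // shape [4, 3, 4]
//   at::Tensor rd  = x.round(/*decimals=*/2);                       // round.decimals overload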
+inline at::Tensor & Tensor::relu_() const { + return at::_ops::relu_::call(const_cast(*this)); +} + +// aten::prelu(Tensor self, Tensor weight) -> Tensor +inline at::Tensor Tensor::prelu(const at::Tensor & weight) const { + return at::_ops::prelu::call(const_cast(*this), weight); +} + +// aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor +inline at::Tensor Tensor::hardshrink(const at::Scalar & lambd) const { + return at::_ops::hardshrink::call(const_cast(*this), lambd); +} + +// aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor +inline at::Tensor Tensor::hardshrink_backward(const at::Tensor & grad_out, const at::Scalar & lambd) const { + return at::_ops::hardshrink_backward::call(grad_out, const_cast(*this), lambd); +} + +// aten::rsqrt(Tensor self) -> Tensor +inline at::Tensor Tensor::rsqrt() const { + return at::_ops::rsqrt::call(const_cast(*this)); +} + +// aten::rsqrt_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::rsqrt_() const { + return at::_ops::rsqrt_::call(const_cast(*this)); +} + +// aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) +inline at::Tensor Tensor::select(at::Dimname dim, int64_t index) const { + return at::_ops::select_Dimname::call(const_cast(*this), dim, index); +} + +// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a) +inline at::Tensor Tensor::select(int64_t dim, int64_t index) const { + return at::_ops::select_int::call(const_cast(*this), dim, index); +} + +// aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a) +inline at::Tensor Tensor::select_symint(int64_t dim, c10::SymInt index) const { + return at::_ops::select_int::call(const_cast(*this), dim, index); +} + +// aten::sigmoid(Tensor self) -> Tensor +inline at::Tensor Tensor::sigmoid() const { + return at::_ops::sigmoid::call(const_cast(*this)); +} + +// aten::sigmoid_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::sigmoid_() const { + return at::_ops::sigmoid_::call(const_cast(*this)); +} + +// aten::logit(Tensor self, float? eps=None) -> Tensor +inline at::Tensor Tensor::logit(c10::optional eps) const { + return at::_ops::logit::call(const_cast(*this), eps); +} + +// aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!) +inline at::Tensor & Tensor::logit_(c10::optional eps) const { + return at::_ops::logit_::call(const_cast(*this), eps); +} + +// aten::sin(Tensor self) -> Tensor +inline at::Tensor Tensor::sin() const { + return at::_ops::sin::call(const_cast(*this)); +} + +// aten::sin_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::sin_() const { + return at::_ops::sin_::call(const_cast(*this)); +} + +// aten::sinc(Tensor self) -> Tensor +inline at::Tensor Tensor::sinc() const { + return at::_ops::sinc::call(const_cast(*this)); +} + +// aten::sinc_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::sinc_() const { + return at::_ops::sinc_::call(const_cast(*this)); +} + +// aten::sinh(Tensor self) -> Tensor +inline at::Tensor Tensor::sinh() const { + return at::_ops::sinh::call(const_cast(*this)); +} + +// aten::sinh_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::sinh_() const { + return at::_ops::sinh_::call(const_cast(*this)); +} + +// aten::detach(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::detach() const { + return at::_ops::detach::call(const_cast(*this)); +} + +// aten::detach_(Tensor(a!) self) -> Tensor(a!) 
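// A minimal usage sketch for the activation/selection ops above (assuming a
// hypothetical tensor x = at::randn({2, 3, 4})):
//
//   at::Tensor a   = x.relu().sigmoid();                // chained out-of-place calls
//   at::Tensor row = x.select(/*dim=*/0, /*index=*/1);  // shape [3, 4], a view
//   at::Tensor h   = x.hardshrink(/*lambd=*/0.5);       // Scalar argument
//   at::Tensor d   = a.detach();                        // shares storage, detached from autograd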
+inline at::Tensor & Tensor::detach_() const { + return at::_ops::detach_::call(const_cast(*this)); +} + +// aten::size.Dimname(Tensor self, Dimname dim) -> int +inline int64_t Tensor::size(at::Dimname dim) const { + return at::_ops::size_Dimname::call(const_cast(*this), dim); +} + +// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) +inline at::Tensor Tensor::slice(int64_t dim, c10::optional start, c10::optional end, int64_t step) const { + return at::_ops::slice_Tensor::call(const_cast(*this), dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step); +} + +// aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) +inline at::Tensor Tensor::slice_symint(int64_t dim, c10::optional start, c10::optional end, c10::SymInt step) const { + return at::_ops::slice_Tensor::call(const_cast(*this), dim, start, end, step); +} + +// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor +inline at::Tensor Tensor::slice_scatter(const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, int64_t step) const { + return at::_ops::slice_scatter::call(const_cast(*this), src, dim, start.has_value() ? c10::make_optional(c10::SymInt(*start)) : c10::nullopt, end.has_value() ? c10::make_optional(c10::SymInt(*end)) : c10::nullopt, step); +} + +// aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor +inline at::Tensor Tensor::slice_scatter_symint(const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step) const { + return at::_ops::slice_scatter::call(const_cast(*this), src, dim, start, end, step); +} + +// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor +inline at::Tensor Tensor::select_scatter(const at::Tensor & src, int64_t dim, int64_t index) const { + return at::_ops::select_scatter::call(const_cast(*this), src, dim, index); +} + +// aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor +inline at::Tensor Tensor::select_scatter_symint(const at::Tensor & src, int64_t dim, c10::SymInt index) const { + return at::_ops::select_scatter::call(const_cast(*this), src, dim, index); +} + +// aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor +inline at::Tensor Tensor::diagonal_scatter(const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) const { + return at::_ops::diagonal_scatter::call(const_cast(*this), src, offset, dim1, dim2); +} + +// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor +inline at::Tensor Tensor::as_strided_scatter(const at::Tensor & src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional storage_offset) const { + return at::_ops::as_strided_scatter::call(const_cast(*this), src, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride), storage_offset.has_value() ? c10::make_optional(c10::SymInt(*storage_offset)) : c10::nullopt); +} + +// aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? 
storage_offset=None) -> Tensor +inline at::Tensor Tensor::as_strided_scatter_symint(const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional storage_offset) const { + return at::_ops::as_strided_scatter::call(const_cast(*this), src, size, stride, storage_offset); +} + +// aten::smm(Tensor self, Tensor mat2) -> Tensor +inline at::Tensor Tensor::smm(const at::Tensor & mat2) const { + return at::_ops::smm::call(const_cast(*this), mat2); +} + +// aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::softmax(int64_t dim, c10::optional dtype) const { + return at::_ops::softmax_int::call(const_cast(*this), dim, dtype); +} + +// aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::softmax(at::Dimname dim, c10::optional dtype) const { + return at::_ops::softmax_Dimname::call(const_cast(*this), dim, dtype); +} + +// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] +inline ::std::vector Tensor::unsafe_split(int64_t split_size, int64_t dim) const { + return at::_ops::unsafe_split_Tensor::call(const_cast(*this), split_size, dim); +} + +// aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[] +inline ::std::vector Tensor::unsafe_split_symint(c10::SymInt split_size, int64_t dim) const { + return at::_ops::unsafe_split_Tensor::call(const_cast(*this), split_size, dim); +} + +// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::split(int64_t split_size, int64_t dim) const { + return at::_ops::split_Tensor::call(const_cast(*this), split_size, dim); +} + +// aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::split_symint(c10::SymInt split_size, int64_t dim) const { + return at::_ops::split_Tensor::call(const_cast(*this), split_size, dim); +} + +// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::split(at::IntArrayRef split_size, int64_t dim) const { + return at::_ops::split_sizes::call(const_cast(*this), c10::fromIntArrayRefSlow(split_size), dim); +} + +// aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::split_symint(c10::SymIntArrayRef split_size, int64_t dim) const { + return at::_ops::split_sizes::call(const_cast(*this), split_size, dim); +} + +// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] +inline ::std::vector Tensor::unsafe_split_with_sizes(at::IntArrayRef split_sizes, int64_t dim) const { + return at::_ops::unsafe_split_with_sizes::call(const_cast(*this), c10::fromIntArrayRefSlow(split_sizes), dim); +} + +// aten::unsafe_split_with_sizes(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[] +inline ::std::vector Tensor::unsafe_split_with_sizes_symint(c10::SymIntArrayRef split_sizes, int64_t dim) const { + return at::_ops::unsafe_split_with_sizes::call(const_cast(*this), split_sizes, dim); +} + +// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::split_with_sizes(at::IntArrayRef split_sizes, int64_t dim) const { + return at::_ops::split_with_sizes::call(const_cast(*this), c10::fromIntArrayRefSlow(split_sizes), dim); +} + +// aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] +inline 
::std::vector Tensor::split_with_sizes_symint(c10::SymIntArrayRef split_sizes, int64_t dim) const { + return at::_ops::split_with_sizes::call(const_cast(*this), split_sizes, dim); +} + +// aten::hsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] +inline ::std::vector Tensor::hsplit(int64_t sections) const { + return at::_ops::hsplit_int::call(const_cast(*this), sections); +} + +// aten::hsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] +inline ::std::vector Tensor::hsplit(at::IntArrayRef indices) const { + return at::_ops::hsplit_array::call(const_cast(*this), indices); +} + +// aten::vsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] +inline ::std::vector Tensor::vsplit(int64_t sections) const { + return at::_ops::vsplit_int::call(const_cast(*this), sections); +} + +// aten::vsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] +inline ::std::vector Tensor::vsplit(at::IntArrayRef indices) const { + return at::_ops::vsplit_array::call(const_cast(*this), indices); +} + +// aten::dsplit.int(Tensor(a -> *) self, int sections) -> Tensor(a)[] +inline ::std::vector Tensor::dsplit(int64_t sections) const { + return at::_ops::dsplit_int::call(const_cast(*this), sections); +} + +// aten::dsplit.array(Tensor(a -> *) self, int[] indices) -> Tensor(a)[] +inline ::std::vector Tensor::dsplit(at::IntArrayRef indices) const { + return at::_ops::dsplit_array::call(const_cast(*this), indices); +} + +// aten::squeeze(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::squeeze() const { + return at::_ops::squeeze::call(const_cast(*this)); +} + +// aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) +inline at::Tensor Tensor::squeeze(int64_t dim) const { + return at::_ops::squeeze_dim::call(const_cast(*this), dim); +} + +// aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) +inline at::Tensor Tensor::squeeze(at::Dimname dim) const { + return at::_ops::squeeze_dimname::call(const_cast(*this), dim); +} + +// aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a) +inline at::Tensor Tensor::squeeze(at::IntArrayRef dim) const { + return at::_ops::squeeze_dims::call(const_cast(*this), dim); +} + +// aten::squeeze_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::squeeze_() const { + return at::_ops::squeeze_::call(const_cast(*this)); +} + +// aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!) +inline at::Tensor & Tensor::squeeze_(int64_t dim) const { + return at::_ops::squeeze__dim::call(const_cast(*this), dim); +} + +// aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!) +inline at::Tensor & Tensor::squeeze_(at::IntArrayRef dim) const { + return at::_ops::squeeze__dims::call(const_cast(*this), dim); +} + +// aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!) +inline at::Tensor & Tensor::squeeze_(at::Dimname dim) const { + return at::_ops::squeeze__dimname::call(const_cast(*this), dim); +} + +// aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::sspaddmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::sspaddmm::call(const_cast(*this), mat1, mat2, beta, alpha); +} + +// aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? 
return_complex=None) -> Tensor +inline at::Tensor Tensor::stft(int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool normalized, c10::optional onesided, c10::optional return_complex) const { + return at::_ops::stft::call(const_cast(*this), n_fft, hop_length, win_length, window, normalized, onesided, return_complex); +} + +// aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode="reflect", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor +inline at::Tensor Tensor::stft(int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional onesided, c10::optional return_complex) const { + return at::_ops::stft_center::call(const_cast(*this), n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex); +} + +// aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor +inline at::Tensor Tensor::istft(int64_t n_fft, c10::optional hop_length, c10::optional win_length, const c10::optional & window, bool center, bool normalized, c10::optional onesided, c10::optional length, bool return_complex) const { + return at::_ops::istft::call(const_cast(*this), n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex); +} + +// aten::stride.Dimname(Tensor self, Dimname dim) -> int +inline int64_t Tensor::stride(at::Dimname dim) const { + return at::_ops::stride_Dimname::call(const_cast(*this), dim); +} + +// aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::sum(c10::optional dtype) const { + return at::_ops::sum::call(const_cast(*this), dtype); +} + +// aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::sum(at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) const { + return at::_ops::sum_dim_IntList::call(const_cast(*this), dim, keepdim, dtype); +} + +// aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::sum(at::DimnameList dim, bool keepdim, c10::optional dtype) const { + return at::_ops::sum_dim_DimnameList::call(const_cast(*this), dim, keepdim, dtype); +} + +// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::nansum(at::OptionalIntArrayRef dim, bool keepdim, c10::optional dtype) const { + return at::_ops::nansum::call(const_cast(*this), dim, keepdim, dtype); +} + +// aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor +inline at::Tensor Tensor::sum_to_size(at::IntArrayRef size) const { + return at::_ops::sum_to_size::call(const_cast(*this), c10::fromIntArrayRefSlow(size)); +} + +// aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor +inline at::Tensor Tensor::sum_to_size_symint(c10::SymIntArrayRef size) const { + return at::_ops::sum_to_size::call(const_cast(*this), size); +} + +// aten::sqrt(Tensor self) -> Tensor +inline at::Tensor Tensor::sqrt() const { + return at::_ops::sqrt::call(const_cast(*this)); +} + +// aten::sqrt_(Tensor(a!) self) -> Tensor(a!) 
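// A minimal usage sketch for the reductions above (assuming a hypothetical
// tensor x = at::rand({2, 3, 4})):
//
//   at::Tensor total = x.sum();                  // sum over all elements, 0-dim result
//   at::Tensor red   = x.sum_to_size({2, 1, 4}); // reduce dim 1 only, shape [2, 1, 4]
//   at::Tensor rms   = (x * x).mean().sqrt();    // root-mean-square as a 0-dim tensor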
+inline at::Tensor & Tensor::sqrt_() const { + return at::_ops::sqrt_::call(const_cast(*this)); +} + +// aten::square(Tensor self) -> Tensor +inline at::Tensor Tensor::square() const { + return at::_ops::square::call(const_cast(*this)); +} + +// aten::square_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::square_() const { + return at::_ops::square_::call(const_cast(*this)); +} + +// aten::std(Tensor self, bool unbiased=True) -> Tensor +inline at::Tensor Tensor::std(bool unbiased) const { + return at::_ops::std::call(const_cast(*this), unbiased); +} + +// aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::std(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) const { + return at::_ops::std_dim::call(const_cast(*this), dim, unbiased, keepdim); +} + +// aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::std(at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim) const { + return at::_ops::std_correction::call(const_cast(*this), dim, correction, keepdim); +} + +// aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::std(at::DimnameList dim, bool unbiased, bool keepdim) const { + return at::_ops::std_names_dim::call(const_cast(*this), dim, unbiased, keepdim); +} + +// aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::std(at::DimnameList dim, const c10::optional & correction, bool keepdim) const { + return at::_ops::std_correction_names::call(const_cast(*this), dim, correction, keepdim); +} + +// aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::prod(c10::optional dtype) const { + return at::_ops::prod::call(const_cast(*this), dtype); +} + +// aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::prod(int64_t dim, bool keepdim, c10::optional dtype) const { + return at::_ops::prod_dim_int::call(const_cast(*this), dim, keepdim, dtype); +} + +// aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::prod(at::Dimname dim, bool keepdim, c10::optional dtype) const { + return at::_ops::prod_dim_Dimname::call(const_cast(*this), dim, keepdim, dtype); +} + +// aten::t(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::t() const { + return at::_ops::t::call(const_cast(*this)); +} + +// aten::t_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::t_() const { + return at::_ops::t_::call(const_cast(*this)); +} + +// aten::tan(Tensor self) -> Tensor +inline at::Tensor Tensor::tan() const { + return at::_ops::tan::call(const_cast(*this)); +} + +// aten::tan_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::tan_() const { + return at::_ops::tan_::call(const_cast(*this)); +} + +// aten::tanh(Tensor self) -> Tensor +inline at::Tensor Tensor::tanh() const { + return at::_ops::tanh::call(const_cast(*this)); +} + +// aten::tanh_(Tensor(a!) self) -> Tensor(a!) 
+inline at::Tensor & Tensor::tanh_() const { + return at::_ops::tanh_::call(const_cast(*this)); +} + +// aten::tile(Tensor self, SymInt[] dims) -> Tensor +inline at::Tensor Tensor::tile(at::IntArrayRef dims) const { + return at::_ops::tile::call(const_cast(*this), c10::fromIntArrayRefSlow(dims)); +} + +// aten::tile(Tensor self, SymInt[] dims) -> Tensor +inline at::Tensor Tensor::tile_symint(c10::SymIntArrayRef dims) const { + return at::_ops::tile::call(const_cast(*this), dims); +} + +// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) +inline at::Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const { + return at::_ops::transpose_int::call(const_cast(*this), dim0, dim1); +} + +// aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a) +inline at::Tensor Tensor::transpose(at::Dimname dim0, at::Dimname dim1) const { + return at::_ops::transpose_Dimname::call(const_cast(*this), dim0, dim1); +} + +// aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) +inline at::Tensor & Tensor::transpose_(int64_t dim0, int64_t dim1) const { + return at::_ops::transpose_::call(const_cast(*this), dim0, dim1); +} + +// aten::flip(Tensor self, int[] dims) -> Tensor +inline at::Tensor Tensor::flip(at::IntArrayRef dims) const { + return at::_ops::flip::call(const_cast(*this), dims); +} + +// aten::fliplr(Tensor self) -> Tensor +inline at::Tensor Tensor::fliplr() const { + return at::_ops::fliplr::call(const_cast(*this)); +} + +// aten::flipud(Tensor self) -> Tensor +inline at::Tensor Tensor::flipud() const { + return at::_ops::flipud::call(const_cast(*this)); +} + +// aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor +inline at::Tensor Tensor::roll(at::IntArrayRef shifts, at::IntArrayRef dims) const { + return at::_ops::roll::call(const_cast(*this), c10::fromIntArrayRefSlow(shifts), dims); +} + +// aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor +inline at::Tensor Tensor::roll_symint(c10::SymIntArrayRef shifts, at::IntArrayRef dims) const { + return at::_ops::roll::call(const_cast(*this), shifts, dims); +} + +// aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor +inline at::Tensor Tensor::rot90(int64_t k, at::IntArrayRef dims) const { + return at::_ops::rot90::call(const_cast(*this), k, dims); +} + +// aten::_nested_tensor_size(Tensor self) -> Tensor +inline at::Tensor Tensor::_nested_tensor_size() const { + return at::_ops::_nested_tensor_size::call(const_cast(*this)); +} + +// aten::_nested_tensor_strides(Tensor self) -> Tensor +inline at::Tensor Tensor::_nested_tensor_strides() const { + return at::_ops::_nested_tensor_strides::call(const_cast(*this)); +} + +// aten::_nested_tensor_storage_offsets(Tensor self) -> Tensor +inline at::Tensor Tensor::_nested_tensor_storage_offsets() const { + return at::_ops::_nested_tensor_storage_offsets::call(const_cast(*this)); +} + +// aten::trunc(Tensor self) -> Tensor +inline at::Tensor Tensor::trunc() const { + return at::_ops::trunc::call(const_cast(*this)); +} + +// aten::trunc_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::trunc_() const { + return at::_ops::trunc_::call(const_cast(*this)); +} + +// aten::fix(Tensor self) -> Tensor +inline at::Tensor Tensor::fix() const { + return at::_ops::fix::call(const_cast(*this)); +} + +// aten::fix_(Tensor(a!) self) -> Tensor(a!) 
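// A minimal usage sketch for the transpose/flip ops above (assuming a
// hypothetical tensor x = at::rand({2, 3})):
//
//   at::Tensor tr = x.transpose(0, 1);                     // shape [3, 2]
//   at::Tensor fl = x.flip({0});                           // reverse along dim 0
//   at::Tensor ro = x.roll(/*shifts=*/{1}, /*dims=*/{1});  // circular shift along dim 1
//   at::Tensor r9 = x.rot90(/*k=*/1, /*dims=*/{0, 1});     // rotate 90 degrees in the 0-1 plane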
+inline at::Tensor & Tensor::fix_() const { + return at::_ops::fix_::call(const_cast(*this)); +} + +// aten::type_as(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::type_as(const at::Tensor & other) const { + return at::_ops::type_as::call(const_cast(*this), other); +} + +// aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a) +inline at::Tensor Tensor::unsqueeze(int64_t dim) const { + return at::_ops::unsqueeze::call(const_cast(*this), dim); +} + +// aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!) +inline at::Tensor & Tensor::unsqueeze_(int64_t dim) const { + return at::_ops::unsqueeze_::call(const_cast(*this), dim); +} + +// aten::var(Tensor self, bool unbiased=True) -> Tensor +inline at::Tensor Tensor::var(bool unbiased) const { + return at::_ops::var::call(const_cast(*this), unbiased); +} + +// aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::var(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) const { + return at::_ops::var_dim::call(const_cast(*this), dim, unbiased, keepdim); +} + +// aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::var(at::OptionalIntArrayRef dim, const c10::optional & correction, bool keepdim) const { + return at::_ops::var_correction::call(const_cast(*this), dim, correction, keepdim); +} + +// aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::var(at::DimnameList dim, bool unbiased, bool keepdim) const { + return at::_ops::var_names_dim::call(const_cast(*this), dim, unbiased, keepdim); +} + +// aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::var(at::DimnameList dim, const c10::optional & correction, bool keepdim) const { + return at::_ops::var_correction_names::call(const_cast(*this), dim, correction, keepdim); +} + +// aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a) +inline at::Tensor Tensor::view_as(const at::Tensor & other) const { + return at::_ops::view_as::call(const_cast(*this), other); +} + +// aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::where(const at::Tensor & condition, const at::Tensor & other) const { + return at::_ops::where_self::call(condition, const_cast(*this), other); +} + +// aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::where(const at::Tensor & condition, const at::Scalar & other) const { + return at::_ops::where_ScalarOther::call(condition, const_cast(*this), other); +} + +// aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor +inline at::Tensor Tensor::norm(const c10::optional & p, at::ScalarType dtype) const { + return at::_ops::norm_ScalarOpt_dtype::call(const_cast(*this), p, dtype); +} + +// aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor +inline at::Tensor Tensor::norm(const at::Scalar & p) const { + return at::_ops::norm_Scalar::call(const_cast(*this), p); +} + +// aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? 
p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor +inline at::Tensor Tensor::norm(const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) const { + return at::_ops::norm_ScalarOpt_dim_dtype::call(const_cast(*this), p, dim, keepdim, dtype); +} + +// aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::norm(const c10::optional & p, at::IntArrayRef dim, bool keepdim) const { + return at::_ops::norm_ScalarOpt_dim::call(const_cast(*this), p, dim, keepdim); +} + +// aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor +inline at::Tensor Tensor::norm(const c10::optional & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) const { + return at::_ops::norm_names_ScalarOpt_dim_dtype::call(const_cast(*this), p, dim, keepdim, dtype); +} + +// aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor +inline at::Tensor Tensor::norm(const c10::optional & p, at::DimnameList dim, bool keepdim) const { + return at::_ops::norm_names_ScalarOpt_dim::call(const_cast(*this), p, dim, keepdim); +} + +// aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent) +inline ::std::tuple Tensor::frexp() const { + return at::_ops::frexp_Tensor::call(const_cast(*this)); +} + +// aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor Tensor::clone(c10::optional memory_format) const { + return at::_ops::clone::call(const_cast(*this), memory_format); +} + +// aten::positive(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::positive() const { + return at::_ops::positive::call(const_cast(*this)); +} + +// aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!) +inline const at::Tensor & Tensor::resize_as_(const at::Tensor & the_template, c10::optional memory_format) const { + return at::_ops::resize_as_::call(const_cast(*this), the_template, memory_format); +} + +// aten::resize_as_sparse_(Tensor(a!) self, Tensor the_template) -> Tensor(a!) +inline const at::Tensor & Tensor::resize_as_sparse_(const at::Tensor & the_template) const { + return at::_ops::resize_as_sparse_::call(const_cast(*this), the_template); +} + +// aten::zero_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::zero_() const { + return at::_ops::zero_::call(const_cast(*this)); +} + +// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::sub(const at::Tensor & other, const at::Scalar & alpha) const { + return at::_ops::sub_Tensor::call(const_cast(*this), other, alpha); +} + +// aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::sub_(const at::Tensor & other, const at::Scalar & alpha) const { + return at::_ops::sub__Tensor::call(const_cast(*this), other, alpha); +} + +// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::sub(const at::Scalar & other, const at::Scalar & alpha) const { + return at::_ops::sub_Scalar::call(const_cast(*this), other, alpha); +} + +// aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) 
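// A minimal usage sketch for the norm/clone/sub ops above (assuming a
// hypothetical tensor x = at::rand({2, 3})):
//
//   at::Tensor l2 = x.norm(2);               // norm.Scalar overload (L2 norm)
//   at::Tensor c  = x.clone();               // deep copy
//   c.zero_();                               // in-place fill with zeros
//   at::Tensor d  = x.sub(c, /*alpha=*/1);   // x - alpha * c
//   auto [mantissa, exponent] = x.frexp();   // two Tensors with x == mantissa * 2^exponent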
+inline at::Tensor & Tensor::sub_(const at::Scalar & other, const at::Scalar & alpha) const { + return at::_ops::sub__Scalar::call(const_cast(*this), other, alpha); +} + +// aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::subtract(const at::Tensor & other, const at::Scalar & alpha) const { + return at::_ops::subtract_Tensor::call(const_cast(*this), other, alpha); +} + +// aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::subtract_(const at::Tensor & other, const at::Scalar & alpha) const { + return at::_ops::subtract__Tensor::call(const_cast(*this), other, alpha); +} + +// aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::subtract(const at::Scalar & other, const at::Scalar & alpha) const { + return at::_ops::subtract_Scalar::call(const_cast(*this), other, alpha); +} + +// aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::subtract_(const at::Scalar & other, const at::Scalar & alpha) const { + return at::_ops::subtract__Scalar::call(const_cast(*this), other, alpha); +} + +// aten::heaviside(Tensor self, Tensor values) -> Tensor +inline at::Tensor Tensor::heaviside(const at::Tensor & values) const { + return at::_ops::heaviside::call(const_cast(*this), values); +} + +// aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!) +inline at::Tensor & Tensor::heaviside_(const at::Tensor & values) const { + return at::_ops::heaviside_::call(const_cast(*this), values); +} + +// aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::addmm(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addmm::call(const_cast(*this), mat1, mat2, beta, alpha); +} + +// aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::addmm_(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addmm_::call(const_cast(*this), mat1, mat2, beta, alpha); +} + +// aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor +inline at::Tensor Tensor::_addmm_activation(const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) const { + return at::_ops::_addmm_activation::call(const_cast(*this), mat1, mat2, beta, alpha, use_gelu); +} + +// aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) +inline const at::Tensor & Tensor::sparse_resize_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const { + return at::_ops::sparse_resize_::call(const_cast(*this), size, sparse_dim, dense_dim); +} + +// aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) 
+inline const at::Tensor & Tensor::sparse_resize_and_clear_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const { + return at::_ops::sparse_resize_and_clear_::call(const_cast(*this), size, sparse_dim, dense_dim); +} + +// aten::sparse_mask(Tensor self, Tensor mask) -> Tensor +inline at::Tensor Tensor::sparse_mask(const at::Tensor & mask) const { + return at::_ops::sparse_mask::call(const_cast(*this), mask); +} + +// aten::_sparse_mask_projection(Tensor self, Tensor mask, bool accumulate_matches=False) -> Tensor +inline at::Tensor Tensor::_sparse_mask_projection(const at::Tensor & mask, bool accumulate_matches) const { + return at::_ops::_sparse_mask_projection::call(const_cast(*this), mask, accumulate_matches); +} + +// aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor +inline at::Tensor Tensor::to_dense(c10::optional dtype, c10::optional masked_grad) const { + return at::_ops::to_dense::call(const_cast(*this), dtype, masked_grad); +} + +// aten::_to_dense(Tensor self, ScalarType? dtype=None, bool? masked_grad=None) -> Tensor +inline at::Tensor Tensor::_to_dense(c10::optional dtype, c10::optional masked_grad) const { + return at::_ops::_to_dense::call(const_cast(*this), dtype, masked_grad); +} + +// aten::sparse_dim(Tensor self) -> int +inline int64_t Tensor::sparse_dim() const { + return at::_ops::sparse_dim::call(const_cast(*this)); +} + +// aten::_dimI(Tensor self) -> int +inline int64_t Tensor::_dimI() const { + return at::_ops::_dimI::call(const_cast(*this)); +} + +// aten::dense_dim(Tensor self) -> int +inline int64_t Tensor::dense_dim() const { + return at::_ops::dense_dim::call(const_cast(*this)); +} + +// aten::_dimV(Tensor self) -> int +inline int64_t Tensor::_dimV() const { + return at::_ops::_dimV::call(const_cast(*this)); +} + +// aten::_nnz(Tensor self) -> int +inline int64_t Tensor::_nnz() const { + return at::_ops::_nnz::call(const_cast(*this)); +} + +// aten::coalesce(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::coalesce() const { + return at::_ops::coalesce::call(const_cast(*this)); +} + +// aten::is_coalesced(Tensor self) -> bool +inline bool Tensor::is_coalesced() const { + return at::_ops::is_coalesced::call(const_cast(*this)); +} + +// aten::_indices(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::_indices() const { + return at::_ops::_indices::call(const_cast(*this)); +} + +// aten::_values(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::_values() const { + return at::_ops::_values::call(const_cast(*this)); +} + +// aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) 
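+// [editorial sketch, not generated code] A minimal usage example for the sparse accessors
+// declared in this stretch (coalesce, is_coalesced, _nnz, indices, values); it assumes a
+// working ATen build, and the shapes/values are illustrative only.
+//   at::Tensor idx = at::stack({at::arange(3, at::kLong), at::arange(3, at::kLong)}); // 2 x 3 COO indices
+//   at::Tensor val = at::ones({3});
+//   at::Tensor s = at::sparse_coo_tensor(idx, val, {3, 3}).coalesce();
+//   bool ok = s.is_coalesced();   // true after coalesce()
+//   int64_t nnz = s._nnz();       // 3 stored elements
+//   at::Tensor i = s.indices();   // public accessor; requires a coalesced tensor
+// The schema comment just above documents the _coalesced_ definition that follows.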
+inline at::Tensor & Tensor::_coalesced_(bool coalesced) const { + return at::_ops::_coalesced_::call(const_cast(*this), coalesced); +} + +// aten::indices(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::indices() const { + return at::_ops::indices::call(const_cast(*this)); +} + +// aten::values(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::values() const { + return at::_ops::values::call(const_cast(*this)); +} + +// aten::crow_indices(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::crow_indices() const { + return at::_ops::crow_indices::call(const_cast(*this)); +} + +// aten::col_indices(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::col_indices() const { + return at::_ops::col_indices::call(const_cast(*this)); +} + +// aten::ccol_indices(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::ccol_indices() const { + return at::_ops::ccol_indices::call(const_cast(*this)); +} + +// aten::row_indices(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::row_indices() const { + return at::_ops::row_indices::call(const_cast(*this)); +} + +// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] +inline ::std::vector Tensor::unbind(int64_t dim) const { + return at::_ops::unbind_int::call(const_cast(*this), dim); +} + +// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] +inline ::std::vector Tensor::unbind(at::Dimname dim) const { + return at::_ops::unbind_Dimname::call(const_cast(*this), dim); +} + +// aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor +inline at::Tensor Tensor::to_sparse(int64_t sparse_dim) const { + return at::_ops::to_sparse_sparse_dim::call(const_cast(*this), sparse_dim); +} + +// aten::_to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor +inline at::Tensor Tensor::_to_sparse(int64_t sparse_dim) const { + return at::_ops::_to_sparse_sparse_dim::call(const_cast(*this), sparse_dim); +} + +// aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor +inline at::Tensor Tensor::to_sparse(c10::optional layout, at::OptionalIntArrayRef blocksize, c10::optional dense_dim) const { + return at::_ops::to_sparse::call(const_cast(*this), layout, blocksize, dense_dim); +} + +// aten::_to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor +inline at::Tensor Tensor::_to_sparse(c10::optional layout, at::OptionalIntArrayRef blocksize, c10::optional dense_dim) const { + return at::_ops::_to_sparse::call(const_cast(*this), layout, blocksize, dense_dim); +} + +// aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor +inline at::Tensor Tensor::to_sparse_csr(c10::optional dense_dim) const { + return at::_ops::to_sparse_csr::call(const_cast(*this), dense_dim); +} + +// aten::_to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor +inline at::Tensor Tensor::_to_sparse_csr(c10::optional dense_dim) const { + return at::_ops::_to_sparse_csr::call(const_cast(*this), dense_dim); +} + +// aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor +inline at::Tensor Tensor::to_sparse_csc(c10::optional dense_dim) const { + return at::_ops::to_sparse_csc::call(const_cast(*this), dense_dim); +} + +// aten::_to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor +inline at::Tensor Tensor::_to_sparse_csc(c10::optional dense_dim) const { + return at::_ops::_to_sparse_csc::call(const_cast(*this), dense_dim); +} + +// aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? 
dense_dim=None) -> Tensor +inline at::Tensor Tensor::to_sparse_bsr(at::IntArrayRef blocksize, c10::optional dense_dim) const { + return at::_ops::to_sparse_bsr::call(const_cast(*this), blocksize, dense_dim); +} + +// aten::_to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor +inline at::Tensor Tensor::_to_sparse_bsr(at::IntArrayRef blocksize, c10::optional dense_dim) const { + return at::_ops::_to_sparse_bsr::call(const_cast(*this), blocksize, dense_dim); +} + +// aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor +inline at::Tensor Tensor::to_sparse_bsc(at::IntArrayRef blocksize, c10::optional dense_dim) const { + return at::_ops::to_sparse_bsc::call(const_cast(*this), blocksize, dense_dim); +} + +// aten::_to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor +inline at::Tensor Tensor::_to_sparse_bsc(at::IntArrayRef blocksize, c10::optional dense_dim) const { + return at::_ops::_to_sparse_bsc::call(const_cast(*this), blocksize, dense_dim); +} + +// aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor +inline at::Tensor Tensor::to_mkldnn(c10::optional dtype) const { + return at::_ops::to_mkldnn::call(const_cast(*this), dtype); +} + +// aten::dequantize.self(Tensor self) -> Tensor +inline at::Tensor Tensor::dequantize() const { + return at::_ops::dequantize_self::call(const_cast(*this)); +} + +// aten::q_scale(Tensor self) -> float +inline double Tensor::q_scale() const { + return at::_ops::q_scale::call(const_cast(*this)); +} + +// aten::q_zero_point(Tensor self) -> int +inline int64_t Tensor::q_zero_point() const { + return at::_ops::q_zero_point::call(const_cast(*this)); +} + +// aten::q_per_channel_scales(Tensor self) -> Tensor +inline at::Tensor Tensor::q_per_channel_scales() const { + return at::_ops::q_per_channel_scales::call(const_cast(*this)); +} + +// aten::q_per_channel_zero_points(Tensor self) -> Tensor +inline at::Tensor Tensor::q_per_channel_zero_points() const { + return at::_ops::q_per_channel_zero_points::call(const_cast(*this)); +} + +// aten::q_per_channel_axis(Tensor self) -> int +inline int64_t Tensor::q_per_channel_axis() const { + return at::_ops::q_per_channel_axis::call(const_cast(*this)); +} + +// aten::int_repr(Tensor self) -> Tensor +inline at::Tensor Tensor::int_repr() const { + return at::_ops::int_repr::call(const_cast(*this)); +} + +// aten::qscheme(Tensor self) -> QScheme +inline at::QScheme Tensor::qscheme() const { + return at::_ops::qscheme::call(const_cast(*this)); +} + +// aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a) +inline at::Tensor Tensor::_autocast_to_reduced_precision(bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) const { + return at::_ops::_autocast_to_reduced_precision::call(const_cast(*this), cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype); +} + +// aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a) +inline at::Tensor Tensor::_autocast_to_full_precision(bool cuda_enabled, bool cpu_enabled) const { + return at::_ops::_autocast_to_full_precision::call(const_cast(*this), cuda_enabled, cpu_enabled); +} + +// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor(a) +inline at::Tensor Tensor::to(at::TensorOptions options, bool non_blocking, bool copy, c10::optional memory_format) const { + return at::_ops::to_dtype_layout::call(const_cast(*this), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, copy, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format)); +} + +// aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) +inline at::Tensor Tensor::to(c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, bool non_blocking, bool copy, c10::optional memory_format) const { + return at::_ops::to_dtype_layout::call(const_cast(*this), dtype, layout, device, pin_memory, non_blocking, copy, memory_format); +} + +// aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) +inline at::Tensor Tensor::to(at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional memory_format) const { + return at::_ops::to_device::call(const_cast(*this), device, dtype, non_blocking, copy, memory_format); +} + +// aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) +inline at::Tensor Tensor::to(at::ScalarType dtype, bool non_blocking, bool copy, c10::optional memory_format) const { + return at::_ops::to_dtype::call(const_cast(*this), dtype, non_blocking, copy, memory_format); +} + +// aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a) +inline at::Tensor Tensor::to(const at::Tensor & other, bool non_blocking, bool copy, c10::optional memory_format) const { + return at::_ops::to_other::call(const_cast(*this), other, non_blocking, copy, memory_format); +} + +// aten::item(Tensor self) -> Scalar +inline at::Scalar Tensor::item() const { + return at::_ops::item::call(const_cast(*this)); +} + +// aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!) +inline at::Tensor & Tensor::set_(at::Storage source) const { + return at::_ops::set__source_Storage::call(const_cast(*this), source); +} + +// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) +inline at::Tensor & Tensor::set_(at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) const { + return at::_ops::set__source_Storage_storage_offset::call(const_cast(*this), source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride)); +} + +// aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) +inline at::Tensor & Tensor::set__symint(at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const { + return at::_ops::set__source_Storage_storage_offset::call(const_cast(*this), source, storage_offset, size, stride); +} + +// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) 
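+// [editorial sketch, not generated code] A hedged example for the to()/item() overloads
+// declared above; it assumes a CPU-only build and uses only standard ATen factory calls.
+//   at::Tensor x = at::randn({2, 2});
+//   at::Tensor d = x.to(at::kDouble);                               // to.dtype overload
+//   at::Tensor c = x.to(at::Device(at::kCPU), at::kFloat,
+//                       /*non_blocking=*/false, /*copy=*/true);     // to.device overload
+//   at::Scalar total = d.sum().item();                              // item() needs a one-element tensor
+// The schema comment just above documents the set_ overload that follows.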
+inline at::Tensor & Tensor::set_(const at::Tensor & source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride) const { + return at::_ops::set__source_Tensor_storage_offset::call(const_cast(*this), source, storage_offset, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride)); +} + +// aten::set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!) +inline at::Tensor & Tensor::set__symint(const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const { + return at::_ops::set__source_Tensor_storage_offset::call(const_cast(*this), source, storage_offset, size, stride); +} + +// aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) +inline at::Tensor & Tensor::set_(const at::Tensor & source) const { + return at::_ops::set__source_Tensor::call(const_cast(*this), source); +} + +// aten::set_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::set_() const { + return at::_ops::set_::call(const_cast(*this)); +} + +// aten::is_set_to(Tensor self, Tensor tensor) -> bool +inline bool Tensor::is_set_to(const at::Tensor & tensor) const { + return at::_ops::is_set_to::call(const_cast(*this), tensor); +} + +// aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!) +inline at::Tensor & Tensor::masked_fill_(const at::Tensor & mask, const at::Scalar & value) const { + return at::_ops::masked_fill__Scalar::call(const_cast(*this), mask, value); +} + +// aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor +inline at::Tensor Tensor::masked_fill(const at::Tensor & mask, const at::Scalar & value) const { + return at::_ops::masked_fill_Scalar::call(const_cast(*this), mask, value); +} + +// aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) +inline at::Tensor & Tensor::masked_fill_(const at::Tensor & mask, const at::Tensor & value) const { + return at::_ops::masked_fill__Tensor::call(const_cast(*this), mask, value); +} + +// aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor +inline at::Tensor Tensor::masked_fill(const at::Tensor & mask, const at::Tensor & value) const { + return at::_ops::masked_fill_Tensor::call(const_cast(*this), mask, value); +} + +// aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) +inline at::Tensor & Tensor::masked_scatter_(const at::Tensor & mask, const at::Tensor & source) const { + return at::_ops::masked_scatter_::call(const_cast(*this), mask, source); +} + +// aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor +inline at::Tensor Tensor::masked_scatter(const at::Tensor & mask, const at::Tensor & source) const { + return at::_ops::masked_scatter::call(const_cast(*this), mask, source); +} + +// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a) +inline at::Tensor Tensor::view(at::IntArrayRef size) const { + return at::_ops::view::call(const_cast(*this), c10::fromIntArrayRefSlow(size)); +} + +// aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a) +inline at::Tensor Tensor::view_symint(c10::SymIntArrayRef size) const { + return at::_ops::view::call(const_cast(*this), size); +} + +// aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a) +inline at::Tensor Tensor::view(at::ScalarType dtype) const { + return at::_ops::view_dtype::call(const_cast(*this), dtype); +} + +// aten::put_(Tensor(a!) 
self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!) +inline at::Tensor & Tensor::put_(const at::Tensor & index, const at::Tensor & source, bool accumulate) const { + return at::_ops::put_::call(const_cast(*this), index, source, accumulate); +} + +// aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> Tensor +inline at::Tensor Tensor::put(const at::Tensor & index, const at::Tensor & source, bool accumulate) const { + return at::_ops::put::call(const_cast(*this), index, source, accumulate); +} + +// aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!) +inline at::Tensor & Tensor::index_add_(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) const { + return at::_ops::index_add_::call(const_cast(*this), dim, index, source, alpha); +} + +// aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::index_add(int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) const { + return at::_ops::index_add::call(const_cast(*this), dim, index, source, alpha); +} + +// aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::index_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) const { + return at::_ops::index_add_dimname::call(const_cast(*this), dim, index, source, alpha); +} + +// aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!) +inline at::Tensor & Tensor::index_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) const { + return at::_ops::index_reduce_::call(const_cast(*this), dim, index, source, reduce, include_self); +} + +// aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor +inline at::Tensor Tensor::index_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) const { + return at::_ops::index_reduce::call(const_cast(*this), dim, index, source, reduce, include_self); +} + +// aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) +inline at::Tensor & Tensor::index_fill_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const { + return at::_ops::index_fill__int_Scalar::call(const_cast(*this), dim, index, value); +} + +// aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor Tensor::index_fill(int64_t dim, const at::Tensor & index, const at::Scalar & value) const { + return at::_ops::index_fill_int_Scalar::call(const_cast(*this), dim, index, value); +} + +// aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) 
+inline at::Tensor & Tensor::index_fill_(int64_t dim, const at::Tensor & index, const at::Tensor & value) const { + return at::_ops::index_fill__int_Tensor::call(const_cast(*this), dim, index, value); +} + +// aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor +inline at::Tensor Tensor::index_fill(int64_t dim, const at::Tensor & index, const at::Tensor & value) const { + return at::_ops::index_fill_int_Tensor::call(const_cast(*this), dim, index, value); +} + +// aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) +inline at::Tensor & Tensor::index_fill_(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const { + return at::_ops::index_fill__Dimname_Scalar::call(const_cast(*this), dim, index, value); +} + +// aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!) +inline at::Tensor & Tensor::index_fill_(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const { + return at::_ops::index_fill__Dimname_Tensor::call(const_cast(*this), dim, index, value); +} + +// aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor Tensor::index_fill(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const { + return at::_ops::index_fill_Dimname_Scalar::call(const_cast(*this), dim, index, value); +} + +// aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor +inline at::Tensor Tensor::index_fill(at::Dimname dim, const at::Tensor & index, const at::Tensor & value) const { + return at::_ops::index_fill_Dimname_Tensor::call(const_cast(*this), dim, index, value); +} + +// aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor +inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src) const { + return at::_ops::scatter_src::call(const_cast(*this), dim, index, src); +} + +// aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) +inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const { + return at::_ops::scatter__src::call(const_cast(*this), dim, index, src); +} + +// aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value) const { + return at::_ops::scatter_value::call(const_cast(*this), dim, index, value); +} + +// aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) +inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value) const { + return at::_ops::scatter__value::call(const_cast(*this), dim, index, value); +} + +// aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor +inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const { + return at::_ops::scatter_reduce::call(const_cast(*this), dim, index, src, reduce); +} + +// aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!) 
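+// [editorial sketch, not generated code] A small scatter_/gather round trip using the
+// overloads declared above; shapes and values are illustrative only.
+//   at::Tensor src   = at::ones({2, 4});
+//   at::Tensor index = at::zeros({2, 4}, at::kLong);   // every write targets row 0
+//   at::Tensor out   = at::zeros({3, 4});
+//   out.scatter_(/*dim=*/0, index, src);               // out[index[i][j]][j] = src[i][j]
+//   at::Tensor back  = out.gather(/*dim=*/0, index);   // reads the same positions back; equals src
+// The schema comment just above documents the scatter_ overload that follows.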
+inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) const { + return at::_ops::scatter__reduce::call(const_cast(*this), dim, index, src, reduce); +} + +// aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor +inline at::Tensor Tensor::scatter(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const { + return at::_ops::scatter_value_reduce::call(const_cast(*this), dim, index, value, reduce); +} + +// aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!) +inline at::Tensor & Tensor::scatter_(int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) const { + return at::_ops::scatter__value_reduce::call(const_cast(*this), dim, index, value, reduce); +} + +// aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor +inline at::Tensor Tensor::scatter(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const { + return at::_ops::scatter_dimname_src::call(const_cast(*this), dim, index, src); +} + +// aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor +inline at::Tensor Tensor::scatter(at::Dimname dim, const at::Tensor & index, const at::Scalar & value) const { + return at::_ops::scatter_dimname_value::call(const_cast(*this), dim, index, value); +} + +// aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor +inline at::Tensor Tensor::scatter_add(int64_t dim, const at::Tensor & index, const at::Tensor & src) const { + return at::_ops::scatter_add::call(const_cast(*this), dim, index, src); +} + +// aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) +inline at::Tensor & Tensor::scatter_add_(int64_t dim, const at::Tensor & index, const at::Tensor & src) const { + return at::_ops::scatter_add_::call(const_cast(*this), dim, index, src); +} + +// aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor +inline at::Tensor Tensor::scatter_add(at::Dimname dim, const at::Tensor & index, const at::Tensor & src) const { + return at::_ops::scatter_add_dimname::call(const_cast(*this), dim, index, src); +} + +// aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor +inline at::Tensor Tensor::scatter_reduce(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) const { + return at::_ops::scatter_reduce_two::call(const_cast(*this), dim, index, src, reduce, include_self); +} + +// aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!) +inline at::Tensor & Tensor::scatter_reduce_(int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) const { + return at::_ops::scatter_reduce__two::call(const_cast(*this), dim, index, src, reduce, include_self); +} + +// aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::eq_(const at::Scalar & other) const { + return at::_ops::eq__Scalar::call(const_cast(*this), other); +} + +// aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+inline at::Tensor & Tensor::eq_(const at::Tensor & other) const { + return at::_ops::eq__Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::bitwise_and(const at::Scalar & other) const { + return at::_ops::bitwise_and_Scalar::call(const_cast(*this), other); +} + +// aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::bitwise_and(const at::Tensor & other) const { + return at::_ops::bitwise_and_Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_and_(const at::Scalar & other) const { + return at::_ops::bitwise_and__Scalar::call(const_cast(*this), other); +} + +// aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_and_(const at::Tensor & other) const { + return at::_ops::bitwise_and__Tensor::call(const_cast(*this), other); +} + +// aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::__and__(const at::Scalar & other) const { + return at::_ops::__and___Scalar::call(const_cast(*this), other); +} + +// aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::__and__(const at::Tensor & other) const { + return at::_ops::__and___Tensor::call(const_cast(*this), other); +} + +// aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::__iand__(const at::Scalar & other) const { + return at::_ops::__iand___Scalar::call(const_cast(*this), other); +} + +// aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::__iand__(const at::Tensor & other) const { + return at::_ops::__iand___Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::bitwise_or(const at::Scalar & other) const { + return at::_ops::bitwise_or_Scalar::call(const_cast(*this), other); +} + +// aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::bitwise_or(const at::Tensor & other) const { + return at::_ops::bitwise_or_Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_or_(const at::Scalar & other) const { + return at::_ops::bitwise_or__Scalar::call(const_cast(*this), other); +} + +// aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_or_(const at::Tensor & other) const { + return at::_ops::bitwise_or__Tensor::call(const_cast(*this), other); +} + +// aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::__or__(const at::Scalar & other) const { + return at::_ops::__or___Scalar::call(const_cast(*this), other); +} + +// aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::__or__(const at::Tensor & other) const { + return at::_ops::__or___Tensor::call(const_cast(*this), other); +} + +// aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::__ior__(const at::Scalar & other) const { + return at::_ops::__ior___Scalar::call(const_cast(*this), other); +} + +// aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
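+// [editorial sketch, not generated code] The bitwise helpers in this block operate
+// elementwise on integer (or bool) tensors; values below are illustrative only.
+//   at::Tensor a = at::arange(8, at::kInt);     // 0..7
+//   at::Tensor low = a.bitwise_and(3);          // Scalar overload: keep the low two bits
+//   a.bitwise_or_(1);                           // in-place Scalar overload: set the lowest bit
+//   at::Tensor x = a.bitwise_xor(low);          // Tensor overload
+// The schema comment just above documents the __ior__ overload that follows.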
+inline at::Tensor & Tensor::__ior__(const at::Tensor & other) const { + return at::_ops::__ior___Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::bitwise_xor(const at::Scalar & other) const { + return at::_ops::bitwise_xor_Scalar::call(const_cast(*this), other); +} + +// aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::bitwise_xor(const at::Tensor & other) const { + return at::_ops::bitwise_xor_Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_xor_(const at::Scalar & other) const { + return at::_ops::bitwise_xor__Scalar::call(const_cast(*this), other); +} + +// aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_xor_(const at::Tensor & other) const { + return at::_ops::bitwise_xor__Tensor::call(const_cast(*this), other); +} + +// aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::__xor__(const at::Scalar & other) const { + return at::_ops::__xor___Scalar::call(const_cast(*this), other); +} + +// aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::__xor__(const at::Tensor & other) const { + return at::_ops::__xor___Tensor::call(const_cast(*this), other); +} + +// aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::__ixor__(const at::Scalar & other) const { + return at::_ops::__ixor___Scalar::call(const_cast(*this), other); +} + +// aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::__ixor__(const at::Tensor & other) const { + return at::_ops::__ixor___Tensor::call(const_cast(*this), other); +} + +// aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::__lshift__(const at::Scalar & other) const { + return at::_ops::__lshift___Scalar::call(const_cast(*this), other); +} + +// aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::__lshift__(const at::Tensor & other) const { + return at::_ops::__lshift___Tensor::call(const_cast(*this), other); +} + +// aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::__ilshift__(const at::Scalar & other) const { + return at::_ops::__ilshift___Scalar::call(const_cast(*this), other); +} + +// aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::__ilshift__(const at::Tensor & other) const { + return at::_ops::__ilshift___Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::bitwise_left_shift(const at::Tensor & other) const { + return at::_ops::bitwise_left_shift_Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_left_shift_(const at::Tensor & other) const { + return at::_ops::bitwise_left_shift__Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::bitwise_left_shift(const at::Scalar & other) const { + return at::_ops::bitwise_left_shift_Tensor_Scalar::call(const_cast(*this), other); +} + +// aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_left_shift_(const at::Scalar & other) const { + return at::_ops::bitwise_left_shift__Tensor_Scalar::call(const_cast(*this), other); +} + +// aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::__rshift__(const at::Scalar & other) const { + return at::_ops::__rshift___Scalar::call(const_cast(*this), other); +} + +// aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::__rshift__(const at::Tensor & other) const { + return at::_ops::__rshift___Tensor::call(const_cast(*this), other); +} + +// aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::__irshift__(const at::Scalar & other) const { + return at::_ops::__irshift___Scalar::call(const_cast(*this), other); +} + +// aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::__irshift__(const at::Tensor & other) const { + return at::_ops::__irshift___Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::bitwise_right_shift(const at::Tensor & other) const { + return at::_ops::bitwise_right_shift_Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_right_shift_(const at::Tensor & other) const { + return at::_ops::bitwise_right_shift__Tensor::call(const_cast(*this), other); +} + +// aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::bitwise_right_shift(const at::Scalar & other) const { + return at::_ops::bitwise_right_shift_Tensor_Scalar::call(const_cast(*this), other); +} + +// aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::bitwise_right_shift_(const at::Scalar & other) const { + return at::_ops::bitwise_right_shift__Tensor_Scalar::call(const_cast(*this), other); +} + +// aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) +inline at::Tensor & Tensor::tril_(int64_t diagonal) const { + return at::_ops::tril_::call(const_cast(*this), diagonal); +} + +// aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) +inline at::Tensor & Tensor::triu_(int64_t diagonal) const { + return at::_ops::triu_::call(const_cast(*this), diagonal); +} + +// aten::digamma_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::digamma_() const { + return at::_ops::digamma_::call(const_cast(*this)); +} + +// aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) +inline at::Tensor & Tensor::lerp_(const at::Tensor & end, const at::Scalar & weight) const { + return at::_ops::lerp__Scalar::call(const_cast(*this), end, weight); +} + +// aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) +inline at::Tensor & Tensor::lerp_(const at::Tensor & end, const at::Tensor & weight) const { + return at::_ops::lerp__Tensor::call(const_cast(*this), end, weight); +} + +// aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) 
+inline at::Tensor & Tensor::addbmm_(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addbmm_::call(const_cast(*this), batch1, batch2, beta, alpha); +} + +// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor Tensor::addbmm(const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) const { + return at::_ops::addbmm::call(const_cast(*this), batch1, batch2, beta, alpha); +} + +// aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::random_(int64_t from, c10::optional to, c10::optional generator) const { + return at::_ops::random__from::call(const_cast(*this), from, to, generator); +} + +// aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::random_(int64_t to, c10::optional generator) const { + return at::_ops::random__to::call(const_cast(*this), to, generator); +} + +// aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::random_(c10::optional generator) const { + return at::_ops::random_::call(const_cast(*this), generator); +} + +// aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::uniform_(double from, double to, c10::optional generator) const { + return at::_ops::uniform_::call(const_cast(*this), from, to, generator); +} + +// aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::cauchy_(double median, double sigma, c10::optional generator) const { + return at::_ops::cauchy_::call(const_cast(*this), median, sigma, generator); +} + +// aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::log_normal_(double mean, double std, c10::optional generator) const { + return at::_ops::log_normal_::call(const_cast(*this), mean, std, generator); +} + +// aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::exponential_(double lambd, c10::optional generator) const { + return at::_ops::exponential_::call(const_cast(*this), lambd, generator); +} + +// aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::geometric_(double p, c10::optional generator) const { + return at::_ops::geometric_::call(const_cast(*this), p, generator); +} + +// aten::diag(Tensor self, int diagonal=0) -> Tensor +inline at::Tensor Tensor::diag(int64_t diagonal) const { + return at::_ops::diag::call(const_cast(*this), diagonal); +} + +// aten::cross(Tensor self, Tensor other, int? 
dim=None) -> Tensor +inline at::Tensor Tensor::cross(const at::Tensor & other, c10::optional dim) const { + return at::_ops::cross::call(const_cast(*this), other, dim); +} + +// aten::triu(Tensor self, int diagonal=0) -> Tensor +inline at::Tensor Tensor::triu(int64_t diagonal) const { + return at::_ops::triu::call(const_cast(*this), diagonal); +} + +// aten::tril(Tensor self, int diagonal=0) -> Tensor +inline at::Tensor Tensor::tril(int64_t diagonal) const { + return at::_ops::tril::call(const_cast(*this), diagonal); +} + +// aten::trace(Tensor self) -> Tensor +inline at::Tensor Tensor::trace() const { + return at::_ops::trace::call(const_cast(*this)); +} + +// aten::ne.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::ne(const at::Scalar & other) const { + return at::_ops::ne_Scalar::call(const_cast(*this), other); +} + +// aten::ne.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::ne(const at::Tensor & other) const { + return at::_ops::ne_Tensor::call(const_cast(*this), other); +} + +// aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::ne_(const at::Scalar & other) const { + return at::_ops::ne__Scalar::call(const_cast(*this), other); +} + +// aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::ne_(const at::Tensor & other) const { + return at::_ops::ne__Tensor::call(const_cast(*this), other); +} + +// aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::not_equal(const at::Scalar & other) const { + return at::_ops::not_equal_Scalar::call(const_cast(*this), other); +} + +// aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::not_equal(const at::Tensor & other) const { + return at::_ops::not_equal_Tensor::call(const_cast(*this), other); +} + +// aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::not_equal_(const at::Scalar & other) const { + return at::_ops::not_equal__Scalar::call(const_cast(*this), other); +} + +// aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::not_equal_(const at::Tensor & other) const { + return at::_ops::not_equal__Tensor::call(const_cast(*this), other); +} + +// aten::eq.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::eq(const at::Scalar & other) const { + return at::_ops::eq_Scalar::call(const_cast(*this), other); +} + +// aten::eq.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::eq(const at::Tensor & other) const { + return at::_ops::eq_Tensor::call(const_cast(*this), other); +} + +// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::ge(const at::Scalar & other) const { + return at::_ops::ge_Scalar::call(const_cast(*this), other); +} + +// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::ge(const at::Tensor & other) const { + return at::_ops::ge_Tensor::call(const_cast(*this), other); +} + +// aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::ge_(const at::Scalar & other) const { + return at::_ops::ge__Scalar::call(const_cast(*this), other); +} + +// aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+inline at::Tensor & Tensor::ge_(const at::Tensor & other) const { + return at::_ops::ge__Tensor::call(const_cast(*this), other); +} + +// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::greater_equal(const at::Scalar & other) const { + return at::_ops::greater_equal_Scalar::call(const_cast(*this), other); +} + +// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::greater_equal(const at::Tensor & other) const { + return at::_ops::greater_equal_Tensor::call(const_cast(*this), other); +} + +// aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::greater_equal_(const at::Scalar & other) const { + return at::_ops::greater_equal__Scalar::call(const_cast(*this), other); +} + +// aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::greater_equal_(const at::Tensor & other) const { + return at::_ops::greater_equal__Tensor::call(const_cast(*this), other); +} + +// aten::le.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::le(const at::Scalar & other) const { + return at::_ops::le_Scalar::call(const_cast(*this), other); +} + +// aten::le.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::le(const at::Tensor & other) const { + return at::_ops::le_Tensor::call(const_cast(*this), other); +} + +// aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::le_(const at::Scalar & other) const { + return at::_ops::le__Scalar::call(const_cast(*this), other); +} + +// aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::le_(const at::Tensor & other) const { + return at::_ops::le__Tensor::call(const_cast(*this), other); +} + +// aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::less_equal(const at::Scalar & other) const { + return at::_ops::less_equal_Scalar::call(const_cast(*this), other); +} + +// aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::less_equal(const at::Tensor & other) const { + return at::_ops::less_equal_Tensor::call(const_cast(*this), other); +} + +// aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::less_equal_(const at::Scalar & other) const { + return at::_ops::less_equal__Scalar::call(const_cast(*this), other); +} + +// aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::less_equal_(const at::Tensor & other) const { + return at::_ops::less_equal__Tensor::call(const_cast(*this), other); +} + +// aten::gt.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::gt(const at::Scalar & other) const { + return at::_ops::gt_Scalar::call(const_cast(*this), other); +} + +// aten::gt.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::gt(const at::Tensor & other) const { + return at::_ops::gt_Tensor::call(const_cast(*this), other); +} + +// aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::gt_(const at::Scalar & other) const { + return at::_ops::gt__Scalar::call(const_cast(*this), other); +} + +// aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) 
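+// [editorial sketch, not generated code] The comparison family above returns boolean masks;
+// the long names (greater_equal, less_equal, ...) are aliases of ge, le, and so on.
+//   at::Tensor x  = at::arange(5);              // {0, 1, 2, 3, 4}
+//   at::Tensor m1 = x.ge(2);                    // {false, false, true, true, true}
+//   at::Tensor m2 = x.greater_equal(2);         // alias; m1.equal(m2) is true
+//   x.lt_(3);                                   // in-place variant writes 0/1 in x's own dtype
+// The schema comment just above documents the gt_ overload that follows.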
+inline at::Tensor & Tensor::gt_(const at::Tensor & other) const { + return at::_ops::gt__Tensor::call(const_cast(*this), other); +} + +// aten::greater.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::greater(const at::Scalar & other) const { + return at::_ops::greater_Scalar::call(const_cast(*this), other); +} + +// aten::greater.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::greater(const at::Tensor & other) const { + return at::_ops::greater_Tensor::call(const_cast(*this), other); +} + +// aten::greater_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::greater_(const at::Scalar & other) const { + return at::_ops::greater__Scalar::call(const_cast(*this), other); +} + +// aten::greater_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::greater_(const at::Tensor & other) const { + return at::_ops::greater__Tensor::call(const_cast(*this), other); +} + +// aten::lt.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::lt(const at::Scalar & other) const { + return at::_ops::lt_Scalar::call(const_cast(*this), other); +} + +// aten::lt.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::lt(const at::Tensor & other) const { + return at::_ops::lt_Tensor::call(const_cast(*this), other); +} + +// aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::lt_(const at::Scalar & other) const { + return at::_ops::lt__Scalar::call(const_cast(*this), other); +} + +// aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::lt_(const at::Tensor & other) const { + return at::_ops::lt__Tensor::call(const_cast(*this), other); +} + +// aten::less.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::less(const at::Scalar & other) const { + return at::_ops::less_Scalar::call(const_cast(*this), other); +} + +// aten::less.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::less(const at::Tensor & other) const { + return at::_ops::less_Tensor::call(const_cast(*this), other); +} + +// aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::less_(const at::Scalar & other) const { + return at::_ops::less__Scalar::call(const_cast(*this), other); +} + +// aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::less_(const at::Tensor & other) const { + return at::_ops::less__Tensor::call(const_cast(*this), other); +} + +// aten::take(Tensor self, Tensor index) -> Tensor +inline at::Tensor Tensor::take(const at::Tensor & index) const { + return at::_ops::take::call(const_cast(*this), index); +} + +// aten::take_along_dim(Tensor self, Tensor indices, int? 
dim=None) -> Tensor +inline at::Tensor Tensor::take_along_dim(const at::Tensor & indices, c10::optional dim) const { + return at::_ops::take_along_dim::call(const_cast(*this), indices, dim); +} + +// aten::index_select(Tensor self, int dim, Tensor index) -> Tensor +inline at::Tensor Tensor::index_select(int64_t dim, const at::Tensor & index) const { + return at::_ops::index_select::call(const_cast(*this), dim, index); +} + +// aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor +inline at::Tensor Tensor::index_select(at::Dimname dim, const at::Tensor & index) const { + return at::_ops::index_select_dimname::call(const_cast(*this), dim, index); +} + +// aten::masked_select(Tensor self, Tensor mask) -> Tensor +inline at::Tensor Tensor::masked_select(const at::Tensor & mask) const { + return at::_ops::masked_select::call(const_cast(*this), mask); +} + +// aten::nonzero(Tensor self) -> Tensor +inline at::Tensor Tensor::nonzero() const { + return at::_ops::nonzero::call(const_cast(*this)); +} + +// aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor +inline at::Tensor Tensor::nonzero_static(int64_t size, int64_t fill_value) const { + return at::_ops::nonzero_static::call(const_cast(*this), size, fill_value); +} + +// aten::nonzero_numpy(Tensor self) -> Tensor[] +inline ::std::vector Tensor::nonzero_numpy() const { + return at::_ops::nonzero_numpy::call(const_cast(*this)); +} + +// aten::argwhere(Tensor self) -> Tensor +inline at::Tensor Tensor::argwhere() const { + return at::_ops::argwhere::call(const_cast(*this)); +} + +// aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor +inline at::Tensor Tensor::gather(int64_t dim, const at::Tensor & index, bool sparse_grad) const { + return at::_ops::gather::call(const_cast(*this), dim, index, sparse_grad); +} + +// aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor +inline at::Tensor Tensor::gather(at::Dimname dim, const at::Tensor & index, bool sparse_grad) const { + return at::_ops::gather_dimname::call(const_cast(*this), dim, index, sparse_grad); +} + +// aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor +inline at::Tensor Tensor::addcmul(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const { + return at::_ops::addcmul::call(const_cast(*this), tensor1, tensor2, value); +} + +// aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) +inline at::Tensor & Tensor::addcmul_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const { + return at::_ops::addcmul_::call(const_cast(*this), tensor1, tensor2, value); +} + +// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor +inline at::Tensor Tensor::addcdiv(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const { + return at::_ops::addcdiv::call(const_cast(*this), tensor1, tensor2, value); +} + +// aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) 
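+// [editorial sketch, not generated code] addcmul/addcdiv compute self + value * t1 * t2 and
+// self + value * t1 / t2 elementwise; the tensors below are illustrative only.
+//   at::Tensor t  = at::zeros({3});
+//   at::Tensor t1 = at::ones({3});
+//   at::Tensor t2 = at::full({3}, 2.0);
+//   at::Tensor r  = t.addcmul(t1, t2, /*value=*/0.5);   // {1, 1, 1}  (0 + 0.5 * 1 * 2)
+//   t.addcdiv_(t1, t2, /*value=*/2.0);                  // in-place: {1, 1, 1}  (0 + 2 * 1 / 2)
+// The schema comment just above documents the addcdiv_ definition that follows.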
+inline at::Tensor & Tensor::addcdiv_(const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) const { + return at::_ops::addcdiv_::call(const_cast(*this), tensor1, tensor2, value); +} + +// aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) +inline ::std::tuple Tensor::triangular_solve(const at::Tensor & A, bool upper, bool transpose, bool unitriangular) const { + return at::_ops::triangular_solve::call(const_cast(*this), A, upper, transpose, unitriangular); +} + +// aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) +inline ::std::tuple Tensor::svd(bool some, bool compute_uv) const { + return at::_ops::svd::call(const_cast(*this), some, compute_uv); +} + +// aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a) +inline at::Tensor Tensor::swapaxes(int64_t axis0, int64_t axis1) const { + return at::_ops::swapaxes::call(const_cast(*this), axis0, axis1); +} + +// aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!) +inline at::Tensor & Tensor::swapaxes_(int64_t axis0, int64_t axis1) const { + return at::_ops::swapaxes_::call(const_cast(*this), axis0, axis1); +} + +// aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a) +inline at::Tensor Tensor::swapdims(int64_t dim0, int64_t dim1) const { + return at::_ops::swapdims::call(const_cast(*this), dim0, dim1); +} + +// aten::swapdims_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) +inline at::Tensor & Tensor::swapdims_(int64_t dim0, int64_t dim1) const { + return at::_ops::swapdims_::call(const_cast(*this), dim0, dim1); +} + +// aten::cholesky(Tensor self, bool upper=False) -> Tensor +inline at::Tensor Tensor::cholesky(bool upper) const { + return at::_ops::cholesky::call(const_cast(*this), upper); +} + +// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor +inline at::Tensor Tensor::cholesky_solve(const at::Tensor & input2, bool upper) const { + return at::_ops::cholesky_solve::call(const_cast(*this), input2, upper); +} + +// aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor +inline at::Tensor Tensor::cholesky_inverse(bool upper) const { + return at::_ops::cholesky_inverse::call(const_cast(*this), upper); +} + +// aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) +inline ::std::tuple Tensor::qr(bool some) const { + return at::_ops::qr::call(const_cast(*this), some); +} + +// aten::geqrf(Tensor self) -> (Tensor a, Tensor tau) +inline ::std::tuple Tensor::geqrf() const { + return at::_ops::geqrf::call(const_cast(*this)); +} + +// aten::orgqr(Tensor self, Tensor input2) -> Tensor +inline at::Tensor Tensor::orgqr(const at::Tensor & input2) const { + return at::_ops::orgqr::call(const_cast(*this), input2); +} + +// aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor +inline at::Tensor Tensor::ormqr(const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) const { + return at::_ops::ormqr::call(const_cast(*this), input2, input3, left, transpose); +} + +// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor +inline at::Tensor Tensor::lu_solve(const at::Tensor & LU_data, const at::Tensor & LU_pivots) const { + return at::_ops::lu_solve::call(const_cast(*this), LU_data, LU_pivots); +} + +// aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? 
generator=None) -> Tensor +inline at::Tensor Tensor::multinomial(int64_t num_samples, bool replacement, c10::optional generator) const { + return at::_ops::multinomial::call(const_cast(*this), num_samples, replacement, generator); +} + +// aten::lgamma_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::lgamma_() const { + return at::_ops::lgamma_::call(const_cast(*this)); +} + +// aten::lgamma(Tensor self) -> Tensor +inline at::Tensor Tensor::lgamma() const { + return at::_ops::lgamma::call(const_cast(*this)); +} + +// aten::digamma(Tensor self) -> Tensor +inline at::Tensor Tensor::digamma() const { + return at::_ops::digamma::call(const_cast(*this)); +} + +// aten::polygamma(int n, Tensor self) -> Tensor +inline at::Tensor Tensor::polygamma(int64_t n) const { + return at::_ops::polygamma::call(n, const_cast(*this)); +} + +// aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!) +inline at::Tensor & Tensor::polygamma_(int64_t n) const { + return at::_ops::polygamma_::call(const_cast(*this), n); +} + +// aten::erfinv(Tensor self) -> Tensor +inline at::Tensor Tensor::erfinv() const { + return at::_ops::erfinv::call(const_cast(*this)); +} + +// aten::erfinv_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::erfinv_() const { + return at::_ops::erfinv_::call(const_cast(*this)); +} + +// aten::i0(Tensor self) -> Tensor +inline at::Tensor Tensor::i0() const { + return at::_ops::i0::call(const_cast(*this)); +} + +// aten::i0_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::i0_() const { + return at::_ops::i0_::call(const_cast(*this)); +} + +// aten::sign(Tensor self) -> Tensor +inline at::Tensor Tensor::sign() const { + return at::_ops::sign::call(const_cast(*this)); +} + +// aten::sign_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & Tensor::sign_() const { + return at::_ops::sign_::call(const_cast(*this)); +} + +// aten::signbit(Tensor self) -> Tensor +inline at::Tensor Tensor::signbit() const { + return at::_ops::signbit::call(const_cast(*this)); +} + +// aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor +inline at::Tensor Tensor::dist(const at::Tensor & other, const at::Scalar & p) const { + return at::_ops::dist::call(const_cast(*this), other, p); +} + +// aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::atan2_(const at::Tensor & other) const { + return at::_ops::atan2_::call(const_cast(*this), other); +} + +// aten::atan2(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::atan2(const at::Tensor & other) const { + return at::_ops::atan2::call(const_cast(*this), other); +} + +// aten::arctan2(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::arctan2(const at::Tensor & other) const { + return at::_ops::arctan2::call(const_cast(*this), other); +} + +// aten::arctan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) 
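+// [editorial sketch, not generated code] A couple of the pointwise math methods above;
+// note that atan2 treats self as y and its argument as x.
+//   at::Tensor y = at::ones({4});
+//   at::Tensor x = at::full({4}, -1.0);
+//   at::Tensor ang = y.atan2(x);     // elementwise atan2(y, x) == 3*pi/4
+//   at::Tensor sg  = x.sign();       // {-1, -1, -1, -1}
+//   at::Tensor lg  = y.lgamma();     // log|Gamma(1)| == 0
+// The schema comment just above documents the arctan2_ definition that follows.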
+inline at::Tensor & Tensor::arctan2_(const at::Tensor & other) const { + return at::_ops::arctan2_::call(const_cast(*this), other); +} + +// aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor +inline at::Tensor Tensor::lerp(const at::Tensor & end, const at::Scalar & weight) const { + return at::_ops::lerp_Scalar::call(const_cast(*this), end, weight); +} + +// aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor +inline at::Tensor Tensor::lerp(const at::Tensor & end, const at::Tensor & weight) const { + return at::_ops::lerp_Tensor::call(const_cast(*this), end, weight); +} + +// aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor +inline at::Tensor Tensor::histc(int64_t bins, const at::Scalar & min, const at::Scalar & max) const { + return at::_ops::histc::call(const_cast(*this), bins, min, max); +} + +// aten::histogram.bins_tensor(Tensor self, Tensor bins, *, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +inline ::std::tuple Tensor::histogram(const at::Tensor & bins, const c10::optional & weight, bool density) const { + return at::_ops::histogram_bins_tensor::call(const_cast(*this), bins, weight, density); +} + +// aten::histogram.bin_ct(Tensor self, int bins=100, *, float[]? range=None, Tensor? weight=None, bool density=False) -> (Tensor hist, Tensor bin_edges) +inline ::std::tuple Tensor::histogram(int64_t bins, c10::optional> range, const c10::optional & weight, bool density) const { + return at::_ops::histogram_bin_ct::call(const_cast(*this), bins, range, weight, density); +} + +// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::fmod(const at::Scalar & other) const { + return at::_ops::fmod_Scalar::call(const_cast(*this), other); +} + +// aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::fmod_(const at::Scalar & other) const { + return at::_ops::fmod__Scalar::call(const_cast(*this), other); +} + +// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::fmod(const at::Tensor & other) const { + return at::_ops::fmod_Tensor::call(const_cast(*this), other); +} + +// aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::fmod_(const at::Tensor & other) const { + return at::_ops::fmod__Tensor::call(const_cast(*this), other); +} + +// aten::hypot(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::hypot(const at::Tensor & other) const { + return at::_ops::hypot::call(const_cast(*this), other); +} + +// aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::hypot_(const at::Tensor & other) const { + return at::_ops::hypot_::call(const_cast(*this), other); +} + +// aten::igamma(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::igamma(const at::Tensor & other) const { + return at::_ops::igamma::call(const_cast(*this), other); +} + +// aten::igamma_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::igamma_(const at::Tensor & other) const { + return at::_ops::igamma_::call(const_cast(*this), other); +} + +// aten::igammac(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::igammac(const at::Tensor & other) const { + return at::_ops::igammac::call(const_cast(*this), other); +} + +// aten::igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!) 
+inline at::Tensor & Tensor::igammac_(const at::Tensor & other) const { + return at::_ops::igammac_::call(const_cast(*this), other); +} + +// aten::nextafter(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::nextafter(const at::Tensor & other) const { + return at::_ops::nextafter::call(const_cast(*this), other); +} + +// aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::nextafter_(const at::Tensor & other) const { + return at::_ops::nextafter_::call(const_cast(*this), other); +} + +// aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor Tensor::remainder(const at::Scalar & other) const { + return at::_ops::remainder_Scalar::call(const_cast(*this), other); +} + +// aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) +inline at::Tensor & Tensor::remainder_(const at::Scalar & other) const { + return at::_ops::remainder__Scalar::call(const_cast(*this), other); +} + +// aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::remainder(const at::Tensor & other) const { + return at::_ops::remainder_Tensor::call(const_cast(*this), other); +} + +// aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & Tensor::remainder_(const at::Tensor & other) const { + return at::_ops::remainder__Tensor::call(const_cast(*this), other); +} + +// aten::min(Tensor self) -> Tensor +inline at::Tensor Tensor::min() const { + return at::_ops::min::call(const_cast(*this)); +} + +// aten::fmin(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::fmin(const at::Tensor & other) const { + return at::_ops::fmin::call(const_cast(*this), other); +} + +// aten::max(Tensor self) -> Tensor +inline at::Tensor Tensor::max() const { + return at::_ops::max::call(const_cast(*this)); +} + +// aten::fmax(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::fmax(const at::Tensor & other) const { + return at::_ops::fmax::call(const_cast(*this), other); +} + +// aten::maximum(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::maximum(const at::Tensor & other) const { + return at::_ops::maximum::call(const_cast(*this), other); +} + +// aten::max.other(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::max(const at::Tensor & other) const { + return at::_ops::max_other::call(const_cast(*this), other); +} + +// aten::minimum(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::minimum(const at::Tensor & other) const { + return at::_ops::minimum::call(const_cast(*this), other); +} + +// aten::min.other(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::min(const at::Tensor & other) const { + return at::_ops::min_other::call(const_cast(*this), other); +} + +// aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +inline at::Tensor Tensor::quantile(const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation) const { + return at::_ops::quantile::call(const_cast(*this), q, dim, keepdim, interpolation); +} + +// aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +inline at::Tensor Tensor::quantile(double q, c10::optional dim, bool keepdim, c10::string_view interpolation) const { + return at::_ops::quantile_scalar::call(const_cast(*this), q, dim, keepdim, interpolation); +} + +// aten::nanquantile(Tensor self, Tensor q, int? 
dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +inline at::Tensor Tensor::nanquantile(const at::Tensor & q, c10::optional dim, bool keepdim, c10::string_view interpolation) const { + return at::_ops::nanquantile::call(const_cast(*this), q, dim, keepdim, interpolation); +} + +// aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor +inline at::Tensor Tensor::nanquantile(double q, c10::optional dim, bool keepdim, c10::string_view interpolation) const { + return at::_ops::nanquantile_scalar::call(const_cast(*this), q, dim, keepdim, interpolation); +} + +// aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::sort(int64_t dim, bool descending) const { + return at::_ops::sort::call(const_cast(*this), dim, descending); +} + +// aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::sort(c10::optional stable, int64_t dim, bool descending) const { + return at::_ops::sort_stable::call(const_cast(*this), stable, dim, descending); +} + +// aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::sort(at::Dimname dim, bool descending) const { + return at::_ops::sort_dimname::call(const_cast(*this), dim, descending); +} + +// aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::sort(c10::optional stable, at::Dimname dim, bool descending) const { + return at::_ops::sort_dimname_stable::call(const_cast(*this), stable, dim, descending); +} + +// aten::msort(Tensor self) -> Tensor +inline at::Tensor Tensor::msort() const { + return at::_ops::msort::call(const_cast(*this)); +} + +// aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor +inline at::Tensor Tensor::argsort(int64_t dim, bool descending) const { + return at::_ops::argsort::call(const_cast(*this), dim, descending); +} + +// aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor +inline at::Tensor Tensor::argsort(bool stable, int64_t dim, bool descending) const { + return at::_ops::argsort_stable::call(const_cast(*this), stable, dim, descending); +} + +// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor +inline at::Tensor Tensor::argsort(at::Dimname dim, bool descending) const { + return at::_ops::argsort_dimname::call(const_cast(*this), dim, descending); +} + +// aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::topk(int64_t k, int64_t dim, bool largest, bool sorted) const { + return at::_ops::topk::call(const_cast(*this), k, dim, largest, sorted); +} + +// aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) +inline ::std::tuple Tensor::topk_symint(c10::SymInt k, int64_t dim, bool largest, bool sorted) const { + return at::_ops::topk::call(const_cast(*this), k, dim, largest, sorted); +} + +// aten::all(Tensor self) -> Tensor +inline at::Tensor Tensor::all() const { + return at::_ops::all::call(const_cast(*this)); +} + +// aten::any(Tensor self) -> Tensor +inline at::Tensor Tensor::any() const { + return at::_ops::any::call(const_cast(*this)); +} + +// aten::renorm(Tensor 
self, Scalar p, int dim, Scalar maxnorm) -> Tensor +inline at::Tensor Tensor::renorm(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const { + return at::_ops::renorm::call(const_cast(*this), p, dim, maxnorm); +} + +// aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) +inline at::Tensor & Tensor::renorm_(const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) const { + return at::_ops::renorm_::call(const_cast(*this), p, dim, maxnorm); +} + +// aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) +inline at::Tensor Tensor::unfold(int64_t dimension, int64_t size, int64_t step) const { + return at::_ops::unfold::call(const_cast(*this), dimension, size, step); +} + +// aten::equal(Tensor self, Tensor other) -> bool +inline bool Tensor::equal(const at::Tensor & other) const { + return at::_ops::equal::call(const_cast(*this), other); +} + +// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +inline at::Tensor Tensor::pow(const at::Tensor & exponent) const { + return at::_ops::pow_Tensor_Tensor::call(const_cast(*this), exponent); +} + +// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor +inline at::Tensor Tensor::pow(const at::Scalar & exponent) const { + return at::_ops::pow_Tensor_Scalar::call(const_cast(*this), exponent); +} + +// aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) +inline at::Tensor & Tensor::pow_(const at::Scalar & exponent) const { + return at::_ops::pow__Scalar::call(const_cast(*this), exponent); +} + +// aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) +inline at::Tensor & Tensor::pow_(const at::Tensor & exponent) const { + return at::_ops::pow__Tensor::call(const_cast(*this), exponent); +} + +// aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +inline at::Tensor Tensor::float_power(const at::Tensor & exponent) const { + return at::_ops::float_power_Tensor_Tensor::call(const_cast(*this), exponent); +} + +// aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor +inline at::Tensor Tensor::float_power(const at::Scalar & exponent) const { + return at::_ops::float_power_Tensor_Scalar::call(const_cast(*this), exponent); +} + +// aten::float_power_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) +inline at::Tensor & Tensor::float_power_(const at::Scalar & exponent) const { + return at::_ops::float_power__Scalar::call(const_cast(*this), exponent); +} + +// aten::float_power_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) +inline at::Tensor & Tensor::float_power_(const at::Tensor & exponent) const { + return at::_ops::float_power__Tensor::call(const_cast(*this), exponent); +} + +// aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) +inline at::Tensor & Tensor::normal_(double mean, double std, c10::optional generator) const { + return at::_ops::normal_::call(const_cast(*this), mean, std, generator); +} + +// aten::alias(Tensor(a) self) -> Tensor(a) +inline at::Tensor Tensor::alias() const { + return at::_ops::alias::call(const_cast(*this)); +} + +// aten::isfinite(Tensor self) -> Tensor +inline at::Tensor Tensor::isfinite() const { + return at::_ops::isfinite::call(const_cast(*this)); +} + +// aten::isinf(Tensor self) -> Tensor +inline at::Tensor Tensor::isinf() const { + return at::_ops::isinf::call(const_cast(*this)); +} + +// aten::record_stream(Tensor(a!) 
self, Stream s) -> () +inline void Tensor::record_stream(at::Stream s) const { + return at::_ops::record_stream::call(const_cast(*this), s); +} + +// aten::isposinf(Tensor self) -> Tensor +inline at::Tensor Tensor::isposinf() const { + return at::_ops::isposinf::call(const_cast(*this)); +} + +// aten::isneginf(Tensor self) -> Tensor +inline at::Tensor Tensor::isneginf() const { + return at::_ops::isneginf::call(const_cast(*this)); +} + +// aten::det(Tensor self) -> Tensor +inline at::Tensor Tensor::det() const { + return at::_ops::det::call(const_cast(*this)); +} + +// aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) +inline ::std::tuple Tensor::slogdet() const { + return at::_ops::slogdet::call(const_cast(*this)); +} + +// aten::logdet(Tensor self) -> Tensor +inline at::Tensor Tensor::logdet() const { + return at::_ops::logdet::call(const_cast(*this)); +} + +// aten::inverse(Tensor self) -> Tensor +inline at::Tensor Tensor::inverse() const { + return at::_ops::inverse::call(const_cast(*this)); +} + +// aten::inner(Tensor self, Tensor other) -> Tensor +inline at::Tensor Tensor::inner(const at::Tensor & other) const { + return at::_ops::inner::call(const_cast(*this), other); +} + +// aten::outer(Tensor self, Tensor vec2) -> Tensor +inline at::Tensor Tensor::outer(const at::Tensor & vec2) const { + return at::_ops::outer::call(const_cast(*this), vec2); +} + +// aten::ger(Tensor self, Tensor vec2) -> Tensor +inline at::Tensor Tensor::ger(const at::Tensor & vec2) const { + return at::_ops::ger::call(const_cast(*this), vec2); +} + +// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor +inline at::Tensor Tensor::to_padded_tensor(double padding, at::OptionalIntArrayRef output_size) const { + return at::_ops::to_padded_tensor::call(const_cast(*this), padding, output_size.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*output_size)) : c10::nullopt); +} + +// aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor +inline at::Tensor Tensor::to_padded_tensor_symint(double padding, at::OptionalSymIntArrayRef output_size) const { + return at::_ops::to_padded_tensor::call(const_cast(*this), padding, output_size); +} +} // namespace at + + +namespace c10 { +template <> +struct MaybeOwnedTraits { + using owned_type = at::Tensor; + using borrow_type = at::Tensor; + + static borrow_type createBorrow(const owned_type& from) { + // NOTE: this can be implemented without the special + // unsafe_borrow_t Tensor constructor as + // + // return borrow_type(c10::intrusive_ptr::reclaim(from.unsafeGetTensorImpl())); + // + // but that hurts inlining due to the nullptr check in the + // Tensor(c10::intrusive_ptr<...>) constructor. We already know + // that from.impl_ isn't null because from is a valid Tensor, so + // we needn't do the check again. (using __builtin_assume can + // avoid this, but wouldn't be portable to MSVC.) + return borrow_type(borrow_type::unsafe_borrow_t{}, from); + } + + static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) { + lhs.unsafeReleaseTensorImpl(); + // See above note: this can be implemented with public API + // similarly to createBorrow(), but that would hurt inlining. + lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs); + } + + static void destroyBorrow(borrow_type& toDestroy) { + toDestroy.unsafeReleaseTensorImpl(); // "leak" it, but it was already +0. 
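+    // No decref happens here: the borrow never incremented the refcount in
+    // the first place (see createBorrow above), so releasing the TensorImpl
+    // pointer without touching the count leaves the owner's count balanced.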
+ } + + static const owned_type& referenceFromBorrow(const borrow_type& borrow) { + return borrow; + } + + static const owned_type* pointerFromBorrow(const borrow_type& borrow) { + return &borrow; + } + + static bool debugBorrowIsValid(const borrow_type& /*borrow*/) { + return true; + } +}; + +template <> +struct ExclusivelyOwnedTraits { + using repr_type = at::Tensor; + using pointer_type = at::Tensor*; + using const_pointer_type = const at::Tensor*; + + static repr_type nullRepr() { + return at::Tensor(); + } + + template + static repr_type createInPlace(Args&&... args) { + return at::Tensor(std::forward(args)...); + } + + static repr_type moveToRepr(at::Tensor&& x) { + return std::move(x); + } + + static void destroyOwned(at::Tensor& x) { + return ExclusivelyOwnedTraits::destroyOwned(x); + } + + static at::Tensor take(at::Tensor& x) { + return std::move(x); + } + + static pointer_type getImpl(repr_type& x) { + return &x; + } + + static const_pointer_type getImpl(const repr_type& x) { + return &x; + } +}; +} // namespace c10 + +namespace at { + +inline c10::MaybeOwned borrow_from_optional_tensor( + const c10::optional& opt) { + return opt.has_value() + ? c10::MaybeOwned::borrowed(*opt) + : c10::MaybeOwned::owned(c10::in_place); +} + +inline c10::MaybeOwned Tensor::expect_contiguous(MemoryFormat memory_format) const & { + if (is_contiguous(memory_format)) { + return c10::MaybeOwned::borrowed(*this); + } else { + return c10::MaybeOwned::owned(__dispatch_contiguous(memory_format)); + } +} +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h new file mode 100644 index 0000000000000000000000000000000000000000..1061a732ddb7e74dbbce89850fe8191f81536c87 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/TransformationHelper.h @@ -0,0 +1,173 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { + +// Using DistAccumType in accumulate types for distributions. +// Note: Ideally we'd be using ATen/AccumulateType.h but looks +// like the there is some inconsistency in how accumulate types +// are mapped currently, e.g. for the cpu side, float is mapped +// to double. +template +struct DistAccumType { }; + +#if defined(__CUDACC__) || defined(__HIPCC__) +template <> struct DistAccumType { using type = float; }; +#endif +template <> struct DistAccumType { using type = float; }; +template <> struct DistAccumType { using type = float; }; +template <> struct DistAccumType { using type = float; }; +template <> struct DistAccumType { using type = double; }; + +template +using dist_acctype = typename DistAccumType::type; + +namespace transformation { + +/** + * A transformation function for `torch.Tensor.random_()`, when both `from` and `to` are specified. + * `range` is `to - from` + * `base` is `from` + */ +template +C10_HOST_DEVICE inline T uniform_int_from_to(V val, uint64_t range, int64_t base) { + return static_cast(static_cast((val % range) + base)); +} + +/** + * A transformation function for `torch.Tensor.random_()`, when `from=min_value(int64_t)` and to=None + */ +template +C10_HOST_DEVICE inline T uniform_int_full_range(V val) { + return static_cast(static_cast(val)); +} + +/** + * A transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`. 
+ * In order to prevent compiler warnings reported in GitHub issue 46391, T can't be float or double + * in this overloaded version + */ +template +C10_HOST_DEVICE inline typename std::enable_if::value), T>::type uniform_int(V val) { + if constexpr (std::is_same_v) { + return static_cast(val & 1); + } else if constexpr (std::is_same_v) { + return static_cast(val % (static_cast(std::numeric_limits::max()) + 1)); + } else if constexpr (std::is_same_v || std::is_same::value) { + return static_cast(val % static_cast((1ULL << std::numeric_limits::digits) + 1)); + } else if constexpr (std::is_integral_v) { + return static_cast(val % (static_cast(std::numeric_limits::max()) + 1)); + } else { + assert(false); + return 0; + } +} + +/** + * An overloaded transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`, + * added to fix compiler warnings reported in GitHub issue 46391. T is either float or double in this version. + */ +template +C10_HOST_DEVICE inline typename std::enable_if::value, T>::type uniform_int(V val) { + return static_cast(val % static_cast((1ULL << std::numeric_limits::digits) + 1)); +} + +template +C10_HOST_DEVICE inline dist_acctype uniform_real(V val, T from, T to) { + constexpr auto MASK = static_cast((static_cast(1) << std::numeric_limits::digits) - 1); + constexpr auto DIVISOR = static_cast>(1) / (static_cast(1) << std::numeric_limits::digits); + dist_acctype x = (val & MASK) * DIVISOR; + return (x * (to - from) + from); +} + +/** + * Transforms normally distributed `val` with mean 0.0 and standard deviation 1.0 to + * normally distributed with `mean` and standard deviation `std`. + */ +template +C10_HOST_DEVICE inline T normal(T val, T mean, T std) { + return val * std + mean; +} + +/** + * Transforms uniformly distributed `val` between 0.0 and 1.0 to + * Cauchy distribution with location parameter `median` and scale parameter `sigma`. + */ +template +C10_HOST_DEVICE inline T cauchy(T val, T median, T sigma) { + // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function + // __tanf overflows and returns `inf/-inf` when (val > 1 - eps) or (val < 0 + eps), + // thus we clip those values. + constexpr T eps = std::numeric_limits::epsilon(); + constexpr T one_minus_eps = 1 - eps; + constexpr T zero_plus_eps = 0 + eps; + val = (val > one_minus_eps ? one_minus_eps : val); + val = (val < zero_plus_eps ? zero_plus_eps : val); + return median + sigma * at::tan(c10::pi * (val - static_cast(0.5))); +} + +template <> +C10_HOST_DEVICE inline double cauchy(double val, double median, double sigma) { + // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function + return median + sigma * at::tan(c10::pi * (val - static_cast(0.5))); +} + +/** + * Transforms uniformly distributed `val` between 0.0 and 1.0 to + * exponentially distributed with `lambda` parameter of the distribution. + */ +template +C10_HOST_DEVICE inline T exponential(T val, T lambda) { + // https://en.wikipedia.org/wiki/Exponential_distribution#Generating_exponential_variates + // Different implementations for CUDA and CPU to preserve original logic + // TODO: must be investigated and unified!!! + // https://github.com/pytorch/pytorch/issues/38662 +#if defined(__CUDACC__) || defined(__HIPCC__) + // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706 + // curand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0. 
+ // we need log to be not 0, and not underflow when converted to half + // fast __logf approximation can underflow, so set log to -epsilon/2 for 1 or close to 1 args + auto log = val >= static_cast(1.) - std::numeric_limits::epsilon() / 2 + ? -std::numeric_limits::epsilon() / 2 + : at::log(val); + return static_cast(-1.0) / lambda * log; +#else + return static_cast(-1.0) / lambda * at::log1p(-val); +#endif +} + +/** + * Transforms uniformly distributed `val` between 0.0 and 1.0 to + * geometrically distributed with success probability `p`. + */ +template +C10_HOST_DEVICE inline T geometric(T val, T p) { + // https://en.wikipedia.org/wiki/Geometric_distribution#Related_distributions + return static_cast(::ceil(at::log(val) / at::log1p(-p))); +} + +/** + * Transforms normally distributed `val` to log-normally distributed. + */ +template +C10_HOST_DEVICE inline T log_normal(T val) { + // https://en.wikipedia.org/wiki/Log-normal_distribution#Mode,_median,_quantiles + return at::exp(val); +} + +/** + * Transforms uniformly distributed `val` between 0.0 and 1.0 to + * bernoulli distributed with success probability `p`. + */ +template +C10_HOST_DEVICE inline T bernoulli(T val, T p) { + return val < p; +} + +}} // namespace at::transformation diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..885f6e195f05d37ab4253315242167f8e546dcc1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/UndefinedTensorImpl.h @@ -0,0 +1 @@ +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/UnsafeFromTH.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/UnsafeFromTH.h new file mode 100644 index 0000000000000000000000000000000000000000..9ad5c45d3ab6ca499bc10b71c68bc04ededfeb87 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/UnsafeFromTH.h @@ -0,0 +1,21 @@ +#pragma once +#include + +namespace at { + +inline Tensor unsafeTensorFromTH(void * th_pointer, bool retain) { + auto tensor_impl = c10::intrusive_ptr::reclaim(static_cast(th_pointer)); + if (retain && tensor_impl.get() != UndefinedTensorImpl::singleton()) { + c10::raw::intrusive_ptr::incref(tensor_impl.get()); + } + return Tensor(std::move(tensor_impl)); +} + +inline Storage unsafeStorageFromTH(void * th_pointer, bool retain) { + if (retain && th_pointer) { + c10::raw::intrusive_ptr::incref(static_cast(th_pointer)); + } + return Storage(c10::intrusive_ptr::reclaim(static_cast(th_pointer))); +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..f75342dea76a976579880ad1fa3491f8ca9881fa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h @@ -0,0 +1,75 @@ +#pragma once + +#include +#include + +// A little explanation about why this file exists at all. We have +// a few methods on Tensor class which require access to reified access to +// AutogradMeta. In open source, this isn't a big deal: we just access +// torch/csrc/autograd/variable.h from aten/src/ATen/core/Tensor.cpp and +// we can put the definitions inline. 
This is because everything gets balled +// into a single dynamic library in the end. +// +// However, inside our Facebook internal version of our build system, we +// have a split between aten and torch/csrc. So we cannot simply just +// cross this boundary. "Now wait," you might say, "Why don't we just +// merge the libraries inside Facebook". Well, the problem is that there +// are some downstream applications which are at binary size limit, and +// incorporating all of the extra code from libtorch would push them +// over (admarket/adreview/service:adreviewservice, see also +// https://github.com/pytorch/pytorch/pull/29299) So if you want to do that, +// we have to fix all of the services like this. +// +// I didn't want to block eliminating Tensor-Variable on this work, so I +// had to introduce another dynamic dispatch to get to the variable +// implementations (which live in torch/csrc/autograd/variable.cpp, FYI). +// +// I also considered using our existing dynamic dispatch mechanism, c10 +// dispatcher, to do this. However, (1) some of the functions on Tensor +// have weird signatures that are not supported by autograd, and (2) +// see this bug https://github.com/pytorch/pytorch/issues/30102 + +namespace torch { namespace autograd { + +struct Node; + +}} // namespace torch::autograd + +namespace at { +namespace impl { + +struct TORCH_API VariableHooksInterface { + virtual ~VariableHooksInterface() = default; + virtual TensorBase tensor_data(const TensorBase&) const = 0; + virtual TensorBase variable_data(const TensorBase&) const = 0; + virtual const std::shared_ptr& grad_fn(const TensorBase&) const = 0; + virtual unsigned _register_hook( + const TensorBase&, + std::function hook) const = 0; + virtual void remove_hook(const TensorBase&, unsigned pos) const = 0; + virtual bool is_view(const TensorBase&) const = 0; + virtual const TensorBase& base(const TensorBase&) const = 0; + virtual const std::string& name(const TensorBase&) const = 0; + virtual bool is_leaf(const TensorBase&) const = 0; + virtual int64_t output_nr(const TensorBase&) const = 0; + virtual void set_data(const TensorBase&, const TensorBase&) const = 0; + virtual TensorBase data(const TensorBase&) const = 0; + virtual int64_t _version(const TensorBase&) const = 0; + virtual void retain_grad(const TensorBase&) const = 0; + virtual bool retains_grad(const TensorBase&) const = 0; + virtual void _backward(const Tensor&, TensorList, const c10::optional&, c10::optional, bool) const = 0; + virtual void requires_grad_(const TensorBase&, bool) const = 0; + virtual void basic_autograd_not_implemented_fallback(const c10::OperatorHandle& op, c10::DispatchKeySet dispatch_keys, torch::jit::Stack* stack) const = 0; +}; + +TORCH_API void SetVariableHooks(VariableHooksInterface* hooks); +TORCH_API VariableHooksInterface* GetVariableHooks(); +TORCH_API bool HasVariableHooks(); + +struct TORCH_API VariableHooksRegisterer { + explicit VariableHooksRegisterer(VariableHooksInterface* hooks) { + SetVariableHooks(hooks); + } +}; + +}} // namespace at::impl diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h new file mode 100644 index 0000000000000000000000000000000000000000..61b6a35a0b1cb1ffd3bcfbe2b0d9d7ea6ac15ded --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Variadic.h @@ -0,0 +1,95 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace at { + +// 
This class allows you to write variadic functions which +// call a (possibly overloaded) function on each argument, +// in order. This is most commonly used in autogenerated code, +// where it is convenient to have a function that can uniformly +// take arguments of different types. If your arguments +// are homogenous consider using a std::initializer_list instead. +// +// For examples of this in use, see torch/csrc/utils/variadic.h +template +struct IterArgs { + template + inline F& apply() { + return self(); + } + + // NB: Use perfect forwarding here, otherwise we'll make value + // copies of all arguments! + template + inline F& apply(T&& arg, Args&&... args) { + self()(std::forward(arg)); + if (self().short_circuit()) { + return self(); + } else { + return apply(std::forward(args)...); + } + } + + // Here are some handy overloads which provide sensible + // defaults for container-like structures that one might + // be interested in recursing into. You can enable them + // by adding: + // + // using IterArgs::operator() + // + // to your struct. These are not enabled by default because + // you may be able to process these structures more efficiently + // than handling them one-by-one. + + template + void operator()(c10::IListRef args) { + for (const auto& arg : args) { + self()(arg); + if (self().short_circuit()) + return; + } + } + + template + void operator()(at::ArrayRef args) { + for (const auto& arg : args) { + self()(arg); + if (self().short_circuit()) + return; + } + } + + template + void operator()(const torch::List& args) { + for (const auto& arg : args) { + self()(arg); + if (self().short_circuit()) + return; + } + } + + // NB: we need to specify std::vector manually as C++ won't + // do an implicit conversion to make a template deduction go through. 
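+  // As a concrete illustration of the pattern described above, a visitor
+  // built on IterArgs might look like this (hypothetical example, not part
+  // of this header):
+  //
+  //   struct CountDefined : IterArgs<CountDefined> {
+  //     int n = 0;
+  //     void operator()(const at::Tensor& t) {
+  //       if (t.defined()) ++n;
+  //     }
+  //   };
+  //   // CountDefined().apply(a, b, c).n == number of defined tensors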
+ template + void operator()(const std::vector& args) { + self()(at::ArrayRef{args}); + } + + constexpr bool short_circuit() const { + return false; + } + + private: + inline F& self() { + return *static_cast(this); + } +}; + +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h new file mode 100644 index 0000000000000000000000000000000000000000..2d4fe1cc0995c4d7ad81438c27b5a4b42d8c890f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/Vitals.h @@ -0,0 +1,96 @@ +#pragma once +#include +#include +#include +#include +#include +#include + +#include + +namespace at { +namespace vitals { + +TORCH_API bool torchVitalEnabled(); + +struct TORCH_API TorchVitalAttr { + // always initialized to empty + std::string value = ""; + template + TorchVitalAttr& operator<<(const T& t) { + if (torchVitalEnabled()) { + std::stringstream ss; + ss << t; + value += ss.str(); + } + return *this; + } + + template + void write(const T& t, bool force) { + if (force || torchVitalEnabled()) { + std::stringstream ss; + ss << t; + value = ss.str(); + } + } +}; + +struct TORCH_API TorchVital { + std::string name; + std::unordered_map attrs; + + explicit TorchVital(std::string n) : name(std::move(n)) {} + TorchVital(const TorchVital&) = default; + TorchVital(TorchVital&&) = default; + TorchVital() = delete; + + TorchVitalAttr& create(const std::string& attr); + TorchVitalAttr& create(const std::string& attr, bool force); + friend std::ostream& operator<<(std::ostream& os, const TorchVital& dt); + + ~TorchVital(); +}; + +std::ostream& operator<<(std::ostream& os, TorchVital const& tv); + +// A way to access vitals by string names instead of by global reference. +// This enables access to vitals from the PythonAPI. +class TORCH_API APIVitals { + public: + bool vitals_enabled; + + // Set any vital sign that was added to the map. + bool setVital( + const std::string& vital_name, + const std::string& attr_name, + const std::string& value, + bool force = false); + std::string readVitals(); + + APIVitals(); + + // Ensure this stays a singleton + APIVitals(APIVitals const& other) = delete; + APIVitals(APIVitals&& other) = delete; + APIVitals& operator=(const APIVitals&) = delete; + APIVitals& operator=(APIVitals&&) = delete; + + private: + std::unordered_map name_map_; +}; + +extern TORCH_API APIVitals VitalsAPI; + +} // namespace vitals +} // namespace at + +#define TORCH_VITAL_DECLARE(name) \ + TORCH_API at::vitals::TorchVital TorchVital_##name; + +#define TORCH_VITAL_DEFINE(name) \ + TORCH_API at::vitals::TorchVital TorchVital_##name(#name); + +#define TORCH_VITAL_BASE(name) TorchVital_##name + +#define TORCH_VITAL(name, attr) TORCH_VITAL_BASE(name).create(#attr) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/alias_info.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/alias_info.h new file mode 100644 index 0000000000000000000000000000000000000000..a8a55bb782c479ff2f0243601bd36a034980b2fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/alias_info.h @@ -0,0 +1,151 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace c10 { +/** + * class AliasInfo + * + * Data structure to hold aliasing information for an `Argument`. They can be + * nested to represent aliasing information on contained types. 
+ * + * There is a `beforeSet` which describes the aliasing information before the + * operator executes, and an `afterSet` that describes aliasing info + * after execution. + */ +class AliasInfo { + public: + // Symbol for the set that can alias anything + static Symbol wildcardSet() { + static const Symbol wc = Symbol::fromQualString("alias::*"); + return wc; + } + + void setIsWrite(bool isWrite) { + isWrite_ = isWrite; + } + + bool isWrite() const { + return isWrite_; + } + + void addBeforeSet(Symbol aliasSet) { + beforeSets_.insert(aliasSet); + } + + void addAfterSet(Symbol aliasSet) { + afterSets_.insert(aliasSet); + } + + const std::unordered_set& beforeSets() const { + return beforeSets_; + } + + const std::unordered_set& afterSets() const { + return afterSets_; + } + + Symbol beforeSet() const { + AT_ASSERT(beforeSets_.size() == 1); + return *beforeSets_.begin(); + } + + bool isWildcardBefore() const { + return beforeSets_.count(wildcardSet()) != 0; + } + + bool isWildcardAfter() const { + return afterSets_.count(wildcardSet()) != 0; + } + + // the alias info for the contained types of the type + // e.g. if this is an annotation on List[T], `sets` refers to + // the alias sets that the list may be in + // while containedTypes()[0] refers to the sets that members of the list + // may be in + void addContainedType(AliasInfo aliasInfo) { + containedTypes_.push_back(std::move(aliasInfo)); + } + const std::vector& containedTypes() const { + return containedTypes_; + } + + private: + std::unordered_set beforeSets_; + std::unordered_set afterSets_; + std::vector containedTypes_; + bool isWrite_ = false; +}; + +inline bool operator==(const AliasInfo& lhs, const AliasInfo& rhs) { + return lhs.isWrite() == rhs.isWrite() + && lhs.beforeSets() == rhs.beforeSets() + && lhs.afterSets() == rhs.afterSets() + && lhs.containedTypes() == rhs.containedTypes(); +} + +// this does match the way things are represented in the schema +inline std::ostream& operator<<(std::ostream& out, const AliasInfo& aliasInfo) { + out << "("; + bool first = true; + for (const auto& set : aliasInfo.beforeSets()) { + if (first) { + first = false; + } else { + out << "|"; + } + out << set.toUnqualString(); + } + if (aliasInfo.isWrite()) { + out << "!"; + } + if (aliasInfo.beforeSets() != aliasInfo.afterSets()) { + out << " -> "; + first = true; + for (const auto& set : aliasInfo.afterSets()) { + if (first) { + first = false; + } else { + out << "|"; + } + out << set.toUnqualString(); + } + } + out << ")"; + return out; +} +} // namespace c10 + +namespace std { +template <> + struct hash { + size_t operator()(const c10::AliasInfo& aliasInfo) const { + auto hash = std::hash()(aliasInfo.isWrite()); + + // NOTE: for unordered_set hashes, we couldn't use hash_combine + // because hash_combine is order dependent. Instead, we choose to + // use XOR as the combining function as XOR is commutative. 
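+      // For example, the sets {A, B} and {B, A} XOR to the same combined
+      // value, so the hash does not depend on unordered_set iteration order.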
+ size_t before_set_hash_seed = 0; + for (auto &e: aliasInfo.beforeSets()) { + auto symbol_hash = std::hash()(e); + before_set_hash_seed = before_set_hash_seed ^ symbol_hash; + } + size_t after_set_hash_seed = 0; + for (auto &e: aliasInfo.afterSets()) { + auto symbol_hash = std::hash()(e); + after_set_hash_seed = after_set_hash_seed ^ symbol_hash; + } + + hash = c10::hash_combine(hash, before_set_hash_seed); + hash = c10::hash_combine(hash, after_set_hash_seed); + for (auto &e: aliasInfo.containedTypes()) { + auto contained_type_hash = std::hash()(e); + hash = c10::hash_combine(hash, contained_type_hash); + } + return hash; + } + }; +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h new file mode 100644 index 0000000000000000000000000000000000000000..9b6c3a678f0d6e268e371c181273c1bee4f59913 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/aten_interned_strings.h @@ -0,0 +1,2180 @@ +#pragma once + +// @generated by torchgen/gen.py from aten_interned_strings.h + +#if defined(TORCH_ASSERT_NO_OPERATORS) || defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS) +#error This change adds a dependency on native_functions.yaml, \ + meaning the file will need to be re-compiled every time an operator \ + is changed or added. Consider if including for \ + the c10::Symbol class would be sufficient, or if your change would be \ + better placed in another file. +#endif + +// ATen symbols correspond exactly to operators defined in ATen. Every +// symbol here corresponds exactly to an ATen operation defined in +// native_functions.yaml; attributes are in one-to-one correspondence +// with their ATen name. 
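+// FORALL_ATEN_BASE_SYMBOLS is an X-macro: a consumer passes in a two-argument
+// macro, which is then applied to every (namespace, symbol) pair in the list.
+// A minimal sketch of that usage pattern (the consumer macro here is
+// hypothetical, not something defined in this header):
+//
+//   #define COUNT_ONE(ns, s) + 1
+//   constexpr int kNumAtenBaseSymbols = 0 FORALL_ATEN_BASE_SYMBOLS(COUNT_ONE);
+//   #undef COUNT_ONE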
+ +#define FORALL_ATEN_BASE_SYMBOLS(_) \ +_(aten, __and__) \ +_(aten, __iand__) \ +_(aten, __ilshift__) \ +_(aten, __ior__) \ +_(aten, __irshift__) \ +_(aten, __ixor__) \ +_(aten, __lshift__) \ +_(aten, __or__) \ +_(aten, __rshift__) \ +_(aten, __xor__) \ +_(aten, _adaptive_avg_pool2d) \ +_(aten, _adaptive_avg_pool2d_backward) \ +_(aten, _adaptive_avg_pool3d) \ +_(aten, _adaptive_avg_pool3d_backward) \ +_(aten, _add_batch_dim) \ +_(aten, _add_relu) \ +_(aten, _add_relu_) \ +_(aten, _addmm_activation) \ +_(aten, _aminmax) \ +_(aten, _amp_foreach_non_finite_check_and_unscale) \ +_(aten, _amp_foreach_non_finite_check_and_unscale_) \ +_(aten, _amp_update_scale) \ +_(aten, _amp_update_scale_) \ +_(aten, _assert_async) \ +_(aten, _assert_tensor_metadata) \ +_(aten, _autocast_to_full_precision) \ +_(aten, _autocast_to_reduced_precision) \ +_(aten, _backward) \ +_(aten, _batch_norm_impl_index) \ +_(aten, _batch_norm_impl_index_backward) \ +_(aten, _cast_Byte) \ +_(aten, _cast_Char) \ +_(aten, _cast_Double) \ +_(aten, _cast_Float) \ +_(aten, _cast_Half) \ +_(aten, _cast_Int) \ +_(aten, _cast_Long) \ +_(aten, _cast_Short) \ +_(aten, _cdist_backward) \ +_(aten, _cdist_forward) \ +_(aten, _cholesky_solve_helper) \ +_(aten, _choose_qparams_per_tensor) \ +_(aten, _coalesce) \ +_(aten, _coalesced) \ +_(aten, _coalesced_) \ +_(aten, _compute_linear_combination) \ +_(aten, _conj) \ +_(aten, _conj_copy) \ +_(aten, _conj_physical) \ +_(aten, _conv_depthwise2d) \ +_(aten, _convert_indices_from_coo_to_csr) \ +_(aten, _convert_indices_from_csr_to_coo) \ +_(aten, _convert_weight_to_int4pack) \ +_(aten, _convolution) \ +_(aten, _convolution_double_backward) \ +_(aten, _convolution_mode) \ +_(aten, _copy_from) \ +_(aten, _copy_from_and_resize) \ +_(aten, _cslt_compress) \ +_(aten, _cslt_sparse_mm) \ +_(aten, _ctc_loss) \ +_(aten, _ctc_loss_backward) \ +_(aten, _cudnn_ctc_loss) \ +_(aten, _cudnn_init_dropout_state) \ +_(aten, _cudnn_rnn) \ +_(aten, _cudnn_rnn_backward) \ +_(aten, _cudnn_rnn_flatten_weight) \ +_(aten, _cufft_clear_plan_cache) \ +_(aten, _cufft_get_plan_cache_max_size) \ +_(aten, _cufft_get_plan_cache_size) \ +_(aten, _cufft_set_plan_cache_max_size) \ +_(aten, _cummax_helper) \ +_(aten, _cummin_helper) \ +_(aten, _debug_has_internal_overlap) \ +_(aten, _dimI) \ +_(aten, _dimV) \ +_(aten, _dim_arange) \ +_(aten, _dirichlet_grad) \ +_(aten, _efficient_attention_backward) \ +_(aten, _efficient_attention_forward) \ +_(aten, _efficientzerotensor) \ +_(aten, _embedding_bag) \ +_(aten, _embedding_bag_backward) \ +_(aten, _embedding_bag_dense_backward) \ +_(aten, _embedding_bag_forward_only) \ +_(aten, _embedding_bag_per_sample_weights_backward) \ +_(aten, _embedding_bag_sparse_backward) \ +_(aten, _empty_affine_quantized) \ +_(aten, _empty_per_channel_affine_quantized) \ +_(aten, _euclidean_dist) \ +_(aten, _fake_quantize_learnable_per_channel_affine) \ +_(aten, _fake_quantize_learnable_per_channel_affine_backward) \ +_(aten, _fake_quantize_learnable_per_tensor_affine) \ +_(aten, _fake_quantize_learnable_per_tensor_affine_backward) \ +_(aten, _fake_quantize_per_tensor_affine_cachemask_tensor_qparams) \ +_(aten, _fft_c2c) \ +_(aten, _fft_c2r) \ +_(aten, _fft_r2c) \ +_(aten, _fill_mem_eff_dropout_mask) \ +_(aten, _fill_mem_eff_dropout_mask_) \ +_(aten, _flash_attention_backward) \ +_(aten, _flash_attention_forward) \ +_(aten, _foobar) \ +_(aten, _foreach_abs) \ +_(aten, _foreach_abs_) \ +_(aten, _foreach_acos) \ +_(aten, _foreach_acos_) \ +_(aten, _foreach_add) \ +_(aten, _foreach_add_) \ +_(aten, 
_foreach_addcdiv) \ +_(aten, _foreach_addcdiv_) \ +_(aten, _foreach_addcmul) \ +_(aten, _foreach_addcmul_) \ +_(aten, _foreach_asin) \ +_(aten, _foreach_asin_) \ +_(aten, _foreach_atan) \ +_(aten, _foreach_atan_) \ +_(aten, _foreach_ceil) \ +_(aten, _foreach_ceil_) \ +_(aten, _foreach_clamp_max) \ +_(aten, _foreach_clamp_max_) \ +_(aten, _foreach_clamp_min) \ +_(aten, _foreach_clamp_min_) \ +_(aten, _foreach_copy) \ +_(aten, _foreach_copy_) \ +_(aten, _foreach_cos) \ +_(aten, _foreach_cos_) \ +_(aten, _foreach_cosh) \ +_(aten, _foreach_cosh_) \ +_(aten, _foreach_div) \ +_(aten, _foreach_div_) \ +_(aten, _foreach_erf) \ +_(aten, _foreach_erf_) \ +_(aten, _foreach_erfc) \ +_(aten, _foreach_erfc_) \ +_(aten, _foreach_exp) \ +_(aten, _foreach_exp_) \ +_(aten, _foreach_expm1) \ +_(aten, _foreach_expm1_) \ +_(aten, _foreach_floor) \ +_(aten, _foreach_floor_) \ +_(aten, _foreach_frac) \ +_(aten, _foreach_frac_) \ +_(aten, _foreach_lerp) \ +_(aten, _foreach_lerp_) \ +_(aten, _foreach_lgamma) \ +_(aten, _foreach_lgamma_) \ +_(aten, _foreach_log) \ +_(aten, _foreach_log10) \ +_(aten, _foreach_log10_) \ +_(aten, _foreach_log1p) \ +_(aten, _foreach_log1p_) \ +_(aten, _foreach_log2) \ +_(aten, _foreach_log2_) \ +_(aten, _foreach_log_) \ +_(aten, _foreach_maximum) \ +_(aten, _foreach_maximum_) \ +_(aten, _foreach_minimum) \ +_(aten, _foreach_minimum_) \ +_(aten, _foreach_mul) \ +_(aten, _foreach_mul_) \ +_(aten, _foreach_neg) \ +_(aten, _foreach_neg_) \ +_(aten, _foreach_norm) \ +_(aten, _foreach_pow) \ +_(aten, _foreach_pow_) \ +_(aten, _foreach_reciprocal) \ +_(aten, _foreach_reciprocal_) \ +_(aten, _foreach_round) \ +_(aten, _foreach_round_) \ +_(aten, _foreach_sigmoid) \ +_(aten, _foreach_sigmoid_) \ +_(aten, _foreach_sign) \ +_(aten, _foreach_sign_) \ +_(aten, _foreach_sin) \ +_(aten, _foreach_sin_) \ +_(aten, _foreach_sinh) \ +_(aten, _foreach_sinh_) \ +_(aten, _foreach_sqrt) \ +_(aten, _foreach_sqrt_) \ +_(aten, _foreach_sub) \ +_(aten, _foreach_sub_) \ +_(aten, _foreach_tan) \ +_(aten, _foreach_tan_) \ +_(aten, _foreach_tanh) \ +_(aten, _foreach_tanh_) \ +_(aten, _foreach_trunc) \ +_(aten, _foreach_trunc_) \ +_(aten, _foreach_zero) \ +_(aten, _foreach_zero_) \ +_(aten, _functional_assert_async) \ +_(aten, _functional_sym_constrain_range) \ +_(aten, _functional_sym_constrain_range_for_size) \ +_(aten, _fused_adam) \ +_(aten, _fused_adam_) \ +_(aten, _fused_adamw) \ +_(aten, _fused_adamw_) \ +_(aten, _fused_dropout) \ +_(aten, _fused_moving_avg_obs_fq_helper) \ +_(aten, _fused_moving_avg_obs_fq_helper_functional) \ +_(aten, _fused_sdp_choice) \ +_(aten, _fw_primal) \ +_(aten, _fw_primal_copy) \ +_(aten, _gather_sparse_backward) \ +_(aten, _grid_sampler_2d_cpu_fallback) \ +_(aten, _grid_sampler_2d_cpu_fallback_backward) \ +_(aten, _has_compatible_shallow_copy_type) \ +_(aten, _has_same_storage_numel) \ +_(aten, _histogramdd_bin_edges) \ +_(aten, _histogramdd_from_bin_cts) \ +_(aten, _histogramdd_from_bin_tensors) \ +_(aten, _index_put_impl) \ +_(aten, _index_put_impl_) \ +_(aten, _indices) \ +_(aten, _indices_copy) \ +_(aten, _int_mm) \ +_(aten, _is_all_true) \ +_(aten, _is_any_true) \ +_(aten, _is_zerotensor) \ +_(aten, _linalg_check_errors) \ +_(aten, _linalg_det) \ +_(aten, _linalg_eigh) \ +_(aten, _linalg_slogdet) \ +_(aten, _linalg_solve_ex) \ +_(aten, _linalg_svd) \ +_(aten, _local_scalar_dense) \ +_(aten, _log_softmax) \ +_(aten, _log_softmax_backward_data) \ +_(aten, _logcumsumexp) \ +_(aten, _lstm_mps) \ +_(aten, _lu_with_info) \ +_(aten, _make_dep_token) \ +_(aten, _make_dual) \ +_(aten, 
_make_dual_copy) \ +_(aten, _make_per_channel_quantized_tensor) \ +_(aten, _make_per_tensor_quantized_tensor) \ +_(aten, _masked_scale) \ +_(aten, _masked_softmax) \ +_(aten, _masked_softmax_backward) \ +_(aten, _mixed_dtypes_linear) \ +_(aten, _mkldnn_reshape) \ +_(aten, _mkldnn_transpose) \ +_(aten, _mkldnn_transpose_) \ +_(aten, _mps_convolution) \ +_(aten, _mps_convolution_transpose) \ +_(aten, _native_batch_norm_legit) \ +_(aten, _native_batch_norm_legit_functional) \ +_(aten, _native_batch_norm_legit_no_training) \ +_(aten, _native_multi_head_attention) \ +_(aten, _neg_view) \ +_(aten, _neg_view_copy) \ +_(aten, _nested_from_padded) \ +_(aten, _nested_from_padded_and_nested_example) \ +_(aten, _nested_select_backward) \ +_(aten, _nested_sum_backward) \ +_(aten, _nested_tensor_from_mask) \ +_(aten, _nested_tensor_from_mask_left_aligned) \ +_(aten, _nested_tensor_from_tensor_list) \ +_(aten, _nested_tensor_size) \ +_(aten, _nested_tensor_softmax_with_shape) \ +_(aten, _nested_tensor_storage_offsets) \ +_(aten, _nested_tensor_strides) \ +_(aten, _nested_view_from_buffer) \ +_(aten, _nested_view_from_buffer_copy) \ +_(aten, _new_zeros_with_same_feature_meta) \ +_(aten, _nnpack_available) \ +_(aten, _nnpack_spatial_convolution) \ +_(aten, _nnz) \ +_(aten, _pack_padded_sequence) \ +_(aten, _pack_padded_sequence_backward) \ +_(aten, _pad_circular) \ +_(aten, _pad_enum) \ +_(aten, _pad_packed_sequence) \ +_(aten, _pdist_backward) \ +_(aten, _pdist_forward) \ +_(aten, _pin_memory) \ +_(aten, _prelu_kernel) \ +_(aten, _prelu_kernel_backward) \ +_(aten, _propagate_xla_data) \ +_(aten, _remove_batch_dim) \ +_(aten, _reshape_alias) \ +_(aten, _reshape_alias_copy) \ +_(aten, _reshape_copy) \ +_(aten, _reshape_from_tensor) \ +_(aten, _resize_output) \ +_(aten, _resize_output_) \ +_(aten, _rowwise_prune) \ +_(aten, _sample_dirichlet) \ +_(aten, _saturate_weight_to_fp16) \ +_(aten, _scaled_dot_product_attention_math) \ +_(aten, _scaled_dot_product_efficient_attention) \ +_(aten, _scaled_dot_product_efficient_attention_backward) \ +_(aten, _scaled_dot_product_flash_attention) \ +_(aten, _scaled_dot_product_flash_attention_backward) \ +_(aten, _scaled_mm) \ +_(aten, _segment_reduce_backward) \ +_(aten, _shape_as_tensor) \ +_(aten, _slow_conv2d_backward) \ +_(aten, _slow_conv2d_forward) \ +_(aten, _sobol_engine_draw) \ +_(aten, _sobol_engine_ff) \ +_(aten, _sobol_engine_ff_) \ +_(aten, _sobol_engine_initialize_state) \ +_(aten, _sobol_engine_initialize_state_) \ +_(aten, _sobol_engine_scramble) \ +_(aten, _sobol_engine_scramble_) \ +_(aten, _softmax) \ +_(aten, _softmax_backward_data) \ +_(aten, _sparse_addmm) \ +_(aten, _sparse_broadcast_to) \ +_(aten, _sparse_broadcast_to_copy) \ +_(aten, _sparse_bsc_tensor_unsafe) \ +_(aten, _sparse_bsr_tensor_unsafe) \ +_(aten, _sparse_compressed_tensor_unsafe) \ +_(aten, _sparse_coo_tensor_unsafe) \ +_(aten, _sparse_coo_tensor_with_dims) \ +_(aten, _sparse_coo_tensor_with_dims_and_tensors) \ +_(aten, _sparse_csc_tensor_unsafe) \ +_(aten, _sparse_csr_prod) \ +_(aten, _sparse_csr_sum) \ +_(aten, _sparse_csr_tensor_unsafe) \ +_(aten, _sparse_log_softmax) \ +_(aten, _sparse_log_softmax_backward_data) \ +_(aten, _sparse_mask_projection) \ +_(aten, _sparse_mm) \ +_(aten, _sparse_mm_reduce_impl) \ +_(aten, _sparse_mm_reduce_impl_backward) \ +_(aten, _sparse_semi_structured_linear) \ +_(aten, _sparse_softmax) \ +_(aten, _sparse_softmax_backward_data) \ +_(aten, _sparse_sparse_matmul) \ +_(aten, _sparse_sum) \ +_(aten, _sparse_sum_backward) \ +_(aten, _spdiags) \ +_(aten, 
_stack) \ +_(aten, _standard_gamma) \ +_(aten, _standard_gamma_grad) \ +_(aten, _test_ambiguous_defaults) \ +_(aten, _test_autograd_multiple_dispatch) \ +_(aten, _test_autograd_multiple_dispatch_view) \ +_(aten, _test_autograd_multiple_dispatch_view_copy) \ +_(aten, _test_check_tensor) \ +_(aten, _test_functorch_fallback) \ +_(aten, _test_optional_filled_intlist) \ +_(aten, _test_optional_floatlist) \ +_(aten, _test_optional_intlist) \ +_(aten, _test_serialization_subcmul) \ +_(aten, _test_string_default) \ +_(aten, _test_warn_in_autograd) \ +_(aten, _thnn_differentiable_gru_cell_backward) \ +_(aten, _thnn_differentiable_lstm_cell_backward) \ +_(aten, _thnn_fused_gru_cell) \ +_(aten, _thnn_fused_gru_cell_backward) \ +_(aten, _thnn_fused_lstm_cell) \ +_(aten, _thnn_fused_lstm_cell_backward) \ +_(aten, _thnn_fused_lstm_cell_backward_impl) \ +_(aten, _to_copy) \ +_(aten, _to_cpu) \ +_(aten, _to_dense) \ +_(aten, _to_sparse) \ +_(aten, _to_sparse_bsc) \ +_(aten, _to_sparse_bsr) \ +_(aten, _to_sparse_csc) \ +_(aten, _to_sparse_csr) \ +_(aten, _to_sparse_semi_structured) \ +_(aten, _transform_bias_rescale_qkv) \ +_(aten, _transformer_encoder_layer_fwd) \ +_(aten, _trilinear) \ +_(aten, _triton_multi_head_attention) \ +_(aten, _triton_scaled_dot_attention) \ +_(aten, _unique) \ +_(aten, _unique2) \ +_(aten, _unpack_dual) \ +_(aten, _unsafe_index) \ +_(aten, _unsafe_index_put) \ +_(aten, _unsafe_view) \ +_(aten, _upsample_bicubic2d_aa) \ +_(aten, _upsample_bicubic2d_aa_backward) \ +_(aten, _upsample_bilinear2d_aa) \ +_(aten, _upsample_bilinear2d_aa_backward) \ +_(aten, _upsample_nearest_exact1d) \ +_(aten, _upsample_nearest_exact1d_backward) \ +_(aten, _upsample_nearest_exact2d) \ +_(aten, _upsample_nearest_exact2d_backward) \ +_(aten, _upsample_nearest_exact3d) \ +_(aten, _upsample_nearest_exact3d_backward) \ +_(aten, _use_cudnn_ctc_loss) \ +_(aten, _use_cudnn_rnn_flatten_weight) \ +_(aten, _validate_compressed_sparse_indices) \ +_(aten, _validate_sparse_bsc_tensor_args) \ +_(aten, _validate_sparse_bsr_tensor_args) \ +_(aten, _validate_sparse_compressed_tensor_args) \ +_(aten, _validate_sparse_coo_tensor_args) \ +_(aten, _validate_sparse_csc_tensor_args) \ +_(aten, _validate_sparse_csr_tensor_args) \ +_(aten, _values) \ +_(aten, _values_copy) \ +_(aten, _version) \ +_(aten, _weight_int4pack_mm) \ +_(aten, _weight_norm) \ +_(aten, _weight_norm_differentiable_backward) \ +_(aten, _weight_norm_interface) \ +_(aten, _weight_norm_interface_backward) \ +_(aten, abs) \ +_(aten, abs_) \ +_(aten, absolute) \ +_(aten, absolute_) \ +_(aten, acos) \ +_(aten, acos_) \ +_(aten, acosh) \ +_(aten, acosh_) \ +_(aten, adaptive_avg_pool1d) \ +_(aten, adaptive_avg_pool2d) \ +_(aten, adaptive_avg_pool3d) \ +_(aten, adaptive_avg_pool3d_backward) \ +_(aten, adaptive_max_pool1d) \ +_(aten, adaptive_max_pool2d) \ +_(aten, adaptive_max_pool2d_backward) \ +_(aten, adaptive_max_pool3d) \ +_(aten, adaptive_max_pool3d_backward) \ +_(aten, add) \ +_(aten, add_) \ +_(aten, addbmm) \ +_(aten, addbmm_) \ +_(aten, addcdiv) \ +_(aten, addcdiv_) \ +_(aten, addcmul) \ +_(aten, addcmul_) \ +_(aten, addmm) \ +_(aten, addmm_) \ +_(aten, addmv) \ +_(aten, addmv_) \ +_(aten, addr) \ +_(aten, addr_) \ +_(aten, adjoint) \ +_(aten, affine_grid_generator) \ +_(aten, affine_grid_generator_backward) \ +_(aten, alias) \ +_(aten, alias_copy) \ +_(aten, align_as) \ +_(aten, align_tensors) \ +_(aten, align_to) \ +_(aten, all) \ +_(aten, allclose) \ +_(aten, alpha_dropout) \ +_(aten, alpha_dropout_) \ +_(aten, amax) \ +_(aten, amin) \ +_(aten, 
aminmax) \ +_(aten, angle) \ +_(aten, any) \ +_(aten, arange) \ +_(aten, arccos) \ +_(aten, arccos_) \ +_(aten, arccosh) \ +_(aten, arccosh_) \ +_(aten, arcsin) \ +_(aten, arcsin_) \ +_(aten, arcsinh) \ +_(aten, arcsinh_) \ +_(aten, arctan) \ +_(aten, arctan2) \ +_(aten, arctan2_) \ +_(aten, arctan_) \ +_(aten, arctanh) \ +_(aten, arctanh_) \ +_(aten, argmax) \ +_(aten, argmin) \ +_(aten, argsort) \ +_(aten, argwhere) \ +_(aten, as_strided) \ +_(aten, as_strided_) \ +_(aten, as_strided_copy) \ +_(aten, as_strided_scatter) \ +_(aten, asin) \ +_(aten, asin_) \ +_(aten, asinh) \ +_(aten, asinh_) \ +_(aten, atan) \ +_(aten, atan2) \ +_(aten, atan2_) \ +_(aten, atan_) \ +_(aten, atanh) \ +_(aten, atanh_) \ +_(aten, atleast_1d) \ +_(aten, atleast_2d) \ +_(aten, atleast_3d) \ +_(aten, avg_pool1d) \ +_(aten, avg_pool2d) \ +_(aten, avg_pool2d_backward) \ +_(aten, avg_pool3d) \ +_(aten, avg_pool3d_backward) \ +_(aten, baddbmm) \ +_(aten, baddbmm_) \ +_(aten, bartlett_window) \ +_(aten, batch_norm) \ +_(aten, batch_norm_backward_elemt) \ +_(aten, batch_norm_backward_reduce) \ +_(aten, batch_norm_elemt) \ +_(aten, batch_norm_gather_stats) \ +_(aten, batch_norm_gather_stats_with_counts) \ +_(aten, batch_norm_stats) \ +_(aten, batch_norm_update_stats) \ +_(aten, bernoulli) \ +_(aten, bernoulli_) \ +_(aten, bilinear) \ +_(aten, binary_cross_entropy) \ +_(aten, binary_cross_entropy_backward) \ +_(aten, binary_cross_entropy_with_logits) \ +_(aten, bincount) \ +_(aten, binomial) \ +_(aten, bitwise_and) \ +_(aten, bitwise_and_) \ +_(aten, bitwise_left_shift) \ +_(aten, bitwise_left_shift_) \ +_(aten, bitwise_not) \ +_(aten, bitwise_not_) \ +_(aten, bitwise_or) \ +_(aten, bitwise_or_) \ +_(aten, bitwise_right_shift) \ +_(aten, bitwise_right_shift_) \ +_(aten, bitwise_xor) \ +_(aten, bitwise_xor_) \ +_(aten, blackman_window) \ +_(aten, block_diag) \ +_(aten, bmm) \ +_(aten, broadcast_tensors) \ +_(aten, broadcast_to) \ +_(aten, bucketize) \ +_(aten, can_cast) \ +_(aten, cartesian_prod) \ +_(aten, cat) \ +_(aten, cauchy) \ +_(aten, cauchy_) \ +_(aten, ccol_indices) \ +_(aten, ccol_indices_copy) \ +_(aten, cdist) \ +_(aten, ceil) \ +_(aten, ceil_) \ +_(aten, celu) \ +_(aten, celu_) \ +_(aten, chain_matmul) \ +_(aten, chalf) \ +_(aten, channel_shuffle) \ +_(aten, cholesky) \ +_(aten, cholesky_inverse) \ +_(aten, cholesky_solve) \ +_(aten, choose_qparams_optimized) \ +_(aten, chunk) \ +_(aten, clamp) \ +_(aten, clamp_) \ +_(aten, clamp_max) \ +_(aten, clamp_max_) \ +_(aten, clamp_min) \ +_(aten, clamp_min_) \ +_(aten, clip) \ +_(aten, clip_) \ +_(aten, clone) \ +_(aten, coalesce) \ +_(aten, col2im) \ +_(aten, col_indices) \ +_(aten, col_indices_copy) \ +_(aten, column_stack) \ +_(aten, combinations) \ +_(aten, complex) \ +_(aten, concat) \ +_(aten, concatenate) \ +_(aten, conj) \ +_(aten, conj_physical) \ +_(aten, conj_physical_) \ +_(aten, constant_pad_nd) \ +_(aten, contiguous) \ +_(aten, conv1d) \ +_(aten, conv2d) \ +_(aten, conv3d) \ +_(aten, conv_depthwise3d) \ +_(aten, conv_tbc) \ +_(aten, conv_tbc_backward) \ +_(aten, conv_transpose1d) \ +_(aten, conv_transpose2d) \ +_(aten, conv_transpose3d) \ +_(aten, convolution) \ +_(aten, convolution_backward) \ +_(aten, convolution_backward_overrideable) \ +_(aten, convolution_overrideable) \ +_(aten, copy) \ +_(aten, copy_) \ +_(aten, copy_sparse_to_sparse) \ +_(aten, copy_sparse_to_sparse_) \ +_(aten, copysign) \ +_(aten, copysign_) \ +_(aten, corrcoef) \ +_(aten, cos) \ +_(aten, cos_) \ +_(aten, cosh) \ +_(aten, cosh_) \ +_(aten, cosine_embedding_loss) \ +_(aten, 
cosine_similarity) \ +_(aten, count_nonzero) \ +_(aten, cov) \ +_(aten, cross) \ +_(aten, cross_entropy_loss) \ +_(aten, crow_indices) \ +_(aten, crow_indices_copy) \ +_(aten, ctc_loss) \ +_(aten, cudnn_affine_grid_generator) \ +_(aten, cudnn_affine_grid_generator_backward) \ +_(aten, cudnn_batch_norm) \ +_(aten, cudnn_batch_norm_backward) \ +_(aten, cudnn_convolution) \ +_(aten, cudnn_convolution_add_relu) \ +_(aten, cudnn_convolution_relu) \ +_(aten, cudnn_convolution_transpose) \ +_(aten, cudnn_grid_sampler) \ +_(aten, cudnn_grid_sampler_backward) \ +_(aten, cudnn_is_acceptable) \ +_(aten, cummax) \ +_(aten, cummaxmin_backward) \ +_(aten, cummin) \ +_(aten, cumprod) \ +_(aten, cumprod_) \ +_(aten, cumprod_backward) \ +_(aten, cumsum) \ +_(aten, cumsum_) \ +_(aten, cumulative_trapezoid) \ +_(aten, data) \ +_(aten, deg2rad) \ +_(aten, deg2rad_) \ +_(aten, dense_dim) \ +_(aten, dequantize) \ +_(aten, det) \ +_(aten, detach) \ +_(aten, detach_) \ +_(aten, detach_copy) \ +_(aten, diag) \ +_(aten, diag_embed) \ +_(aten, diagflat) \ +_(aten, diagonal) \ +_(aten, diagonal_backward) \ +_(aten, diagonal_copy) \ +_(aten, diagonal_scatter) \ +_(aten, diff) \ +_(aten, digamma) \ +_(aten, digamma_) \ +_(aten, dist) \ +_(aten, div) \ +_(aten, div_) \ +_(aten, divide) \ +_(aten, divide_) \ +_(aten, dot) \ +_(aten, dropout) \ +_(aten, dropout_) \ +_(aten, dsplit) \ +_(aten, dstack) \ +_(aten, einsum) \ +_(aten, elu) \ +_(aten, elu_) \ +_(aten, elu_backward) \ +_(aten, embedding) \ +_(aten, embedding_backward) \ +_(aten, embedding_bag) \ +_(aten, embedding_dense_backward) \ +_(aten, embedding_renorm) \ +_(aten, embedding_renorm_) \ +_(aten, embedding_sparse_backward) \ +_(aten, empty) \ +_(aten, empty_like) \ +_(aten, empty_permuted) \ +_(aten, empty_quantized) \ +_(aten, empty_strided) \ +_(aten, eq) \ +_(aten, eq_) \ +_(aten, equal) \ +_(aten, erf) \ +_(aten, erf_) \ +_(aten, erfc) \ +_(aten, erfc_) \ +_(aten, erfinv) \ +_(aten, erfinv_) \ +_(aten, exp) \ +_(aten, exp2) \ +_(aten, exp2_) \ +_(aten, exp_) \ +_(aten, expand) \ +_(aten, expand_as) \ +_(aten, expand_copy) \ +_(aten, expm1) \ +_(aten, expm1_) \ +_(aten, exponential) \ +_(aten, exponential_) \ +_(aten, eye) \ +_(aten, fake_quantize_per_channel_affine) \ +_(aten, fake_quantize_per_channel_affine_cachemask) \ +_(aten, fake_quantize_per_channel_affine_cachemask_backward) \ +_(aten, fake_quantize_per_tensor_affine) \ +_(aten, fake_quantize_per_tensor_affine_cachemask) \ +_(aten, fake_quantize_per_tensor_affine_cachemask_backward) \ +_(aten, fbgemm_linear_fp16_weight) \ +_(aten, fbgemm_linear_fp16_weight_fp32_activation) \ +_(aten, fbgemm_linear_int8_weight) \ +_(aten, fbgemm_linear_int8_weight_fp32_activation) \ +_(aten, fbgemm_linear_quantize_weight) \ +_(aten, fbgemm_pack_gemm_matrix_fp16) \ +_(aten, fbgemm_pack_quantized_matrix) \ +_(aten, feature_alpha_dropout) \ +_(aten, feature_alpha_dropout_) \ +_(aten, feature_dropout) \ +_(aten, feature_dropout_) \ +_(aten, fft_fft) \ +_(aten, fft_fft2) \ +_(aten, fft_fftfreq) \ +_(aten, fft_fftn) \ +_(aten, fft_fftshift) \ +_(aten, fft_hfft) \ +_(aten, fft_hfft2) \ +_(aten, fft_hfftn) \ +_(aten, fft_ifft) \ +_(aten, fft_ifft2) \ +_(aten, fft_ifftn) \ +_(aten, fft_ifftshift) \ +_(aten, fft_ihfft) \ +_(aten, fft_ihfft2) \ +_(aten, fft_ihfftn) \ +_(aten, fft_irfft) \ +_(aten, fft_irfft2) \ +_(aten, fft_irfftn) \ +_(aten, fft_rfft) \ +_(aten, fft_rfft2) \ +_(aten, fft_rfftfreq) \ +_(aten, fft_rfftn) \ +_(aten, fill) \ +_(aten, fill_) \ +_(aten, fill_diagonal) \ +_(aten, fill_diagonal_) \ +_(aten, fix) \ 
+_(aten, fix_) \ +_(aten, flatten) \ +_(aten, flatten_dense_tensors) \ +_(aten, flip) \ +_(aten, fliplr) \ +_(aten, flipud) \ +_(aten, float_power) \ +_(aten, float_power_) \ +_(aten, floor) \ +_(aten, floor_) \ +_(aten, floor_divide) \ +_(aten, floor_divide_) \ +_(aten, fmax) \ +_(aten, fmin) \ +_(aten, fmod) \ +_(aten, fmod_) \ +_(aten, frac) \ +_(aten, frac_) \ +_(aten, fractional_max_pool2d) \ +_(aten, fractional_max_pool2d_backward) \ +_(aten, fractional_max_pool3d) \ +_(aten, fractional_max_pool3d_backward) \ +_(aten, frexp) \ +_(aten, frobenius_norm) \ +_(aten, from_file) \ +_(aten, full) \ +_(aten, full_like) \ +_(aten, fused_moving_avg_obs_fake_quant) \ +_(aten, gather) \ +_(aten, gather_backward) \ +_(aten, gcd) \ +_(aten, gcd_) \ +_(aten, ge) \ +_(aten, ge_) \ +_(aten, gelu) \ +_(aten, gelu_) \ +_(aten, gelu_backward) \ +_(aten, geometric) \ +_(aten, geometric_) \ +_(aten, geqrf) \ +_(aten, ger) \ +_(aten, glu) \ +_(aten, glu_backward) \ +_(aten, glu_backward_jvp) \ +_(aten, glu_jvp) \ +_(aten, gradient) \ +_(aten, greater) \ +_(aten, greater_) \ +_(aten, greater_equal) \ +_(aten, greater_equal_) \ +_(aten, grid_sampler) \ +_(aten, grid_sampler_2d) \ +_(aten, grid_sampler_2d_backward) \ +_(aten, grid_sampler_3d) \ +_(aten, grid_sampler_3d_backward) \ +_(aten, group_norm) \ +_(aten, gru) \ +_(aten, gru_cell) \ +_(aten, gt) \ +_(aten, gt_) \ +_(aten, hamming_window) \ +_(aten, hann_window) \ +_(aten, hardshrink) \ +_(aten, hardshrink_backward) \ +_(aten, hardsigmoid) \ +_(aten, hardsigmoid_) \ +_(aten, hardsigmoid_backward) \ +_(aten, hardswish) \ +_(aten, hardswish_) \ +_(aten, hardswish_backward) \ +_(aten, hardtanh) \ +_(aten, hardtanh_) \ +_(aten, hardtanh_backward) \ +_(aten, heaviside) \ +_(aten, heaviside_) \ +_(aten, hinge_embedding_loss) \ +_(aten, histc) \ +_(aten, histogram) \ +_(aten, histogramdd) \ +_(aten, hsplit) \ +_(aten, hspmm) \ +_(aten, hstack) \ +_(aten, huber_loss) \ +_(aten, huber_loss_backward) \ +_(aten, hypot) \ +_(aten, hypot_) \ +_(aten, i0) \ +_(aten, i0_) \ +_(aten, igamma) \ +_(aten, igamma_) \ +_(aten, igammac) \ +_(aten, igammac_) \ +_(aten, im2col) \ +_(aten, imag) \ +_(aten, index) \ +_(aten, index_add) \ +_(aten, index_add_) \ +_(aten, index_copy) \ +_(aten, index_copy_) \ +_(aten, index_fill) \ +_(aten, index_fill_) \ +_(aten, index_put) \ +_(aten, index_put_) \ +_(aten, index_reduce) \ +_(aten, index_reduce_) \ +_(aten, index_select) \ +_(aten, index_select_backward) \ +_(aten, indices) \ +_(aten, indices_copy) \ +_(aten, infinitely_differentiable_gelu_backward) \ +_(aten, inner) \ +_(aten, instance_norm) \ +_(aten, int_repr) \ +_(aten, inverse) \ +_(aten, is_coalesced) \ +_(aten, is_complex) \ +_(aten, is_conj) \ +_(aten, is_distributed) \ +_(aten, is_floating_point) \ +_(aten, is_inference) \ +_(aten, is_leaf) \ +_(aten, is_neg) \ +_(aten, is_nonzero) \ +_(aten, is_pinned) \ +_(aten, is_same_size) \ +_(aten, is_set_to) \ +_(aten, is_signed) \ +_(aten, is_vulkan_available) \ +_(aten, isclose) \ +_(aten, isfinite) \ +_(aten, isin) \ +_(aten, isinf) \ +_(aten, isnan) \ +_(aten, isneginf) \ +_(aten, isposinf) \ +_(aten, isreal) \ +_(aten, istft) \ +_(aten, item) \ +_(aten, kaiser_window) \ +_(aten, kl_div) \ +_(aten, kron) \ +_(aten, kthvalue) \ +_(aten, l1_loss) \ +_(aten, layer_norm) \ +_(aten, lcm) \ +_(aten, lcm_) \ +_(aten, ldexp) \ +_(aten, ldexp_) \ +_(aten, le) \ +_(aten, le_) \ +_(aten, leaky_relu) \ +_(aten, leaky_relu_) \ +_(aten, leaky_relu_backward) \ +_(aten, lerp) \ +_(aten, lerp_) \ +_(aten, less) \ +_(aten, less_) \ +_(aten, 
less_equal) \ +_(aten, less_equal_) \ +_(aten, lgamma) \ +_(aten, lgamma_) \ +_(aten, lift) \ +_(aten, lift_fresh) \ +_(aten, lift_fresh_copy) \ +_(aten, linalg_cholesky) \ +_(aten, linalg_cholesky_ex) \ +_(aten, linalg_cond) \ +_(aten, linalg_cross) \ +_(aten, linalg_det) \ +_(aten, linalg_diagonal) \ +_(aten, linalg_eig) \ +_(aten, linalg_eigh) \ +_(aten, linalg_eigvals) \ +_(aten, linalg_eigvalsh) \ +_(aten, linalg_householder_product) \ +_(aten, linalg_inv) \ +_(aten, linalg_inv_ex) \ +_(aten, linalg_ldl_factor) \ +_(aten, linalg_ldl_factor_ex) \ +_(aten, linalg_ldl_solve) \ +_(aten, linalg_lstsq) \ +_(aten, linalg_lu) \ +_(aten, linalg_lu_factor) \ +_(aten, linalg_lu_factor_ex) \ +_(aten, linalg_lu_solve) \ +_(aten, linalg_matmul) \ +_(aten, linalg_matrix_exp) \ +_(aten, linalg_matrix_norm) \ +_(aten, linalg_matrix_power) \ +_(aten, linalg_matrix_rank) \ +_(aten, linalg_multi_dot) \ +_(aten, linalg_norm) \ +_(aten, linalg_pinv) \ +_(aten, linalg_qr) \ +_(aten, linalg_slogdet) \ +_(aten, linalg_solve) \ +_(aten, linalg_solve_ex) \ +_(aten, linalg_solve_triangular) \ +_(aten, linalg_svd) \ +_(aten, linalg_svdvals) \ +_(aten, linalg_tensorinv) \ +_(aten, linalg_tensorsolve) \ +_(aten, linalg_vander) \ +_(aten, linalg_vecdot) \ +_(aten, linalg_vector_norm) \ +_(aten, linear) \ +_(aten, linear_backward) \ +_(aten, linspace) \ +_(aten, log) \ +_(aten, log10) \ +_(aten, log10_) \ +_(aten, log1p) \ +_(aten, log1p_) \ +_(aten, log2) \ +_(aten, log2_) \ +_(aten, log_) \ +_(aten, log_normal) \ +_(aten, log_normal_) \ +_(aten, log_sigmoid) \ +_(aten, log_sigmoid_backward) \ +_(aten, log_sigmoid_forward) \ +_(aten, log_softmax) \ +_(aten, logaddexp) \ +_(aten, logaddexp2) \ +_(aten, logcumsumexp) \ +_(aten, logdet) \ +_(aten, logical_and) \ +_(aten, logical_and_) \ +_(aten, logical_not) \ +_(aten, logical_not_) \ +_(aten, logical_or) \ +_(aten, logical_or_) \ +_(aten, logical_xor) \ +_(aten, logical_xor_) \ +_(aten, logit) \ +_(aten, logit_) \ +_(aten, logit_backward) \ +_(aten, logspace) \ +_(aten, logsumexp) \ +_(aten, lshift) \ +_(aten, lstm) \ +_(aten, lstm_cell) \ +_(aten, lstm_mps_backward) \ +_(aten, lt) \ +_(aten, lt_) \ +_(aten, lu_solve) \ +_(aten, lu_unpack) \ +_(aten, mH) \ +_(aten, mT) \ +_(aten, margin_ranking_loss) \ +_(aten, masked_fill) \ +_(aten, masked_fill_) \ +_(aten, masked_scatter) \ +_(aten, masked_scatter_) \ +_(aten, masked_scatter_backward) \ +_(aten, masked_select) \ +_(aten, masked_select_backward) \ +_(aten, matmul) \ +_(aten, matmul_backward) \ +_(aten, matrix_H) \ +_(aten, matrix_exp) \ +_(aten, matrix_exp_backward) \ +_(aten, matrix_power) \ +_(aten, max) \ +_(aten, max_pool1d) \ +_(aten, max_pool1d_with_indices) \ +_(aten, max_pool2d) \ +_(aten, max_pool2d_backward) \ +_(aten, max_pool2d_with_indices) \ +_(aten, max_pool2d_with_indices_backward) \ +_(aten, max_pool3d) \ +_(aten, max_pool3d_with_indices) \ +_(aten, max_pool3d_with_indices_backward) \ +_(aten, max_unpool2d) \ +_(aten, max_unpool3d) \ +_(aten, maximum) \ +_(aten, mean) \ +_(aten, median) \ +_(aten, meshgrid) \ +_(aten, min) \ +_(aten, minimum) \ +_(aten, miopen_batch_norm) \ +_(aten, miopen_batch_norm_backward) \ +_(aten, miopen_convolution) \ +_(aten, miopen_convolution_add_relu) \ +_(aten, miopen_convolution_relu) \ +_(aten, miopen_convolution_transpose) \ +_(aten, miopen_depthwise_convolution) \ +_(aten, miopen_rnn) \ +_(aten, miopen_rnn_backward) \ +_(aten, mish) \ +_(aten, mish_) \ +_(aten, mish_backward) \ +_(aten, mkldnn_adaptive_avg_pool2d) \ +_(aten, mkldnn_adaptive_avg_pool2d_backward) \ 
+_(aten, mkldnn_convolution) \ +_(aten, mkldnn_linear) \ +_(aten, mkldnn_linear_backward) \ +_(aten, mkldnn_linear_backward_input) \ +_(aten, mkldnn_linear_backward_weights) \ +_(aten, mkldnn_max_pool2d) \ +_(aten, mkldnn_max_pool2d_backward) \ +_(aten, mkldnn_max_pool3d) \ +_(aten, mkldnn_max_pool3d_backward) \ +_(aten, mkldnn_reorder_conv2d_weight) \ +_(aten, mkldnn_reorder_conv3d_weight) \ +_(aten, mkldnn_rnn_layer) \ +_(aten, mkldnn_rnn_layer_backward) \ +_(aten, mm) \ +_(aten, mode) \ +_(aten, moveaxis) \ +_(aten, movedim) \ +_(aten, mps_convolution_backward) \ +_(aten, mps_convolution_transpose_backward) \ +_(aten, mse_loss) \ +_(aten, mse_loss_backward) \ +_(aten, msort) \ +_(aten, mul) \ +_(aten, mul_) \ +_(aten, multi_margin_loss) \ +_(aten, multi_margin_loss_backward) \ +_(aten, multilabel_margin_loss) \ +_(aten, multilabel_margin_loss_backward) \ +_(aten, multilabel_margin_loss_forward) \ +_(aten, multinomial) \ +_(aten, multiply) \ +_(aten, multiply_) \ +_(aten, mv) \ +_(aten, mvlgamma) \ +_(aten, mvlgamma_) \ +_(aten, nan_to_num) \ +_(aten, nan_to_num_) \ +_(aten, nanmean) \ +_(aten, nanmedian) \ +_(aten, nanquantile) \ +_(aten, nansum) \ +_(aten, narrow) \ +_(aten, narrow_copy) \ +_(aten, native_batch_norm) \ +_(aten, native_batch_norm_backward) \ +_(aten, native_channel_shuffle) \ +_(aten, native_dropout) \ +_(aten, native_dropout_backward) \ +_(aten, native_group_norm) \ +_(aten, native_group_norm_backward) \ +_(aten, native_layer_norm) \ +_(aten, native_layer_norm_backward) \ +_(aten, native_norm) \ +_(aten, ne) \ +_(aten, ne_) \ +_(aten, neg) \ +_(aten, neg_) \ +_(aten, negative) \ +_(aten, negative_) \ +_(aten, nested_to_padded_tensor) \ +_(aten, new_empty) \ +_(aten, new_empty_strided) \ +_(aten, new_full) \ +_(aten, new_ones) \ +_(aten, new_zeros) \ +_(aten, nextafter) \ +_(aten, nextafter_) \ +_(aten, nll_loss) \ +_(aten, nll_loss2d) \ +_(aten, nll_loss2d_backward) \ +_(aten, nll_loss2d_forward) \ +_(aten, nll_loss_backward) \ +_(aten, nll_loss_forward) \ +_(aten, nll_loss_nd) \ +_(aten, nonzero) \ +_(aten, nonzero_numpy) \ +_(aten, nonzero_static) \ +_(aten, norm) \ +_(aten, norm_except_dim) \ +_(aten, normal) \ +_(aten, normal_) \ +_(aten, normal_functional) \ +_(aten, not_equal) \ +_(aten, not_equal_) \ +_(aten, nuclear_norm) \ +_(aten, numpy_T) \ +_(aten, one_hot) \ +_(aten, ones) \ +_(aten, ones_like) \ +_(aten, orgqr) \ +_(aten, ormqr) \ +_(aten, outer) \ +_(aten, output_nr) \ +_(aten, pad) \ +_(aten, pad_sequence) \ +_(aten, pairwise_distance) \ +_(aten, pdist) \ +_(aten, permute) \ +_(aten, permute_copy) \ +_(aten, pin_memory) \ +_(aten, pinverse) \ +_(aten, pixel_shuffle) \ +_(aten, pixel_unshuffle) \ +_(aten, poisson) \ +_(aten, poisson_nll_loss) \ +_(aten, polar) \ +_(aten, polygamma) \ +_(aten, polygamma_) \ +_(aten, positive) \ +_(aten, pow) \ +_(aten, pow_) \ +_(aten, prelu) \ +_(aten, prod) \ +_(aten, promote_types) \ +_(aten, put) \ +_(aten, put_) \ +_(aten, q_per_channel_axis) \ +_(aten, q_per_channel_scales) \ +_(aten, q_per_channel_zero_points) \ +_(aten, q_scale) \ +_(aten, q_zero_point) \ +_(aten, qr) \ +_(aten, qscheme) \ +_(aten, quantile) \ +_(aten, quantize_per_channel) \ +_(aten, quantize_per_tensor) \ +_(aten, quantize_per_tensor_dynamic) \ +_(aten, quantized_batch_norm) \ +_(aten, quantized_gru_cell) \ +_(aten, quantized_lstm_cell) \ +_(aten, quantized_max_pool1d) \ +_(aten, quantized_max_pool2d) \ +_(aten, quantized_max_pool3d) \ +_(aten, quantized_rnn_relu_cell) \ +_(aten, quantized_rnn_tanh_cell) \ +_(aten, rad2deg) \ +_(aten, rad2deg_) 
\ +_(aten, rand) \ +_(aten, rand_like) \ +_(aten, randint) \ +_(aten, randint_like) \ +_(aten, randn) \ +_(aten, randn_like) \ +_(aten, random) \ +_(aten, random_) \ +_(aten, randperm) \ +_(aten, range) \ +_(aten, ravel) \ +_(aten, real) \ +_(aten, reciprocal) \ +_(aten, reciprocal_) \ +_(aten, record_stream) \ +_(aten, refine_names) \ +_(aten, reflection_pad1d) \ +_(aten, reflection_pad1d_backward) \ +_(aten, reflection_pad2d) \ +_(aten, reflection_pad2d_backward) \ +_(aten, reflection_pad3d) \ +_(aten, reflection_pad3d_backward) \ +_(aten, relu) \ +_(aten, relu6) \ +_(aten, relu6_) \ +_(aten, relu_) \ +_(aten, remainder) \ +_(aten, remainder_) \ +_(aten, rename) \ +_(aten, rename_) \ +_(aten, renorm) \ +_(aten, renorm_) \ +_(aten, repeat) \ +_(aten, repeat_interleave) \ +_(aten, replication_pad1d) \ +_(aten, replication_pad1d_backward) \ +_(aten, replication_pad2d) \ +_(aten, replication_pad2d_backward) \ +_(aten, replication_pad3d) \ +_(aten, replication_pad3d_backward) \ +_(aten, requires_grad) \ +_(aten, requires_grad_) \ +_(aten, reshape) \ +_(aten, reshape_as) \ +_(aten, resize) \ +_(aten, resize_) \ +_(aten, resize_as) \ +_(aten, resize_as_) \ +_(aten, resize_as_sparse) \ +_(aten, resize_as_sparse_) \ +_(aten, resolve_conj) \ +_(aten, resolve_neg) \ +_(aten, result_type) \ +_(aten, retain_grad) \ +_(aten, retains_grad) \ +_(aten, rnn_relu) \ +_(aten, rnn_relu_cell) \ +_(aten, rnn_tanh) \ +_(aten, rnn_tanh_cell) \ +_(aten, roll) \ +_(aten, rot90) \ +_(aten, round) \ +_(aten, round_) \ +_(aten, row_indices) \ +_(aten, row_indices_copy) \ +_(aten, row_stack) \ +_(aten, rrelu) \ +_(aten, rrelu_) \ +_(aten, rrelu_with_noise) \ +_(aten, rrelu_with_noise_) \ +_(aten, rrelu_with_noise_backward) \ +_(aten, rshift) \ +_(aten, rsqrt) \ +_(aten, rsqrt_) \ +_(aten, rsub) \ +_(aten, scalar_tensor) \ +_(aten, scaled_dot_product_attention) \ +_(aten, scatter) \ +_(aten, scatter_) \ +_(aten, scatter_add) \ +_(aten, scatter_add_) \ +_(aten, scatter_reduce) \ +_(aten, scatter_reduce_) \ +_(aten, searchsorted) \ +_(aten, segment_reduce) \ +_(aten, select) \ +_(aten, select_backward) \ +_(aten, select_copy) \ +_(aten, select_scatter) \ +_(aten, selu) \ +_(aten, selu_) \ +_(aten, set) \ +_(aten, set_) \ +_(aten, set_data) \ +_(aten, sgn) \ +_(aten, sgn_) \ +_(aten, sigmoid) \ +_(aten, sigmoid_) \ +_(aten, sigmoid_backward) \ +_(aten, sign) \ +_(aten, sign_) \ +_(aten, signbit) \ +_(aten, silu) \ +_(aten, silu_) \ +_(aten, silu_backward) \ +_(aten, sin) \ +_(aten, sin_) \ +_(aten, sinc) \ +_(aten, sinc_) \ +_(aten, sinh) \ +_(aten, sinh_) \ +_(aten, size) \ +_(aten, slice) \ +_(aten, slice_backward) \ +_(aten, slice_copy) \ +_(aten, slice_scatter) \ +_(aten, slogdet) \ +_(aten, slow_conv3d) \ +_(aten, slow_conv3d_forward) \ +_(aten, slow_conv_dilated2d) \ +_(aten, slow_conv_dilated3d) \ +_(aten, slow_conv_transpose2d) \ +_(aten, slow_conv_transpose3d) \ +_(aten, smm) \ +_(aten, smooth_l1_loss) \ +_(aten, smooth_l1_loss_backward) \ +_(aten, soft_margin_loss) \ +_(aten, soft_margin_loss_backward) \ +_(aten, softmax) \ +_(aten, softplus) \ +_(aten, softplus_backward) \ +_(aten, softshrink) \ +_(aten, softshrink_backward) \ +_(aten, sort) \ +_(aten, sparse_bsc_tensor) \ +_(aten, sparse_bsr_tensor) \ +_(aten, sparse_compressed_tensor) \ +_(aten, sparse_coo_tensor) \ +_(aten, sparse_csc_tensor) \ +_(aten, sparse_csr_tensor) \ +_(aten, sparse_dim) \ +_(aten, sparse_mask) \ +_(aten, sparse_resize) \ +_(aten, sparse_resize_) \ +_(aten, sparse_resize_and_clear) \ +_(aten, sparse_resize_and_clear_) \ +_(aten, 
sparse_sampled_addmm) \ +_(aten, special_airy_ai) \ +_(aten, special_bessel_j0) \ +_(aten, special_bessel_j1) \ +_(aten, special_bessel_y0) \ +_(aten, special_bessel_y1) \ +_(aten, special_chebyshev_polynomial_t) \ +_(aten, special_chebyshev_polynomial_u) \ +_(aten, special_chebyshev_polynomial_v) \ +_(aten, special_chebyshev_polynomial_w) \ +_(aten, special_digamma) \ +_(aten, special_entr) \ +_(aten, special_erf) \ +_(aten, special_erfc) \ +_(aten, special_erfcx) \ +_(aten, special_erfinv) \ +_(aten, special_exp2) \ +_(aten, special_expit) \ +_(aten, special_expm1) \ +_(aten, special_gammainc) \ +_(aten, special_gammaincc) \ +_(aten, special_gammaln) \ +_(aten, special_hermite_polynomial_h) \ +_(aten, special_hermite_polynomial_he) \ +_(aten, special_i0) \ +_(aten, special_i0e) \ +_(aten, special_i1) \ +_(aten, special_i1e) \ +_(aten, special_laguerre_polynomial_l) \ +_(aten, special_legendre_polynomial_p) \ +_(aten, special_log1p) \ +_(aten, special_log_ndtr) \ +_(aten, special_log_softmax) \ +_(aten, special_logit) \ +_(aten, special_logsumexp) \ +_(aten, special_modified_bessel_i0) \ +_(aten, special_modified_bessel_i1) \ +_(aten, special_modified_bessel_k0) \ +_(aten, special_modified_bessel_k1) \ +_(aten, special_multigammaln) \ +_(aten, special_ndtr) \ +_(aten, special_ndtri) \ +_(aten, special_polygamma) \ +_(aten, special_psi) \ +_(aten, special_round) \ +_(aten, special_scaled_modified_bessel_k0) \ +_(aten, special_scaled_modified_bessel_k1) \ +_(aten, special_shifted_chebyshev_polynomial_t) \ +_(aten, special_shifted_chebyshev_polynomial_u) \ +_(aten, special_shifted_chebyshev_polynomial_v) \ +_(aten, special_shifted_chebyshev_polynomial_w) \ +_(aten, special_sinc) \ +_(aten, special_softmax) \ +_(aten, special_spherical_bessel_j0) \ +_(aten, special_xlog1py) \ +_(aten, special_xlogy) \ +_(aten, special_zeta) \ +_(aten, split) \ +_(aten, split_copy) \ +_(aten, split_with_sizes) \ +_(aten, split_with_sizes_copy) \ +_(aten, sqrt) \ +_(aten, sqrt_) \ +_(aten, square) \ +_(aten, square_) \ +_(aten, squeeze) \ +_(aten, squeeze_) \ +_(aten, squeeze_copy) \ +_(aten, sspaddmm) \ +_(aten, stack) \ +_(aten, std) \ +_(aten, std_mean) \ +_(aten, stft) \ +_(aten, stride) \ +_(aten, sub) \ +_(aten, sub_) \ +_(aten, subtract) \ +_(aten, subtract_) \ +_(aten, sum) \ +_(aten, sum_to_size) \ +_(aten, svd) \ +_(aten, swapaxes) \ +_(aten, swapaxes_) \ +_(aten, swapdims) \ +_(aten, swapdims_) \ +_(aten, sym_constrain_range) \ +_(aten, sym_constrain_range_for_size) \ +_(aten, sym_numel) \ +_(aten, sym_size) \ +_(aten, sym_storage_offset) \ +_(aten, sym_stride) \ +_(aten, t) \ +_(aten, t_) \ +_(aten, t_copy) \ +_(aten, take) \ +_(aten, take_along_dim) \ +_(aten, tan) \ +_(aten, tan_) \ +_(aten, tanh) \ +_(aten, tanh_) \ +_(aten, tanh_backward) \ +_(aten, tensor_split) \ +_(aten, tensordot) \ +_(aten, thnn_conv2d) \ +_(aten, threshold) \ +_(aten, threshold_) \ +_(aten, threshold_backward) \ +_(aten, tile) \ +_(aten, to) \ +_(aten, to_dense) \ +_(aten, to_dense_backward) \ +_(aten, to_mkldnn) \ +_(aten, to_mkldnn_backward) \ +_(aten, to_padded_tensor) \ +_(aten, to_sparse) \ +_(aten, to_sparse_bsc) \ +_(aten, to_sparse_bsr) \ +_(aten, to_sparse_csc) \ +_(aten, to_sparse_csr) \ +_(aten, topk) \ +_(aten, trace) \ +_(aten, trace_backward) \ +_(aten, transpose) \ +_(aten, transpose_) \ +_(aten, transpose_copy) \ +_(aten, trapezoid) \ +_(aten, trapz) \ +_(aten, triangular_solve) \ +_(aten, tril) \ +_(aten, tril_) \ +_(aten, tril_indices) \ +_(aten, triplet_margin_loss) \ +_(aten, triu) \ +_(aten, triu_) \ 
+_(aten, triu_indices) \ +_(aten, true_divide) \ +_(aten, true_divide_) \ +_(aten, trunc) \ +_(aten, trunc_) \ +_(aten, type_as) \ +_(aten, unbind) \ +_(aten, unbind_copy) \ +_(aten, unflatten) \ +_(aten, unflatten_dense_tensors) \ +_(aten, unfold) \ +_(aten, unfold_backward) \ +_(aten, unfold_copy) \ +_(aten, uniform) \ +_(aten, uniform_) \ +_(aten, unique_consecutive) \ +_(aten, unique_dim) \ +_(aten, unique_dim_consecutive) \ +_(aten, unsafe_chunk) \ +_(aten, unsafe_split) \ +_(aten, unsafe_split_with_sizes) \ +_(aten, unsqueeze) \ +_(aten, unsqueeze_) \ +_(aten, unsqueeze_copy) \ +_(aten, upsample_bicubic2d) \ +_(aten, upsample_bicubic2d_backward) \ +_(aten, upsample_bilinear2d) \ +_(aten, upsample_bilinear2d_backward) \ +_(aten, upsample_linear1d) \ +_(aten, upsample_linear1d_backward) \ +_(aten, upsample_nearest1d) \ +_(aten, upsample_nearest1d_backward) \ +_(aten, upsample_nearest2d) \ +_(aten, upsample_nearest2d_backward) \ +_(aten, upsample_nearest3d) \ +_(aten, upsample_nearest3d_backward) \ +_(aten, upsample_trilinear3d) \ +_(aten, upsample_trilinear3d_backward) \ +_(aten, value_selecting_reduction_backward) \ +_(aten, values) \ +_(aten, values_copy) \ +_(aten, vander) \ +_(aten, var) \ +_(aten, var_mean) \ +_(aten, vdot) \ +_(aten, view) \ +_(aten, view_as) \ +_(aten, view_as_complex) \ +_(aten, view_as_complex_copy) \ +_(aten, view_as_real) \ +_(aten, view_as_real_copy) \ +_(aten, view_copy) \ +_(aten, vsplit) \ +_(aten, vstack) \ +_(aten, where) \ +_(aten, xlogy) \ +_(aten, xlogy_) \ +_(aten, zero) \ +_(aten, zero_) \ +_(aten, zeros) \ +_(aten, zeros_like) + +#define FORALL_ATTR_BASE_SYMBOLS(_) \ +_(attr, A) \ +_(attr, B) \ +_(attr, C) \ +_(attr, H) \ +_(attr, HxW) \ +_(attr, K) \ +_(attr, L) \ +_(attr, LD) \ +_(attr, LU) \ +_(attr, LU_data) \ +_(attr, LU_pivots) \ +_(attr, M) \ +_(attr, N) \ +_(attr, P) \ +_(attr, Q) \ +_(attr, R) \ +_(attr, S) \ +_(attr, U) \ +_(attr, UPLO) \ +_(attr, V) \ +_(attr, Vh) \ +_(attr, W) \ +_(attr, X) \ +_(attr, a) \ +_(attr, abs) \ +_(attr, accumulate) \ +_(attr, accumulate_matches) \ +_(attr, activation) \ +_(attr, addends) \ +_(attr, adjoint) \ +_(attr, align_corners) \ +_(attr, allow_tf32) \ +_(attr, alpha) \ +_(attr, amsgrad) \ +_(attr, anchor) \ +_(attr, angle) \ +_(attr, api_name) \ +_(attr, append) \ +_(attr, approximate) \ +_(attr, arg1) \ +_(attr, arg2) \ +_(attr, arg3) \ +_(attr, arg_out) \ +_(attr, assert_msg) \ +_(attr, assume_unique) \ +_(attr, atol) \ +_(attr, attn_bias) \ +_(attr, attn_mask) \ +_(attr, average_attn_weights) \ +_(attr, averaging_const) \ +_(attr, aweights) \ +_(attr, axis) \ +_(attr, axis0) \ +_(attr, axis1) \ +_(attr, b) \ +_(attr, b_hh) \ +_(attr, b_ih) \ +_(attr, bag_size) \ +_(attr, base) \ +_(attr, batch1) \ +_(attr, batch2) \ +_(attr, batch_dim) \ +_(attr, batch_first) \ +_(attr, batch_size) \ +_(attr, batch_sizes) \ +_(attr, benchmark) \ +_(attr, beta) \ +_(attr, beta1) \ +_(attr, beta2) \ +_(attr, bias) \ +_(attr, bias_defined) \ +_(attr, bias_g) \ +_(attr, bias_requires_grad) \ +_(attr, bias_sizes) \ +_(attr, bidirectional) \ +_(attr, bin_edges) \ +_(attr, bins) \ +_(attr, bit_width) \ +_(attr, blank) \ +_(attr, blocksize) \ +_(attr, boundaries) \ +_(attr, buffer) \ +_(attr, causal_diagonal) \ +_(attr, ccol_indices) \ +_(attr, cdim) \ +_(attr, cdist) \ +_(attr, ceil_mode) \ +_(attr, cell_state_fwd) \ +_(attr, center) \ +_(attr, ch_axis) \ +_(attr, check_errors) \ +_(attr, chunks) \ +_(attr, coalesced) \ +_(attr, coefficients) \ +_(attr, col) \ +_(attr, col_indices) \ +_(attr, col_offsets) \ +_(attr, 
col_offsets_hh) \ +_(attr, col_offsets_ih) \ +_(attr, compressed_A) \ +_(attr, compressed_idx) \ +_(attr, compressed_indices) \ +_(attr, compressed_indices_dtype) \ +_(attr, compute_log_sumexp) \ +_(attr, compute_mode) \ +_(attr, compute_uv) \ +_(attr, compute_v) \ +_(attr, condition) \ +_(attr, copy) \ +_(attr, correction) \ +_(attr, count) \ +_(attr, count_include_pad) \ +_(attr, counts) \ +_(attr, cpu_dtype) \ +_(attr, cpu_enabled) \ +_(attr, cpu_nested_shape_example) \ +_(attr, create_graph) \ +_(attr, crow_indices) \ +_(attr, cu_seqlens_k) \ +_(attr, cu_seqlens_q) \ +_(attr, cuda_dtype) \ +_(attr, cuda_enabled) \ +_(attr, cudnn_enable) \ +_(attr, cudnn_enabled) \ +_(attr, cum_seq_k) \ +_(attr, cum_seq_q) \ +_(attr, custom_mask_type) \ +_(attr, cx) \ +_(attr, cx_) \ +_(attr, cx_tmp) \ +_(attr, cy) \ +_(attr, cy_) \ +_(attr, d) \ +_(attr, data) \ +_(attr, decimals) \ +_(attr, delta) \ +_(attr, dense) \ +_(attr, dense_B) \ +_(attr, dense_dim) \ +_(attr, density) \ +_(attr, dep_token) \ +_(attr, descending) \ +_(attr, destination) \ +_(attr, deterministic) \ +_(attr, device) \ +_(attr, device_index) \ +_(attr, dgrad_glu) \ +_(attr, diagonal) \ +_(attr, diagonals) \ +_(attr, dilation) \ +_(attr, dim) \ +_(attr, dim0) \ +_(attr, dim1) \ +_(attr, dim2) \ +_(attr, dimension) \ +_(attr, dims) \ +_(attr, dims_other) \ +_(attr, dims_self) \ +_(attr, divisor_override) \ +_(attr, downscale_factor) \ +_(attr, driver) \ +_(attr, dropout) \ +_(attr, dropout_mask) \ +_(attr, dropout_p) \ +_(attr, dropout_seed) \ +_(attr, dropout_state) \ +_(attr, dst) \ +_(attr, dtype) \ +_(attr, dual) \ +_(attr, dummy) \ +_(attr, dx) \ +_(attr, edge_order) \ +_(attr, eigenvalues) \ +_(attr, eigenvectors) \ +_(attr, eigvals) \ +_(attr, eigvecs) \ +_(attr, element) \ +_(attr, elements) \ +_(attr, ellipsis_idx) \ +_(attr, embed_dim) \ +_(attr, end) \ +_(attr, end_dim) \ +_(attr, eps) \ +_(attr, epsilon) \ +_(attr, equal_nan) \ +_(attr, equation) \ +_(attr, exp_avg_sqs) \ +_(attr, exp_avgs) \ +_(attr, expand1) \ +_(attr, expand2) \ +_(attr, expand3) \ +_(attr, exponent) \ +_(attr, exponential_average_factor) \ +_(attr, fake_quant_enabled) \ +_(attr, fake_quant_on) \ +_(attr, ffn_bias_1) \ +_(attr, ffn_bias_2) \ +_(attr, ffn_weight_1) \ +_(attr, ffn_weight_2) \ +_(attr, filename) \ +_(attr, fill_value) \ +_(attr, flat) \ +_(attr, forward) \ +_(attr, found_inf) \ +_(attr, from) \ +_(attr, full) \ +_(attr, full_matrices) \ +_(attr, fuse_transform_0213) \ +_(attr, fweights) \ +_(attr, g) \ +_(attr, gO) \ +_(attr, generator) \ +_(attr, ggI) \ +_(attr, ggW) \ +_(attr, ggb) \ +_(attr, glu) \ +_(attr, grad) \ +_(attr, grad_bias) \ +_(attr, grad_cy) \ +_(attr, grad_factor) \ +_(attr, grad_glu) \ +_(attr, grad_hy) \ +_(attr, grad_in) \ +_(attr, grad_input) \ +_(attr, grad_input_mask) \ +_(attr, grad_out) \ +_(attr, grad_out_) \ +_(attr, grad_output) \ +_(attr, grad_scale) \ +_(attr, grad_w) \ +_(attr, grad_weight) \ +_(attr, grad_x) \ +_(attr, grad_y) \ +_(attr, gradient) \ +_(attr, grads) \ +_(attr, grid) \ +_(attr, group) \ +_(attr, groups) \ +_(attr, growth_interval) \ +_(attr, growth_tracker) \ +_(attr, half_to_float) \ +_(attr, has_bias) \ +_(attr, has_biases) \ +_(attr, hermitian) \ +_(attr, hidden_bias) \ +_(attr, hidden_gates) \ +_(attr, hidden_size) \ +_(attr, high) \ +_(attr, hist) \ +_(attr, hop_length) \ +_(attr, hx) \ +_(attr, hx_) \ +_(attr, hy_) \ +_(attr, i1) \ +_(attr, i2) \ +_(attr, i3) \ +_(attr, ignore_index) \ +_(attr, imag) \ +_(attr, impl_index) \ +_(attr, implicit) \ +_(attr, include_last_offset) \ +_(attr, 
include_self) \ +_(attr, increasing) \ +_(attr, ind) \ +_(attr, index) \ +_(attr, indexing) \ +_(attr, indices) \ +_(attr, info) \ +_(attr, initial) \ +_(attr, innerKTiles) \ +_(attr, input) \ +_(attr, input1) \ +_(attr, input2) \ +_(attr, input3) \ +_(attr, input_bias) \ +_(attr, input_dtype) \ +_(attr, input_g) \ +_(attr, input_gates) \ +_(attr, input_lengths) \ +_(attr, input_scale) \ +_(attr, input_size) \ +_(attr, input_sizes) \ +_(attr, inputs) \ +_(attr, interpolation) \ +_(attr, interpolation_mode) \ +_(attr, inv_scale) \ +_(attr, inverse) \ +_(attr, invert) \ +_(attr, invstd) \ +_(attr, is_causal) \ +_(attr, is_coalesced) \ +_(attr, is_crow) \ +_(attr, is_matrix) \ +_(attr, is_result) \ +_(attr, is_target) \ +_(attr, k) \ +_(attr, keepdim) \ +_(attr, kernel_size) \ +_(attr, key) \ +_(attr, label_smoothing) \ +_(attr, lambd) \ +_(attr, largest) \ +_(attr, last_dim_size) \ +_(attr, layersOutputs) \ +_(attr, layout) \ +_(attr, left) \ +_(attr, length) \ +_(attr, lengths) \ +_(attr, level) \ +_(attr, like) \ +_(attr, list) \ +_(attr, log_alpha) \ +_(attr, log_input) \ +_(attr, log_probs) \ +_(attr, log_target) \ +_(attr, logabsdet) \ +_(attr, logsumexp) \ +_(attr, low) \ +_(attr, lower) \ +_(attr, lr) \ +_(attr, ltm) \ +_(attr, m) \ +_(attr, mantissa) \ +_(attr, margin) \ +_(attr, mask) \ +_(attr, mask_check) \ +_(attr, mask_type) \ +_(attr, masked_grad) \ +_(attr, mat) \ +_(attr, mat1) \ +_(attr, mat2) \ +_(attr, matrices) \ +_(attr, max) \ +_(attr, max_exp_avg_sqs) \ +_(attr, max_k) \ +_(attr, max_norm) \ +_(attr, max_q) \ +_(attr, max_seqlen_k) \ +_(attr, max_seqlen_q) \ +_(attr, max_size) \ +_(attr, max_val) \ +_(attr, max_values) \ +_(attr, maximize) \ +_(attr, maximum_indices) \ +_(attr, maxnorm) \ +_(attr, mean) \ +_(attr, median) \ +_(attr, memory_format) \ +_(attr, meta) \ +_(attr, min) \ +_(attr, min_indices) \ +_(attr, min_val) \ +_(attr, minlength) \ +_(attr, mode) \ +_(attr, momentum) \ +_(attr, n) \ +_(attr, n_bins) \ +_(attr, n_fft) \ +_(attr, names) \ +_(attr, nan) \ +_(attr, need_weights) \ +_(attr, neg_log_likelihood) \ +_(attr, negative) \ +_(attr, negative_slope) \ +_(attr, neginf) \ +_(attr, nested_size) \ +_(attr, nested_strides) \ +_(attr, new_data) \ +_(attr, nnz) \ +_(attr, noise) \ +_(attr, non_blocking) \ +_(attr, norm) \ +_(attr, norm_bias_1) \ +_(attr, norm_bias_2) \ +_(attr, norm_first) \ +_(attr, norm_type) \ +_(attr, norm_weight_1) \ +_(attr, norm_weight_2) \ +_(attr, normalization) \ +_(attr, normalized) \ +_(attr, normalized_shape) \ +_(attr, nt_example) \ +_(attr, num_classes) \ +_(attr, num_generated) \ +_(attr, num_groups) \ +_(attr, num_head) \ +_(attr, num_heads) \ +_(attr, num_layers) \ +_(attr, num_samples) \ +_(attr, num_splits_key) \ +_(attr, num_weights) \ +_(attr, numel) \ +_(attr, observer_on) \ +_(attr, offset) \ +_(attr, offset2bag) \ +_(attr, offsets) \ +_(attr, onesided) \ +_(attr, ord) \ +_(attr, order) \ +_(attr, other) \ +_(attr, out) \ +_(attr, out0) \ +_(attr, out1) \ +_(attr, out2) \ +_(attr, out3) \ +_(attr, out4) \ +_(attr, out5) \ +_(attr, out6) \ +_(attr, out_amax) \ +_(attr, out_dim) \ +_(attr, out_dtype) \ +_(attr, out_int32) \ +_(attr, outdim) \ +_(attr, output) \ +_(attr, output_mask) \ +_(attr, output_padding) \ +_(attr, output_scale) \ +_(attr, output_size) \ +_(attr, output_zero_point) \ +_(attr, p) \ +_(attr, packed) \ +_(attr, packed_hh) \ +_(attr, packed_ih) \ +_(attr, packed_weight) \ +_(attr, pad) \ +_(attr, pad_mode) \ +_(attr, padded) \ +_(attr, padding) \ +_(attr, padding_idx) \ +_(attr, padding_mode) \ 
+_(attr, padding_value) \ +_(attr, params) \ +_(attr, path) \ +_(attr, pdist) \ +_(attr, per_row_fake_quant) \ +_(attr, per_sample_weights) \ +_(attr, periodic) \ +_(attr, philox_offset) \ +_(attr, philox_seed) \ +_(attr, physical_layout) \ +_(attr, pin_memory) \ +_(attr, pivot) \ +_(attr, pivots) \ +_(attr, plain_idx) \ +_(attr, plain_indices) \ +_(attr, pos_weight) \ +_(attr, posinf) \ +_(attr, positive) \ +_(attr, pow) \ +_(attr, prepend) \ +_(attr, primal) \ +_(attr, prob) \ +_(attr, proj_bias) \ +_(attr, proj_size) \ +_(attr, proj_weight) \ +_(attr, q) \ +_(attr, qGroupSize) \ +_(attr, qScaleAndZeros) \ +_(attr, qkv) \ +_(attr, qkv_bias) \ +_(attr, qkv_weight) \ +_(attr, qtensor) \ +_(attr, quant_max) \ +_(attr, quant_min) \ +_(attr, quasi) \ +_(attr, query) \ +_(attr, r) \ +_(attr, random_samples) \ +_(attr, range) \ +_(attr, rank) \ +_(attr, ratio) \ +_(attr, rcond) \ +_(attr, real) \ +_(attr, reduce) \ +_(attr, reduce_range) \ +_(attr, reduction) \ +_(attr, repeats) \ +_(attr, replacement) \ +_(attr, requires_grad) \ +_(attr, reserve) \ +_(attr, reserveSpace) \ +_(attr, reservedSpace) \ +_(attr, residuals) \ +_(attr, result) \ +_(attr, retain_graph) \ +_(attr, return_complex) \ +_(attr, return_counts) \ +_(attr, return_debug_mask) \ +_(attr, return_inverse) \ +_(attr, reverse) \ +_(attr, right) \ +_(attr, rounding_mode) \ +_(attr, row) \ +_(attr, row_indices) \ +_(attr, rstd) \ +_(attr, rtol) \ +_(attr, running_max) \ +_(attr, running_mean) \ +_(attr, running_min) \ +_(attr, running_var) \ +_(attr, s) \ +_(attr, save_invstd) \ +_(attr, save_mean) \ +_(attr, save_var) \ +_(attr, save_var_transform) \ +_(attr, saved_g) \ +_(attr, saved_norms) \ +_(attr, saved_v) \ +_(attr, scalar) \ +_(attr, scalar1) \ +_(attr, scalar2) \ +_(attr, scalars) \ +_(attr, scale) \ +_(attr, scale_a) \ +_(attr, scale_b) \ +_(attr, scale_backoff_factor) \ +_(attr, scale_factors) \ +_(attr, scale_grad_by_freq) \ +_(attr, scale_growth_factor) \ +_(attr, scale_hh) \ +_(attr, scale_ih) \ +_(attr, scale_result) \ +_(attr, scales) \ +_(attr, scales_d) \ +_(attr, scales_h) \ +_(attr, scales_w) \ +_(attr, sections) \ +_(attr, seed) \ +_(attr, self) \ +_(attr, self_is_result) \ +_(attr, self_num_batch_dims) \ +_(attr, self_or_result) \ +_(attr, self_sizes) \ +_(attr, seqlen_k) \ +_(attr, sequences) \ +_(attr, shape) \ +_(attr, shared) \ +_(attr, shifts) \ +_(attr, side) \ +_(attr, sigma) \ +_(attr, sign) \ +_(attr, singular_values) \ +_(attr, size) \ +_(attr, sizes) \ +_(attr, sobolstate) \ +_(attr, solution) \ +_(attr, some) \ +_(attr, sorted) \ +_(attr, sorted_sequence) \ +_(attr, sorter) \ +_(attr, source) \ +_(attr, spacing) \ +_(attr, sparse) \ +_(attr, sparse_dim) \ +_(attr, sparse_grad) \ +_(attr, split_size) \ +_(attr, split_sizes) \ +_(attr, src) \ +_(attr, stable) \ +_(attr, start) \ +_(attr, start_dim) \ +_(attr, state_steps) \ +_(attr, std) \ +_(attr, step) \ +_(attr, steps) \ +_(attr, storage_offset) \ +_(attr, stride) \ +_(attr, sum_dy) \ +_(attr, sum_dy_xmu) \ +_(attr, sumdim) \ +_(attr, swap) \ +_(attr, symmetric_quant) \ +_(attr, t) \ +_(attr, tangent) \ +_(attr, target) \ +_(attr, target_lengths) \ +_(attr, targets) \ +_(attr, tau) \ +_(attr, tensor) \ +_(attr, tensor1) \ +_(attr, tensor2) \ +_(attr, tensor_indices_or_sections) \ +_(attr, tensors) \ +_(attr, tensors1) \ +_(attr, test_element) \ +_(attr, test_elements) \ +_(attr, the_template) \ +_(attr, theta) \ +_(attr, threshold) \ +_(attr, to) \ +_(attr, tol) \ +_(attr, total) \ +_(attr, total_length) \ +_(attr, total_weight) \ +_(attr, train) \ 
+_(attr, training) \ +_(attr, transpose) \ +_(attr, transpose_result) \ +_(attr, transposed) \ +_(attr, type1) \ +_(attr, type2) \ +_(attr, unbiased) \ +_(attr, unitriangular) \ +_(attr, unpack_data) \ +_(attr, unpack_pivots) \ +_(attr, unroll_dim) \ +_(attr, unsafe) \ +_(attr, upper) \ +_(attr, upscale_factor) \ +_(attr, use_fast_accum) \ +_(attr, use_gelu) \ +_(attr, use_input_stats) \ +_(attr, v) \ +_(attr, value) \ +_(attr, values) \ +_(attr, var) \ +_(attr, vec) \ +_(attr, vec1) \ +_(attr, vec2) \ +_(attr, w_hh) \ +_(attr, w_ih) \ +_(attr, weight) \ +_(attr, weight0) \ +_(attr, weight1) \ +_(attr, weight2) \ +_(attr, weight3) \ +_(attr, weight4) \ +_(attr, weight_arr) \ +_(attr, weight_buf) \ +_(attr, weight_decay) \ +_(attr, weight_g) \ +_(attr, weight_scale) \ +_(attr, weight_stride0) \ +_(attr, weight_zero_point) \ +_(attr, weights) \ +_(attr, win_length) \ +_(attr, window) \ +_(attr, window_length) \ +_(attr, with_replacement) \ +_(attr, workspace) \ +_(attr, wrap) \ +_(attr, x) \ +_(attr, x1) \ +_(attr, x2) \ +_(attr, y) \ +_(attr, z) \ +_(attr, z_state) \ +_(attr, zero_infinity) \ +_(attr, zero_point) \ +_(attr, zero_point_hh) \ +_(attr, zero_point_ih) \ +_(attr, zero_points) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/blob.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/blob.h new file mode 100644 index 0000000000000000000000000000000000000000..3e8da3e4e7a67e4920b8dd89350a157eddc7edbf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/blob.h @@ -0,0 +1,208 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace caffe2 { + +class Tensor; + +/** + * @brief Blob is a general container that hosts a typed pointer. + * + * A Blob hosts a pointer as well as its type, and takes charge of deleting it + * properly when the blob is deallocated or re-allocated with a new type. A blob + * could contain anything, although the most common case is to contain a Tensor. + */ +class TORCH_API Blob final : public c10::intrusive_ptr_target { + public: + /** + * Initializes an empty Blob. + */ + Blob() noexcept : meta_(), pointer_(nullptr), has_ownership_(false) {} + ~Blob() override { + Reset(); + } + + Blob(Blob&& other) noexcept : Blob() { + swap(other); + } + + Blob& operator=(Blob&& other) noexcept { + Blob(std::move(other)).swap(*this); + return *this; + } + + /** + * Checks if the content stored in the blob is of type T. + */ + template + bool IsType() const noexcept { + return meta_.Match(); + } + + /** + * Returns the meta info of the blob. + */ + const TypeMeta meta() const noexcept { + return meta_; + } + + /** + * Returns a printable typename of the blob. + */ + c10::string_view TypeName() const noexcept { + return meta_.name(); + } + + /** + * @brief Gets the const reference of the stored object. The code checks if + * the stored object is of the desired type. + */ + // TODO(jerryzh): add a Get(c10::DeviceType) function? + template + const T& Get() const { + TORCH_INTERNAL_ASSERT( + IsType(), + "wrong type for the Blob instance. 
Blob contains ", + meta_.name(), + " while caller expects ", + TypeMeta::TypeName()); + // TODO: after we add Get(c10::DeviceType) + // and changed all the callsites, we can add + // a static assert here to enforce T != Tensor + return *static_cast(pointer_); + } + + const void* GetRaw() const noexcept { + return pointer_; + } + void* GetRaw() noexcept { + return pointer_; + } + + /** + * @brief Gets a mutable pointer to the stored object. + * + * If the current object is not of the right type, a new object is created + * and the old object is freed. Note that type T should have a default + * constructor. Otherwise, create the object yourself first, and use + * Reset(). + */ + template + T* GetMutable() { + static_assert( + std::is_default_constructible::value, + "GetMutable can't be called with non-default-constructible types. " + "Try using specialized methods"); + if (IsType()) { + return static_cast(pointer_); + } else { + // TODO Re-enable logging + // VLOG(1) << "Create new mutable object " << TypeMeta::TypeName(); + return Reset(new T()); + } + } + + template + T* GetMutableOrNull() { + if (IsType()) { + return static_cast(pointer_); + } else { + return nullptr; + } + } + + /** + * Sets the underlying object to the allocated one. The Blob then takes over + * the ownership of the passed in pointer. If there is already an object in + * the Blob, the old object is freed. + * + * This is used when the underlying class T does not have a default ctor, or + * complex initializations needs to be done outside the blob. + */ + template + T* Reset(T* allocated) { + free_(); + meta_ = TypeMeta::Make(); + pointer_ = static_cast(allocated); + has_ownership_ = true; + return allocated; + } + + /** + * Sets the underlying object to the allocated one, but does not take over + * the ownership of the passed in pointer. If there is already an object in + * the Blob, the old object is freed. + * + * Unlike Reset, this does not take over the ownership of the pointer and the + * caller is responsible for making sure that the lifetime of the allocated + * blob outlasts the lifetime of any access to this blob, until another Reset + * call is made or the blob is destructed. + */ + template + typename std::remove_const::type* ShareExternal( + typename std::remove_const::type* allocated) { + return static_cast(ShareExternal( + static_cast(allocated), + TypeMeta::Make::type>())); + } + + void* ShareExternal(void* allocated, const TypeMeta meta) { + free_(); + meta_ = meta; + pointer_ = allocated; + has_ownership_ = false; + return allocated; + } + + /** + * Resets the Blob to an empty one. + */ + void Reset() { + free_(); + pointer_ = nullptr; + meta_ = TypeMeta(); + has_ownership_ = false; + } + + /** + * @brief Swaps the underlying storage of two blobs. 
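+   *
+   * A minimal illustrative sketch (hypothetical caller code, not part of this
+   * header); it assumes a default-constructible payload type:
+   *
+   *   caffe2::Blob a, b;
+   *   a.GetMutable<std::vector<int>>(); // "a" owns a default-constructed vector
+   *   a.swap(b);                        // "b" now owns it; "a" is empty again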
+ */ + void swap(Blob& rhs) { + using std::swap; + swap(meta_, rhs.meta_); + swap(pointer_, rhs.pointer_); + swap(has_ownership_, rhs.has_ownership_); + } + + private: + void free_() { + if (has_ownership_ && pointer_ != nullptr) { + (*meta_.deleteFn())(pointer_); + } + } + + TypeMeta meta_; + void* pointer_; + bool has_ownership_; + + C10_DISABLE_COPY_AND_ASSIGN(Blob); +}; + +inline void swap(Blob& lhs, Blob& rhs) { + lhs.swap(rhs); +} + +inline std::ostream& operator<<(std::ostream& out, const Blob& v) { + return out << "Blob[" << v.TypeName() << "]"; +} + +} // namespace caffe2 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h new file mode 100644 index 0000000000000000000000000000000000000000..887e57b157ed5324137c381d153f33a822c19629 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/builtin_function.h @@ -0,0 +1,88 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct BuiltinOpFunction : public Function { + BuiltinOpFunction( + c10::QualifiedName qualname, + c10::FunctionSchema schema, + std::function callable, + std::string doc_string = "") + : name_(std::move(qualname)), + callable_(std::move(callable)), + schema_(std::move(schema)), + doc_string_(std::move(doc_string)) { + TORCH_INTERNAL_ASSERT(schema_.returns().size() == 1); + } + + c10::string_view doc_string() const override { + return doc_string_; + } + + void run(Stack& stack) override { + callable_(stack); + } + + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher /* not used */) override { + run(stack); + auto res = c10::make_intrusive(stack.front().type()); + res->markCompleted(std::move(stack.front())); + return res; + } + + const c10::QualifiedName& qualname() const override { + return name_; + } + + // if this isn't yet defined, run its method_creator function + void ensure_defined() override { + // nop + } + + const c10::FunctionSchema& getSchema() const override { + return schema_; + } + + size_t num_inputs() const override { + return schema_.arguments().size(); + } + + Function& setSchema(c10::FunctionSchema schema) override { + schema_ = std::move(schema); + return *this; + } + + bool call(Stack& stack, c10::optional, c10::function_ref) override { + run(stack); + return false; + } + + bool call(Stack& stack, c10::function_ref) override { + run(stack); + return false; + } + + ~BuiltinOpFunction() override = default; + + private: + c10::QualifiedName name_; + + std::function callable_; + + c10::FunctionSchema schema_; + + std::string doc_string_; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h new file mode 100644 index 0000000000000000000000000000000000000000..ff9bda981b2906e55449e93a582266888c2eb258 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/custom_class.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace c10 { + +struct ClassType; +using ClassTypePtr = std::shared_ptr; + +TORCH_API c10::ClassTypePtr getCustomClassTypeImpl(const std::type_index &tindex); + +template +const c10::ClassTypePtr& getCustomClassType() { + // Classes are never unregistered from getCustomClassTypeMap and the + // hash lookup can be a hot 
path, so just cache. + // For the same reason, it's fine If this ends up getting duplicated across + // DSO boundaries for whatever reason. + static c10::ClassTypePtr cache = getCustomClassTypeImpl( + std::type_index(typeid(T))); + return cache; +} + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h new file mode 100644 index 0000000000000000000000000000000000000000..45f3c5c2e8c93cdaccdad6f9db9ecf2c0cf30d06 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/enum_tag.h @@ -0,0 +1,19 @@ +#pragma once + +// @generated by torchgen/gen.py from enum_tag.h + +namespace at { + // Enum of valid tags obtained from the entries in tags.yaml + enum class Tag { + core, + data_dependent_output, + dynamic_output_shape, + generated, + inplace_view, + nondeterministic_bitwise, + nondeterministic_seeded, + pointwise, + pt2_compliant_tag, + view_copy + }; +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h new file mode 100644 index 0000000000000000000000000000000000000000..bd60c1e77224f743cbb50df61b099b30ccd64a2f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/enum_type.h @@ -0,0 +1,101 @@ +#pragma once + +#include + +#include + +namespace c10 { + +struct EnumType; +using EnumTypePtr = std::shared_ptr; +using EnumNameValue = std::pair; +struct TORCH_API EnumType : public NamedType { + friend struct Type; + static const TypeKind Kind = TypeKind::EnumType; + + static EnumTypePtr create( + const c10::QualifiedName& qualified_class_name, + TypePtr value, + std::vector enum_names_values, + std::weak_ptr<::torch::jit::CompilationUnit> cu) { + switch (value->kind()) { + case TypeKind::IntType: + case TypeKind::FloatType: + case TypeKind::StringType: + return EnumTypePtr(new EnumType( + qualified_class_name, + std::move(value), + std::move(enum_names_values), + std::move(cu))); + default: + AT_ERROR( + "Cannot create Enum with value type '", + value->str(), + "', only int, float and string are supported"); + } + } + + std::string str() const override { + return "Enum<" + annotation_str() + ">"; + } + + std::string repr_str() const override { + return str(); + } + + const TypePtr& getValueType() const { + return value_type_; + } + + bool equals(const Type& rhs) const override { + if (auto* enum_rhs = rhs.castRaw()) { + return name().value() == enum_rhs->name().value() && + *getValueType() == *(enum_rhs->getValueType()) && + this->compilation_unit() == enum_rhs->compilation_unit(); + } + return false; + } + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + std::shared_ptr compilation_unit() + const { + auto cu = cu_.lock(); + return cu; + } + + const QualifiedName& qualifiedClassName() const { + return name().value(); + } + + at::ArrayRef containedTypes() const override { + return value_type_; + } + + const at::ArrayRef enumNamesValues() const { + return enum_names_values_; + } + + private: + EnumType( + c10::QualifiedName qualified_class_name, + TypePtr value_type, + std::vector enum_names_values, + std::weak_ptr cu) + : NamedType(TypeKind::EnumType, std::move(qualified_class_name)), + value_type_(std::move(value_type)), + enum_names_values_(std::move(enum_names_values)), + cu_(std::move(cu)) {} + + std::string annotation_str_impl( + C10_UNUSED TypePrinter printer = nullptr) const 
override { + const auto& n = name().value(); + return n.qualifiedName(); + } + + TypePtr value_type_; + std::vector enum_names_values_; + std::weak_ptr<::torch::jit::CompilationUnit> cu_; +}; + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function.h new file mode 100644 index 0000000000000000000000000000000000000000..b4f82712a57d082ca0c2ef4de2876e27bef3cc4d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function.h @@ -0,0 +1,111 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { +struct FunctionSchema; +}; + +namespace at { +TORCH_API void launch(std::function func); +} + +namespace torch { +namespace jit { + +struct Graph; +struct Code; + +namespace mobile { +struct Code; +} + +using Stack = std::vector; +using Kwargs = std::unordered_map; +struct RecursiveMethodCallError : public std::exception {}; +using TaskLauncher = std::function)>; + +TORCH_API void preoptimizeGraph(std::shared_ptr& graph, bool disable_autocast=false); + +// A Function is a pure Graph with no implicit `self` object bound. +// It contains schema information and the executor that manages the +// execution of the function. Method is a wrapper around an +// underlying Function that also provides a `self` object. +struct TORCH_API Function { + Function() = default; + Function(const Function&) = default; + Function& operator=(const Function&) = default; + Function(Function&&) noexcept = default; + Function& operator=(Function&&) noexcept = default; + virtual c10::string_view doc_string() const { + static constexpr c10::string_view no_doc_string = ""; + return no_doc_string; + } + + virtual bool isGraphFunction() const { + return false; + } + + virtual void run(Stack& stack) = 0; + + virtual c10::intrusive_ptr runAsync( + Stack& /*stack*/, + C10_UNUSED TaskLauncher taskLauncher = at::launch) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false); + return {}; + } + + at::IValue operator()( + Stack stack, + const Kwargs& kwargs = Kwargs()) { + getSchema().checkAndNormalizeInputs(stack, kwargs); + run(stack); + return stack.front(); + } + + virtual const c10::QualifiedName& qualname() const = 0; + + const std::string& name() const { + return qualname().name(); + } + + // if this isn't yet defined, run its method_creator function + virtual void ensure_defined() = 0; + + virtual const c10::FunctionSchema& getSchema() const = 0; + + virtual size_t num_inputs() const = 0; + + virtual Function& setSchema(c10::FunctionSchema schema) = 0; + + // call() defines how different interpreter implementations interacts with + // Function objects. Basically interpreters need to provide a callback to + // communicate to Functions what to do if provided a Code object. + // Alternatively we could design the signature to return an optional Code + // object, but that requires special handling the null case in interpreter + // and the fallback behavior is not well defined by interpreter but rather + // Function themselves, so a callback approach is more reasonable than + // returning values. + // If call() returns true, then callback completes successfully, otherwise + // call() returns false. + + // Overload for server interpreter, a bailout size is needed for graph executor. + virtual bool call(Stack&, c10::optional, c10::function_ref) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false); + return false; + } + + // Overload for mobile interpreter. 
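+  // As with the server overload above, the default implementation is a stub:
+  // it debug-asserts and returns false, i.e. the callback was never invoked
+  // with a Code object.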
+ virtual bool call(Stack&, c10::function_ref) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false); + return false; + } + + virtual ~Function() = default; +}; +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..a6959c661af155000fa990e3d4a70930f8eceb04 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function_schema_inl.h @@ -0,0 +1,483 @@ +#pragma once +#include +#include + +// note: windows build doesn't find symbols in operator files unless +// this is a header file + +namespace c10 { + +inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) { + // eventually this should look almost identical to python arg parser, but + // it is simpler for now to work directly on this schema + + out << schema.name(); + if (!schema.overload_name().empty()) { + out << "." << schema.overload_name(); + } + out << "("; + + bool seen_kwarg_only = false; + for (const auto i : c10::irange(schema.arguments().size())) { + if (i > 0) out << ", "; + if (schema.arguments()[i].kwarg_only() && !seen_kwarg_only) { + out << "*, "; + seen_kwarg_only = true; + } + out << schema.arguments()[i]; + } + + if(schema.is_vararg()) { + if(!schema.arguments().empty()) + out << ", "; + out << "..."; + } + + out << ") -> "; + + const auto& returns = schema.returns(); + + /* + * We should skip parenthesis if we return a single item and it's not varret, + * or we return nothing but varret. + * + * Need special handling for schema + * aten::items.str(Dict(str, t) self) -> (str,t)[] + * Even though this schema returns a single item, we need add parenthesis. + * The is necessary so the printed schema can be parsed by the C++ SchemaParser + * Without the extra parenthesis, the parser sees the first parenthesis in '(str,t)' and mistakenly + * treat the return type as a tuple. An alternative is to enhance the Lexer + * to lookahead multiple tokens to accurately decide if the return type is + * a tuple. + */ + bool need_paren = !( + (returns.size() == 1 && !schema.is_varret()) || + (returns.empty() && schema.is_varret())); + + if (returns.size() == 1 && !schema.is_varret()) { + std::stringstream return_ss; + return_ss << returns.at(0); + auto return_str = return_ss.str(); + + // enclosing the single return item with parenthesis if the return type + // starts with a left parenthesis. + // + // There are 2 cases + // 1. something like 'aten::items.str(Dict(str, t) self) -> ((str, t)[])'. + // without the extra parenthesis, the c++ schem parser can not parse it. + // 2. something like '-> ((str, str))'. Need extra parenthesis so the return + // type is a single tuple rather than two strings. + // PR (https://github.com/pytorch/pytorch/pull/23204) has more context about + // this. test_serialize_and_deserialize (https://github.com/pytorch/pytorch/blob/master/test/test_function_schema.py#L15) + // also covers this case. 
+ if (!return_str.empty() && return_str.front() == '(') { + need_paren = true; + } + } + + if (need_paren) { + out << "("; + } + for (const auto i : c10::irange(returns.size())) { + if (i > 0) { + out << ", "; + } + out << returns.at(i); + } + if (schema.is_varret()) { + if (!returns.empty()) { + out << ", "; + } + out << "..."; + } + if (need_paren) { + out << ")"; + } + return out; +} + +inline size_t findFirstOutArg(const std::vector& args) { + // find the start of out args in the schema + for (const auto out_start_idx : c10::irange(args.size())) { + if (args.at(out_start_idx).is_out()) { + return out_start_idx; + } + } + return args.size(); +} + +inline bool Argument::isBackwardCompatibleWith( + const Argument& old, + std::ostream* why_not) const { + const Argument* lhs = this; + const Argument* rhs = &old; + if (!(lhs->name() == rhs->name() + && lhs->N() == rhs->N() + && (lhs->alias_info() == rhs->alias_info() + || (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr + && *lhs->alias_info() == *rhs->alias_info())))) { + return false; + } + if (lhs->kwarg_only() && !rhs->kwarg_only()) { + return false; + } + if (!rhs->type()->isSubtypeOfExt(*lhs->type(), why_not)) { + return false; + } + if (rhs->default_value().has_value() && + lhs->default_value() != rhs->default_value()) { + return false; + } + return true; +} + +inline bool Argument::isForwardCompatibleWith( + const Argument& old, + std::ostream* why_not) const { + const Argument* lhs = this; + const Argument* rhs = &old; + if (!(lhs->name() == rhs->name() + && lhs->N() == rhs->N() + && (lhs->alias_info() == rhs->alias_info() + || (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr + && *lhs->alias_info() == *rhs->alias_info())))) { + return false; + } + if (lhs->kwarg_only() && !rhs->kwarg_only()) { + return false; + } + if (!lhs->type()->isSubtypeOfExt(rhs->type(), why_not)) { + return false; + } + if (rhs->default_value().has_value() && + lhs->default_value() != rhs->default_value()) { + return false; + } + if (lhs->default_value().has_value() && !rhs->default_value().has_value()) { + return false; + } + return true; +} + +inline std::string FunctionSchema::formatTypeMismatchMsg( + const Argument& expected, + const std::string& actual_type, + c10::optional position, + c10::optional value) const { + std::string position_str; + if (position) { + position_str = c10::str("Position: ", *position, "\n"); + } + std::string value_str; + if (value) { + value_str = c10::str("Value: ", *value, "\n"); + } + return c10::str( + name(), + "() ", + expected.formatTypeMismatchMsg(actual_type), + position_str, + value_str, + "Declaration: ", + *this); +} + +inline bool FunctionSchema::isBackwardCompatibleWith( + const FunctionSchema& old, + std::ostream* why_not) const { + if (!(name() == old.name() + && overload_name() == old.overload_name() + // we are conservative on is_vararg and is_varret, + // since they are only used by internal operators + && is_vararg() == old.is_vararg() + && is_varret() == old.is_varret() + && returns().size() == old.returns().size() + && arguments().size() >= old.arguments().size())) { + return false; + } + for (const auto i : c10::irange(returns().size())) { + // Backwards compatibility requires covariance on argument types + // (i.e. more generic), and contravariance on return types (i.e. + // more specific). 
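+    // Illustrative example (not drawn from any particular operator): a newer
+    // schema may widen an argument from int to Scalar, or narrow a return
+    // from Tensor? to Tensor, and still pass these subtype checks; the
+    // reverse changes would not.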
+ if (!old.returns().at(i).isBackwardCompatibleWith( + returns().at(i), + why_not)) { + return false; + } + } + + // we want to test both out and default args separately + size_t old_out_start_idx = findFirstOutArg(old.arguments()); + size_t new_out_start_idx = findFirstOutArg(arguments()); + + // make sure among the default args, they are backward compatible + for (const auto i : c10::irange(old_out_start_idx)) { + if (!arguments().at(i).isBackwardCompatibleWith( + old.arguments().at(i), why_not)) { + return false; + } + } + + // Validate that all new arguments provided has a default value + for (const auto i : c10::irange(old_out_start_idx, new_out_start_idx)) { + if (!arguments().at(i).default_value()) { + if (why_not) { + *why_not + << "Function schema not backward compatible since the new argument '" + << arguments().at(i).name() << "' of type " + << arguments().at(i).type()->str() + << " did not provide a default value."; + } + return false; + } + } + + // now compare the out args + for (const auto i : c10::irange(old_out_start_idx, old.arguments().size())) { + if (!arguments() + .at(i - old_out_start_idx + new_out_start_idx) + .isBackwardCompatibleWith(old.arguments().at(i), why_not)) { + return false; + } + } + + return true; +} + +inline bool FunctionSchema::isForwardCompatibleWith( + const FunctionSchema& old, + std::ostringstream& why_not) const { + if (!(name() == old.name() && + overload_name() == old.overload_name() + // we are conservative on is_vararg and is_varret, + // since they are only used by internal operators + && is_vararg() == old.is_vararg() && is_varret() == old.is_varret() && + returns().size() == old.returns().size())) { + return false; + } + + // we want to test both out and default args separately + size_t old_out_start_idx = findFirstOutArg(old.arguments()); + size_t new_out_start_idx = findFirstOutArg(arguments()); + + if (old.arguments().size() - old_out_start_idx != + arguments().size() - new_out_start_idx) { + if (why_not) { + why_not << "Function schema should have the " + << "same number of out arguments"; + } + return false; + } + + // make sure among the default args, they are forward compatible + for (size_t i = 0; i < std::min(old_out_start_idx, new_out_start_idx); i++) { + if (!arguments().at(i).isForwardCompatibleWith(old.arguments().at(i))) { + if (why_not) { + why_not + << "'" << arguments().at(i).name() << "'" + << " is not forward compatible with the older version of the schema"; + } + return false; + } + } + + // Validate that all new arguments provided has a default value + for (size_t i = old_out_start_idx; i < new_out_start_idx; ++i) { + if (!arguments().at(i).default_value()) { + if (why_not) { + why_not + << "Function schema is not forward compatible since the new argument '" + << arguments().at(i).name() << "' of type " + << arguments().at(i).type()->str() + << " did not provide a default value."; + } + return false; + } + + auto default_val = arguments().at(i).default_value().value(); + if (default_val.isList() || default_val.isGenericDict()) { + if (why_not) { + why_not + << "Function schema is not forward compatible since the new argument '" + << arguments().at(i).name() << "' of type " + << arguments().at(i).type()->str() << " has a container type " + << "as its default value."; + } + return false; + } + } + + // now compare the out args + for (size_t i = old_out_start_idx; i < old.arguments().size(); i++) { + if (!arguments() + .at(i - old_out_start_idx + new_out_start_idx) + .isForwardCompatibleWith(old.arguments().at(i))) { + 
if (why_not) { + why_not << "Out argument '" + << "'" << arguments().at(i).name() + << " is not FC with the older version of the schema"; + } + return false; + } + } + + return true; +} + +template +inline void FunctionSchema::checkArg( + const IValue& value, + const Argument& argument, + optional pos) const { + if (value.isTensor() && argument.type() == TensorType::get()) { + // Fast-path for the common case + return; + } + if (!value.type()->isSubtypeOf(*argument.type())) { + TORCH_CHECK( + false, + formatTypeMismatchMsg( + argument, value.type()->repr_str(), pos)); + } +} + +inline std::string FunctionSchema::findErrorInKwargs(const std::vector& kwargs) const { + // First check if any of the kwargs are unknown, i.e. don't match the name of + // any argument in the schema. + for (const auto& kwarg : kwargs) { + if (!std::count_if( + arguments().begin(), + arguments().end(), + [&kwarg](const Argument& argument) { + return argument.name() == kwarg; + })) { + return c10::str( + "Unknown keyword argument '", + kwarg, + "' for operator '", + name(), + "'. Schema: ", + *this); + } + } + // If there are unconsumed kwargs but none of them were unknown, the first + // positional argument present in the kwargs is duplicated. + for (const auto& argument : arguments()) { + if (std::find(kwargs.begin(), kwargs.end(), argument.name()) != kwargs.end()) { + AT_ASSERT(!argument.default_value()); + return c10::str( + "Argument '", + argument.name(), + "' specified both as positional and ", + "keyword argument. Schema: ", + *this); + } + } + return ""; +} + +template +inline void FunctionSchema::checkAndNormalizeInputs( + std::vector& inputs, + const std::unordered_map& kwargs) const { + // Do we have more inputs than the schema accepts? + TORCH_CHECK( + inputs.size() <= arguments().size(), + "Expected at most ", + arguments().size(), + " argument(s) for operator '", + name(), + "', but received ", + inputs.size(), + " argument(s). Declaration: ", + *this); + + size_t consumed_kwargs = 0; + for (const auto pos : c10::irange(arguments().size())) { + const auto& argument = arguments()[pos]; + if (pos < inputs.size()) { + checkArg(inputs[pos], argument, pos); + continue; + } + auto it = kwargs.find(argument.name()); + if (it != kwargs.end()) { + checkArg(it->second, argument, nullopt); + inputs.push_back(it->second); + consumed_kwargs++; + continue; + } + if (argument.default_value()) { + inputs.push_back(*argument.default_value()); + continue; + } + AT_ERROR( + name(), + "() is missing value for argument '", + argument.name(), + "'. 
Declaration: ", + *this); + } + if (consumed_kwargs != kwargs.size()) { + std::vector names; + names.reserve(kwargs.size()); + for(const auto& k : kwargs) { + names.emplace_back(k.first); + } + throw std::runtime_error(findErrorInKwargs(names)); + } +} + +inline FunctionSchema FunctionSchema::cloneWithRemappedTypes( + const std::function type_map) const { + auto update_args = [&](const std::vector& args) { + std::vector new_args; + new_args.reserve(args.size()); + for(const Argument& arg : args) { + new_args.emplace_back(arg.cloneWithType(type_map(arg.type()))); + } + return new_args; + }; + return FunctionSchema( + name(), + overload_name(), + update_args(arguments()), + update_args(returns()), + is_vararg(), + is_varret()); +} + +// covariant subtyping of list of Arguments +inline bool isSubtypeOfList( + ArrayRef child, + ArrayRef parent, + std::ostream* why_not) { + if (child.size() != parent.size()) { + return false; + } + for (const auto i : c10::irange(child.size())) { + const Argument& c = child[i]; + const Argument& p = parent[i]; + if (c.name() != p.name()) { + return false; + } + if (!c.type()->isSubtypeOfExt(*p.type(), why_not)) { + return false; + } + } + return true; +} + +inline bool FunctionSchema::isSubtypeOf( + const FunctionSchema& rhs, + bool as_method, + std::ostream* why_not) const { + size_t start = as_method ? 1 : 0; + // functions are contravariant in arguments but covariant in returns + return isSubtypeOfList( + ArrayRef(rhs.arguments()).slice(start), + ArrayRef(arguments()).slice(start), + why_not) && + isSubtypeOfList(returns(), rhs.returns(), why_not); +} + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/functional.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/functional.h new file mode 100644 index 0000000000000000000000000000000000000000..6b4f3447f5d48051e83a4664e159303df9db4f4a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/functional.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include + +namespace c10 { + +// The passed in function must take T by value (T), or by +// const reference (const T&); taking T by non-const reference +// will result in an error like: +// +// error: no type named 'type' in 'class std::result_of' +// +// No explicit template parameters are required. + +// Overload for explicit function and ArrayRef +template +inline auto fmap(const T& inputs, const F& fn) -> std::vector { + std::vector r; + r.reserve(inputs.size()); + for(const auto & input : inputs) + r.push_back(fn(input)); + return r; +} + +// C++ forbids taking an address of a constructor, so here's a workaround... 
+// Overload for constructor (R) application +template +inline std::vector fmap(const T& inputs) { + std::vector r; + r.reserve(inputs.size()); + for(auto & input : inputs) + r.push_back(R(input)); + return r; +} + +template +inline std::vector filter(at::ArrayRef inputs, const F& fn) { + std::vector r; + r.reserve(inputs.size()); + for(auto & input : inputs) { + if (fn(input)) { + r.push_back(input); + } + } + return r; +} + +template +inline std::vector filter(const std::vector& inputs, const F& fn) { + return filter(static_cast>(inputs), fn); +} + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/grad_mode.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/grad_mode.h new file mode 100644 index 0000000000000000000000000000000000000000..47051525c59beece9e8e11accacd926d9c5e587e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/grad_mode.h @@ -0,0 +1,10 @@ +#pragma once + +#include +#include + +namespace at { + using GradMode = c10::GradMode; + using AutoGradMode = c10::AutoGradMode; + using NoGradGuard = c10::NoGradGuard; +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings.h new file mode 100644 index 0000000000000000000000000000000000000000..15857eec9df0670e3854220e9c01f58299671cba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/interned_strings.h @@ -0,0 +1,358 @@ +#pragma once +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace c10 { + +#define FORALL_NS_SYMBOLS(_) \ + _(namespaces, prim) \ + _(namespaces, prims) \ + _(namespaces, nvprims) \ + _(namespaces, aten) \ + _(namespaces, cuda) \ + _(namespaces, onnx) \ + _(namespaces, attr) \ + _(namespaces, scope) \ + _(namespaces, user) \ + _(namespaces, _caffe2) \ + _(namespaces, dimname) \ + _(namespaces, namespaces) \ + _(prim, Assign) \ + _(prim, BroadcastingChunk) \ + _(prim, BroadcastSizes) \ + _(prim, ReductionSizes) \ + _(prim, Constant) \ + _(prim, ChunkSizes) \ + _(prim, ConstantMKLDNNTensor) \ + _(prim, BroadcastMKLDNNTensors) \ + _(prim, MKLDNNGroup) \ + _(prim, MKLDNNHardSwish) \ + _(prim, MKLDNNHardSigmoid) \ + _(prim, MKLDNNHardTanh) \ + _(prim, MKLDNNClamp) \ + _(prim, StaticRuntimeCopyOuts) \ + _(prim, Drop) \ + _(prim, Eval) \ + _(prim, Expand) /* onnx */ \ + _(prim, FusionGroup) \ + _(prim, CudaFusionGroup) \ + _(prim, CudaFusionGuard) \ + _(prim, oneDNNFusionGroup) \ + _(prim, oneDNNFusionGuard) \ + _(prim, FunctionalGraph) \ + _(prim, add_optional) \ + _(prim, view_copy) \ + _(prim, permute_copy) \ + _(prim, reshape_copy) \ + _(prim, squeeze_copy) \ + _(prim, t_copy) \ + _(prim, transpose_copy) \ + _(prim, unsqueeze_copy) \ + _(prim, flatten_copy) \ + _(prim, expand_copy) \ + _(prim, expand_as_copy) \ + _(prim, DifferentiableGraph) \ + _(prim, TensorExprGroup) \ + _(prim, TensorExprDynamicGroup) \ + _(prim, StaticSubgraph) \ + _(prim, If) \ + _(prim, Jump) /* debug */ \ + _(prim, JumpNZ) /* debug */ \ + _(prim, JumpZ) /* debug */ \ + _(prim, Load) \ + _(prim, Loop) \ + _(prim, Param) \ + _(prim, PackPadded) /* onnx */ \ + _(prim, PadPacked) /* onnx */ \ + _(prim, Placeholder) /* debug */ \ + _(prim, Print) \ + _(prim, EmptyListLiteral) \ + _(prim, LegacyTypedConstructor) \ + _(prim, PythonOp) \ + _(prim, IgnoredPythonOp) \ + _(prim, Reverse) \ + _(prim, Return) \ + _(prim, ReturnStmt) \ + _(prim, BreakStmt) \ + 
_(prim, ContinueStmt) \ + _(prim, ComprehensionScope) \ + _(prim, Store) \ + _(prim, AutogradZero) \ + _(prim, AutogradAnyNonZero) \ + _(prim, AutogradAllNonZero) \ + _(prim, AutogradAllZero) \ + _(prim, Starred) \ + _(prim, TupleConstruct) \ + _(prim, TupleUnpack) \ + _(prim, TupleIndex) \ + _(prim, TupleSlice) \ + _(prim, ListConstruct) \ + _(prim, ListUnpack) \ + _(prim, DictConstruct) \ + _(prim, ModuleContainerIndex) \ + _(prim, EnumName) \ + _(prim, EnumValue) \ + _(prim, StringIndex) \ + _(prim, NumToTensor) \ + _(prim, Uninitialized) \ + _(prim, VarConcat) \ + _(prim, VarStack) \ + _(prim, With) \ + _(prim, Enter) \ + _(prim, Exit) \ + _(prim, IfThenElse) \ + _(aten, Bool) \ + _(aten, Int) \ + _(aten, FloatImplicit) \ + _(aten, ComplexImplicit) \ + _(aten, IntImplicit) \ + _(aten, ScalarImplicit) \ + _(aten, Float) \ + _(aten, Complex) \ + _(aten, str) \ + _(aten, Delete) \ + _(prim, device) \ + _(prim, dtype) \ + _(prim, layout) \ + _(prim, id) \ + _(prim, requires_grad) \ + _(prim, MakeTestTensor) /* test */ \ + _(prim, AutogradAdd) \ + _(prim, GradOf) \ + _(aten, grad) \ + _(aten, backward) \ + _(prim, Guard) \ + _(prim, BailOut) \ + _(prim, TypeCheck) \ + _(prim, RequiresGradCheck) \ + _(prim, FallbackGraph) \ + _(prim, FusedConcat) \ + _(prim, ConstantChunk) \ + _(prim, MMTreeReduce) \ + _(prim, MMBatchSide) \ + _(prim, list) \ + _(prim, dict) \ + _(prim, min) \ + _(prim, max) \ + _(prim, abs) \ + _(aten, divmod) \ + _(prim, zip) \ + _(prim, enumerate) \ + _(prim, range) \ + _(prim, rangelist) \ + _(prim, isinstance) \ + _(prim, tolist) \ + _(prim, unchecked_cast) \ + _(aten, _grad_sum_to_size) \ + _(aten, _size_if_not_equal) \ + _(aten, _ncf_unsqueeze) \ + _(aten, warn) \ + _(aten, sorted) \ + _(aten, floordiv) \ + _(aten, __range_length) \ + _(aten, __derive_index) \ + _(aten, __round_to_zero_floordiv) \ + _(aten, is_scripting) \ + _(aten, _unwrap_optional) \ + _(prim, fork) \ + _(prim, awaitable) \ + _(prim, forkClosure) \ + _(prim, awaitableClosure) \ + _(prim, awaitable_nowait) \ + _(prim, awaitable_wait) \ + _(prim, RaiseException) \ + _(prim, Closure) \ + _(prim, CreateObject) \ + _(prim, SetAttr) \ + _(prim, GetAttr) \ + _(prim, HasAttr) \ + _(prim, profile) \ + _(prim, profile_ivalue) \ + _(prim, AddStatValue) \ + _(prim, TimePoint) \ + _(prim, CallFunction) \ + _(prim, CallMethod) \ + _(prim, LoopContinuation) \ + _(prim, annotate) \ + _(prim, TracedModuleForward) \ + _(prim, TracedFork) \ + _(prim, TracedAttr) \ + _(prim, rpc_async) \ + _(prim, rpc_sync) \ + _(prim, rpc_remote) \ + _(prim, is_cuda) \ + _(aten, append) \ + _(aten, as_tensor) \ + _(aten, adaptive_avg_pool2d_backward) \ + _(aten, dim) \ + _(aten, format) \ + _(aten, percentFormat) \ + _(aten, __not__) \ + _(aten, __is__) \ + _(aten, __isnot__) \ + _(aten, _ger) \ + _(aten, __getitem__) \ + _(aten, _set_item) \ + _(aten, manual_seed) \ + _(aten, device) \ + _(aten, hash) \ + _(aten, len) \ + _(aten, list) \ + _(aten, dict) \ + _(aten, wait) \ + _(aten, save) \ + _(aten, keys) \ + _(aten, ord) \ + _(aten, chr) \ + _(aten, hex) \ + _(aten, oct) \ + _(aten, clear) \ + _(aten, setdefault) \ + _(aten, bin) \ + _(aten, pop) \ + _(aten, insert) \ + _(aten, tensor) \ + _(prim, unchecked_unwrap_optional) \ + _(aten, __contains__) \ + _(prim, BailoutTemplate) \ + _(prim, grad) \ + _(cuda, _set_device) \ + _(cuda, set_stream) \ + _(cuda, _current_device) \ + _(cuda, synchronize) \ + _(aten, has_torch_function) \ + _(aten, is_autocast_enabled) \ + _(aten, is_autocast_cpu_enabled) \ + _(aten, 
is_autocast_xla_enabled) \ + FORALL_ATEN_BASE_SYMBOLS(_) \ + _(onnx, Add) \ + _(onnx, Concat) \ + _(onnx, Constant) \ + _(onnx, ConstantFill) \ + _(onnx, Div) \ + _(onnx, GRU) \ + _(onnx, Gather) \ + _(onnx, Gemm) \ + _(onnx, LSTM) \ + _(onnx, MatMul) \ + _(onnx, Min) \ + _(onnx, Max) \ + _(onnx, Mul) \ + _(onnx, Pow) \ + _(onnx, RNN) \ + _(onnx, Shape) \ + _(onnx, Size) \ + _(onnx, Slice) \ + _(onnx, Softmax) \ + _(onnx, Squeeze) \ + _(onnx, Sub) \ + _(onnx, Transpose) \ + _(onnx, Unsqueeze) \ + _(onnx, Loop) \ + _(onnx, If) \ + _(onnx, Reshape) \ + _(onnx, Expand) \ + _(onnx, Equal) \ + _(onnx, Greater) \ + _(onnx, GreaterOrEqual) \ + _(onnx, Less) \ + _(onnx, LessOrEqual) \ + _(onnx, Not) \ + _(aten, ATen) \ + _(onnx, Split) \ + _(onnx, ConstantOfShape) \ + _(onnx, Cast) \ + _(onnx, Mod) \ + _(onnx, Sqrt) \ + _(onnx, SplitToSequence) \ + _(onnx, SequenceAt) \ + _(onnx, SequenceConstruct) \ + _(onnx, SequenceEmpty) \ + _(onnx, SequenceInsert) \ + _(onnx, SequenceErase) \ + _(onnx, ConcatFromSequence) \ + _(onnx, Identity) \ + _(onnx, SoftmaxCrossEntropyLoss) \ + _(onnx, NegativeLogLikelihoodLoss) \ + _(onnx, LogSoftmax) \ + _(onnx, ReduceL1) \ + _(onnx, ReduceL2) \ + _(onnx, Conv) \ + _(onnx, BatchNormalization) \ + _(onnx, ReduceMean) \ + _(onnx, ReduceProd) \ + _(onnx, Relu) \ + _(onnx, Neg) \ + _(onnx, NonZero) \ + _(onnx, Range) \ + _(onnx, Tile) \ + _(onnx, Where) \ + _(onnx, Optional) \ + _(onnx, OptionalGetElement) \ + _(onnx, OptionalHasElement) \ + FORALL_ATTR_BASE_SYMBOLS(_) \ + _(attr, Subgraph) \ + _(attr, ReverseSubgraph) \ + _(attr, f_real_outputs) \ + _(attr, df_input_vjps) \ + _(attr, df_input_captured_inputs) \ + _(attr, df_input_captured_outputs) \ + _(attr, df_output_vjps) \ + _(attr, axes) \ + _(attr, symbolic_shape_inputs) \ + _(attr, allow_stack_outputs) \ + _(attr, striding_inputs_desc) \ + _(attr, striding_outputs_desc) \ + _(attr, broadcast) \ + _(attr, direction) \ + _(attr, ends) \ + _(attr, inplace) \ + _(attr, input_as_shape) \ + _(attr, is_zero) \ + _(attr, num_none) \ + _(attr, num_present) \ + _(attr, perm) \ + _(attr, starts) \ + _(attr, profiled_type) \ + _(attr, transA) \ + _(attr, transB) \ + _(attr, name) \ + _(attr, module) \ + _(attr, beg) \ + _(attr, idx) \ + _(attr, split) \ + _(attr, slot) \ + _(attr, kinds) \ + _(attr, types) \ + _(attr, scope) \ + _(attr, keepdims) \ + _(attr, cache_id) \ + _(attr, new_axis) \ + _(attr, warn_id) \ + _(attr, output_layouts) \ + _(attr, allowzero) \ + _(attr, seen_none) \ + _(attr, overload_name) \ + _(attr, node_stack_idx) + +enum class _keys : unique_t { + #define DEFINE_KEY(ns, s) ns##_##s, + FORALL_NS_SYMBOLS(DEFINE_KEY) + #undef DEFINE_KEY + num_symbols +}; + +#define DEFINE_SYMBOL(ns, s) \ + namespace ns { constexpr Symbol s(static_cast(_keys::ns##_##s)); } +FORALL_NS_SYMBOLS(DEFINE_SYMBOL) +#undef DEFINE_SYMBOL + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ivalue.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ivalue.h new file mode 100644 index 0000000000000000000000000000000000000000..57812446d0764878d4b10f49bff47a54f8899b92 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ivalue.h @@ -0,0 +1,1551 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +class TORCH_API CustomClassHolder : public c10::intrusive_ptr_target {}; 
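+// Sketch (hypothetical type): custom C++ classes exposed to TorchScript derive
+// from CustomClassHolder so an IValue can hold them behind an intrusive_ptr,
+// e.g.
+//
+//   struct MyStack : torch::CustomClassHolder {
+//     std::vector<std::string> items;
+//   };
+//
+// After registering MyStack via torch::class_, c10::make_intrusive<MyStack>()
+// produces a pointer that converts to an IValue and is recovered with
+// toCustomClass<MyStack>().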
+namespace jit { +using ::torch::CustomClassHolder; +struct Function; +struct CompilationUnit; +struct Module; +} // namespace jit +} // namespace torch +namespace c10 { +template +class Dict; +template +class List; +template +class IListRef; +struct IValue; +struct ClassType; +struct Type; +class RRefInterface; + +struct ClassType; +using ClassTypePtr = std::shared_ptr; + +TORCH_API bool _fastEqualsForContainer(const IValue& lhs, const IValue& rhs); + +TORCH_API torch::jit::Function* checkObjectSortSchema( + const c10::ClassTypePtr& t, + std::stringstream& why_not); + +// A comparator that checks ordering of two IValues of same type. +typedef std::function IValueComparator; + +TORCH_API IValueComparator getLessThanComparator(const IValue& v); +TORCH_API IValueComparator getGreaterThanComparator(const IValue& v); + +namespace ivalue { +struct Tuple; +struct Future; +struct Await; +struct ConstantString; +struct GenericDict; +struct Object; +struct PyObjectHolder; +struct EnumHolder; +// We need a ComplexHolder because currently the payloads in the Union +// only take 64 bits. Since ComplexDouble takes up 128 bits, and is too big +// to fit in the IValue directly, we indirect complex numbers through an +// intrusive pointer to ComplexHolder (which contains a c10::complex). +struct ComplexHolder : c10::intrusive_ptr_target { + public: + template + ComplexHolder(c10::complex c) { + val = convert>(c); + } + ComplexHolder() = default; + c10::complex val; +}; + +// Similar to ComplexHolder, for StreamData3 +struct StreamData3Holder : c10::intrusive_ptr_target { + public: + StreamData3Holder(struct c10::StreamData3 d) : val(d) {} + StreamData3Holder() = delete; + struct c10::StreamData3 val; +}; + +} // namespace ivalue + +// This is an owning wrapper for a c10::optional> +// that can be implicitly converted to a (non-owning) optional>. +// Its purpose is to be used in generated code to keep the vector alive +// either until the end of a statement (as a temporary), or as a saved arg +// in autograd. +template +struct OptionalArray { + c10::optional> list; + + OptionalArray() = default; + OptionalArray(std::vector val) : list(std::move(val)) {} + + // Used when saving an argument for the backwards pass. + OptionalArray& operator=(c10::optional> ref) { + if (ref) { + list = std::vector(ref->begin(), ref->end()); + } else { + list = nullopt; + } + return *this; + } + + // Used when saving an argument for the backwards pass. + OptionalArray& operator=(c10::OptionalArrayRef ref) { + if (ref) { + list = std::vector(ref->begin(), ref->end()); + } else { + list = nullopt; + } + return *this; + } + + operator c10::optional>() { + if (!list) { + return nullopt; + } + return *list; + } + + operator c10::OptionalArrayRef() { + if (!list) { + return nullopt; + } + return *list; + } +}; + +// Capsule is an internal implementation detail of custom C++ classes. We +// define it as an owning wrapper for +// c10::intrusive_ptr This wrapper is here to serve as +// an abstraction of the type erased custom class object pointer. It also allow +// pybind11 to treat this as a standalone class to register as a separate type +// caster, instead of a custom pointer holder which the pointer holder type +// caster try to "unwrap" it automatically. +struct Capsule { + c10::intrusive_ptr obj_ptr; + explicit Capsule(c10::intrusive_ptr ptr) + : obj_ptr(std::move(ptr)) {} +}; + +// IValue is the generic tagged union used by the interpreter to hold +// all value types. 
+// It is a 16-byte object with an 8-byte payload and an 8-byte tag. +// The tag is currently 4 bytes to determine the type, and 1 byte +// to mark whether that type is a subtype of c10::intrusive_ptr_target and needs +// retain/release calls. + +#define TORCH_FORALL_TAGS(_) \ + _(None) \ + _(Tensor) \ + _(Storage) \ + _(Double) \ + _(ComplexDouble) \ + _(Int) \ + _(SymInt) \ + _(SymFloat) \ + _(SymBool) \ + _(Bool) \ + _(Tuple) \ + _(String) \ + _(Blob) \ + _(GenericList) \ + _(GenericDict) \ + _(Future) \ + _(Await) \ + _(Device) \ + _(Stream) \ + _(Object) \ + _(PyObject) \ + _(Uninitialized) \ + _(Capsule) \ + _(RRef) \ + _(Quantizer) \ + _(Generator) \ + _(Enum) + +// [doxygen private] +// These methods are not actually private but we don't want to document them, so +// they are marked `@private`, which hides them on the doxygen documentation for +// this page. + +/// IValue (Interpreter Value) is a tagged union over the types +/// supported by the TorchScript interpreter. IValues contain their +/// values as an `IValue::Payload`, which holds primitive types +/// (`int64_t`, `bool`, `double`, `Device`) and `Tensor` as values, +/// and all other types as a `c10::intrusive_ptr`. In order to +/// optimize performance of the destructor and related operations by +/// making the `Tensor` and `c10::intrusive_ptr` paths generate the +/// same code, we represent a null `c10::intrusive_ptr` as +/// `UndefinedTensorImpl::singleton()`, *not* `nullptr`. +/// +/// IValues are used as inputs to and outputs from the TorchScript interpreter. +/// To retrieve the value contained within an IValue, use the `.toX()` methods, +/// where `X` is the type you are trying to get. Note that neither the `.toX()` +/// methods nor the templated `.to` functions do any kind of casting, they +/// only unwrap the contained value. For example: +/// +/// \rst +/// .. code-block:: cpp +/// +/// // Make the IValue +/// torch::IValue my_ivalue(26); +/// std::cout << my_ivalue << "\n"; +/// +/// // Unwrap the IValue +/// int64_t my_int = my_ivalue.toInt(); +/// std::cout << my_int << "\n"; +/// +/// // This will throw an error! +/// // `my_ivalue` is tagged as an int and cannot be used as another type +/// torch::Tensor my_tensor = my_ivalue.toTensor(); +/// \endrst +struct TORCH_API IValue final { + IValue(const IValue& rhs) : IValue(rhs.payload, rhs.tag) { + if (isIntrusivePtr() && + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) { + c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr); + } + } + + IValue(IValue&& rhs) noexcept : tag(rhs.tag) { + moveFrom(std::move(rhs)); + } + + /// @private [doxygen private] + ~IValue() { + destroy(); + } + + C10_ALWAYS_INLINE IValue& operator=(IValue&& rhs) & noexcept { + if (&rhs == this) { + return *this; + } + + destroy(); + moveFrom(std::move(rhs)); + return *this; + } + + IValue& operator=(IValue const& rhs) & { + *this = IValue(rhs); + return *this; + } + + void dump() const; + + /** + * Equality comparison. The semantics are the same as Python's `==`: + * 1. Numerical types are compared by value. + * 2. Tensors compute element-wise equality, returning a BoolTensor (see: + * `torch.eq()`) + * 3. Strings are compared by value. + * 4. Sequence types (list, tuple) are compared lexicographically by + * comparing their elements. Different sequence types never compare equal. + * 5. Mappings (dict) must have equal (key, value) pairs. + * 6. If not listed above, the default behavior for is to test identity + * equality (e.g. pointer equality). 
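+   *
+   * For example (sketch): IValue(2).equals(IValue(2)) yields an IValue
+   * holding true, while equals() on two Tensor-tagged IValues yields an
+   * IValue holding the element-wise BoolTensor produced by `torch.eq()`.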
+ * + * Why does this return an IValue instead of a bool? Because in PyTorch, + * `tensor1 == tensor2` returns a `BoolTensor`, not a bool. + * + * NOTE: we (like Python) assume that identity equality implies value equality + * for efficiency. + * TODO: need to support customizing equality + */ + IValue equals(const IValue& rhs) const; + /** + * This implements the same semantics as `bool(lhs == rhs)` in Python. which + * is the same as `equals()` except for Tensor types. + */ + TORCH_API friend bool operator==(const IValue& lhs, const IValue& rhs); + TORCH_API friend bool operator!=(const IValue& lhs, const IValue& rhs); + + /** + * Identity comparison. Checks if `this` is the same object as `rhs`. The + * semantics are the same as Python's `is` operator. + * + * NOTE: Like in Python, this operation is poorly defined for primitive types + * like numbers and strings. Prefer to use `==` unless you really want to + * check identity equality. + */ + bool is(const IValue& rhs) const; + + /** + * Hashing for IValues. Returns an IValue-boxed int. + * + * Some notes: + * - Like eager, Tensors are hashed by looking at the pointer. This is not + * strictly correct because two value-equal tensors with different tensor + * pointers will hash differently, but we choose to reproduce the eager + * semantics. + * - Hashing is not defined on all built-in IValue types (e.g. list and + * dict), following Python. Calling `hash()` on these types will throw. + */ + IValue hash() const { + return (int64_t)IValue::hash(*this); + } + // This is defined because `c10::hash` dispatches to a function of this + // signature. See the member function `hash()`. + static size_t hash(const IValue& iv); + + /** + * @private [doxygen private] + * [container equality] + * This is an equality implementation that assumes objects with the same + * identity equal themselves, for efficiency reasons. We primarily have this + * for consistency, because Python does the same thing. This actually + * provokes user-visible changes in behavior due to quirks in torch: + * [tensor1] == [tensor1] -> True (because container equality will first + * compare identity) [tensor1] == [tensor1_copy] -> RuntimeError: + * Boolean value of Tensor with more than one value is ambiguous + */ + TORCH_API friend bool _fastEqualsForContainer( + const IValue& lhs, + const IValue& rhs); + + private: + static bool isAliasOf(const at::Tensor& a, const at::Tensor& b) { + if (a.is_sparse()) { + return isAliasOf(a._values(), b) || isAliasOf(a._indices(), b); + } + if (b.is_sparse()) { + return isAliasOf(a, b._values()) || isAliasOf(a, b._indices()); + } + if (a.is_sparse_csr()) { + return isAliasOf(a.values(), b) || isAliasOf(a.crow_indices(), b) || + isAliasOf(a.col_indices(), b); + } + if (b.is_sparse_csr()) { + return isAliasOf(a, b.values()) || isAliasOf(a, b.crow_indices()) || + isAliasOf(a, b.col_indices()); + } + + // Opaque tensors such as the ones constructed by the MKL-DNN backend + // don't have storage so we just compare their TensorImpls. + // TODO: Find way to expose alias info for opaque tensors. 
+ if (!a.has_storage() || !b.has_storage()) { + return a.unsafeGetTensorImpl() == b.unsafeGetTensorImpl(); + } + + return a.is_alias_of(b); + } + + template + bool isListOf() const; + + public: + /// @private [doxygen private] + bool isAliasOf(const IValue& rhs) const { + if (this->tag != rhs.tag) { + // Trivially don't alias if the type is different + return false; + } + + // Tensors should be compared based on internal storage + if (this->isTensor()) { + return isAliasOf(this->toTensor(), rhs.toTensor()); + } + + if (!isIntrusivePtr()) { + // Primitive types don't alias anything + return false; + } + + AT_ASSERT(rhs.isIntrusivePtr()); + + // Other types can be compared by their ptr value + return this->payload.u.as_intrusive_ptr == rhs.payload.u.as_intrusive_ptr; + } + + /// @private [doxygen private] + size_t use_count() const noexcept { + if (isTensor()) { + return payload.as_tensor.use_count(); + } + + if (!isIntrusivePtrLegacyBehavior()) { + return 1; + } + + if (payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()) { + return 0; + } + return c10::raw::intrusive_ptr::use_count(payload.u.as_intrusive_ptr); + } + + /// @private [doxygen private] + void swap(IValue& rhs) noexcept { + if (isTensor() && rhs.isTensor()) { + std::swap(payload.as_tensor, rhs.payload.as_tensor); + } else if (isTensor()) { + at::Tensor t = std::move(payload.as_tensor); + // As far as I can tell, omitting the usual explicit destructor call + // is not UB in and of itself, and it's a slight perf win. The + // destructor is a no-op, because the moved-from Tensor is + // effectively an intrusive_ptr in the null state, so we don't need + // the behavior for correctness reasons either. Leaving this + // explanatory comment, including commented-out destructor call, to + // make this abundantly clear. + // + // payload.as_tensor.~Tensor(); + payload.u = rhs.payload.u; + new (&rhs.payload.as_tensor) at::Tensor(std::move(t)); + } else if (rhs.isTensor()) { + rhs.swap(*this); + return; + } else { + std::swap(payload.u, rhs.payload.u); + } + std::swap(tag, rhs.tag); + } + + // Accessors for subtypes are arranged together below + // While some of these accessors could be generated through templates, + // we prefer to write them manually for clarity + + IValue(at::TensorBase t) : tag(Tag::Tensor) { + new (&payload.as_tensor) at::Tensor(std::move(t)); + } + bool isTensor() const { + return Tag::Tensor == tag; + } + + private: + // Outlined error path so that toTensor() can be inlined. + [[noreturn]] void reportToTensorTypeError() const; + + public: + at::Tensor toTensor() &&; + at::Tensor& toTensor() &; + const at::Tensor& toTensor() const&; + at::TensorImpl* unsafeToTensorImpl() const { + TORCH_INTERNAL_ASSERT(isTensor()); + return payload.as_tensor.unsafeGetTensorImpl(); + } + + IValue(at::Storage s) : tag(Tag::Storage) { + payload.u.as_intrusive_ptr = + null_to_undefined_tensor(s.unsafeReleaseStorageImpl()); + } + bool isStorage() const { + return Tag::Storage == tag; + } + c10::Storage toStorage() &&; + c10::Storage toStorage() const&; + + const IValue& toIValue() const { + return *this; + } + IValue& toIValue() { + return *this; + } + + /// @private [doxygen private] + IValue(intrusive_ptr blob) : tag(Tag::Blob) { + // TODO (after Tensor merge) If we pass in a Blob holding a Tensor, extract + // and store it as a Tensor instead. 
+ payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release()); + } + + /// @private [doxygen private] + bool isBlob() const { + return Tag::Blob == tag; + } + + /// @private [doxygen private] + c10::intrusive_ptr toBlob() &&; + + /// @private [doxygen private] + c10::intrusive_ptr toBlob() const&; + + // Capsule. No new callsites of these APIs should + // be introduced. + static inline IValue make_capsule( + intrusive_ptr blob); + bool isCapsule() const { + return Tag::Capsule == tag; + } + c10::intrusive_ptr toCapsule() &&; + c10::intrusive_ptr toCapsule() const&; + + // Custom C++ classes + template < + typename T, + std::enable_if_t< + std::is_base_of::value, + int> = 0> + IValue(intrusive_ptr custom_class); + bool isCustomClass() const; + template + c10::intrusive_ptr toCustomClass() &&; + template + c10::intrusive_ptr toCustomClass() const&; + + // Tuple + IValue(c10::intrusive_ptr v); + + template < + typename... Args, + std::enable_if_t< + !std::disjunction< + std::is_lvalue_reference..., + std::negation>...>::value, + std::nullptr_t> = nullptr> + IValue(const std::tuple& t); + template < + typename... Args, + std::enable_if_t< + !std::disjunction< + std::is_lvalue_reference..., + std::negation>...>::value, + std::nullptr_t> = nullptr> + IValue(std::tuple&& t); + bool isTuple() const { + return Tag::Tuple == tag; + } + c10::intrusive_ptr toTuple() &&; + c10::intrusive_ptr toTuple() const&; + C10_NODISCARD ivalue::Tuple& toTupleRef() const; + + // Double + IValue(double d) : tag(Tag::Double) { + payload.u.as_double = d; + } + bool isDouble() const { + return Tag::Double == tag; + } + double toDouble() const { + AT_ASSERT(isDouble()); + return payload.u.as_double; + } + + // ComplexDouble + template + IValue(c10::complex c); + bool isComplexDouble() const { + return Tag::ComplexDouble == tag; + } + c10::complex toComplexDouble() const; + + // Future + IValue(c10::intrusive_ptr v); + bool isFuture() const { + return Tag::Future == tag; + } + c10::intrusive_ptr toFuture() &&; + c10::intrusive_ptr toFuture() const&; + + IValue(c10::intrusive_ptr v); + bool isAwait() const { + return Tag::Await == tag; + } + c10::intrusive_ptr toAwait() &&; + c10::intrusive_ptr toAwait() const&; + + // RRef + IValue(c10::intrusive_ptr v); + bool isRRef() const { + return Tag::RRef == tag; + } + c10::intrusive_ptr toRRef() &&; + c10::intrusive_ptr toRRef() const&; + + // Quantizer + IValue(c10::intrusive_ptr v); + bool isQuantizer() const { + return Tag::Quantizer == tag; + } + c10::intrusive_ptr toQuantizer() &&; + c10::intrusive_ptr toQuantizer() const&; + + // Int + IValue(int64_t i) : tag(Tag::Int) { + payload.u.as_int = i; + } + + IValue(const c10::SymInt& i) { + if (auto mi = i.maybe_as_int()) { + tag = Tag::Int; + payload.u.as_int = *mi; + } else { + tag = Tag::SymInt; + payload.u.as_intrusive_ptr = i.toSymNode().release(); + } + } + + bool isSymInt() const { + return Tag::SymInt == tag; + } + + c10::SymInt toSymInt() &&; + c10::SymInt toSymInt() const&; + + IValue(const c10::SymFloat& i) { + if (i.is_symbolic()) { + tag = Tag::SymFloat; + payload.u.as_intrusive_ptr = i.toSymNodeImpl().release(); + } else { + tag = Tag::Double; + payload.u.as_double = i.as_float_unchecked(); + } + } + + bool isSymFloat() const { + return Tag::SymFloat == tag; + } + + c10::SymFloat toSymFloat() &&; + c10::SymFloat toSymFloat() const&; + + IValue(const c10::SymBool& i) { + if (auto mi = i.maybe_as_bool()) { + tag = Tag::Bool; + payload.u.as_int = *mi; + } else { + tag = Tag::SymBool; + 
payload.u.as_intrusive_ptr = i.toSymNodeImpl().release(); + } + } + + bool isSymBool() const { + return Tag::SymBool == tag; + } + + c10::SymBool toSymBool() &&; + c10::SymBool toSymBool() const&; + + // allow you to pass literals (3, 4) without ambiguity + IValue(int32_t i) : IValue(static_cast(i)) {} + + bool isInt() const { + return Tag::Int == tag; + } + + int64_t toInt() const { + AT_ASSERT(isInt()); + return payload.u.as_int; + } + + // Bool + IValue(bool b) : tag(Tag::Bool) { +#if defined(__clang__) && defined(__x86_64__) + // Initializing entire payload stops valgrind's from reporting + // "jump or move depends on uninitialised value" in IValue copy constructor + // See https://github.com/pytorch/pytorch/issues/37117 + payload.u.as_int = b; +#else + payload.u.as_bool = b; +#endif + } + bool isBool() const { + return Tag::Bool == tag; + } + bool toBool() const { + AT_ASSERT(isBool()); + return payload.u.as_bool; + } + + // IntList + bool isIntList() const; + bool isSymIntList() const; + c10::List toIntList() &&; + c10::List toIntList() const&; + std::vector toIntVector() const; + std::vector toSymIntVector() const; + at::DimVector toDimVector() const; + + // ConstantString + IValue(c10::intrusive_ptr v); + IValue(std::string v); + IValue(const char* v) : IValue(std::string(v)) {} + IValue(c10::string_view v) : IValue(std::string(v)){}; + bool isString() const { + return Tag::String == tag; + } + c10::intrusive_ptr toString() &&; + c10::intrusive_ptr toString() const&; + const std::string& toStringRef() const; + c10::optional> toOptionalStringRef() + const; + c10::string_view toStringView() const; + + // DoubleList + bool isDoubleList() const; + c10::List toDoubleList() &&; + c10::List toDoubleList() const&; + std::vector toDoubleVector() const; + + // ComplexDoubleList + bool isComplexDoubleList() const; + c10::List> toComplexDoubleList() &&; + c10::List> toComplexDoubleList() const&; + std::vector> toComplexDoubleVector() const; + + // BoolList + bool isBoolList() const; + c10::List toBoolList() &&; + c10::List toBoolList() const&; + + // TensorList + bool isTensorList() const; + c10::List toTensorList() &&; + c10::List toTensorList() const&; + std::vector toTensorVector() const; + + // OptionalTensorList + bool isOptionalTensorList() const; + c10::List> toOptionalTensorList() &&; + c10::List> toOptionalTensorList() const&; + std::vector> toOptionalTensorVector() const; + + // GenericList + IValue(c10::List v); + bool isList() const { + return Tag::GenericList == tag; + } + c10::List toList() &&; + c10::List toList() const&; + c10::ArrayRef toListRef() const; + + // Some template constructors of IValue calls another constructor recursively. + // This SFINAEs the called constructor exists. + template + using enable_if_ivalue_constructible = + std::enable_if_t::value, std::nullptr_t>; + + // The rule for lists is more complicated; the generic constructor is only + // acceptable if your element isn't SymInt. If you do have a SymInt element, + // then you must also, at construction time, check if you can decay the list + // into an int list (this is MANDATORY, as at a use site we may expect + // toIntList to work even if at the call site you had a SymIntArrayRef + // argument). In practice, only SymIntArrayRef is used this way, so we + // didn't bother making it work for the other constructors, we just make sure + // they're not selectable. 
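+  // Sketch of the visible effect (hypothetical values): a plain integer list
+  //
+  //   IValue iv(std::vector<int64_t>{2, 3});
+  //   auto sizes = iv.toIntVector();  // {2, 3}
+  //
+  // behaves the same as a SymIntArrayRef whose elements are all concrete,
+  // because such a list decays to an int list at construction time; only
+  // genuinely symbolic elements keep the SymInt representation.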
+ template + using enable_if_list_is_ivalue_constructible = std::enable_if_t< + std::is_constructible::value && + !std::is_same::value, + std::nullptr_t>; + + template = nullptr> + IValue(c10::List&& v); + template = nullptr> + IValue(const c10::List& v); + template = nullptr> + IValue(at::ArrayRef v); + template = nullptr> + IValue(const std::vector& v); + template + IValue(std::array v); + + // Manual constructors for lists of symints, which decay to int list if + // possible. To avoid ambiguous overload situations, we template them + // to prevent implicit conversions + template + using enable_if_symint = + std::enable_if_t::value, std::nullptr_t>; + + template = nullptr> + IValue(at::ArrayRef v); + template = nullptr> + IValue(at::OptionalArrayRef v); + template = nullptr> + IValue(const std::vector& v); + + template + using enable_if_ilist_is_ivalue_constructible = std::enable_if_t< + std::is_constructible::value && + std::is_constructible::boxed_type>:: + value && + !std::is_same::value, + std::nullptr_t>; + + template = nullptr> + IValue(c10::IListRef v); + + // GenericDict + IValue(c10::Dict v); + bool isGenericDict() const { + return Tag::GenericDict == tag; + } + c10::Dict toGenericDict() &&; + c10::Dict toGenericDict() const&; + + template + IValue(c10::Dict v); + + template + /// \cond + /// DOXYGEN_CANNOT_HANDLE_CONSTRUCTORS_WITH_MACROS_SO_EXCLUDE_THIS_LINE_FROM_DOXYGEN + C10_DEPRECATED_MESSAGE( + "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict instead.") + /// \endcond + IValue(std::unordered_map v); + + template = nullptr> + IValue(c10::optional v); + template = nullptr> + IValue(c10::OptionalArrayRef v); + IValue(c10::nullopt_t); + + // ClassType + IValue(c10::intrusive_ptr v); + bool isObject() const { + return tag == Tag::Object; + } + c10::intrusive_ptr toObject() &&; + c10::intrusive_ptr toObject() const&; + ivalue::Object& toObjectRef() const; + + torch::jit::Module toModule() const; + bool isModule() const; + + // PyObject + IValue(c10::intrusive_ptr v); + bool isPyObject() const { + return tag == Tag::PyObject; + } + c10::intrusive_ptr toPyObjectHolder() &&; + c10::intrusive_ptr toPyObjectHolder() const&; + PyObject* toPyObject() const; + + // Enum + explicit IValue(c10::intrusive_ptr v); + bool isEnum() const { + return tag == Tag::Enum; + } + c10::intrusive_ptr toEnumHolder() &&; + c10::intrusive_ptr toEnumHolder() const&; + + // None + IValue() : tag(Tag::None) {} + bool isNone() const { + return Tag::None == tag; + } + std::string toNone() const { + AT_ASSERT(isNone()); + return "None"; + } + + static IValue uninitialized() { + auto i = IValue(); + i.tag = Tag::Uninitialized; + return i; + } + + // Scalar, which gets encoded as either an Int, a Double or a ComplexDouble + IValue(const at::Scalar& s) : IValue() { + // NB: do the symbolic versions first, as isFloatingPoint is true + // for both SymFloat and double + if (s.isSymInt()) { + tag = Tag::SymInt; + payload.u.as_intrusive_ptr = s.toSymInt().toSymNode().release(); + } else if (s.isSymFloat()) { + tag = Tag::SymFloat; + payload.u.as_intrusive_ptr = s.toSymFloat().toSymNodeImpl().release(); + } else if (s.isSymBool()) { + tag = Tag::SymBool; + payload.u.as_intrusive_ptr = s.toSymBool().toSymNodeImpl().release(); + } else if (s.isFloatingPoint()) { + tag = Tag::Double; + payload.u.as_double = s.toDouble(); + } else if (s.isComplex()) { + *this = s.toComplexDouble(); + } else if (s.isBoolean()) { + tag = Tag::Bool; + payload.u.as_bool = s.toBool(); + } else { + 
TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + s.isIntegral(false), "Unknown type in Scalar"); + tag = Tag::Int; + payload.u.as_int = s.toLong(); + } + } + + bool isScalar() const { + return isDouble() || isInt() || isComplexDouble() || isBool() || + isSymInt() || isSymFloat() || isSymBool(); + } + + at::Scalar toScalar() const { + if (isDouble()) + return toDouble(); + else if (isInt()) + return toInt(); + else if (isComplexDouble()) + return toComplexDouble(); + else if (isBool()) + return toBool(); + else if (isSymInt()) + return toSymInt(); + else if (isSymFloat()) + return toSymFloat(); + else if (isSymBool()) + return toSymBool(); + throw std::runtime_error("IValue is not a Scalar"); + } + + // Device + IValue(c10::Device d) : tag(Tag::Device) { + payload.u.as_device.type = d.type(); + payload.u.as_device.index = d.index(); + } + bool isDevice() const { + return Tag::Device == tag; + } + c10::Device toDevice() const { + AT_ASSERT(isDevice()); + return c10::Device(payload.u.as_device.type, payload.u.as_device.index); + } + + // Stream + IValue(c10::Stream s) : tag(Tag::Stream) { + auto v = c10::make_intrusive(s.pack3()); + payload.u.as_intrusive_ptr = v.release(); + } + c10::Stream toStream() &&; + c10::Stream toStream() const&; + bool isStream() const { + return Tag::Stream == tag; + } + + // ScalarType + IValue(ScalarType t) + : IValue(static_cast::type>(t)) {} + at::ScalarType toScalarType() const { + return static_cast(toInt()); + } + + // Layout + IValue(Layout l) + : IValue(static_cast::type>(l)) {} + at::Layout toLayout() const { + return static_cast(toInt()); + } + + // MemoryFormat + IValue(MemoryFormat m) + : IValue(static_cast::type>(m)) {} + at::MemoryFormat toMemoryFormat() const { + return static_cast(toInt()); + } + + // QScheme + IValue(at::QScheme qscheme) : tag(Tag::Int) { + payload.u.as_int = static_cast(qscheme); + } + + at::QScheme toQScheme() const { + return static_cast(toInt()); + } + + // Dimname + IValue(at::Dimname dimname) : IValue(dimname.symbol().toQualString()) {} + + at::Dimname toDimname() const { + return at::Dimname::fromSymbol(Symbol::fromQualString(toStringRef())); + } + + // Generator + IValue(at::Generator g) : tag(Tag::Generator) { + payload.u.as_intrusive_ptr = + null_to_undefined_tensor(g.unsafeReleaseGeneratorImpl()); + } + bool isGenerator() const { + return Tag::Generator == tag; + } + at::Generator toGenerator() &&; + at::Generator toGenerator() const&; + + // for debugging + std::string tagKind() const { + switch (tag) { +#define DEFINE_CASE(x) \ + case Tag::x: \ + return #x; + TORCH_FORALL_TAGS(DEFINE_CASE) +#undef DEFINE_CASE + } + return "InvalidTag(" + std::to_string(static_cast(tag)) + ")"; + } + + // generic v.to() implementations + // that can be used in special functions like pop/push + // that use template meta-programming. + // prefer the directly named methods when you can, + // since they are simpler to understand + + // Note: if you get linker errors saying one of these is missing, + // change it to ... && = delete; and you will see better error messages for + // why However, we cannot commit this because some compiler versions barf on + // it. 
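+  // Usage sketch (hypothetical values): the generic form mirrors the named
+  // accessors,
+  //
+  //   IValue iv(42);
+  //   int64_t n = iv.to<int64_t>();                 // same as iv.toInt()
+  //   auto none = IValue().toOptional<int64_t>();   // c10::nullopt for None
+  //
+  // Prefer the named toX() methods in ordinary code; to<T>() mainly serves
+  // template metaprogramming such as stack push/pop helpers.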
+ template + T to() &&; + template + typename c10::detail::ivalue_to_const_ref_overload_return::type to() + const&; + + // ToOptional: convert a IValue to the Optional obj that accepts both T and + // None + template + optional toOptional(); + template + optional toOptional() const; + + /// @private [doxygen private] + /// this is a shallow comparison of two IValues to test the object identity + bool isSameIdentity(const IValue& rhs) const; + + // Computes the "official" string representation of an IValue. This produces a + // TorchScript expression that can be used to recreate an IValue with the same + // value (e.g. when we are printing constants in the serializer). + // + // Callers can use `customFormatter` to override how `repr()` prints out an + // IValue. This is useful if you have some other environment where you can + // look up values, and you want to print a reference to that environment (like + // the serializer's constant table). + // + // repr() is not necessarily defined on all objects! + std::ostream& repr( + std::ostream& stream, + std::function customFormatter) + const; + + // Computes an "informal" string representation of an IValue. This should be + // used for debugging, or servicing `print()`-like functions. + // This is different from `repr()` in that there is no expectation that we can + // exactly reconstruct an IValue from the output; feel free to use a + // concise/pretty form + TORCH_API friend std::ostream& operator<<(std::ostream& out, const IValue& v); + + bool isPtrType() const { + if (isTensor()) { + return payload.as_tensor.defined(); + } + return isIntrusivePtrLegacyBehavior(); + } + + /// @private [doxygen private] + const void* internalToPointer() const { + TORCH_INTERNAL_ASSERT( + isPtrType(), "Can only call internalToPointer() for pointer types"); + if (isTensor()) { + return payload.as_tensor.unsafeGetTensorImpl(); + } else { + return payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton() + ? payload.u.as_intrusive_ptr + : nullptr; + } + } + + template + TypePtr type() const; + + // Detect aliased tensors. + struct HashAliasedIValue { + size_t hashTensor(const at::Tensor& ten) const { + if (ten.is_sparse()) { + // COO sparse tensors have a "values" tensor and an "indices" tensor + // so this will detect overlap of sparse tensors that share a values + // tensor, but not sparse tensors that share an indices tensor. + return hashTensor(ten._values()); + } else if (ten.is_sparse_csr()) { + // COO sparse tensors have a "values" tensor and an "indices" tensor + // so this will detect overlap of sparse tensors that share a values + // tensor, but not sparse tensors that share an indices tensor. + return hashTensor(ten.values()); + } else if (!ten.has_storage()) { + // Opaque tensors such as the ones constructed by the MKL-DNN backend + // don't have storage so we just use their TensorImpls. + // TODO: Find way to expose alias info for opaque tensors. + return reinterpret_cast(ten.unsafeGetTensorImpl()); + } else { + return reinterpret_cast(ten.storage().unsafeGetStorageImpl()); + } + } + size_t operator()(const IValue& val) const { + if (val.isTensor()) { + return hashTensor(val.toTensor()); + } + // If it is not a Tensor, then two mutable IValues alias each other only + // if they are the same pointer. 
+ return val.payload.u.as_int; + } + }; + + struct CompAliasedIValues { + bool operator()(const IValue& lhs, const IValue& rhs) const { + return lhs.isAliasOf(rhs); + } + }; + + using HashAliasedIValues = + std::unordered_set; + using HashAliasedIValueMap = + std::unordered_map; + + // Chechs if this and rhs has a subvalues in common. + // [t1,t2] and [t2, t3] returns true. + bool overlaps(const IValue& rhs) const; + + // Inserts all subvalues of this in subValues. + void getSubValues(HashAliasedIValues& subValues) const; + + // Apply visitor to every subvalue. + // TODO: There are several places that recurse over IValue. This is fragile. + // This visitor should be used to recurse over ivalues. + void visit(const std::function& visitor) const; + IValue deepcopy(c10::optional device = c10::nullopt) const; + IValue deepcopy( + HashAliasedIValueMap& memo, + c10::optional device = c10::nullopt) const; + + private: + static c10::intrusive_ptr_target* null_to_undefined_tensor( + c10::intrusive_ptr_target* p) { + return p ? p + : static_cast( + c10::UndefinedTensorImpl::singleton()); + } + + static bool ptrEqual(const IValue& lhs, const IValue& rhs); + // NOTE: IValue tags are intentionally private. In the future we may encode + // this value different (e.g. using NaN boxing), and this would make it more + // costly to determine the tag for all types vs just determining if something + // is a particular type. Instead we want clients to use the `isX` methods when + // possible. If for perf. reasons you really, absolutely, must have a jump + // table, then we can revisit this. + enum class Tag : uint32_t { +#define DEFINE_TAG(x) x, + TORCH_FORALL_TAGS(DEFINE_TAG) +#undef DEFINE_TAG + }; + +#define COUNT_TAG(x) 1 + + static constexpr auto kNumTags = TORCH_FORALL_TAGS(COUNT_TAG) 0; +#undef COUNT_TAG + + template < + class T, + class NullType = c10::detail::intrusive_target_default_null_type> + c10::intrusive_ptr moveToIntrusivePtr(); + template < + typename T, + class NullType = c10::detail::intrusive_target_default_null_type> + c10::intrusive_ptr toIntrusivePtr() const; + + void destroy() { + // We carefully construct this call to both 1) avoid UB by using + // the "wrong" one of as_tensor and as_intrusive_ptr and 2) enable + // the compiler to generate the same code for each case. It is + // surprisingly difficult to get this right. + if (isTensor() || isIntrusivePtr()) { + c10::intrusive_ptr_target* p = isTensor() + ? payload.as_tensor.unsafeGetTensorImpl() + : payload.u.as_intrusive_ptr; + c10::intrusive_ptr:: + reclaim(p); + // No need to make this destructor call! + // payload.as_tensor.~Tensor(); + } + } + + C10_ALWAYS_INLINE void moveFrom(IValue&& rhs) noexcept { + if (rhs.isTensor()) { + new (&payload.as_tensor) at::Tensor(std::move(rhs.payload.as_tensor)); + // As far as I can tell, omitting the usual explicit destructor call + // is not UB in and of itself, and it's a slight perf win. The + // destructor is a no-op, because the moved-from Tensor is + // effectively an intrusive_ptr in the null state, so we don't need + // the behavior for correctness reasons either. Leaving this + // explanatory comment, including commented-out destructor call, to + // make this abundantly clear. 
+ // + // rhs.payload.as_tensor.~Tensor(); + } else { + payload.u = rhs.payload.u; + } + tag = rhs.tag; + rhs.clearToNone(); + } + + void clearToNone() noexcept { + payload.u.as_int = 0; + tag = Tag::None; + } + + private: + // This is the source of truth for isIntrusivePtr; edit results here + // as needed and isIntrusivePtr will pick them up. + // NOLINTBEGIN(bugprone-branch-clone) + static constexpr bool isIntrusivePtrConstexpr(Tag tag) { + switch (tag) { + case Tag::None: + return false; + case Tag::Tensor: + return false; + case Tag::Storage: + return true; + case Tag::Generator: + return true; + case Tag::Double: + return false; + case Tag::ComplexDouble: + return true; + case Tag::Int: + return false; + case Tag::SymInt: + return true; + case Tag::SymFloat: + return true; + case Tag::SymBool: + return true; + case Tag::Bool: + return false; + case Tag::Tuple: + return true; + case Tag::String: + return true; + case Tag::Blob: + return true; + case Tag::GenericList: + return true; + case Tag::GenericDict: + return true; + case Tag::Future: + return true; + case Tag::Await: + return true; + case Tag::Device: + return false; + case Tag::Stream: + return true; + case Tag::Object: + return true; + case Tag::PyObject: + return true; + case Tag::Uninitialized: + return false; + case Tag::Capsule: + return true; + case Tag::RRef: + return true; + case Tag::Quantizer: + return true; + case Tag::Enum: + return true; + } + return false; + } + // NOLINTEND(bugprone-branch-clone) + + public: + // Don't edit this just to add results for new tags; edit + // isIntrusivePtrConstexpr above. + bool isIntrusivePtr() const { + // Implementation NOTE: the switch in isIntrusivePtrConstexpr + // above is the previous production implementation of this + // function. We observed that, at least on x86_64, the generated + // instruction sequence was a similar bit vector test to what we + // have manually implemented below, except that there was an extra + // "bounds check" branch confirming, essentially, that `tag < + // kNumTags` and providing a consistent result in that case. We + // don't care about the result if tag is out of bounds, so we'd + // like to eliminate that comparison and branch; manually + // implementing this function as a bit test is the simplest way I + // could find to accomplish that elimination. + static constexpr uint32_t kTruthTableBitVector = +#define TRUTH_TABLE_ENTRY(tag) \ + (uint32_t(isIntrusivePtrConstexpr(Tag::tag)) << uint32_t(Tag::tag)) | + TORCH_FORALL_TAGS(TRUTH_TABLE_ENTRY) +#undef TRUTH_TABLE_ENTRY + 0; + + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + static_cast(tag) < kNumTags, + "unexpected tag ", + static_cast(tag)); + return kTruthTableBitVector & (1 << (uint32_t(tag) % 32)); + } + + // Storage and Generator were treated specially when + // is_intrusive_ptr was stored as explicit state. This getter + // preserves the old behavior for use with WeakIValue for now. + bool isIntrusivePtrLegacyBehavior() const { + if (tag == Tag::Storage || tag == Tag::Generator) { + return payload.u.as_intrusive_ptr != + c10::UndefinedTensorImpl::singleton(); + } else { + return isIntrusivePtr(); + } + } + + union Payload { + // [TriviallyCopyablePayload] + // We use a nested union here so that we can make the copy easy + // and efficient in the non-tensor (i.e., trivially copyable) + // case. Specifically, we do not have to do a switch-on-tag to + // figure out which union member to assign; we can just use + // TriviallyCopyablePayload::operator=. 
+ union TriviallyCopyablePayload { + TriviallyCopyablePayload() : as_int(0) {} + int64_t as_int; + double as_double; + bool as_bool; + // Invariant: never nullptr; null state is represented as + // c10::UndefinedTensorImpl::singleton() for consistency of + // representation with Tensor. + c10::intrusive_ptr_target* as_intrusive_ptr; + struct { + c10::DeviceType type; + DeviceIndex index; + } as_device; + } u; + at::Tensor as_tensor; + Payload() : u() {} + ~Payload() {} + }; + + IValue(const Payload& p, Tag t) : tag(t) { + if (isTensor()) { + new (&payload.as_tensor) at::Tensor(p.as_tensor); + } else { + payload.u = p.u; + } + } + + template + struct TagType {}; + + friend MaybeOwnedTraits; + + Payload payload; + Tag tag{IValue::Tag::None}; + friend struct WeakIValue; +}; + +struct TORCH_API WeakIValue final { + WeakIValue() = default; + + WeakIValue(const WeakIValue& rhs) + : payload(rhs.payload), + tag(rhs.tag), + is_intrusive_ptr(rhs.is_intrusive_ptr) { + if (is_intrusive_ptr && + payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) { + c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr); + } + } + WeakIValue(const IValue& rhs) + : tag(rhs.tag), is_intrusive_ptr(rhs.isIntrusivePtrLegacyBehavior()) { + if (rhs.isTensor()) { + payload.as_intrusive_ptr = rhs.unsafeToTensorImpl(); + is_intrusive_ptr = true; + } else { + payload = rhs.payload.u; + } + if (is_intrusive_ptr) { + if (payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) { + c10::raw::weak_intrusive_ptr::incref(payload.as_intrusive_ptr); + } + } + } + WeakIValue(WeakIValue&& rhs) noexcept : WeakIValue() { + swap(rhs); + } + ~WeakIValue() { + if (is_intrusive_ptr && + payload.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton()) { + c10::raw::weak_intrusive_ptr::decref(payload.as_intrusive_ptr); + } + } + WeakIValue& operator=(WeakIValue&& rhs) & noexcept { + WeakIValue(std::move(rhs)).swap(*this); // this also sets rhs to None + return *this; + } + WeakIValue& operator=(WeakIValue const& rhs) & { + WeakIValue(rhs).swap(*this); + return *this; + } + void swap(WeakIValue& rhs) noexcept { + std::swap(payload, rhs.payload); + std::swap(is_intrusive_ptr, rhs.is_intrusive_ptr); + std::swap(tag, rhs.tag); + } + + bool isSameIdentity(const WeakIValue& rhs) const { + return payload.as_int == rhs.payload.as_int && tag == rhs.tag && + is_intrusive_ptr == rhs.is_intrusive_ptr; + } + + IValue lock() const { + if (!is_intrusive_ptr) { + IValue::Payload newPayload; + newPayload.u = payload; + return IValue(newPayload, tag); + } + if (IValue::Tag::Tensor == tag) { + auto temp = + c10::weak_intrusive_ptr:: + reclaim(static_cast(payload.as_intrusive_ptr)); + c10::intrusive_ptr ip( + temp.lock()); + temp.release(); + if (!ip) { + return IValue(); + } else { + return IValue(at::Tensor(std::move(ip))); + } + } else { + auto temp = c10::weak_intrusive_ptr::reclaim( + payload.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton() + ? 
nullptr + : payload.as_intrusive_ptr); + IValue::Payload pl; + pl.u.as_intrusive_ptr = temp.lock().release(); + temp.release(); + if (!pl.u.as_intrusive_ptr) { + return IValue(); + } else { + return IValue(pl, tag); + } + } + } + + size_t use_count() const noexcept { + if (!is_intrusive_ptr) { + return 1; + } + auto temp = c10::weak_intrusive_ptr< + c10::intrusive_ptr_target, + c10::UndefinedTensorImpl>::reclaim(payload.as_intrusive_ptr); + size_t result = temp.use_count(); + temp.release(); + return result; + } + + size_t weak_use_count() const noexcept { + if (!is_intrusive_ptr) { + return 1; + } + auto temp = c10::weak_intrusive_ptr< + c10::intrusive_ptr_target, + c10::UndefinedTensorImpl>::reclaim(payload.as_intrusive_ptr); + size_t result = temp.weak_use_count(); + temp.release(); + return result; + } + size_t hash() const { + return payload.as_int; + } + + private: + using Payload = IValue::Payload::TriviallyCopyablePayload; + Payload payload; + IValue::Tag tag{IValue::Tag::None}; + bool is_intrusive_ptr{false}; +}; + +// An owning pointer to a type. When the type is class type, it requires a pair +// of shared_ptrs to the class type and its owning CU, so that the class type is +// guaranteed to stay alive as long as we hold this object. +struct TORCH_API StrongTypePtr { + StrongTypePtr(std::shared_ptr cu, TypePtr type); + + std::shared_ptr cu_; + TypePtr type_; +}; + +// [Constant Object Weak CompilationUnit Reference] +// A non owning pointer to a type. When a class get inserted as a constant +// into a graph, if we used a strong pointer we would have a circular reference +// from Object -> CompilationUnit and CompilationUnit -> Graph (which owns the +// Constant Object) +struct TORCH_API WeakTypePtr { + WeakTypePtr(std::weak_ptr cu, TypePtr type); + + std::weak_ptr cu_; + TypePtr type_; +}; + +// internal build errors with std::variant :/ +struct WeakOrStrongCompilationUnit { + explicit WeakOrStrongCompilationUnit( + std::shared_ptr shared_cu) + : strong_ptr_(std::move(shared_cu)), weak_ptr_(c10::nullopt) {} + + explicit WeakOrStrongCompilationUnit( + std::weak_ptr weak_cu) + : strong_ptr_(c10::nullopt), weak_ptr_(std::move(weak_cu)) {} + + std::shared_ptr getStrongRefOrThrow() const { + TORCH_INTERNAL_ASSERT(strong_ptr_ != c10::nullopt); + return *strong_ptr_; + } + + std::weak_ptr getWeakRefOrThrow() const { + TORCH_INTERNAL_ASSERT(weak_ptr_ != c10::nullopt); + return *weak_ptr_; + } + + bool holdingStrongRef() const { + return strong_ptr_ != c10::nullopt; + } + + bool holdingEmptyStrongRef() const { + return holdingStrongRef() && *strong_ptr_ == nullptr; + } + + c10::optional> strong_ptr_; + c10::optional> weak_ptr_; +}; + +// An Object will hold a non-owning Compilation Unit reference if it is a +// Constant in the graph and a Owning reference otherwise +struct TORCH_API WeakOrStrongTypePtr { + explicit WeakOrStrongTypePtr(WeakTypePtr weak) + : cu_(WeakOrStrongCompilationUnit(std::move(weak.cu_))), + type_(std::move(weak.type_)) {} + explicit WeakOrStrongTypePtr(StrongTypePtr strong) + : cu_(WeakOrStrongCompilationUnit(std::move(strong.cu_))), + type_(std::move(strong.type_)) {} + explicit WeakOrStrongTypePtr(WeakOrStrongCompilationUnit cu, TypePtr type) + : cu_(std::move(cu)), type_(std::move(type)) {} + WeakTypePtr asWeakTypePtr() const; + + WeakOrStrongCompilationUnit cu_; + TypePtr type_; + + bool holds_strong_ref() const { + return cu_.holdingStrongRef(); + } + + bool holds_empty_strong_ref() const { + return cu_.holdingEmptyStrongRef(); + } +}; + +} // namespace c10 + 
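+// Illustrative usage sketch of the types declared above (assumes a libtorch
+// include path and at::ones from ATen; shown only as an example, not as a
+// normative part of the API):
+//
+//   c10::IValue iv(at::ones({2, 2}));   // Tensor-tagged IValue
+//   c10::WeakIValue weak(iv);           // non-owning, only bumps the weak count
+//   c10::IValue strong = weak.lock();   // an IValue again, or None if expired
+//   TORCH_INTERNAL_ASSERT(strong.isTensor());
+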
+#include // IWYU pragma: keep diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_inl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_inl.h new file mode 100644 index 0000000000000000000000000000000000000000..d59d33219d82e6a85ef73a91ba9c22da00e9061d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_inl.h @@ -0,0 +1,2512 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +struct Function; +struct CompilationUnit; +} // namespace jit +TORCH_API bool isCustomClass(const c10::IValue& v); +} // namespace torch +namespace c10 { +struct IValue; +struct ClassType; +struct TupleType; +struct EnumType; +struct InferredType; + +// For custom class __init__ registration, we need to pass in a function +// that looks like this: [](IValue x, args...) + +// However, make_boxed_from_unboxed_functor.h automatically sets the input types +// of the function by introspecting the types of the functor (which is IValue in +// this case). However, we need the type it binds to be Foo. + +// Instead, we pass in a lambda [](ivalue_holder x, args...) from +// which getTypePtr can recover the original class pointer. + +template +struct tagged_capsule { + IValue ivalue; +}; + +template +c10::intrusive_ptr IValue::moveToIntrusivePtr() { + auto t = c10::intrusive_ptr::reclaim( + payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton() + ? NullType::singleton() + : static_cast(payload.u.as_intrusive_ptr)); + clearToNone(); + return t; +} +template +c10::intrusive_ptr IValue::toIntrusivePtr() const { + if (payload.u.as_intrusive_ptr == c10::UndefinedTensorImpl::singleton()) { + return c10::intrusive_ptr(); + } + c10::raw::intrusive_ptr::incref(payload.u.as_intrusive_ptr); + return c10::intrusive_ptr::reclaim( + static_cast(payload.u.as_intrusive_ptr)); +} + +template +intrusive_ptr static_intrusive_pointer_cast(intrusive_ptr r) { + return intrusive_ptr::reclaim(static_cast(r.release())); +} + +template +intrusive_ptr dynamic_intrusive_pointer_cast(intrusive_ptr r) { + return intrusive_ptr::reclaim(dynamic_cast(r.release())); +} + +inline c10::intrusive_ptr IValue::toFuture() && { + AT_ASSERT(isFuture(), "Expected Future but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toFuture() const& { + AT_ASSERT(isFuture(), "Expected Future but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toAwait() && { + AT_ASSERT(isAwait(), "Expected Await but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toAwait() const& { + AT_ASSERT(isAwait(), "Expected Await but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toRRef() && { + AT_ASSERT(isRRef(), "Expected RRef but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toRRef() const& { + AT_ASSERT(isRRef(), "Expected RRef but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toQuantizer() && { + AT_ASSERT(isQuantizer(), "Expected Quantizer but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toQuantizer() const& { + AT_ASSERT(isQuantizer(), "Expected Quantizer but got ", 
tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toString() && { + AT_ASSERT(isString(), "Expected String but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toString() const& { + AT_ASSERT(isString(), "Expected String but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toObject() && { + AT_ASSERT(isObject(), "Expected Object but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toObject() const& { + AT_ASSERT(isObject(), "Expected Object but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue:: + toPyObjectHolder() && { + TORCH_INTERNAL_ASSERT(isPyObject(), "Expected PyObject but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toPyObjectHolder() + const& { + TORCH_INTERNAL_ASSERT(isPyObject(), "Expected PyObject but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toEnumHolder() && { + TORCH_INTERNAL_ASSERT(isEnum(), "Expected Enum but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toEnumHolder() const& { + TORCH_INTERNAL_ASSERT(isEnum(), "Expected Enum but got ", tagKind()); + return toIntrusivePtr(); +} +inline c10::complex IValue::toComplexDouble() const { + TORCH_INTERNAL_ASSERT(isComplexDouble(), "Expected ComplexDouble but got ", tagKind()); + auto ptr = toIntrusivePtr(); + return (*ptr).val; +} +inline at::Tensor IValue::toTensor() && { + if (C10_UNLIKELY(!isTensor())) { + reportToTensorTypeError(); + } + auto result = std::move(payload.as_tensor); + // As far as I can tell, omitting the usual explicit destructor call + // is not UB in and of itself, and it's a slight perf win. The + // destructor is a no-op, because the moved-from Tensor is + // effectively an intrusive_ptr in the null state, so we don't need + // the behavior for correctness reasons either. Leaving this + // explanatory comment, including commented-out destructor call, to + // make this abundantly clear. 
+ // + // payload.as_tensor.~Tensor(); + clearToNone(); + return result; +} +inline at::Tensor& IValue::toTensor() & { + if (C10_UNLIKELY(!isTensor())) { + reportToTensorTypeError(); + } + return payload.as_tensor; +} +inline const at::Tensor& IValue::toTensor() const& { + if (C10_UNLIKELY(!isTensor())) { + reportToTensorTypeError(); + } + return payload.as_tensor; +} +inline c10::Storage IValue::toStorage() && { + AT_ASSERT(isStorage(), "Expected Storage but got ", tagKind()); + return c10::Storage( + moveToIntrusivePtr()); +} +inline c10::Storage IValue::toStorage() const& { + AT_ASSERT(isStorage(), "Expected Storage but got ", tagKind()); + return c10::Storage(toIntrusivePtr()); +} +inline c10::Stream IValue::toStream() && { + AT_ASSERT(isStream(), "Expected Stream but got ", tagKind()); + auto ptr = toIntrusivePtr(); + return c10::Stream::unpack3((*ptr).val.stream_id, + (*ptr).val.device_index, + (*ptr).val.device_type); +} +inline c10::Stream IValue::toStream() const& { + AT_ASSERT(isStream(), "Expected Stream but got ", tagKind()); + auto ptr = toIntrusivePtr(); + return c10::Stream::unpack3((*ptr).val.stream_id, + (*ptr).val.device_index, + (*ptr).val.device_type); +} +inline c10::intrusive_ptr IValue::toBlob() && { + AT_ASSERT(isBlob(), "Expected Blob but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toBlob() const& { + AT_ASSERT(isBlob(), "Expected Blob but got ", tagKind()); + return toIntrusivePtr(); + ; +} +inline c10::intrusive_ptr IValue::toCapsule() && { + TORCH_INTERNAL_ASSERT(isCapsule()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toCapsule() const& { + TORCH_INTERNAL_ASSERT(isCapsule()); + return toIntrusivePtr(); +} +inline at::Generator IValue::toGenerator() && { + AT_ASSERT(isGenerator(), "Expected Generator but got ", tagKind()); + return at::Generator(moveToIntrusivePtr()); +} +inline at::Generator IValue::toGenerator() const& { + AT_ASSERT(isGenerator(), "Expected Generator but got ", tagKind()); + return at::Generator(toIntrusivePtr()); +} +inline c10::SymInt IValue::toSymInt() && { + AT_ASSERT(isSymInt() || isInt(), "Expected SymInt or int but got ", tagKind()); + if (isSymInt()) { + return c10::SymInt(moveToIntrusivePtr()); + } else { + return c10::SymInt(payload.u.as_int); + } +} +inline c10::SymInt IValue::toSymInt() const& { + AT_ASSERT(isSymInt() || isInt(), "Expected SymInt or int but got ", tagKind()); + if (isSymInt()) { + return c10::SymInt(toIntrusivePtr()); + } else { + return c10::SymInt(payload.u.as_int); + } +} +inline c10::SymFloat IValue::toSymFloat() && { + AT_ASSERT(isSymFloat() || isDouble(), "Expected SymFloat or double but got ", tagKind()); + if (isSymFloat()) { + return c10::SymFloat(moveToIntrusivePtr()); + } else { + return c10::SymFloat(payload.u.as_double); + } +} +inline c10::SymFloat IValue::toSymFloat() const& { + AT_ASSERT(isSymFloat() || isDouble(), "Expected SymFloat or double but got ", tagKind()); + if (isSymFloat()) { + return c10::SymFloat(toIntrusivePtr()); + } else { + return c10::SymFloat(payload.u.as_double); + } +} +inline c10::SymBool IValue::toSymBool() && { + AT_ASSERT(isSymBool() || isBool(), "Expected SymBool or boolean but got ", tagKind()); + if (isSymBool()) { + return c10::SymBool(moveToIntrusivePtr()); + } else { + return c10::SymBool(payload.u.as_bool); + } +} + +inline c10::SymBool IValue::toSymBool() const& { + AT_ASSERT(isSymBool() || isBool(), "Expected SymBool or boolean but got ", tagKind()); + if (isSymBool()) { + return 
c10::SymBool(toIntrusivePtr()); + } else { + return c10::SymBool(payload.u.as_bool); + } +} + +namespace ivalue { + +void TORCH_API +checkCustomClassType(const ClassType* expected_type, const Type* actual_type); + +template +using Shared = c10::intrusive_ptr; + +// string +struct TORCH_API ConstantString final : c10::intrusive_ptr_target { + private: + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const std::string str_; + + public: + ConstantString(std::string str) : str_(std::move(str)) {} + ConstantString(c10::string_view str) : str_(std::string(str)) {} + static c10::intrusive_ptr create(std::string str_); + static c10::intrusive_ptr create(c10::string_view str_); + static c10::intrusive_ptr create(const char* str_); + + const std::string& string() const { + return str_; + } + c10::string_view string_view() const { + return str_; + } + + operator const std::string&() const { + return string(); + } + TORCH_API friend std::ostream& operator<<( + std::ostream& out, + const ConstantString& v); +}; + +struct Future; + +struct TORCH_API TupleElements { + private: + size_t inlineSize_; + // We represent TupleElements this way to save doing a heap + // allocation in the common (at least for unpickling) case where we + // have only 3 elements. We have our own union instead of + // c10::SmallVector because c10::SmallVector always + // stores the begin/end/capacity pointers, which would be a waste of + // space in our use case. + union { + std::vector elementsVector_; + // Don't want to declare a std::array because the convenient + // iteration and size members are a footgun in this case -- the + // actual size of the array may be smaller than 3! + // NOLINTNEXTLINE(*c-arrays*) + IValue elementsInline_[3]; + }; + + void destroyInline() { + for (const auto ii : c10::irange(inlineSize_)) { + elementsInline_[ii].~IValue(); + } + } + public: + + using iterator = IValue*; + using const_iterator = const IValue*; + + TupleElements() : inlineSize_(0) { + new (&elementsVector_) std::vector(); + } + + explicit TupleElements(std::vector elements) + : inlineSize_(0), elementsVector_(std::move(elements)) {} + + explicit TupleElements(c10::ArrayRef elements) + : inlineSize_(elements.size() <= 3 ? 
elements.size() : 0) { + switch (inlineSize_) { + case 3: + new (&elementsInline_[2]) IValue(elements[2]); + C10_FALLTHROUGH; + case 2: + new (&elementsInline_[1]) IValue(elements[1]); + C10_FALLTHROUGH; + case 1: + new (&elementsInline_[0]) IValue(elements[0]); + break; + case 0: + new (&elementsVector_) std::vector(elements.begin(), elements.end()); + break; + } + } + + explicit TupleElements(IValue&& e1) + : inlineSize_(1) { + new (&elementsInline_[0]) IValue(std::move(e1)); + } + + explicit TupleElements(IValue&& e1, IValue&& e2) + : inlineSize_(2) { + new (&elementsInline_[0]) IValue(std::move(e1)); + new (&elementsInline_[1]) IValue(std::move(e2)); + } + + explicit TupleElements(IValue&& e1, IValue&& e2, IValue&& e3) + : inlineSize_(3) { + new (&elementsInline_[0]) IValue(std::move(e1)); + new (&elementsInline_[1]) IValue(std::move(e2)); + new (&elementsInline_[2]) IValue(std::move(e3)); + } + + ~TupleElements() { + if (inlineSize_) { + destroyInline(); + } else { + elementsVector_.~vector(); + } + } + + // It would be nice to make this noncopyable to prevent people from + // writing code like `auto output = + // forward(...).toTupleRef().elements()` (which does refcount bumps on + // each element, unlike the more efficient but verbose + // ``` + // auto outputIntrusivePtr = forward(...).toTuple(); + // const auto& output = outputIntrusivePtr->elements(); + // ``` + // ), but there is simply an overwhelming amount of code that does + // it the inefficient way. + // See also operator std::vector below. + TupleElements(const TupleElements& rhs) + : inlineSize_(rhs.inlineSize_) { + if (rhs.inlineSize_) { + for (const auto ii : c10::irange(inlineSize_)) { + new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]); + } + } else { + new (&elementsVector_) std::vector(rhs.elementsVector_); + } + } + + TupleElements& operator=(const TupleElements& rhs) { + if (inlineSize_) { + if (rhs.inlineSize_) { + for (const auto ii : c10::irange(std::min(inlineSize_, rhs.inlineSize_))) { + elementsInline_[ii] = rhs.elementsInline_[ii]; + } + if (rhs.inlineSize_ > inlineSize_) { + for (const auto ii : c10::irange(inlineSize_, rhs.inlineSize_)) { + new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]); + } + } else { + for (const auto ii : c10::irange(rhs.inlineSize_, inlineSize_)) { + elementsInline_[ii].~IValue(); + } + } + } else { + destroyInline(); + new (&elementsVector_) std::vector(rhs.elementsVector_); + } + } else { + if (rhs.inlineSize_) { + elementsVector_.~vector(); + for (const auto ii : c10::irange(rhs.inlineSize_)) { + new (&elementsInline_[ii]) IValue(rhs.elementsInline_[ii]); + } + } else { + elementsVector_ = rhs.elementsVector_; + } + } + inlineSize_ = rhs.inlineSize_; + return *this; + } + + TupleElements(TupleElements&& rhs) noexcept + : inlineSize_(rhs.inlineSize_) { + if (inlineSize_) { + for (const auto ii : c10::irange(inlineSize_)) { + new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii])); + } + } else { + new (&elementsVector_) std::vector(std::move(rhs.elementsVector_)); + } + } + + TupleElements& operator=(TupleElements&& rhs) noexcept { + if (inlineSize_) { + if (rhs.inlineSize_) { + for (const auto ii : c10::irange(std::min(inlineSize_, rhs.inlineSize_))) { + elementsInline_[ii] = std::move(rhs.elementsInline_[ii]); + } + if (rhs.inlineSize_ > inlineSize_) { + for (const auto ii : c10::irange(inlineSize_, rhs.inlineSize_)) { + new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii])); + } + } else { + for (const auto ii : 
c10::irange(rhs.inlineSize_, inlineSize_)) { + elementsInline_[ii].~IValue(); + } + } + } else { + destroyInline(); + new (&elementsVector_) std::vector(std::move(rhs.elementsVector_)); + } + } else { + if (rhs.inlineSize_) { + elementsVector_.~vector(); + for (const auto ii : c10::irange(rhs.inlineSize_)) { + new (&elementsInline_[ii]) IValue(std::move(rhs.elementsInline_[ii])); + } + } else { + elementsVector_ = std::move(rhs.elementsVector_); + } + } + inlineSize_ = rhs.inlineSize_; + return *this; + } + + C10_NODISCARD c10::ArrayRef asArrayRef() const { + if (inlineSize_) { + return c10::ArrayRef(elementsInline_, inlineSize_); + } else { + return elementsVector_; + } + } + + // Mimic implicit conversion from std::vector to ArrayRef. + operator c10::ArrayRef() const { + return asArrayRef(); + } + + static size_t hash(const TupleElements& v) { + return c10::hash>()(v.asArrayRef()); + } + + void setContents(std::vector&& contents) { + if (inlineSize_) { + destroyInline(); + new (&elementsVector_) std::vector(std::move(contents)); + inlineSize_ = 0; + } else { + elementsVector_ = std::move(contents); + } + } + + C10_NODISCARD bool empty() const { + return inlineSize_ ? false : elementsVector_.empty(); + } + + C10_NODISCARD size_t size() const { + return inlineSize_ ? inlineSize_ : elementsVector_.size(); + } + + C10_NODISCARD IValue& operator[](size_t idx) { + if (inlineSize_) { + return elementsInline_[idx]; + } else { + return elementsVector_[idx]; + } + } + + C10_NODISCARD const IValue& operator[](size_t idx) const { + if (inlineSize_) { + return elementsInline_[idx]; + } else { + return elementsVector_[idx]; + } + } + + C10_NODISCARD IValue& at(size_t idx) { + if (inlineSize_) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(inlineSize_ <= 3); + TORCH_CHECK(idx < inlineSize_, "TupleElements: invalid index Index = ", idx, "; Length = ", inlineSize_); + return elementsInline_[idx]; + } else { + return elementsVector_.at(idx); + } + } + + C10_NODISCARD const IValue& at(size_t idx) const { + if (inlineSize_) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(inlineSize_ <= 3); + TORCH_CHECK(idx < inlineSize_, "TupleElements: invalid index Index = ", idx, "; Length = ", inlineSize_); + return elementsInline_[idx]; + } else { + TORCH_CHECK(idx < elementsVector_.size(), "TupleElements: invalid index Index = ", idx, "; Length = ", elementsVector_.size()); + return elementsVector_.at(idx); + } + } + + C10_NODISCARD iterator begin() { + if (inlineSize_) { + return elementsInline_; + } else { + return elementsVector_.data(); + } + } + + C10_NODISCARD iterator end() { + if (inlineSize_) { + return elementsInline_ + inlineSize_; + } else { + return elementsVector_.data() + elementsVector_.size(); + } + } + + C10_NODISCARD const_iterator begin() const { + if (inlineSize_) { + return elementsInline_; + } else { + return elementsVector_.data(); + } + } + + C10_NODISCARD const_iterator end() const { + if (inlineSize_) { + return elementsInline_ + inlineSize_; + } else { + return elementsVector_.data() + elementsVector_.size(); + } + } + + C10_NODISCARD const_iterator cbegin() const { + return begin(); + } + + C10_NODISCARD const_iterator cend() const { + return end(); + } + + C10_NODISCARD std::vector vec() const & { + return asArrayRef().vec(); + } + + C10_NODISCARD IValue& back() { + return *(end() - 1); + } + + C10_NODISCARD const IValue& back() const { + return *(end() - 1); + } + + C10_NODISCARD std::vector vec() && { + std::vector result; + result.reserve(size()); + for (auto&& iv : *this) { + 
result.push_back(std::move(iv)); + } + return result; + } + + // More compatibility shims for the overwhelming amount of code that + // likes to copy tuple elements into a vector; see comment above the + // copy constructor. + operator std::vector() const & { + return vec(); + } + + operator std::vector() && { + return vec(); + } +}; + +template +struct TupleTypeFactory {}; + +template <> +struct TORCH_API TupleTypeFactory { + static TupleTypePtr create(std::vector types) { + return TupleType::create(std::move(types)); + } + static TupleTypePtr fallback(const Type& type); +}; + +template <> +struct TORCH_API TupleTypeFactory { + static DynamicTypePtr create(const std::vector& elemTypes); + static DynamicTypePtr fallback(const Type&); +}; + +struct TORCH_API Tuple : c10::intrusive_ptr_target { + private: + TupleElements elements_; + mutable c10::TypePtr type_; // lazily computed for unnamed tuples + + public: + // named tuples have additional type information, so we + // directly create them tagged + static c10::intrusive_ptr createNamed( + std::vector elements_, + c10::TypePtr type_) { + return c10::make_intrusive(std::move(elements_), std::move(type_)); + } + + static c10::intrusive_ptr createNamed( + TupleElements elements_, + std::shared_ptr type_) { + return c10::make_intrusive(std::move(elements_), std::move(type_)); + } + + static c10::intrusive_ptr createNamed( + std::initializer_list elements_, + std::shared_ptr type_) { + return createNamed(TupleElements(c10::ArrayRef(elements_)), std::move(type_)); + } + + // MSVC apparently can't disambiguate the other two overloads of + // create when passed an initializer_list without this. + static c10::intrusive_ptr create(std::initializer_list elements_) { + return create(c10::ArrayRef(elements_)); + } + + static c10::intrusive_ptr create(std::vector elements_) { + return c10::make_intrusive(std::move(elements_)); + } + + static c10::intrusive_ptr create(TupleElements elements_) { + return c10::make_intrusive(std::move(elements_)); + } + + static c10::intrusive_ptr create(c10::ArrayRef elements_) { + return create(TupleElements(elements_)); + } + + static c10::intrusive_ptr create(IValue e1) { + return c10::make_intrusive(std::move(e1)); + } + + static c10::intrusive_ptr create(IValue e1, IValue e2) { + return c10::make_intrusive(std::move(e1), std::move(e2)); + } + + static c10::intrusive_ptr create(IValue e1, IValue e2, IValue e3) { + return c10::make_intrusive(std::move(e1), std::move(e2), std::move(e3)); + } + + private: + // Workaround inability to use `>` operator in template argument list. + template + static constexpr bool hasMoreThanThreeArgs() { + return sizeof...(Args) > 3; + } + + public: + template + static c10::intrusive_ptr create(Args&&... elements_) { + switch (sizeof...(Args)) { + case 1: + case 2: + case 3: + return create(IValue(std::forward(elements_))...); + default: + return create( + std::vector{IValue(std::forward(elements_))...}); + } + } + + // Again, it would be nice to make this noncopyable, but there's a + // lot of extant code that copies Tuples. 
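+  // For reference, the cheap pattern keeps the intrusive_ptr alive and borrows
+  // the elements (illustrative sketch only):
+  //   auto tup = ivalue::Tuple::create(1, 2.5, true);
+  //   const auto& elems = tup->elements();  // borrow; no per-element copies
+  //   int64_t first = elems[0].toInt();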
+ // Tuple(const Tuple& rhs) = delete; + + const TupleElements& elements() const& { + return elements_; + } + + TupleElements elements() && { + return std::move(elements_); + } + + void setElements(std::vector&& elements) { + elements_.setContents(std::move(elements)); + } + + void setElements(TupleElements&& elements) { + elements_ = std::move(elements); + } + + void unsafeSetElement(size_t idx, const IValue& element) { + elements_[idx] = element; + } + + void unsafeSetElement(size_t idx, IValue&& element) { + elements_[idx] = std::move(element); + } + + size_t size() const { + return elements_.size(); + } + + template + std::shared_ptr type() const { + if (!type_) { + type_ = TupleTypeFactory::create(fmap(elements(), [&](const IValue& v) { + return v.type(); + })); + } + if (auto t = type_->cast()) { + return t; + } + return TupleTypeFactory::fallback(*type_); + } + + static size_t hash(const Tuple& t) { + return c10::get_hash(t.elements()); + } + + TORCH_API friend bool operator==( + const ivalue::Tuple& lhs, + const ivalue::Tuple& rhs); + + private: + // NOTE: If we try to avoid the overloads without + // `std::shared_ptr type` by defaulting it to nullptr, we + // end up having to call (part of) the shared_ptr destructor for + // `type` even though we should know statically it won't do + // anything. + explicit Tuple(std::vector elements) + : elements_(std::move(elements)){} + + explicit Tuple(std::vector elements, c10::TypePtr type) + : elements_(std::move(elements)), type_(std::move(type)) {} + + explicit Tuple(TupleElements&& elements) + : elements_(std::move(elements)) {} + + explicit Tuple(TupleElements&& elements, std::shared_ptr type) + : elements_(std::move(elements)), type_(std::move(type)) {} + + explicit Tuple(IValue&& e1) + : elements_(std::move(e1)) {} + + explicit Tuple(IValue&& e1, std::shared_ptr type) + : elements_(std::move(e1)), type_(std::move(type)) {} + + explicit Tuple(IValue&& e1, IValue&& e2) + : elements_(std::move(e1), std::move(e2)) {} + + explicit Tuple(IValue&& e1, IValue&& e2, std::shared_ptr type) + : elements_(std::move(e1), std::move(e2)), type_(std::move(type)) {} + + explicit Tuple(IValue&& e1, IValue&& e2, IValue&& e3) + : elements_(std::move(e1), std::move(e2), std::move(e3)) {} + + explicit Tuple(IValue&& e1, IValue&& e2, IValue&& e3, std::shared_ptr type) + : elements_(std::move(e1), std::move(e2), std::move(e3)), type_(std::move(type)) {} + + friend class c10::intrusive_ptr; +}; + +struct Object; +struct PyObjectHolder; +struct EnumHolder; +} // namespace ivalue + +// Future +struct C10_EXPORT ivalue::Future final : c10::intrusive_ptr_target { + private: + // Keep this private in order to force users to go through make_intrusive and + // thus prevent creating a Future that's not held by an intrusive_ptr. 
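+  // e.g. (illustrative sketch; a CPU-only future carrying an int):
+  //   auto fut = c10::make_intrusive<ivalue::Future>(IntType::get());
+  //   auto next = fut->then(
+  //       [](ivalue::Future& f) { return IValue(f.value().toInt() + 1); },
+  //       IntType::get());
+  //   fut->markCompleted(IValue(41));
+  //   next->wait();
+  //   TORCH_INTERNAL_ASSERT(next->value().toInt() == 42);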
+ explicit Future(TypePtr type, std::vector devices={}) + : type_(std::move(type)), + impl_(getTypeOfDevices(devices)), + devices_(sortAndDeduplicateDevices(impl_, std::move(devices))) {} + + friend c10::intrusive_ptr; + + struct FutureCallback { + std::function callback; + bool uses_future; // whether the Future& passed in is actually used + + template + FutureCallback(T callback, bool uses_future) + : callback(std::move(callback)), uses_future(uses_future) {} + }; + + public: + Future(const Future&) = delete; + Future(Future&&) = delete; + Future& operator=(const Future&) = delete; + Future& operator=(Future&&) = delete; + + struct TORCH_API FutureError final : public std::exception { + explicit FutureError(std::string&& error_msg_) + : error_msg(std::move(error_msg_)) {} + + FutureError() = default; + + const char* what() const noexcept override { + return error_msg.c_str(); + } + + std::string error_msg; + }; + + /** + * Wait on the future until it completes. + */ + void wait() { + std::unique_lock lock(mutex_); + finished_cv_.wait(lock, [&]() -> bool { return completed_; }); + synchronizeWithCurrentStreams(); + } + + /** + * Wait on the future until it completes and throw an + * exception if an error exists. + */ + void waitAndThrow() { + wait(); + + if (eptr_) { + std::rethrow_exception(eptr_); + } + } + + /** + * Explicitly mark the future as completed with the output value. Optionally, + * the storages for all tensors in IValue can be passed as well. The DataPtrs + * of these storages are used to synchronize CUDA streams. If storages isn't + * given we will attempt to extract it from the value, if we need to (this + * happens if a non-empty set of devices was given to the constructor). Thus + * one only needs to provide storages when 1) they cannot be extracted through + * IValue::getSubValues() or through pickling in case of Python object; or + * when 2) customized storage extraction is more efficient. + */ + using WeakStorage = c10::weak_intrusive_ptr; + void markCompleted( + IValue value, + c10::optional> storages = c10::nullopt) { + // Start by performing all steps that can throw, before setting any field. + // Do this before even acquiring the mutex, because extractStorages might + // acquire the GIL, which could lead to a lock inversion with our mutex. + // See https://github.com/pytorch/pytorch/issues/58239. + std::vector actualStorages; + std::vector usedDevices; + try { + // FIXME We should always extract DataPtrs, in order to catch the case of + // users using CUDA values but forgetting to set devices, which currently + // leads to a silent synchronization/correctness issue. However, as this + // might worsen perf in CPU-only cases, we should only do so after careful + // benchmarks. + if (impl_.type() != c10::kCPU) { + actualStorages = + storages.has_value() ? std::move(*storages) : extractStorages(value); + usedDevices = getDevicesOfStorages(impl_, actualStorages); + ensureIsSubsetOfDevices(usedDevices, devices_); + } + } catch (const std::exception&) { + setError(std::current_exception()); + return; + } + + std::unique_lock lock(mutex_); + TORCH_CHECK( + !completed(), + "Attempting to mark a completed Future as complete again. Note that " + "a Future can only be marked completed once."); + + // Only set value_ and completed_ flag once all checks and preparation steps + // have returned successfully to allow for proper error propagation. 
+ value_ = std::move(value); + completed_ = true; + + currentDevice_ = impl_.getDevice(); + storages_ = std::move(actualStorages); + for (const c10::Device& device : usedDevices) { + c10::Event event(impl_.type()); + event.record(impl_.getStream(device)); + events_.push_back(std::move(event)); + } + + std::vector cbs; + cbs.swap(callbacks_); + lock.unlock(); + + finished_cv_.notify_all(); + for (auto& callback : cbs) { + invokeCallback(std::move(callback.callback), callback.uses_future); + } + } + + void markCompleted() { + markCompleted(IValue{}); + } + + void setError(std::exception_ptr eptr) { + std::unique_lock lock(mutex_); + setErrorInternal(std::move(eptr), lock); + } + + void setErrorIfNeeded(std::exception_ptr eptr) { + std::unique_lock lock(mutex_); + if (completed_) { + // This should be rare and shouldn't cause log spew. Its important to + // log errors and thats why we have this log here. + std::string msg = c10::str( + "Skipping setting following error on the Future since " + "it is already marked completed (this is not necessarily " + "an error):\n", + tryRetrieveErrorMessageInternal(std::move(eptr))); + if (eptr_) { + msg += c10::str( + ", \nOriginal exception:\n", + tryRetrieveErrorMessageInternal(eptr_)); + } + LOG(INFO) << msg; + return; + } else { + setErrorInternal(std::move(eptr), lock); + } + } + + // Get the result of the current future. + IValue value() { + std::unique_lock lock(mutex_); + AT_ASSERT(completed()); + if (eptr_) { + std::rethrow_exception(eptr_); + } + return value_; + } + + // This accessor should only be used if we know that the future is + // completed() with no error. + const IValue& constValue() const { + std::unique_lock lock(mutex_); + AT_ASSERT(completed()); + TORCH_INTERNAL_ASSERT( + !eptr_, + "value() accessor should only be used when future is not completed with ", + "an error, but future had the following error: ", + tryRetrieveErrorMessageInternal(eptr_) + ); + return value_; + } + + // This accessor should only be used if we know that the future is + // completed() with no error. + const std::vector& storages() const { + std::unique_lock lock(mutex_); + AT_ASSERT(completed()); + AT_ASSERT(!eptr_); + return storages_; + } + + /** + * Add a callback to the future. + * The callbacks will be executed once the future completes. + * If the future has already completed, + * this function will execute the callback immediately. + */ + template + void addCallback(T callback, bool uses_future = true) { +#if __cpp_lib_is_invocable >= 201703 + static_assert( + std::is_invocable_r::value, + "The callback must have signature void(Future&)"); +#endif + + std::unique_lock lock(mutex_); + if (completed()) { + lock.unlock(); + invokeCallback(std::move(callback), uses_future); + return; + } + callbacks_.emplace_back(std::move(callback), uses_future); + } + + /** + * Add a callback to the future, and return another Future to hold the return + * value of the callback. This is necessary when the callback provider needs + * to know for sure when the callback has finished. 
+ */ + template + c10::intrusive_ptr then(T callback, TypePtr type) { + using IValueWithStorages = std::tuple>; +#if __cpp_lib_is_invocable >= 201703 + static_assert( + std::disjunction< + std::is_invocable_r, + std::is_invocable_r>::value, + "The callback must have signature IValue(Future&) or " + "std::tuple>(Future&)"); +#endif + auto childFut = createInstance(::std::move(type)); + addCallback([childFut, + cb = std::move(callback)](Future& parentFut) mutable { + try { + if constexpr (::std::is_convertible_v, IValueWithStorages>) { + auto [ivalue, storages] = cb(parentFut); + childFut->markCompleted(::std::move(ivalue), ::std::move(storages)); + } else { + childFut->markCompleted(cb(parentFut)); + } + } catch (std::exception&) { + childFut->setError(std::current_exception()); + } + }); + return childFut; + } + + template + c10::intrusive_ptr thenAsync(T callback, TypePtr type) { +#if __cpp_lib_is_invocable >= 201703 + static_assert( + std::is_invocable_r, T, Future&>::value, + "The callback must have signature c10::intrusive_ptr(Future&)"); +#endif + auto childFut = createInstance(std::move(type)); + addCallback( + [childFut, cb = std::move(callback)](Future& parentFut) mutable { + c10::intrusive_ptr intermediateFut; + try { + intermediateFut = cb(parentFut); + } catch (std::exception&) { + childFut->setError(std::current_exception()); + return; + } + intermediateFut->addCallback( + [childFut = std::move(childFut)](Future& intermediateFut) { + if (intermediateFut.hasError()) { + childFut->setError(intermediateFut.exception_ptr()); + } else { + childFut->markCompleted( + intermediateFut.value(), intermediateFut.storages()); + } + }); + }); + return childFut; + } + + // Tries to retrieve the error message from std::exception_ptr. + std::string tryRetrieveErrorMessage() const { + TORCH_CHECK(hasError(), "No error present on the future."); + std::unique_lock lock(mutex_); + return tryRetrieveErrorMessageInternal(eptr_); + } + + // Check if the current future has completed + bool completed() const { + return completed_; + } + + bool hasValue() const { + std::unique_lock lock(mutex_); + return completed_ && !eptr_; + } + + bool hasError() const { + std::unique_lock lock(mutex_); + return eptr_ ? true : false; + } + + std::exception_ptr exception_ptr() const { + std::unique_lock lock(mutex_); + return eptr_; + } + + TORCH_API friend std::ostream& operator<<( + std::ostream& out, + const Future& v); + + const TypePtr& elementType() const { + return type_; + } + + const std::vector& devices() const { + return devices_; + } + + // This method should be used when one intends to manually create a child + // future, for example when implementing a customized version of then(). + c10::intrusive_ptr createInstance(at::TypePtr type) { + return c10::make_intrusive(std::move(type), devices_); + } + + private: + + // This method should always be used when invoking a callback (regardless of + // how/when that happens) as it will ensure that the proper "environment" is + // set up before running the callback, as in, it will set up the CUDA streams, + // synchronize them with the value, and so on (if needed). + template + void invokeCallback(T callback, bool uses_future) { +#if __cpp_lib_is_invocable >= 201703 + static_assert( + std::is_invocable_r::value, + "The callback must have signature void(Future&)"); +#endif + + // The synchronization performed below shouldn't be needed when the future + // is not used by the callback. 
+ if (uses_future) { + c10::OptionalDeviceGuard deviceGuard(currentDevice_); + + std::vector streams; + streams.reserve(devices_.size()); + for (const c10::Device& device : devices_) { + streams.push_back(impl_.getStreamFromGlobalPool(device)); + } + c10::MultiStreamGuard streamGuard(streams); + synchronizeWithCurrentStreams(); + callback(*this); + } else { + callback(*this); + } + } + + // This method should be called before this future's value is used, as it + // ensures that the CUDA streams that are "current" at the callsite properly + // synchronize with the value. + void synchronizeWithCurrentStreams() { + for (c10::Event& event : events_) { + event.block(impl_.getStream(event.device())); + } + + for (const WeakStorage& weak_storage : storages_) { + c10::intrusive_ptr storage = weak_storage.lock(); + if (!storage) { + continue; + } + if (!storage->device().is_cpu()) { + impl_.recordDataPtrOnStream( + storage->data_ptr(), impl_.getStream(storage->device())); + } + } + } + + void setErrorInternal( + std::exception_ptr eptr, + std::unique_lock& lock) { + TORCH_CHECK( + !eptr_, + "Error already set on this Future: ", + tryRetrieveErrorMessageInternal(eptr_), + ", trying to set error: ", + tryRetrieveErrorMessageInternal(eptr)); + TORCH_INTERNAL_ASSERT(!completed(), "Future is already marked completed"); + completed_ = true; + eptr_ = std::move(eptr); + + std::vector cbs; + cbs.swap(callbacks_); + lock.unlock(); + + finished_cv_.notify_all(); + for (auto& callback : cbs) { + invokeCallback(std::move(callback.callback), callback.uses_future); + } + } + + // Tries to retrieve the error message from std::exception_ptr. + std::string tryRetrieveErrorMessageInternal(std::exception_ptr eptr) const { + try { + std::rethrow_exception(std::move(eptr)); + } catch (const std::exception& e) { + return e.what(); + } catch (...) { + return "Unknown Exception Type"; + } + } + + // Defined in ivalue.cpp. 
+ static std::vector extractStorages( + const at::IValue& value); + + static std::vector getDevicesOfStorages( + const c10::impl::VirtualGuardImpl& impl, + const std::vector& storages) { + c10::DeviceIndex deviceCount = impl.deviceCount(); + std::vector isDeviceUsed(deviceCount, false); + for (const WeakStorage& weak_storage : storages) { + c10::intrusive_ptr storage = weak_storage.lock(); + if (!storage) { + continue; + } + c10::Device device = storage->device(); + if (!device.is_cpu()) { + TORCH_CHECK_VALUE( + device.type() == impl.type(), + "Expected all data ptrs to be on a device of type ", + impl.type(), + ", got one on device ", + device); + isDeviceUsed[device.index()] = true; + } + } + std::vector devices; + for (c10::DeviceIndex idx = 0; idx < deviceCount; idx++) { + if (isDeviceUsed[idx]) { + devices.emplace_back(impl.type(), idx); + } + } + return devices; + } + + static std::string formatSetOfDevices( + const std::vector& devices) { + if (devices.empty()) { + return "(none)"; + } + std::ostringstream oss; + oss << devices[0]; + for (const auto idx : c10::irange(1, devices.size())) { + if (idx == devices.size() - 1) { + oss << " and "; + } else { + oss << ", "; + } + oss << devices[idx]; + } + return oss.str(); + } + + static c10::DeviceType getTypeOfDevices( + const std::vector& devices) { + if (devices.empty()) { + return c10::kCPU; + } + c10::DeviceType deviceType = devices[0].type(); + for (const auto idx : c10::irange(1, devices.size())) { + TORCH_CHECK_VALUE( + devices[idx].type() == deviceType, + "Expected all devices to be of the same type, but got a mismatch between ", + devices[0], + " and ", + devices[idx]); + } + return deviceType; + } + + // We need devices to be sorted in order to use ensureIsSubsetOfDevices. + static std::vector sortAndDeduplicateDevices( + const c10::impl::VirtualGuardImpl& /*impl*/, + std::vector devices) { + std::sort( + devices.begin(), devices.end(), + [](const c10::Device& a, const c10::Device& b) { return a.index() < b.index(); }); + // Deduplicate by compacting. + size_t targetIdx = 0; + for (const auto sourceIdx : c10::irange(devices.size())) { + TORCH_CHECK_VALUE( + devices[sourceIdx].has_index(), + "Expected devices to have indices, got ", devices[sourceIdx]); + if (targetIdx > 0 && devices[targetIdx - 1].index() == devices[sourceIdx].index()) { + // It's a duplicate, skip it. + continue; + } + if (sourceIdx != targetIdx) { + devices[targetIdx] = devices[sourceIdx]; + } + targetIdx++; + } + // If there were duplicates there's now a gap at the end: trim it. Resizing + // requires the item type to be default-constructible (which c10::Device is + // not) because in principle it could be required to create new items. Since + // we know we'll shrink the vector, we provide a custom dummy value instead. + devices.resize(targetIdx, c10::Device(c10::kCPU)); + return devices; + } + + static void ensureIsSubsetOfDevices( + const std::vector& subset, + const std::vector& superset) { + // We assume the devices in both vectors have the same consistent type, and + // their indices are unique and sorted. 
+ std::vector excessDevices; + std::set_difference( + subset.begin(), + subset.end(), + superset.begin(), + superset.end(), + std::back_inserter(excessDevices), + [](const c10::Device& a, const c10::Device& b) { return a.index() < b.index(); }); + TORCH_CHECK_VALUE( + excessDevices.empty(), + "The result contained tensors residing on device(s) ", + formatSetOfDevices(excessDevices), + " which are not among the expected device(s) ", + formatSetOfDevices(superset)); + } + + mutable std::mutex mutex_; + std::atomic_bool completed_ = {false}; // is this future complete + std::condition_variable finished_cv_; + + IValue value_; // when finished the value + TypePtr type_; + std::vector callbacks_; + std::exception_ptr eptr_; + + // An upcast pointer to a virtual class which allows us to manipulate events, + // streams, ... in a generic way, without an explicit dependency on CUDA. + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const c10::impl::VirtualGuardImpl impl_; + + // The device that was current when markCompleted was called, which we'll + // restore when invoking callbacks. It's optional because we'll only store it + // if the future completes successfully. + optional currentDevice_; + + // The events that correspond to the completion of the async I/O kernels. They + // are recorded on the appropriate streams when the future is marked completed + // and can then be queried/waited/blocked on. There is one event for each + // distinct device on which the value's tensors reside. + std::vector events_; + + // A cached version of the storages extracted from the value when the future + // is first marked completed. + std::vector storages_; + + // The bounding set of devices that this future, and any of its children, is + // allowed to use. This is a superset of the set of devices used by the events + // above. We need this to know what streams (for which devices) to set as + // current when invoking a callback, thus allowing the callback to use devices + // that the parent future didn't use. This field is set to the value provided + // in the constructor and will be "inherited" by all child futures. 
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + const std::vector devices_; +}; + +struct C10_EXPORT ivalue::Await final : c10::intrusive_ptr_target { + private: + explicit Await(TypePtr elType, std::function fn) + : elType_(std::move(elType)), type_(AwaitType::create(elType_)), fn_(std::move(fn)) {} + + explicit Await(TypePtr elType) : elType_(std::move(elType)), type_(AwaitType::create(elType_)) { } + + friend c10::intrusive_ptr; + + public: + Await(const Await&) = delete; + Await(Await&&) = delete; + Await& operator=(const Await&) = delete; + Await& operator=(Await&&) = delete; + + IValue wait() { + if (!completed_) { + TORCH_CHECK(fn_, "Incompleted Await: fn can't be None"); + value_ = fn_(); + completed_ = true; + args_ = {}; + } + return value_; + } + + IValue value() { + TORCH_CHECK(completed_, "Await must be completed"); + return value_; + } + + void setFn(std::function fn) { + fn_ = std::move(fn); + } + + bool completed() { + return completed_; + } + + void markCompleted(IValue value) { + value_ = std::move(value); + completed_ = true; + } + + TORCH_API friend std::ostream& operator<<( + std::ostream& out, + const Await& v); + + const TypePtr& elementType() const { + return elType_; + } + + const TypePtr& type() const { + return type_; + } + + void setArgs(std::vector args) { + args_ = std::move(args); + } + + std::vector& args() { + return args_; + } + + private: + TypePtr elType_; + TypePtr type_; + std::vector args_; + std::function fn_; + IValue value_; + bool completed_{}; +}; + +// Input is a list of Futures with the same target type. +// Output is a Future to the List of completed Futures. +TORCH_API intrusive_ptr collectAll( + const c10::List>& srcs); +// Input is a List of Futures with the same target type. +// Output is a Future that will be updated with a seen value. +TORCH_API intrusive_ptr collectAny( + const c10::List>& srcs); + +// User-defined object. +struct C10_EXPORT ivalue::Object final : c10::intrusive_ptr_target { + public: + // In general, class types hold a shared_ptr to its owning CompilationUnit, + // so that its type and methods do not get deallocated while the class exists. + // However, the CompilationUnit holds ownership of the type's graphs, so + // inserting a constant object into a Graph would create a reference cycle if + // that constant object held a shared_ptr to its CU. For these objects we + // instatiate them with non-owning references to its CU + Object(WeakOrStrongTypePtr type, size_t numSlots) : type_(std::move(type)) { + slots_.resize(numSlots); + } + + Object(StrongTypePtr type, size_t numSlots) + : type_(WeakOrStrongTypePtr(std::move(type))) { + slots_.resize(numSlots); + } + + static c10::intrusive_ptr create( + WeakOrStrongTypePtr type, + size_t numSlots) { + return c10::make_intrusive(std::move(type), numSlots); + } + + static c10::intrusive_ptr create( + StrongTypePtr type, + size_t numSlots) { + return c10::make_intrusive(std::move(type), numSlots); + } + + static c10::intrusive_ptr create(ClassTypePtr classType, size_t numSlots); + + /** + * Slot API. + * + * Attributes are stored as a simple vector so that lookups are fast at + * runtime. A "slot" is just an index into that vector, which can be computed + * statically if you have access to the class type. Use this API if you are + * writing compiler stuff. + */ + void setSlot(size_t slot, IValue v) { + if (slot >= slots_.size()) { + // for module types, it is possible that the members of the class have + // expanded after the object was created. 
In this case, we expand + // the slots to the right size + resizeObject(slot); + } + slots_[slot] = std::move(v); + } + + const IValue& getSlot(size_t slot) const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(slot < slots_.size()); + // NOTE: This lookup is fairly hot, so we use unchecked access to the + // vector. Errors should still be detectable with ASan. + return slots_[slot]; + } + + void unsafeRemoveSlot(size_t slot) { + TORCH_CHECK(slot < slots_.size()); + slots_.erase(slots_.begin() + static_cast(slot)); + } + + /** + * Attribute API. + * + * Wrappers around the slot stuff so that users can access attributes + * directly. Use this API if you are a user. + * + * Note: Unlike in Python, TorchScript must make a distinction between + * attributes (which are IValues) and methods (which are Methods). If you + * want a method, use `obj.type()->getMethod()` + */ + IValue getAttr(const std::string& name) const; + void setAttr(const std::string& name, IValue v); + // Remove attribute by name, caller is responsible for + // the safety of this operation + // We didn't remove the attribute in the type because the type + // might be shared by multiple objects. + // Therefore after removing attribute, the object is in an inconsistent + // state where it has more attribute types in its Type than + // the attribute slots it has, user needs to make sure the object + // has consistent by removing the attribute in type as well + void unsafeRemoveAttr(const std::string& name); + + std::string name() const; + + const std::vector& slots() const { + return slots_; + } + std::shared_ptr type() const; + + std::shared_ptr compilation_unit() { + if (type_.holds_strong_ref()) { + return type_.cu_.getStrongRefOrThrow(); + } else { + auto weak_ptr = type_.cu_.getWeakRefOrThrow(); + return std::shared_ptr(weak_ptr); + } + } + + c10::intrusive_ptr copy_to_weak_compilation_ref() const; + + void unsafe_make_weak_compilation_ref() { + type_ = WeakOrStrongTypePtr(type_.asWeakTypePtr()); + } + + c10::intrusive_ptr copy() const; + + c10::intrusive_ptr deepcopy( + c10::optional device = c10::nullopt) const; + + c10::intrusive_ptr deepcopy( + IValue::HashAliasedIValueMap& memo, + c10::optional device = c10::nullopt) const; + + bool is_weak_compilation_ref() const { + return !type_.holds_strong_ref(); + } + + bool is_empty_strong_compilation_ref() const { + return type_.holds_empty_strong_ref(); + } + + private: + void resizeObject(size_t slot); + WeakOrStrongTypePtr type_; + std::vector slots_; +}; + +// virtual ivalue PyObjectHolder that hold a py::object, we make this virtual +// because the py::object and refcounting logic should happen in libtorch_python +// see concrete implementation in python_ivalue.h +struct ivalue::PyObjectHolder : c10::intrusive_ptr_target { + public: + virtual PyObject* getPyObject() = 0; + virtual c10::InferredType tryToInferType() = 0; + virtual IValue toIValue(const TypePtr& type, c10::optional N = c10::nullopt) = 0; + virtual std::string toStr() = 0; + virtual std::vector extractTensors() = 0; + + ~PyObjectHolder() override = default; +}; + +struct ivalue::EnumHolder : c10::intrusive_ptr_target { + public: + EnumHolder(std::shared_ptr type, std::string name, IValue value) + : type_(std::move(type)), + name_(std::move(name)), + value_(std::move(value)) {} + + bool is(const ivalue::EnumHolder& rhs) { + return *this == rhs; + } + + friend bool operator==( + const ivalue::EnumHolder& lhs, + const ivalue::EnumHolder& rhs); + + TORCH_API friend std::ostream& operator<<( + std::ostream& out, + const 
ivalue::EnumHolder& v); + + TORCH_API const std::string& qualifiedClassName() const; + + const std::string& unqualifiedClassName() const; + + const std::string& name() const { + return name_; + } + + const IValue& value() const { + return value_; + } + + std::shared_ptr type() const { + return type_; + } + + private: + std::shared_ptr type_; + std::string name_; + IValue value_; +}; + +#undef TORCH_FORALL_TAGS + +namespace detail { + +struct _guarded_unsigned_long_unique_dummy final { + _guarded_unsigned_long_unique_dummy(int64_t){}; +}; +using _guarded_unsigned_long = std::conditional_t< + std::is_same::value || + std::is_same::value, + _guarded_unsigned_long_unique_dummy, + unsigned long>; + +} // namespace detail + +inline ivalue::Object& IValue::toObjectRef() const { + AT_ASSERT(isObject(), "Expected Object but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), "Attempted to create null reference"); + return *static_cast(payload.u.as_intrusive_ptr); +} + +// note: when adding a DEFINE_TO case here you should also add a +// toX method to IValue. These named methods are much more discoverable +// than the to templated function. + +#define DEFINE_TO(T, method_name) \ + template <> \ + inline T IValue::to()&& { \ + return static_cast(std::move(*this).method_name()); \ + } \ + template <> \ + inline c10::detail::ivalue_to_const_ref_overload_return::type IValue::to() const& { \ + typedef c10::detail::ivalue_to_const_ref_overload_return::type return_type; \ + return static_cast(this->method_name()); \ + } + +DEFINE_TO(at::Tensor, toTensor) +DEFINE_TO(at::Storage, toStorage) +DEFINE_TO(c10::Stream, toStream) +DEFINE_TO(float, toDouble) +DEFINE_TO(double, toDouble) +DEFINE_TO(c10::complex, toComplexDouble) +DEFINE_TO(unsigned char, toInt) +DEFINE_TO(signed char, toInt) +DEFINE_TO(unsigned short, toInt) +DEFINE_TO(short, toInt) +DEFINE_TO(int, toInt) +DEFINE_TO(uint32_t, toInt) +DEFINE_TO(uint64_t, toInt) +DEFINE_TO(detail::_guarded_unsigned_long, toInt) +DEFINE_TO(int64_t, toInt) +DEFINE_TO(bool, toBool) +DEFINE_TO(c10::intrusive_ptr, toBlob); +DEFINE_TO(c10::intrusive_ptr, toString) +DEFINE_TO(c10::intrusive_ptr, toObject) +DEFINE_TO(at::Scalar, toScalar) +DEFINE_TO(c10::List, toIntList) +DEFINE_TO(c10::List, toDoubleList) +DEFINE_TO(c10::List>, toComplexDoubleList) +DEFINE_TO(c10::List, toBoolList) +DEFINE_TO(c10::List, toTensorList) +DEFINE_TO(c10::impl::GenericList, toList) +DEFINE_TO(c10::impl::GenericDict, toGenericDict) +DEFINE_TO(c10::intrusive_ptr, toTuple) +DEFINE_TO(std::string, toStringRef) +DEFINE_TO(c10::string_view, toStringView) +DEFINE_TO(c10::intrusive_ptr, toFuture) +DEFINE_TO(c10::intrusive_ptr, toAwait) +DEFINE_TO(c10::intrusive_ptr, toRRef) +DEFINE_TO(c10::intrusive_ptr, toQuantizer) +DEFINE_TO(IValue, toIValue) +DEFINE_TO(c10::Device, toDevice) +DEFINE_TO(at::ScalarType, toScalarType) +DEFINE_TO(at::Layout, toLayout) +DEFINE_TO(at::MemoryFormat, toMemoryFormat) +DEFINE_TO(at::QScheme, toQScheme) +DEFINE_TO(at::Dimname, toDimname) +DEFINE_TO(at::Generator, toGenerator) +DEFINE_TO(c10::SymInt, toSymInt) +DEFINE_TO(c10::SymFloat, toSymFloat) +DEFINE_TO(c10::SymBool, toSymBool) + +template +struct _fake_type {}; + +// generic_to converts an IValue from a generic list or generic dict +// to a concrete list/dict type likelike List, Dict<...> or optional. +// Note that in the case of lists, this only works for IValue-based lists, +// i.e. not for int64_t, double, ... 
+// generic_to is an implementation detail of IValue::to and not +// supposed to be called directly. +// The _fake_type parameter allows us to overload +// based on the return type. +template +// TODO this is deprecated but we don't throw a warning because a lot of ops in +// native_functions.yaml still return std::vector. +// C10_DEPRECATED_MESSAGE("IValues based on std::vector are potentially slow +// and deprecated. Please use torch::List instead.") +std::vector generic_to(IValue ivalue, _fake_type>) { + // We need to do a deep copy of the vector because there might be other + // references to this same IValue that also use the list. We can't just + // move the elements out. + auto list = std::move(ivalue).to>(); + std::vector result; + result.reserve(list.size()); + for (Elem v : list) { + result.push_back(std::move(v)); + } + return result; +} + +template +c10::intrusive_ptr IValue::toCustomClass() && { + static_assert( + std::is_base_of::value == true, + "toCustomClass requires that template parameter T must inherit " + "from torch::CustomClassHolder"); + auto obj = toObject(); + TORCH_CHECK( + obj->slots().size() == 1, + "Tried to cast IValue to custom class but it did " + "not contain a custom class!"); + const auto* expected_type = c10::getCustomClassType>().get(); + ivalue::checkCustomClassType(expected_type, type().get()); + auto userObj = + c10::static_intrusive_pointer_cast(obj->getSlot(0).toCapsule()); + return userObj; +} + +template +c10::intrusive_ptr IValue::toCustomClass() const& { + static_assert( + std::is_base_of::value == true, + "toCustomClass requires that template parameter T must inherit " + "from torch::CustomClassHolder"); + auto obj = toObject(); + TORCH_CHECK( + obj->slots().size() == 1, + "Tried to cast IValue to custom class but it did " + "not contain a custom class!"); + const auto* expected_type = c10::getCustomClassType>().get(); + ivalue::checkCustomClassType(expected_type, type().get()); + auto userObj = + c10::static_intrusive_pointer_cast(obj->getSlot(0).toCapsule()); + return userObj; +} + +template +T generic_to(IValue ivalue, _fake_type) { + using ElemType = typename std::remove_pointer::type::element_type; + return std::move(ivalue).toCustomClass(); +} + +template +tagged_capsule generic_to(IValue ivalue, _fake_type>) { + return tagged_capsule{std::move(ivalue)}; +} + +template +c10::List generic_to(IValue ivalue, _fake_type>) { + return impl::toTypedList(std::move(ivalue).toList()); +} + +template +static T createVectorLikeFromList(const c10::detail::ListImpl* impl) { + T result; + result.reserve(impl->list.size()); + for (const auto & i : impl->list) { + result.push_back(i.to()); + } + return result; +} + +template +static std::vector createVectorFromList(const c10::detail::ListImpl* impl) { + return createVectorLikeFromList>(impl); +} + +template +std::vector createVectorFromList(const c10::List& impl) { + std::vector result; + result.reserve(impl.size()); + for (size_t i = 0, N = impl.size(); i < N; ++i) { + result.push_back(impl[i]); + } + return result; +} + +template +OptionalArray generic_to(IValue ivalue, _fake_type>) { + if (ivalue.isNone()) { + return {}; + } + return createVectorFromList( + std::move(ivalue).to>() + ); +} + +namespace detail { +template +std::array generic_to_array( + IValue ivalue, + _fake_type>, + std::index_sequence) { + // We need to do a deep copy of the array because there might be other + // references to this same IValue that also use the list. We can't just + // move the elements out. 
+ auto list = std::move(ivalue).to>(); + TORCH_CHECK( + list.size() == sizeof...(I), + "Tried to convert a List with ", + list.size(), + " elements to a fixed-size array of size ", + sizeof...(I)); + return {list[I]...}; +} +} // namespace detail + +template +std::array generic_to( + IValue ivalue, + _fake_type> ft) { + return detail::generic_to_array(ivalue, ft, std::make_index_sequence()); +} + +template +c10::Dict generic_to( + IValue ivalue, + _fake_type>) { + return impl::toTypedDict(std::move(ivalue).toGenericDict()); +} + +template +C10_DEPRECATED_MESSAGE( + "IValues based on std::unordered_map are slow and deprecated. Please use c10::Dict instead.") +std::unordered_map generic_to( + IValue ivalue, + _fake_type>) { + std::unordered_map specialized_dict; + + for (const auto& item : std::move(ivalue).toGenericDict()) { + specialized_dict[item.key().template to()] = item.value().template to(); + } + + return specialized_dict; +} + +template +c10::optional generic_to(IValue ivalue, _fake_type>) { + if (ivalue.isNone()) { + return c10::nullopt; + } + return std::move(ivalue).to(); +} + +namespace detail { +template +Tuple generic_to_tuple_impl( + const ivalue::TupleElements& t, + std::index_sequence) { + return std::make_tuple( + t[INDEX].to::type>()...); +} +} // namespace detail + +template < + typename... Args, + typename Indices = std::make_index_sequence, + std::enable_if_t< + !std::disjunction< + std::is_lvalue_reference..., + std::negation>...>::value, + std::nullptr_t> = nullptr> +std::tuple generic_to(const IValue& ivalue, _fake_type>) { + const auto& vals = ivalue.toTupleRef().elements(); + TORCH_CHECK(vals.size() == sizeof...(Args)); + return detail::generic_to_tuple_impl>(vals, Indices{}); +} + +template +inline T IValue::to() && { + return generic_to(std::move(*this), _fake_type{}); +} + +template <> +inline c10::optional IValue::to() && { + // In the default implementation, the IValue is destroyed with std::move. + // But if the unboxed type is optional we cannot destroy + // the IValue. 
+ return generic_to(*this, _fake_type>{}); +} + +template +inline typename c10::detail::ivalue_to_const_ref_overload_return::type IValue::to() const& { + return generic_to(*this, _fake_type{}); +} + +inline c10::List IValue::toIntList() && { + AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind()); + return c10::List(moveToIntrusivePtr()); +} +inline c10::List IValue::toIntList() const& { + AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind()); + return c10::List(toIntrusivePtr()); +} +inline std::vector IValue::toIntVector() const { + AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toIntVector on null intrusive_ptr IValue"); + return createVectorFromList( + static_cast(payload.u.as_intrusive_ptr)); +} +inline std::vector IValue::toSymIntVector() const { + AT_ASSERT(isSymIntList() || isIntList(), "Expected SymIntList or IntList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toSymIntVector on null intrusive_ptr IValue"); + return createVectorFromList( + static_cast(payload.u.as_intrusive_ptr)); +} +inline at::DimVector IValue::toDimVector() const { + AT_ASSERT(isIntList(), "Expected IntList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toDimVector on null intrusive_ptr IValue"); + return createVectorLikeFromList( + static_cast(payload.u.as_intrusive_ptr)); +} +inline c10::List IValue::toDoubleList() && { + AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind()); + return c10::List(moveToIntrusivePtr()); +} +inline c10::List IValue::toDoubleList() const& { + AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind()); + return c10::List(toIntrusivePtr()); +} +inline std::vector IValue::toDoubleVector() const { + AT_ASSERT(isDoubleList(), "Expected DoubleList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toDoubleVector on null intrusive_ptr IValue"); + return createVectorFromList( + static_cast(payload.u.as_intrusive_ptr)); +} +inline c10::List> IValue::toComplexDoubleList() && { + AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind()); + return c10::List>(moveToIntrusivePtr()); +} +inline c10::List> IValue::toComplexDoubleList() const& { + AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind()); + return c10::List>(toIntrusivePtr()); +} +inline std::vector> IValue::toComplexDoubleVector() const { + AT_ASSERT(isComplexDoubleList(), "Expected ComplexDoubleList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toComplexDoubleVector on null intrusive_ptr IValue"); + return createVectorFromList>( + static_cast(payload.u.as_intrusive_ptr)); +} +inline c10::List IValue::toBoolList() && { + AT_ASSERT(isBoolList(), "Expected BoolList but got ", tagKind()); + return c10::List(moveToIntrusivePtr()); +} +inline c10::List IValue::toBoolList() const& { + AT_ASSERT(isBoolList(), "Expected BoolList but got ", tagKind()); + return c10::List(toIntrusivePtr()); +} +inline c10::List IValue::toTensorList() && { + AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind()); + return c10::List(moveToIntrusivePtr()); +} 
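// Usage sketch (illustrative only): round-tripping a list through IValue with
// the typed accessors defined above and the DEFINE_TO-generated to<T>()
// overloads. The const& accessors copy the refcounted list handle, the &&
// overload moves the payload out, and toIntVector() deep-copies into a
// std::vector:
//
//   c10::List<int64_t> ints{1, 2, 3};
//   c10::IValue iv(ints);                        // boxed as an int list
//   int64_t first = iv.toIntList().get(0);       // const& accessor, shares storage
//   std::vector<int64_t> vec = iv.toIntVector(); // deep copy into a std::vector
//   auto stolen = std::move(iv).to<c10::List<int64_t>>(); // && overload moves the payload
//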
+inline c10::List IValue::toTensorList() const& { + AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind()); + return c10::List(toIntrusivePtr()); +} +inline std::vector IValue::toTensorVector() const { + AT_ASSERT(isTensorList(), "Expected TensorList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toTensorVector on null intrusive_ptr IValue"); + return createVectorFromList( + static_cast(payload.u.as_intrusive_ptr)); +} +inline c10::List> IValue::toOptionalTensorList() && { + AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); + return c10::List>(moveToIntrusivePtr()); +} +inline c10::List> IValue::toOptionalTensorList() const& { + AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); + return c10::List>(toIntrusivePtr()); +} +inline std::vector> IValue::toOptionalTensorVector() const { + AT_ASSERT(isOptionalTensorList(), "Expected OptionalTensorList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toOptionalTensorVector on null intrusive_ptr IValue"); + return createVectorFromList>( + static_cast(payload.u.as_intrusive_ptr)); +} +inline c10::List IValue::toList() && { + AT_ASSERT(isList(), "Expected GenericList but got ", tagKind()); + return c10::List(moveToIntrusivePtr()); +} +inline c10::List IValue::toList() const& { + AT_ASSERT(isList(), "Expected GenericList but got ", tagKind()); + return c10::List(toIntrusivePtr()); +} +inline c10::ArrayRef IValue::toListRef() const { + AT_ASSERT(isList(), "Expected GenericList but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toListRef on null intrusive_ptr IValue"); + return static_cast(payload.u.as_intrusive_ptr) + ->list; +} +inline c10::Dict IValue::toGenericDict() && { + AT_ASSERT(isGenericDict(), "Expected GenericDict but got ", tagKind()); + return c10::Dict(moveToIntrusivePtr()); +} +inline c10::Dict IValue::toGenericDict() const& { + AT_ASSERT(isGenericDict(), "Expected GenericDict but got ", tagKind()); + return c10::Dict(toIntrusivePtr()); +} +inline c10::intrusive_ptr IValue::toTuple() && { + AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind()); + return moveToIntrusivePtr(); +} +inline c10::intrusive_ptr IValue::toTuple() const& { + AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind()); + return toIntrusivePtr(); +} +inline ivalue::Tuple& IValue::toTupleRef() const { + AT_ASSERT(isTuple(), "Expected Tuple but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toTupleRef on null intrusive_ptr IValue"); + return *static_cast( + payload.u.as_intrusive_ptr); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Tuple) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} +template < + typename... Args, + std::enable_if_t< + !std::disjunction< + std::is_lvalue_reference..., + std::negation>...>::value, + std::nullptr_t>> +inline IValue::IValue(const std::tuple& t) + : IValue(c10::guts::apply(c10::ivalue::Tuple::create, t)) { +} + +template < + typename... 
Args, + std::enable_if_t< + !std::disjunction< + std::is_lvalue_reference..., + std::negation>...>::value, + std::nullptr_t>> +inline IValue::IValue(std::tuple&& t) + : IValue(c10::guts::apply(c10::ivalue::Tuple::create, std::move(t))) { +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::String) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} +inline IValue::IValue(std::string v) + : IValue(ivalue::ConstantString::create(std::move(v))) {} + +inline IValue::IValue(c10::impl::GenericList v) + : tag(Tag::GenericList) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.impl_.release()); +} + +template > +inline IValue::IValue(c10::List&& v) : IValue(impl::toList(std::move(v))) {} +template > +inline IValue::IValue(const c10::List& v) : IValue(impl::toList(v)) {} +template > +inline IValue::IValue(at::ArrayRef v) : IValue(c10::List()) { + auto list = to>(); + list.reserve(v.size()); + for (const auto& e : v) { + list.push_back(e); + } +} +template > +inline IValue::IValue(at::ArrayRef v) : IValue() { + auto vi = c10::asIntArrayRefSlowOpt(v); + if (vi.has_value()) { + // This list is entirely integers; ensure it is typed as + // an IntList so toIntList works + *this = IValue(*vi); + } else { + // This list has SymInts; type it as a SymInt + *this = IValue(impl::toList(c10::List())); + auto list = to>(); + list.reserve(v.size()); + for (const auto& e : v) { + list.push_back(e); + } + } +} +template > +inline IValue::IValue(at::OptionalArrayRef mb_v) : IValue() { + if (!mb_v.has_value()) return; + *this = IValue(*mb_v); +} +template > +inline IValue::IValue(const std::vector& v) : IValue() { + *this = IValue(at::ArrayRef(v)); +} +template > +inline IValue::IValue(const std::vector& v) : IValue(c10::List()) { + auto list = to>(); + list.reserve(v.size()); + for (const auto& e : v) { + list.push_back(e); + } +} +template > +inline IValue::IValue(c10::OptionalArrayRef v) : IValue() { + if (v.has_value()) { + *this = IValue(std::move(*v)); + } +} + +template +inline IValue::IValue(std::array v) : IValue(c10::List()) { + auto list = to>(); + list.reserve(v.size()); + for (auto& e : v) { + list.push_back(std::move(e)); + } +} + +template > +inline IValue::IValue(c10::IListRef v) : IValue() { + constexpr bool boxed_type_constructs_ivalue = + std::is_constructible::boxed_type>::value; + // First, we try to use the boxed value. + // If we fail (either it's not in the boxed state, or its boxed type + // can not construct an IValue), we fallback to copying the list. 
+ if (boxed_type_constructs_ivalue && v.isBoxed()) { + *this = IValue(impl::toList(v.toBoxed())); + } else { + c10::List list; + list.reserve(v.size()); + for (const auto& t : v) { + list.push_back(t); + } + *this = IValue(impl::toList(std::move(list))); + } +} + +inline IValue::IValue(c10::impl::GenericDict v) + : tag(Tag::GenericDict) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.impl_.release()); +} +template +inline IValue::IValue(c10::Dict v) + : IValue(impl::toGenericDict(std::move(v))) {} + +template +inline IValue::IValue(std::unordered_map v) + : IValue(Dict()) { + auto dict = to>(); + dict.reserve(v.size()); + for (auto& e : v) { + dict.insert(std::move(e.first), std::move(e.second)); + } +} + +template > +inline IValue::IValue(c10::optional v) : IValue() { + if (v.has_value()) { + *this = IValue(std::move(*v)); + } +} + +inline IValue::IValue(c10::nullopt_t) : IValue() {} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Object) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::PyObject) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Enum) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue IValue::make_capsule( + intrusive_ptr blob) { + IValue iv; + iv.tag = Tag::Capsule; + iv.payload.u.as_intrusive_ptr = null_to_undefined_tensor(blob.release()); + return iv; +} + +template < + typename T, + std::enable_if_t::value, int>> +IValue::IValue(c10::intrusive_ptr custom_class) : tag(Tag::Object) { + auto classType = []() { + try { + return c10::getCustomClassType>(); + } catch (const c10::Error&) { + throw c10::Error( + "Trying to instantiate a class that isn't a registered custom class: " + + std::string(c10::util::get_fully_qualified_type_name()), + ""); + } + }(); + auto ivalue_obj = c10::ivalue::Object::create(std::move(classType), /* numSlots */1); + ivalue_obj->setSlot(0, IValue::make_capsule(std::move(custom_class))); + payload.u.as_intrusive_ptr = null_to_undefined_tensor(ivalue_obj.release()); + +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Future) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Await) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::RRef) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +inline IValue::IValue(c10::intrusive_ptr v) + : tag(Tag::Quantizer) { + payload.u.as_intrusive_ptr = null_to_undefined_tensor(v.release()); +} + +template +inline IValue::IValue(c10::complex c) + : tag(Tag::ComplexDouble) { + auto v = c10::make_intrusive(c); + payload.u.as_intrusive_ptr = v.release(); +} + +inline const std::string& IValue::toStringRef() const { + AT_ASSERT(isString(), "Expected String but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toStringRef on null intrusive_ptr IValue"); + return static_cast( + payload.u.as_intrusive_ptr) + ->string(); +} +inline c10::optional> IValue:: + toOptionalStringRef() const { + if (isNone()) { + return c10::nullopt; + } + AT_ASSERT(isString(), "Expected optional but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + 
"called toOptionalStringRef on null intrusive_ptr IValue"); + return std::reference_wrapper( + static_cast(payload.u.as_intrusive_ptr) + ->string()); +} + +inline c10::string_view IValue::toStringView() const { + AT_ASSERT(isString(), "Expected String but got ", tagKind()); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + payload.u.as_intrusive_ptr != c10::UndefinedTensorImpl::singleton(), + "called toStringView on null intrusive_ptr IValue"); + return static_cast( + payload.u.as_intrusive_ptr) + ->string_view(); +} + +inline PyObject* IValue::toPyObject() const { + return toPyObjectHolder()->getPyObject(); +} + +template +inline optional IValue::toOptional() { + if (this->isNone()) { + return nullopt; + } + return this->to(); +} + +template +inline optional IValue::toOptional() const { + if (this->isNone()) { + return nullopt; + } + return this->to(); +} + +inline bool IValue::isCustomClass() const { + return torch::isCustomClass(*this); +} + +inline bool IValue::isSameIdentity(const IValue& rhs) const { + // We choose to not use memcmp for payload check due to potential random + // padding characters on union type + + // Semantics: + // 1. Immutable primitive values of the same type (Int, Double, None, Bool, + // Str) return value equality + // 2. If it is a tensor type, we need to take undefined tensor into account + // 3. Undefined_tensor is None and vice versa should be true + // 4. If it is a reference type (i.e. isIntrusivePtr()), then is True when + // the pointed-to object is the same. + // 5. False for all other comparisons. + if (this->isNone() && rhs.isNone()) { + return true; + } else if (this->isBool() && rhs.isBool()) { + // for bool type, do equality check + return this->toBool() == rhs.toBool(); + } else if (this->isTensor() && rhs.isTensor()) { + return this->payload.as_tensor.is_same(rhs.payload.as_tensor); + } else if (this->isTensor() && rhs.isNone()) { + // special case: undefined tensor and None are the same identity + return !this->payload.as_tensor.defined(); + } else if (this->isNone() && rhs.isTensor()) { + // special case: undefined tensor and None are the same identity + return !rhs.payload.as_tensor.defined(); + } else if (this->isInt() && rhs.isInt()) { + return this->toInt() == rhs.toInt(); + } else if (this->isDouble() && rhs.isDouble()) { + return this->toDouble() == rhs.toDouble(); + } else if (this->isString() && rhs.isString()) { + return this->toStringRef() == rhs.toStringRef(); + } else { + // for objects holding in IValue, do shallow compare on pointer address to + // testify the identity + return this->isIntrusivePtr() && rhs.isIntrusivePtr() && + this->payload.u.as_intrusive_ptr == rhs.payload.u.as_intrusive_ptr; + } +} + +namespace ivalue { +namespace detail { + +template +IValue from_(T&& x, std::true_type) { + return IValue(std::forward(x)); +} +template +IValue from_(c10::intrusive_ptr x, std::false_type) { + return IValue(std::move(x)); +} +template +IValue from_(T&& /*x*/, std::false_type) { + static_assert( + guts::false_t::value, + "You are calling from with a type that it doesn't support, and isn't a potential custom class (ie: is an intrusive_ptr)"); + return IValue(); +} +} // namespace detail + +template +IValue from(T&& x) { + return detail::from_( + std::forward(x), typename std::is_constructible::type{}); +} + +} // namespace ivalue + + +template <> +struct MaybeOwnedTraits { + using owned_type = IValue; + using borrow_type = IValue; + + static borrow_type createBorrow(const owned_type& from) { + if (!from.isPtrType()) { + return from; + } + if 
(from.isTensor()) { + return IValue(MaybeOwnedTraits::createBorrow(from.toTensor())); + } else { + return IValue(from.payload, from.tag); + } + } + + static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) { + lhs.clearToNone(); + if (!rhs.isPtrType()) { + lhs = rhs; + } else if (rhs.isTensor()) { + lhs = IValue(MaybeOwnedTraits::createBorrow(rhs.toTensor())); + } else { + lhs = IValue(rhs.payload, rhs.tag); + } + } + + static void destroyBorrow(borrow_type& toDestroy) { + toDestroy.clearToNone(); + } + + static const owned_type& referenceFromBorrow(const borrow_type& borrow) { + return borrow; + } + + static const owned_type* pointerFromBorrow(const borrow_type& borrow) { + return &borrow; + } + + static bool debugBorrowIsValid(const borrow_type&) { + return true; + } +}; + +template <> +struct IValue::TagType { + static TORCH_API c10::TypePtr get(const IValue&); +}; + +template <> +struct IValue::TagType { + static TORCH_API c10::TypePtr get(const IValue&); +}; + +template +TypePtr IValue::type() const { + return IValue::TagType::get(*this); +} + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_to.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_to.h new file mode 100644 index 0000000000000000000000000000000000000000..52af58083596939106fe120abfbd360f0068db67 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/ivalue_to.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace at { +class Tensor; +} // namespace at + +namespace c10 { +struct IValue; +namespace detail { +// Determine the return type of `IValue::to() const &`. It's a const +// reference when possible and a copy otherwise. It is in this +// separate header so that List can use it as well. 
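// A minimal sketch of what the trait below selects (given the specializations
// that follow): to<at::Tensor>() const& hands back a const at::Tensor& rather
// than bumping the refcount, while types covered only by the primary template
// are returned by value, e.g.
//
//   ivalue_to_const_ref_overload_return<at::Tensor>::type  -> const at::Tensor&
//   ivalue_to_const_ref_overload_return<int64_t>::type     -> int64_t
//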
+template +struct ivalue_to_const_ref_overload_return { + using type = T; +}; + +template<> +struct ivalue_to_const_ref_overload_return { + using type = const at::Tensor&; +}; + +template<> +struct ivalue_to_const_ref_overload_return { + using type = const std::string&; +}; + +template<> +struct ivalue_to_const_ref_overload_return { + using type = const IValue&; +}; + +} // namespace detail +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/jit_type.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/jit_type.h new file mode 100644 index 0000000000000000000000000000000000000000..3f0d7970a10f90c64b5c17ff66c9d26809dcbd35 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/jit_type.h @@ -0,0 +1,2424 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +struct Function; +} // namespace jit +} // namespace torch + +namespace c10 { + +template +class Dict; +struct IValue; +struct FunctionSchema; +struct NamedType; +using OptNameList = c10::optional>; + +void standardizeVectorForUnion(std::vector& reference, std::vector* to_fill); +void standardizeVectorForUnion(std::vector* to_flatten); + +inline bool is_contiguous_strides( + const IntArrayRef sizes, + const IntArrayRef strides) { + int n_dim = static_cast(sizes.size()); + if (n_dim == 0) { + return true; + } + + if (strides[n_dim - 1] != 1) { + return false; + } + + for (int i = n_dim - 2; i >= 0; i--) { + if (strides[i] != strides[i + 1] * sizes[i + 1]) { + return false; + } + } + return true; +} + +struct AnyType; +using AnyTypePtr = SingletonTypePtr; +// Any is the top of the type hierarchy, all other types are subtypes +// T <: Any, forall T +struct TORCH_API AnyType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Any"; + } + static const TypeKind Kind = TypeKind::AnyType; + // global singleton + static AnyTypePtr get(); + + private: + AnyType() : Type(TypeKind::AnyType) {} +}; + +inline std::string toString(const Type& type) { + return type.str(); +} + +// Shim for compatibility with code that uses TypePtr. +inline std::string toString(const TypePtr& typePtr) { + return toString(*typePtr); +} + +inline bool operator!=(const Type& lhs, const Type& rhs) { + return !(lhs == rhs); +} + +// common base for all types that have a single sub element +// e.g. 
Future[T], Optional[T], List[T] +template +struct SingleElementType : public SharedType { + static const TypeKind Kind = K; + + const TypePtr& getElementType() const { + return elem; + } + + bool hasFreeVariables() const override { + return getElementType()->hasFreeVariables(); + } + + at::ArrayRef containedTypes() const override { + return elem; + } + + bool equals(const Type& rhs) const override { + if (auto rhs_ = rhs.cast()) { + return *getElementType() == *rhs_->getElementType(); + } + return false; + } + + protected: + SingleElementType(TypePtr elem) : SharedType(Kind), elem(std::move(elem)) { + if (!this->elem) { + throw std::runtime_error(c10::str( + "Can not create ", typeKindToString(Kind), " with None type")); + } + } + + private: + TypePtr elem; +}; + +struct UnionType; +using UnionTypePtr = std::shared_ptr; +struct TORCH_API UnionType : public SharedType { + friend struct Type; + + static const TypeKind Kind = TypeKind::UnionType; + + bool isSubtypeOfExt(const Type& rhs_, std::ostream* why_not) const override; + + std::string str() const override; + + static UnionTypePtr create(std::vector reference); + + bool equals(const Type& rhs) const override; + + bool isUnionType() const override { + return true; + } + + at::ArrayRef containedTypes() const override { + return types_; + } + + // For testing purposes only + at::ArrayRef getTypes() const { + return types_; + } + + TypePtr createWithContained(std::vector contained_types) const override { + return create(std::move(contained_types)); + } + + bool canHoldType(const Type& type) const; + + bool hasFreeVariables() const override { + return has_free_variables_; + } + + c10::optional toOptional() const; + + c10::optional subtractTypeSet(std::vector& to_subtract) const; + + protected: + explicit UnionType(std::vector types, TypeKind kind=TypeKind::UnionType); + std::string annotation_str_impl(TypePrinter printer = nullptr) const override; + std::string unionStr( + TypePrinter printer = nullptr, + bool is_annotation_str = false) const; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool has_free_variables_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::vector types_; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + bool can_hold_none_; + +}; + +struct OptionalType; +using OptionalTypePtr = std::shared_ptr; +// This type represents an optional type. There is one `Optional` for +// each element type. 
`Optional[T]` can accept both `T` and +// `None`(`c10::nullopt` in C++) +// Subtype hierarchy for Optional: +// - Optional[T] <: Optional[R] iff T <: R +// - T <: Optional[R] if T <: R +// - None <: Optional[T] for all T +// - Optional[T] == Union[T, None] for all T +struct TORCH_API OptionalType : public UnionType { + static OptionalTypePtr create(const TypePtr& contained); + + static const TypeKind Kind = TypeKind::OptionalType; + + friend struct Type; + + bool equals(const Type& rhs) const override; + + const TypePtr& getElementType() const { + return contained_; + } + + at::ArrayRef containedTypes() const override { + return contained_; + } + + std::string str() const override { + std::stringstream ss; + ss << getElementType()->str() << "?"; + return ss.str(); + } + + TypePtr createWithContained( + std::vector contained_types) const override { + AT_ASSERT(contained_types.size() == 1); + return create(contained_types[0]); + } + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + bool isUnionType() const override { + return true; + } + + // common cast Optional[Tensor] for undefined tensor type + static TypePtr ofTensor(); + // + // global singleton + static TypePtr get(TypePtr inner); + + private: + explicit OptionalType(const TypePtr& contained); + + TypePtr contained_; + + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + std::stringstream ss; + ss << "Optional[" << getElementType()->annotation_str(std::move(printer)) << "]"; + return ss.str(); + } +}; + +template +inline c10::optional merge_primitive( + const c10::optional& a, + const c10::optional& b) { + if (a.has_value() && b.has_value() && a.value() == b.value()) { + return a; + } + return c10::optional{}; +} + +// If we see `a + b + c` and know that a, b, and c are the same size and have +// two dimensions (WxH), then we can generate a fused kernel for them. That +// fused kernel would likely have indexing math to handling both the W and H +// dimensions. However, if we knew the WxH dimensions were contiguous, we can +// pretend like we only have a single dimension, simplifying the indexing logic. +// This can be performed even if the dimensions are transposed, +// as long as a, b, and c are transposed in the same way. +// We'd like to have the compiler be able to do this dimensionality reduction, +// but simply knowing sizes is not enough. +// We can extend profiling to also record stride information. 
+// Rather than recording specific strides, +// we can simply order the strides from smallest to largest with +// `stride_indices` A contiguity marker on the smallest stride (c0) indicates +// the stride is precisely 1, otherwise a contiguity marker means that $stride_n +// = size_{n-1}*stride_{n-1}$ +struct TORCH_API Stride { + Stride() = default; + Stride( + const c10::optional& stride_index, + c10::optional contiguous, + const c10::optional& stride) + : stride_index_(stride_index), contiguous_(contiguous), stride_(stride) {} + + bool operator==(const Stride& b) const { + return stride_index_ == b.stride_index_ && contiguous_ == b.contiguous_ && + stride_ == b.stride_; + } + + bool isComplete() const { + return stride_index_ && contiguous_ && stride_; + } + + c10::optional stride_index_; + c10::optional contiguous_; + c10::optional stride_; +}; + +template <> +inline c10::optional merge_primitive( + const c10::optional& a, + const c10::optional& b) { + c10::optional left = a; + c10::optional right = b; + if (!left.has_value()) { + left = {Stride()}; + } + if (!right.has_value()) { + right = {Stride()}; + } + + auto merged_index = + merge_primitive(left->stride_index_, right->stride_index_); + auto merged_cont = merge_primitive(left->contiguous_, right->contiguous_); + auto merged_stride = merge_primitive(left->stride_, right->stride_); + auto r = Stride(merged_index, merged_cont, merged_stride); + // normalize + if (!r.stride_index_.has_value() && !r.contiguous_.has_value() && + !r.stride_.has_value()) { + return c10::optional{}; + } + + return r; +} + +struct TORCH_API ShapeSymbol { + // needed for use in `std::map` + ShapeSymbol() : value_(-1) {} + // is this symbol a fixed/static dimension + bool is_static() const { + return value_ >= 0; + }; + bool operator==(const ShapeSymbol& b) const { + return value_ == b.value_; + } + bool operator<(const ShapeSymbol& b) const { + return value_ < b.value_; + } + + static ShapeSymbol fromStaticSize(int64_t val) { + return ShapeSymbol(val); + } + int64_t static_size() const { + TORCH_CHECK(is_static()); + return value_; + }; + + int64_t value() const { + return value_; + }; + + static ShapeSymbol newSymbol() { + return fromStaticSize(-static_cast(++num_symbols)); + }; + friend TORCH_API std::ostream& operator<<( + std::ostream& os, + const ShapeSymbol& s); + + private: + ShapeSymbol(int64_t val) : value_(val) {} + int64_t value_; + static std::atomic num_symbols; +}; + +inline ShapeSymbol merge_primitive( + const ShapeSymbol& a, + const ShapeSymbol& b) { + if (a.is_static() && b.is_static() && a == b) { + return a; + } + return ShapeSymbol::newSymbol(); +} + +// Shape of a Tensor represented with ShapeSymbol's. Unranked, ranked unknown +// dims, partially known and fully known shapes are all supported. +struct TORCH_API SymbolicShape { + // Unranked shape constructor. + SymbolicShape() : dims_(c10::nullopt) {} + + // Known rank but unknown dimentions. 
+ SymbolicShape(c10::optional rank) : dims_(c10::nullopt) { + if(!rank) { + return; + } + + std::vector shape_symbols; + shape_symbols.reserve(*rank); + for(size_t i = 0; i < *rank; ++i) { + shape_symbols.push_back(ShapeSymbol::newSymbol()); + } + dims_ = shape_symbols; + } + + // Mix of known and unknown ranks + SymbolicShape(const std::vector>& dims) { + std::vector shape_symbols; + shape_symbols.reserve(dims.size()); + for(c10::optional dim: dims) { + if(!dim) { + shape_symbols.push_back(ShapeSymbol::newSymbol()); + } else { + shape_symbols.push_back(ShapeSymbol::fromStaticSize(*dim)); + } + } + dims_ = shape_symbols; + } + + void dump() const; + + SymbolicShape(std::vector dims) : dims_(std::move(dims)) {} + + SymbolicShape(c10::IntArrayRef dims) { + std::vector shape_symbols; + shape_symbols.reserve(dims.size()); + for(int64_t dim : dims) { + shape_symbols.push_back(ShapeSymbol::fromStaticSize(dim)); + } + dims_ = shape_symbols; + } + + ShapeSymbol operator[](size_t i) const { + if (!dims_) { + throw std::runtime_error("Rank isn't fixed"); + } + return (*dims_).at(i); + } + + ShapeSymbol at(size_t i) const { + if (!dims_) { + throw std::runtime_error("Rank isn't fixed"); + } + return (*dims_).at(i); + } + + // Returns rank or nullopt in case of unranked shape. + c10::optional rank() const { + if(!dims_) { + return c10::nullopt; + } + return dims_->size(); + } + + c10::optional> sizes() const { + return dims_; + } + + c10::optional> symbolicDims() const { + if (!dims_) { + return c10::nullopt; + } + auto symbolic_dims = std::vector(); + for (const ShapeSymbol& s : *dims_) { + symbolic_dims.push_back(!s.is_static()); + } + return symbolic_dims; + } + + // Checks whether the shape is fully defined/complete, ie. rank and sizes + // of every dimension are known. + bool isComplete() const { + if(!dims_) { + return false; + } + for(auto d : *dims_) { + if(!d.is_static()) { + return false; + } + } + return true; + } + + // Create new SymbolicShape that is result of merging self and another + // SymbolicShape. Only dimensions that are static and equal will be + // preserved. + // If either of two shapes are of unknown rank or they have unmatching rank, + // result will be unranked. 
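// For example (illustrative, following the rules above; merge() is declared
// just below):
//
//   SymbolicShape a(c10::IntArrayRef{2, 3});
//   SymbolicShape b(c10::IntArrayRef{2, 4});
//   a.merge(b)               -> rank 2; dim 0 stays static (2), dim 1 gets a fresh symbol
//   a.merge(SymbolicShape()) -> unranked, since the right-hand side has unknown rank
//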
+ SymbolicShape merge(const SymbolicShape& other) const; + + friend bool operator==(const SymbolicShape& lhs, const SymbolicShape& rhs) { + return lhs.dims_ == rhs.dims_; + } + + friend bool operator!=(const SymbolicShape& lhs, const SymbolicShape& rhs) { + return !(lhs == rhs); + } + + private: + c10::optional> dims_; +}; + +namespace detail { +inline bool isComplete(const Stride& s) { + return s.isComplete(); +} + +template +inline bool isComplete(const T& /*t*/) { + return true; +} +} + +template +struct VaryingShape { + using ListOfOptionalElements = std::vector>; + VaryingShape(const std::vector& vec) + : VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {} + + VaryingShape(c10::ArrayRef vec) + : VaryingShape(ListOfOptionalElements(vec.begin(), vec.end())) {} + + VaryingShape(c10::optional size = c10::nullopt) : dims_(c10::nullopt) { + if (size) { + dims_ = ListOfOptionalElements(*size); + } + } + + VaryingShape(ListOfOptionalElements dims) : dims_(std::move(dims)) {} + + VaryingShape(size_t size) : VaryingShape(c10::optional(size)) {} + + bool operator==(const VaryingShape& other) const { + return dims_ == other.dims_; + } + + const c10::optional &operator[](size_t i) const { + if (!dims_) { + throw std::runtime_error("Rank isn't fixed"); + } + return (*dims_).at(i); + } + + c10::optional size() const { + if (!dims_) { + return c10::nullopt; + } + const auto& dims = dims_.value(); + return dims.size(); + } + + const c10::optional& sizes() const { + return dims_; + } + + TORCH_API VaryingShape merge(const VaryingShape& other) const; + + c10::optional> concrete_sizes() const { + if (!dims_) { + return c10::nullopt; + } + std::vector sizes; + for (auto d : *dims_) { + if (!d) { + return c10::nullopt; + } + sizes.push_back(d.value()); + } + return sizes; + } + + bool isComplete() const { + if (!dims_) { + return false; + } + for (auto d : *dims_) { + if (!d || !detail::isComplete(*d)) { + return false; + } + } + return true; + } + + private: + c10::optional dims_; +}; + +struct TensorType; +// TODO: investigate making this SingletonOrSharedTypePtr +using TensorTypePtr = std::shared_ptr; +// This type represents a single Tensor with a specific size +struct TORCH_API TensorType : public SharedType { + static TensorTypePtr create(const at::Tensor& t); + + // used by TensorType::create(size_t dim) which in turn used by + // shape_analysis.cpp + static TensorTypePtr create( + c10::optional scalar_type, + c10::optional device, + const VaryingShape& sizes, + const VaryingShape& strides, + c10::optional requires_grad, + c10::optional undefined = false, + bool tensor_contiguity = false); + + static TensorTypePtr create( + c10::optional scalar_type, + c10::optional device, + const SymbolicShape& sizes, + const VaryingShape& stride_, + c10::optional requires_grad, + c10::optional undefined = false); + + static TensorTypePtr create( + c10::optional scalar_type, + c10::optional device, + c10::optional dim, + c10::optional requires_grad); + + // overloaded create variadic template argument as it could not distinguish + // initializer list + static TensorTypePtr createContiguous( + at::ScalarType scalar_type, + at::Device device, + at::IntArrayRef sizes); + + static TypePtr fromNumberType(const Type& typ); + static TypePtr fromBoolType(); + + c10::optional dim() const { + return sizes().size(); + } + + VaryingShape sizes() const; + + VaryingShape strides() const; + + const VaryingShape& stride_properties() const { + return strides_; + } + + c10::optional device() const { + return device_; + 
} + c10::optional scalarType() const { + return scalar_type_; + } + c10::optional requiresGrad() const { + return requires_grad_; + } + bool requires_grad() const override { + return requires_grad_ ? *requires_grad_ : true; + } + + bool equals(const Type& rhs) const override; + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + std::string str() const override; + + std::string repr_str() const override { + if (isInferredType()) { + return str() + " (inferred)"; + } else { + return str(); + } + } + + c10::optional numel() const { + size_t prod = 1; + const auto& shape = sizes(); + + for (size_t i = 0; i < shape.size(); i++) { + if (!shape[i]) { + return c10::optional{}; + } + prod *= shape[i].value(); + } + return prod; + } + + TensorTypePtr withRequiresGrad(c10::optional s) { + auto copy = clone(); + copy->requires_grad_ = s; + return copy; + } + + TensorTypePtr withScalarType(c10::optional st) { + auto copy = clone(); + copy->scalar_type_ = st; + return copy; + } + + TensorTypePtr withDim(c10::optional d) { + auto copy = clone(); + // withDim is only used by the legacy executor + // that only cares about the rank, so create dummy symbols)) : + copy->sizes_ = SymbolicShape(d); + copy->strides_ = VaryingShape(d); + return copy; + } + + TensorTypePtr withStrides(VaryingShape sstrides) const { + auto cloned = clone(); + cloned->strides_ = std::move(sstrides); + return cloned; + } + + TensorTypePtr withSizesStrides( + at::IntArrayRef sizes, + at::IntArrayRef strides) const { + auto cloned = clone(); + auto ssizes = SymbolicShape(sizes); + cloned->sizes_ = ssizes; + cloned->strides_ = computeStrideProps(sizes, strides); + return cloned; + } + + TensorTypePtr withSymbolicShapes(SymbolicShape ssizes) const { + auto cloned = clone(); + cloned->sizes_ = std::move(ssizes); + return cloned; + } + + TensorTypePtr withSizes(at::IntArrayRef sizes) const { + return withSizesStrides( + sizes, contiguousStridesOf(sizes)); + } + + TensorTypePtr withDevice(const c10::optional device) const { + auto copy = clone(); + copy->device_ = device; + return copy; + } + + TensorTypePtr dimensionedOnly() const { + auto copy = clone(); + copy->sizes_ = SymbolicShape(sizes().size()); + copy->strides_ = VaryingShape(sizes().size()); + return copy; + } + + TensorTypePtr contiguous() const { + auto cloned = clone(); + TORCH_INTERNAL_ASSERT(sizes().concrete_sizes().has_value()); + auto strides = computeStrideProps( + *sizes().concrete_sizes(), + contiguousStridesOf(*sizes().concrete_sizes())); + cloned->strides_ = strides; + return cloned; + } + + const SymbolicShape& symbolic_sizes() const; + + TensorTypePtr merge(const TensorType& other, bool merge_sizes = true) const; + + bool matchTensor(const at::Tensor& t); + + // is all information about the type specified except for autograd? + // This replaces the notion of a 'CompleteTensorType' that used to exist + // in the type-hierarchy. Excluding require_grad and undefined allows + // this to match the old behavior. 
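// For instance (illustrative sketch using the TensorType factory and
// refinement helpers declared above, and isComplete() defined just below):
//
//   auto t = TensorType::createContiguous(at::kFloat, at::kCPU, {2, 3});
//   t->isComplete()                    -> true  (dtype, device, sizes, strides all known)
//   t->dimensionedOnly()->isComplete() -> false (only the rank survives)
//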
+ bool isComplete() const { + return scalar_type_ && device_ && sizes_.isComplete() && strides_.isComplete(); + } + + bool isInferredType() const { + return is_inferred_; + } + + static TensorTypePtr getInferred() { + static auto valueInferred = TensorType::create( + /*scalar_type=*/{}, + /*device=*/{}, + /*sizes=*/SymbolicShape(), + /*stride=*/VaryingShape{}, + /*requires_grad=*/{}, + /*undefined=*/false); + valueInferred->is_inferred_ = true; + return valueInferred; + } + + // this property is used by GuardElimination + // please see `checkInputs` for more details + bool isSummarized() const { + return !(isComplete() && requiresGrad().has_value() && + undefined().has_value()); + } + + TensorTypePtr withUndefined() { + auto r = clone(); + r->undefined_ = true; + return r; + } + + TensorTypePtr withPossiblyUndefined() { + auto r = clone(); + r->undefined_ = c10::nullopt; + return r; + } + + c10::optional undefined() const { return undefined_; } + + static const TensorTypePtr& get(); + + static const TypeKind Kind = TypeKind::TensorType; + + static std::vector contiguousStridesOf( + at::IntArrayRef in_sizes, + at::MemoryFormat memory_format = MemoryFormat::Contiguous) { + auto contiguous_fn = [](const at::IntArrayRef& sizes, + const std::vector& dim_order) { + std::vector strides(sizes.size()); + if (sizes.empty()) // zero-dim case + return strides; + + strides[dim_order[0]] = 1; + for (size_t i = 1; i < dim_order.size(); i++) { + auto cur_dim = dim_order[i]; + auto pre_dim = dim_order[i - 1]; + strides[cur_dim] = strides[pre_dim] * sizes[pre_dim]; + } + return strides; + }; + + std::vector dim_order(in_sizes.size()); + if (memory_format == MemoryFormat::ChannelsLast) { + dim_order = {1, 3, 2, 0}; + } else if (memory_format == MemoryFormat::ChannelsLast3d) { + dim_order = {1, 4, 3, 2, 0}; + } else { + auto ndims = in_sizes.size(); + for (size_t i = 0; i < ndims; i++) { + dim_order[i] = static_cast(ndims - i - 1); // Reverse + } + } + return contiguous_fn(in_sizes, dim_order); + } + + private: + TensorType( + c10::optional scalar_type, + c10::optional device, + SymbolicShape sizes, + VaryingShape strides, + c10::optional requires_grad, + c10::optional undefined = false); + + TensorTypePtr clone() const { + return TensorTypePtr(new TensorType( + scalar_type_, device_, sizes_, strides_, requires_grad_, undefined_)); + } + + static VaryingShape computeStrideProps( + at::IntArrayRef sizes, + at::IntArrayRef strides, + bool tensor_contiguity = false); + + c10::optional scalar_type_; + c10::optional device_; + SymbolicShape sizes_; + VaryingShape strides_; + c10::optional requires_grad_; + // we exploit the fact certain tensors must be zero in the autograd to + // optimize gradient computation. Such zero tensors are currently implemented + // with `UndefinedTensorImpl.` They can be handled only by special operators + // (e.g. `AutogradAdd`) and their `Tensor::defined()` property returns false. + // Normally, `undefined_` is set to false, unless a type was created + // with `withUndefined` + // This will also mean that `undefined` tensors will fail + // `subtypeOf(TensorType::get())` check + // undefined_ may become `c10::nullopt` if the tensor was observed to be both + // defined and undefined. However, no tensor type starts out with + // `undefined_` set to `c10::nullopt` + c10::optional undefined_; + // Represents whether or not this type was inferred. 
+ bool is_inferred_ = false; +}; + +struct ListType; +using ListTypePtr = std::shared_ptr; +struct TORCH_API ListType + : public SingleElementType { + // It's not exactly a singleton, but there should be exactly one instance of + // List[T] for every T + friend struct Type; + template + static ListTypePtr create(T&&... all) { + return ListTypePtr( + new ListType(std::forward(all)...)); // NOLINT(modernize-make-shared) + } + + std::string str() const override { + std::stringstream ss; + ss << getElementType()->str() << "[]"; + return ss.str(); + } + TypePtr createWithContained( + std::vector contained_types) const override { + return create(std::move(contained_types.at(0))); + } + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + // global singleton + // Given an inner type T and an identifier, + // this function wil return the global singleton type pointer + // the type List. + // The extra "identifier" argument is needed beccause we have multiple container types + // that all re-use this function (List, array, etc.) + static TypePtr get(const std::string& identifier, TypePtr inner); + + // common cast List[Tensor] + static ListTypePtr ofTensors(); + static ListTypePtr ofOptionalTensors(); + static ListTypePtr ofInts(); + static ListTypePtr ofSymInts(); + static ListTypePtr ofFloats(); + static ListTypePtr ofComplexDoubles(); + static ListTypePtr ofBools(); + static ListTypePtr ofStrings(); + static ListTypePtr ofNumbers(); + + private: + ListType(TypePtr elem) : SingleElementType(std::move(elem)) {} + + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + std::stringstream ss; + ss << "List[" << getElementType()->annotation_str(std::move(printer)) << "]"; + return ss.str(); + } +}; + +struct DictType; +using DictTypePtr = std::shared_ptr; +struct TORCH_API DictType : public SharedType { + friend struct Type; + static const TypeKind Kind = TypeKind::DictType; + + static DictTypePtr create(TypePtr key, TypePtr value) { + auto kind = key->kind(); + if (auto dyn = key->castRaw()) { + kind = dyn->dynamicKind(); + } + switch (kind) { + case TypeKind::AnyType: + case TypeKind::IntType: + case TypeKind::BoolType: + case TypeKind::FloatType: + case TypeKind::ComplexType: + case TypeKind::StringType: + case TypeKind::TensorType: + case TypeKind::DeviceObjType: + return DictTypePtr(new DictType(std::move(key), std::move(value))); + default: + AT_ERROR( + "Cannot create dict for key type '", + key->str(), + "', only int, float, complex, Tensor, device and string keys are supported"); + } + } + + // aligned with the format in FunctionSchema + std::string str() const override { + std::stringstream ss; + ss << "Dict(" << getKeyType()->str() << ", " << getValueType()->str() + << ")"; + return ss.str(); + } + + TypePtr createWithContained( + std::vector contained_types) const override { + if (contained_types.size() != 2) { + throw std::runtime_error("Expected 2 contained types"); + } + return create(std::move(contained_types.at(0)), std::move(contained_types.at(1))); + } + + const TypePtr& getKeyType() const { + return types.at(0); + } + + const TypePtr& getValueType() const { + return types.at(1); + } + + bool hasFreeVariables() const override { + return has_free_variables; + } + + at::ArrayRef containedTypes() const override { + return types; + } + + bool equals(const Type& rhs) const override { + if (auto* dict_rhs = rhs.castRaw()) { + return *getKeyType() == *(dict_rhs->getKeyType()) && + *getValueType() == *(dict_rhs->getValueType()); + } + 
return false; + } + + // global singleton + // Given an inner type T and an identifier, + // this function will return the global singleton type pointer + // the type List. + // The extra "identifier" argument is needed because we have multiple container types + // that all re-use this function (Dict and unordered_map) + static TypePtr get(const std::string& identifier, TypePtr key, TypePtr val); + + private: + DictType(TypePtr key, TypePtr value) + : SharedType(TypeKind::DictType), + has_free_variables( + key->hasFreeVariables() || value->hasFreeVariables()) { + types.reserve(2); + types.push_back(std::move(key)); + types.push_back(std::move(value)); + } + + std::string annotation_str_impl(TypePrinter printer = nullptr) const override; + + std::vector types; + bool has_free_variables; +}; + +struct FutureType; +using FutureTypePtr = std::shared_ptr; + +struct TORCH_API FutureType + : public SingleElementType { + friend struct Type; + template + static FutureTypePtr create(TypePtr elem) { + return FutureTypePtr( + new FutureType(std::move(elem))); // NOLINT(modernize-make-shared) + } + + std::string str() const override { + std::stringstream ss; + ss << "Future(" << getElementType()->str() << ")"; + return ss.str(); + } + TypePtr createWithContained( + std::vector contained_types) const override { + return create(std::move(contained_types.at(0))); + } + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override { + if (Type::isSubtypeOfExt(rhs, why_not)) { + return true; + } + if (auto rhs_ = rhs.castRaw()) { + return getElementType()->isSubtypeOfExt(*rhs_->getElementType(), why_not); + } + return false; + } + + private: + FutureType(TypePtr elem) : SingleElementType(std::move(elem)) {} + + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + std::stringstream ss; + ss << "Future[" << getElementType()->annotation_str(std::move(printer)) << "]"; + return ss.str(); + } +}; + +struct AwaitType; +using AwaitTypePtr = std::shared_ptr; + +struct TORCH_API AwaitType + : public SingleElementType { + friend struct Type; + template + static AwaitTypePtr create(TypePtr elem) { + return AwaitTypePtr( + new AwaitType(std::move(elem))); // NOLINT(modernize-make-shared) + } + + std::string str() const override { + std::stringstream ss; + ss << "Await(" << getElementType()->str() << ")"; + return ss.str(); + } + TypePtr createWithContained( + std::vector contained_types) const override { + return create(std::move(contained_types.at(0))); + } + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override { + if (Type::isSubtypeOfExt(rhs, why_not)) { + return true; + } + if (auto rhs_ = rhs.castRaw()) { + return getElementType()->isSubtypeOfExt(*rhs_->getElementType(), why_not); + } + return false; + } + + private: + AwaitType(TypePtr elem) : SingleElementType(std::move(elem)) {} + + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + std::stringstream ss; + ss << "Await[" << getElementType()->annotation_str(printer) << "]"; + return ss.str(); + } +}; + +struct RRefType; +using RRefTypePtr = std::shared_ptr; + +struct TORCH_API RRefType + : public SingleElementType { + friend struct Type; + template + static RRefTypePtr create(TypePtr elem) { + return RRefTypePtr( + new RRefType(std::move(elem))); // NOLINT(modernize-make-shared) + } + + std::string str() const override { + std::stringstream ss; + ss << "RRef(" << getElementType()->str() << ")"; + return ss.str(); + } + TypePtr createWithContained( + std::vector 
contained_types) const override { + return create(std::move(contained_types.at(0))); + } + + private: + RRefType(TypePtr elem) : SingleElementType(std::move(elem)) {} + + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + std::stringstream ss; + ss << "RRef[" << getElementType()->annotation_str(std::move(printer)) << "]"; + return ss.str(); + } +}; + +// Any should never appear in a named type like a class, namedtuple or +// interface. If it does, then dynamic type information will be lost in the +// Pickler, leading to hard-to-track-down bugs that will only occur +// after saving or loading a model. This is because we rely on the +// static types in named types to reconstruct type tags of loaded +// values. Lifting this restriction requires solving the serialization +// problem first. +TORCH_API void checkNoAny( + const Type& base, + const char* what, + const std::string& attrname, + const TypePtr& attrtype); + +struct TupleType; +using TupleTypePtr = std::shared_ptr; +using NameList = std::vector; +// This type represents a Tuple +struct TORCH_API TupleType : public NamedType { + + static TupleTypePtr createNamed(const c10::optional& name, + const std::vector& field_names, + const std::vector& field_types, + std::vector& field_defaults); + + static TupleTypePtr createNamed(const c10::optional& name, + const std::vector& field_names, + const std::vector& field_types); + + static TupleTypePtr createNamed(const c10::optional& name, + const std::vector& field_names, + const std::vector& field_types); + + static TupleTypePtr create( + std::vector types) { + return TupleTypePtr(new TupleType( + std::move(types), + c10::nullopt, + nullptr)); // NOLINT(modernize-make-shared) + } + static TupleTypePtr create() { + return create({}); + } + + at::ArrayRef elements() const { + return elements_; + } + + bool equals(const Type& rhs) const override; + bool isSubtypeOfExt(const Type& rhs_, std::ostream* why_not) const override; + + std::string str() const override; + bool hasFreeVariables() const override { + return has_free_variables_; + } + at::ArrayRef containedTypes() const override { + return elements_; + } + TypePtr createWithContained( + std::vector contained_types) const override { + return std::shared_ptr( + new TupleType(std::move(contained_types), name(), schema())); + } + const std::shared_ptr& schema() const { + return schema_; + } + c10::optional> names() const; + + static const TypeKind Kind = TypeKind::TupleType; + + private: + template + static TupleTypePtr createWithSpec( + const c10::optional& name, + const std::vector& field_names, + const std::vector& field_types, + std::vector& field_defaults); + + TupleType( + std::vector elements_, + c10::optional name, + std::shared_ptr schema); + + bool compare( + const Type& rhs, + const std::function& fn) const { + if (rhs.kind() != kind()) { + return false; + } + + const auto& l_elements = elements(); + const auto& r_elements = rhs.castRaw()->elements(); + if (l_elements.size() != r_elements.size()) + return false; + for (size_t i = 0; i < l_elements.size(); ++i) { + if (!fn(*l_elements[i], *r_elements[i])) + return false; + } + return true; + } + + std::string annotation_str_impl(TypePrinter printer = nullptr) const override; + + std::vector elements_; + bool has_free_variables_; + std::shared_ptr schema_; +}; + +// the common supertype of all Enums, only used in operator registraion. 
+// EnumType <: AnyEnumType for all Enums +struct AnyEnumType; +using AnyEnumTypePtr = SingletonTypePtr; +struct TORCH_API AnyEnumType final : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "AnyEnumType"; + } + static const TypeKind Kind = TypeKind::AnyEnumType; + // global singleton + static AnyEnumTypePtr get(); +private: + AnyEnumType() + : Type(TypeKind::AnyEnumType) {} +}; + +struct NumberType; +using NumberTypePtr = SingletonTypePtr; +// This type represents a Python number +// Subtype hierarchy for Number Types (NumberType as the base type): +// IntType <: NumberType +// FloatType <: NumberType +// ComplexType <:NumberType +// +// WARNING: if you add a new subtype of NumberType that is not +// represented by a global singleton, you need to change NumberTypePtr +// to a SingletonOrSharedTypePtr and deal with NumberType needing to +// both inherit and not inherit from SharedType! +struct TORCH_API NumberType : public Type { + bool equals(const Type& rhs) const override; + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + std::string str() const override { + return "Scalar"; // match what PythonArgParser says for clarity + } + static const TypeKind Kind = TypeKind::NumberType; + // global singleton + static NumberTypePtr get(); + + protected: + NumberType(TypeKind kind = TypeKind::NumberType) : Type(kind) {} + + std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override { + return "number"; // technically not a valid python type, but + // we need to use it when parsing back in annotations + // for implicit conversions + } +}; + +struct FloatType; +using FloatTypePtr = SingletonTypePtr; +// This type represents a Python float number +struct TORCH_API FloatType : public NumberType { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "float"; + } + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override { + // NOLINTNEXTLINE(bugprone-parent-virtual-call) + return rhs.kind() == TypeKind::NumberType || Type::isSubtypeOfExt(rhs, why_not); + } + static const TypeKind Kind = TypeKind::FloatType; + // global singleton + static FloatTypePtr get(); + + private: + FloatType() : NumberType(TypeKind::FloatType) {} + std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override { + return "float"; + } +}; + +struct ComplexType; +using ComplexTypePtr = SingletonTypePtr; +// This type represents a Python float number +struct TORCH_API ComplexType : public NumberType { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "complex"; + } + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override { + // NOLINTNEXTLINE(bugprone-parent-virtual-call) + return rhs.kind() == TypeKind::NumberType || Type::isSubtypeOfExt(rhs, why_not); + } + static const TypeKind Kind = TypeKind::ComplexType; + // global singleton + static ComplexTypePtr get(); + + private: + ComplexType() : NumberType(TypeKind::ComplexType) {} + std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override { + return "complex"; + } +}; + +// We need to introduce `SymIntType` to represent the `SymInt` type +// used in function schemas e.g. `aten::narrow_copy(... 
SymInt length) +// `SymInt` will be used to enable tracing arithmetic operations on +// dimension values. Please see [SymInt.h] for more information +struct SymIntType; +using SymIntTypePtr = SingletonTypePtr; +struct TORCH_API SymIntType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "SymInt"; + } + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + return "int"; + } + static const TypeKind Kind = TypeKind::SymIntType; + // global singleton + static SymIntTypePtr get(); + + private: + SymIntType() : Type(TypeKind::SymIntType) {} +}; + +struct SymFloatType; +using SymFloatTypePtr = SingletonTypePtr; +struct TORCH_API SymFloatType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "SymFloat"; + } + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + return "float"; + } + static const TypeKind Kind = TypeKind::SymFloatType; + // global singleton + static SymFloatTypePtr get(); + + private: + SymFloatType() : Type(TypeKind::SymFloatType) {} +}; + +struct SymBoolType; +using SymBoolTypePtr = SingletonTypePtr; +struct TORCH_API SymBoolType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "SymBool"; + } + std::string annotation_str_impl(TypePrinter printer = nullptr) const override { + return "bool"; + } + static const TypeKind Kind = TypeKind::SymBoolType; + // global singleton + static SymBoolTypePtr get(); + + private: + SymBoolType() : Type(TypeKind::SymBoolType) {} +}; + +struct IntType; +using IntTypePtr = SingletonTypePtr; +// This type represents a Python int number +struct TORCH_API IntType : public NumberType { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "int"; + } + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override { + // NOLINTNEXTLINE(bugprone-parent-virtual-call) + return rhs.kind() == TypeKind::NumberType || Type::isSubtypeOfExt(rhs, why_not); + } + static const TypeKind Kind = TypeKind::IntType; + // global singleton + static IntTypePtr get(); + + private: + IntType() : NumberType(TypeKind::IntType) {} + std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override { + return "int"; + } +}; + +struct BoolType; +using BoolTypePtr = SingletonTypePtr; +// This node represents a Python bool value +struct TORCH_API BoolType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "bool"; + } + static const TypeKind Kind = TypeKind::BoolType; + // global singleton + static BoolTypePtr get(); + + private: + BoolType() : Type(TypeKind::BoolType) {} +}; + +struct StringType; +using StringTypePtr = SingletonTypePtr; +// This type represents a Python string +struct TORCH_API StringType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + // we only use "str" (not "string") in both FunctionSchema and script + return annotation_str(); + } + std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override { + return "str"; + } + static const TypeKind Kind = TypeKind::StringType; + // global singleton + static StringTypePtr 
get(); + + private: + StringType() : Type(TypeKind::StringType) {} +}; + +struct StorageType; +using StorageTypePtr = SingletonTypePtr; +struct TORCH_API StorageType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return annotation_str(); + } + std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override { + return "Storage"; + } + static const TypeKind Kind = TypeKind::StorageType; + // global singleton + static StorageTypePtr get(); + + private: + StorageType() : Type(TypeKind::StorageType) {} +}; + +struct FunctionType; +using FunctionTypePtr = std::shared_ptr; +struct TORCH_API FunctionType : public NamedType { + static FunctionTypePtr create(torch::jit::Function* function) { + return FunctionTypePtr( + new FunctionType(function)); // NOLINT(modernize-make-shared) + } + bool equals(const Type& rhs) const override { + if (auto func_type = rhs.cast()) { + return func_type->function_ == function_; + } + + return false; + } + std::string str() const override { + return "Function"; + } + torch::jit::Function* function() const { + return function_; + } + static const TypeKind Kind = TypeKind::FunctionType; + + private: + FunctionType(torch::jit::Function* function); + std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override { + const auto& n = name().value(); + return n.qualifiedName(); + } + torch::jit::Function* function_; +}; + +struct NoneType; +using NoneTypePtr = SingletonTypePtr; +// This type represents a Python None +struct TORCH_API NoneType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "NoneType"; + } + bool isSubtypeOfExt(const Type& rhs, std::ostream *why_not) const override; + + static const TypeKind Kind = TypeKind::NoneType; + // global singleton + static NoneTypePtr get(); + + private: + NoneType() : Type(TypeKind::NoneType) {} +}; + +struct GeneratorType; +using GeneratorTypePtr = SingletonTypePtr; +// This type represents a Generator +struct TORCH_API GeneratorType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Generator"; + } + static const TypeKind Kind = TypeKind::GeneratorType; + // global singleton + static GeneratorTypePtr get(); + + private: + GeneratorType() : Type(TypeKind::GeneratorType) {} +}; + +struct QuantizerType; +using QuantizerTypePtr = SingletonTypePtr; +// This type represents a Quantizer +struct TORCH_API QuantizerType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Quantizer"; + } + static const TypeKind Kind = TypeKind::QuantizerType; + // global singleton + static QuantizerTypePtr get(); + + private: + QuantizerType() : Type(TypeKind::QuantizerType) {} +}; + +struct QSchemeType; +using QSchemeTypePtr = SingletonTypePtr; +// This type represents a QScheme +struct TORCH_API QSchemeType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "QScheme"; + } + static const TypeKind Kind = TypeKind::QSchemeType; + // global singleton + static QSchemeTypePtr get(); + + private: + QSchemeType() : Type(TypeKind::QSchemeType) {} +}; + +struct DeviceObjType; +using DeviceObjTypePtr = SingletonTypePtr; +// This type represents a Device 
+struct TORCH_API DeviceObjType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Device"; + } + static const TypeKind Kind = TypeKind::DeviceObjType; + // global singleton + static DeviceObjTypePtr get(); + + private: + DeviceObjType() : Type(TypeKind::DeviceObjType) {} +}; + +struct StreamObjType; +using StreamObjTypePtr = SingletonTypePtr; +// This type represents a Generator +struct TORCH_API StreamObjType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Stream"; + } + static const TypeKind Kind = TypeKind::StreamObjType; + // global singleton + static StreamObjTypePtr get(); + +private: + StreamObjType() : Type(TypeKind::StreamObjType) {} +}; + +struct VarType; +using VarTypePtr = std::shared_ptr; +// This type represents a type variable, used in FunctionSchema +struct VarType : public SharedType { + static VarTypePtr create(std::string name_) { + return VarTypePtr(new VarType(std::move(name_))); + } + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return name(); + } + const std::string& name() const { + return name_; + } + bool hasFreeVariables() const override { + return true; + } + static const TypeKind Kind = TypeKind::VarType; + + private: + VarType(std::string name_) + : SharedType(TypeKind::VarType), name_(std::move(name_)) {} + std::string name_; +}; + +struct CapsuleType; +using CapsuleTypePtr = SingletonTypePtr; +// This type represents a Python Capsule. +// It does not appear in the IR and is only used during runtime +struct TORCH_API CapsuleType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "Capsule"; + } + static const TypeKind Kind = TypeKind::CapsuleType; + // global singleton + static CapsuleTypePtr get(); +private: + CapsuleType() + : Type(TypeKind::CapsuleType) {} +}; + +struct PyObjectType; +using PyObjectTypePtr = SingletonTypePtr; +// This type represents a PyObject Type +struct TORCH_API PyObjectType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "PyObject"; + } + static const TypeKind Kind = TypeKind::PyObjectType; + // global singleton + static PyObjectTypePtr get(); +private: + PyObjectType() + : Type(TypeKind::PyObjectType) {} +}; + +enum class TypeVerbosity { + None, + Type, + TypeAndStride, + Full, + Symbolic, + Default = Full, +}; + +TORCH_API TypeVerbosity type_verbosity(); + +TORCH_API std::ostream& operator<<(std::ostream& out, const Type& t); +template +TORCH_API std::ostream& operator<<( + std::ostream& out, + const VaryingShape& t); +TORCH_API std::ostream& operator<<(std::ostream& os, const SymbolicShape& s); +TORCH_API std::ostream& operator<<(std::ostream& os, const ShapeSymbol& s); +TORCH_API std::ostream& operator<<(std::ostream& os, const Stride& s); +// what is the type, ignoring extra size/shape information? +// e.g. Tensor(2x3) -> Dynamic, and Tuple(Tensor(2x3),...) -> Tuple(Dynamic,...) + +// `unshapedType` is used to remove Tensor subtypes. We treat all Tensor +// subtypes as simply "Tensor"; we also create a new version of any +// container types in which internal Tensors have undergone the same +// operation. 
This is used for type comparisons between two Tensor types +// (`unshapedType` means that we don't falsely return `false` for e.g. +// Tensors of different dimensions). It's also used in the alias +// analysis pass. +// Be careful with calls because this can be very slow. If calling this +// on a graph, use `EraseShapeInformation` in shape_analysis.h +inline TypePtr unshapedType(const TypePtr& type) { + if (type->isSubtypeOf(*TensorType::get())) { + return TensorType::get(); + } + at::ArrayRef contained = type->containedTypes(); + if (contained.empty()) { + return type; + } + return type->withContained(fmap(type->containedTypes(), unshapedType)); +} + +inline TypePtr TensorType::fromNumberType(const Type& typ) { + if (typ.isSubtypeOf(*IntType::get())) { + return TensorType::createContiguous(at::kLong, at::kCPU, {}); + } else if (typ.isSubtypeOf(*FloatType::get())) { + return TensorType::createContiguous(at::kDouble, at::kCPU, {}); + } else if (typ.isSubtypeOf(*BoolType::get())) { + return TensorType::createContiguous(at::kBool, at::kCPU, {}); + } else if (typ.kind() == NumberType::Kind) { + return TensorType::create(c10::nullopt, at::kCPU, {}, c10::nullopt); + } + TORCH_CHECK(false, "Unknown number type: ", typ.str()); +} +inline TypePtr TensorType::fromBoolType() { + return TensorType::createContiguous(at::kBool, at::kCPU, {}); +} + +inline c10::optional tryScalarTypeFromJitType(const Type& type) { + if (type == *FloatType::get()) { + return at::typeMetaToScalarType(c10::get_default_dtype()); + } else if (type == *IntType::get()) { + return at::ScalarType::Long; + } else if (type == *BoolType::get()) { + return at::ScalarType::Bool; + } + return c10::nullopt; +} + +inline at::ScalarType scalarTypeFromJitType(const Type& type) { + auto result = tryScalarTypeFromJitType(type); + TORCH_CHECK( + result, + "Add new condition, expected Float, Complex, Int, or Bool but got", + type.str()); + return *result; +} + +// Attempt to find the correct supertype of the two types `t1` and `t2`. +// If no supertype is found, then nullopt will be returned if +// `default_to_union` is false, and `Union[t1, t2]` will be returned +// if it is true. If `t1 == t2`, or `t1` is a type refinement of `t2`, +// then `t2` will be returned (and vice versa). +// +// Two different tensortypes will return dynamic. +// +// Currently we chose not to support returning a NumberType for +// two types from the set of {FloatType, IntType, ComplexType}, because +// there is a lack of operator support for NumberType. +// +// If `type_hint` is an `InterfaceType`, then we can use that as a +// potential supertype for `ClassType`s in the list. Otherwise, we have +// no way to find and use some common interface type +TORCH_API c10::optional unifyTypes( + const TypePtr& t1, + const TypePtr& t2, + bool default_to_union = false, + const TypePtr& type_hint = nullptr); + +TORCH_API c10::optional unifyTypeList( + at::ArrayRef elements, + std::ostream& why_not, + bool default_to_union = false, + const TypePtr& type_hint = nullptr); + +namespace detail { +template +struct getTypePtr_ final { + static decltype(auto) call() { + return ([]() { + try { + return getCustomClassType(); + } catch(const c10::Error&) { + TORCH_CHECK( + false, + "Type ", + c10::util::get_fully_qualified_type_name(), + " could not be converted to any of the known types." 
+ ); + } + }()); + } +}; + +template +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return getTypePtr_::call(); + } +}; + +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return AnyType::get(); + } +}; + +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return TensorType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return StorageType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return StreamObjType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return FloatType::get(); + } +}; +template <> +struct getTypePtr_> final { + static decltype(auto) call() { + return ComplexType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return IntType::get(); + } +}; + +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return IntType::get(); + } +}; + +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return SymIntType::get(); + } +}; +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return IntType::get(); + } +}; + +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return SymFloatType::get(); + } +}; +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return FloatType::get(); + } +}; + +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return SymBoolType::get(); + } +}; +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return BoolType::get(); + } +}; + +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return DeviceObjType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return BoolType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return NumberType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return QSchemeType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return TypeFactory::create( + TypeFactory::get()); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return StringType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return StringType::get(); + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return StringType::get(); + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per vector" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = ListType::get("vector", inner_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per ArrayRef" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. 
+ static auto type = ListType::get("ArrayRef", inner_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_ final { + static const auto& call() { + static auto type = ListType::create(getMaybeFakeTypePtr_::call()); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per List" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = ListType::get("List", inner_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + static auto type = ListType::get("List", inner_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per array" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + // (Concatenating the length onto the end of the string because we want a unique + // type_ptr created for every std::array type). + static auto type = ListType::get(std::string("array") + std::to_string(N), inner_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_key_type = getMaybeFakeTypePtr_::call(); + static auto inner_val_type = getMaybeFakeTypePtr_::call(); + // The "per unordered_map" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = DictType::get("unordered_map", inner_key_type, inner_val_type); + return type; + } +}; +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_key_type = getMaybeFakeTypePtr_::call(); + static auto inner_val_type = getMaybeFakeTypePtr_::call(); + // The "per Dict" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = DictType::get("Dict", inner_key_type, inner_val_type); + return type; + } +}; + +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per optional" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = OptionalType::get(inner_type); + return type; + } +}; + + +template<> +struct getTypePtr_ final { + static const auto& call() { + static auto inner_type = getMaybeFakeTypePtr_::call(); + // The "per optional" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto type = OptionalType::get(inner_type); + return type; + } +}; + +template +struct getMaybeFakeTypePtr_ final { + static const auto& call() { + // The "per optional" static singleton needs to live in a .cpp file, + // otherwise we'll end up with one singleton instance per shared library. + static auto inner_type = getMaybeFakeTypePtr_::call(); + static auto type = OptionalType::get(inner_type); + return type; + } +}; + +template +struct getMaybeFakeTypePtr_, fake> final { + static const auto& call() { + static auto type = ([]() { + std::vector contained_types = { + (getMaybeFakeTypePtr_::call())... 
+ }; + return TupleType::create(std::move(contained_types)); + })(); + return type; + } +}; +template <> +struct getTypePtr_ final { + static decltype(auto) call() { + return NoneType::get(); + } +}; +} // namespace detail +template +inline decltype(auto) getTypePtr() { + // TODO: static_assert that a templated function exists, and throw a friendly + // error message if not + return detail::getMaybeFakeTypePtr_::call(); +} + +template +inline TypePtr getTypePtrCopy() { + // TODO: static_assert that a templated function exists, and throw a friendly + // error message if not + return getTypePtr(); +} + +template +inline decltype(auto) getFakeTypePtr() { + return detail::getMaybeFakeTypePtr_::call(); +} + +template +inline TypePtr getFakeTypePtrCopy() { + return getFakeTypePtr(); +} + +using TypeEnv = std::unordered_map; +struct MatchTypeReturn { + MatchTypeReturn(std::string reason) : reason_(std::move(reason)) {} + static MatchTypeReturn Success() { + return MatchTypeReturn(); + } + bool success() const { + return !reason_.has_value(); + } + const std::string& reason() const { + return reason_.value(); + } + + private: + MatchTypeReturn() + : reason_(c10::nullopt) {} + c10::optional reason_; // is there is no match, this contains the reason +}; + +// attempt to match the type variables in formal to actual, adding them to type_env. +// If no match is possible this returns a MatchTypeReturn with r.success() == false +// and a r.reason() that describes why it could not match. +// note: It is possible to successfully match a formal, but for type variables +// in the formal to still not be defined. In particular, None matches Optional[T] +// but does not define the value of T. +TORCH_API MatchTypeReturn +matchTypeVariables(const TypePtr& formal, const TypePtr& actual, TypeEnv& type_env); + +// replace type variables appearing in `type` with the values in +// `type_env`. Returns nullptr if a variable used in `type` +// does not appear in `type_env` +TORCH_API TypePtr tryEvalTypeVariables(const TypePtr& type, TypeEnv& type_env); + +TORCH_API bool elementTypeCanBeInferredFromMembers(const TypePtr& elem_type); + +struct InterfaceType; +using InterfaceTypePtr = std::shared_ptr; + +// Interfaces are a list of abstract methods that a class might meet. +// If a class provides those methods, it implicitly meets the interface. + +// Subtype relations for Interface with ClassType: +// lhs (ClassType or InterfaceType) is a subtype of rhs if: +// 1. lhs methods are a superset of rhs methods +// 2. if rhs is module interface, the lhs must be module interface or module itself +struct TORCH_API InterfaceType : public NamedType { + static InterfaceTypePtr create( + QualifiedName qualifiedName, bool is_module=false); + + bool equals(const Type& rhs) const override { + if (auto user_rhs = rhs.castRaw()) { + return isSubTypeImpl(*this, *user_rhs, nullptr) && + isSubTypeImpl(*user_rhs, *this, nullptr); + } + return false; + } + + std::string str() const override { + return std::string("InterfaceType<") + name()->name() + ">"; + } + + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + + // try to find a method of this interface, + // returns nullptr if not found. 
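// Illustrative sketch, assuming an InterfaceTypePtr `iface` is already in
// scope (the variable name is hypothetical): probing for an abstract method
// declared on the interface looks like
//
//   if (const c10::FunctionSchema* schema = iface->getMethod("forward")) {
//     // found: `schema` describes the abstract method's signature
//   } else {
//     // nullptr: the interface declares no method with that name
//   }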
+ const FunctionSchema* getMethod(const std::string& name) const; + void addMethod(FunctionSchema schema); + const std::vector& methods() const { + return *methods_; + } + + bool is_module() const override{ + return is_module_; + } + static const TypeKind Kind = TypeKind::InterfaceType; + ~InterfaceType() override; + private: + InterfaceType(QualifiedName name, bool is_module); + static bool isSubTypeImpl( + const InterfaceType& lhs, + const InterfaceType& rhs, + std::ostream* why_not); + + std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override { + return name()->qualifiedName(); + } + + // shared_ptr so that this header does not have to depend on + // FunctionSchema.h + std::shared_ptr> methods_; + // flag to distinguish if it's an interface type from a module or not + bool is_module_; +}; + +template +struct EnumerationType : public Type { +static const TypeKind Kind = K; + +bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); +} + +protected: +EnumerationType() : Type(Kind) {} +}; + +// WARNING: These enumeration types below DO NOT actually get parsed out +// from the logical schema strings, instead they are mapped as ints. To +// observe these types, use real_type() instead of type() on Argument + +struct ScalarTypeType; +using ScalarTypeTypePtr = SingletonTypePtr; +struct TORCH_API ScalarTypeType : public EnumerationType { +std::string str() const override { +return "ScalarType"; +} +static const TypeKind Kind = TypeKind::ScalarTypeType; +// global singleton +static ScalarTypeTypePtr get(); + +private: +ScalarTypeType() : EnumerationType() {} +}; + +struct MemoryFormatType; +using MemoryFormatTypePtr = SingletonTypePtr; +struct TORCH_API MemoryFormatType : public EnumerationType { +std::string str() const override { +return "MemoryFormat"; +} +static const TypeKind Kind = TypeKind::MemoryFormatType; +// global singleton +static MemoryFormatTypePtr get(); + +private: +MemoryFormatType() : EnumerationType() {} +}; + +struct LayoutType; +using LayoutTypePtr = SingletonTypePtr; +struct TORCH_API LayoutType : public EnumerationType { +std::string str() const override { +return "Layout"; +} +static const TypeKind Kind = TypeKind::LayoutType; +// global singleton +static LayoutTypePtr get(); + +private: +LayoutType() : EnumerationType() {} +}; + +namespace detail { +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return ScalarTypeType::get(); + } +}; +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return LayoutType::get(); + } +}; +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return MemoryFormatType::get(); + } +}; +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return IntType::get(); + } +}; +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return IntType::get(); + } +}; +template <> +struct getMaybeFakeTypePtr_ final { + static decltype(auto) call() { + return IntType::get(); + } +}; +} // namespace detail + +// the common supertype of all lists, +// List[T] <: AnyList for all T +struct AnyListType; +using AnyListTypePtr = SingletonTypePtr; +struct TORCH_API AnyListType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "list"; + } + static const TypeKind Kind = TypeKind::AnyListType; + // global singleton + static AnyListTypePtr get(); +private: + 
AnyListType() + : Type(TypeKind::AnyListType) {} +}; + +// the common supertype of all tuples, +// Tuple[T...] <: AnyTuple for all T +struct AnyTupleType; +using AnyTupleTypePtr = SingletonTypePtr; +struct TORCH_API AnyTupleType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + + std::string str() const override { + return "tuple"; + } + static const TypeKind Kind = TypeKind::AnyTupleType; + + // global singleton + static AnyTupleTypePtr get(); +private: + AnyTupleType() + : Type(TypeKind::AnyTupleType) {} +}; + +// the common supertype of all classes, +// ClassType <: AnyClassType for all classes +struct AnyClassType; +using AnyClassTypePtr = SingletonTypePtr; +struct TORCH_API AnyClassType : public Type { + bool equals(const Type& rhs) const override { + return rhs.kind() == kind(); + } + std::string str() const override { + return "AnyClassType"; + } + static const TypeKind Kind = TypeKind::AnyClassType; + // global singleton + static AnyClassTypePtr get(); +private: + AnyClassType() + : Type(TypeKind::AnyClassType) {} +}; + +template<> +inline typename detail::CastReturnType::type Type::cast() { + if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType || + kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return nullptr; +} + +template<> +inline typename detail::CastConstReturnType::type Type::cast() const { + if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType || + kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return nullptr; +} + +template<> +inline const NamedType* Type::castRaw() const { + if (kind() == TypeKind::TupleType || kind() == TypeKind::FunctionType || + kind() == TypeKind::ClassType || kind() == TypeKind::InterfaceType) { + return static_cast(this); + } + return nullptr; +} + +// Used as a return type when inferring the IValue type of a Python object. +struct InferredType { + /* implicit */ InferredType(TypePtr type) : type_(std::move(type)) {} + /* implicit */ InferredType(std::string reason) + : type_(nullptr), reason_(std::move(reason)) {} + TypePtr type() const { + TORCH_INTERNAL_ASSERT( + type_, + "Tried to get the type from an InferredType but the type is null. 
", + "Reason: ", + reason_); + return type_; + } + bool success() const { + return type_ != nullptr; + } + const std::string& reason() const { + TORCH_INTERNAL_ASSERT(!type_); + return reason_; + } + +private: + TypePtr type_; + std::string reason_; +}; + +TORCH_API bool containsAnyType(const TypePtr& type); + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/jit_type_base.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/jit_type_base.h new file mode 100644 index 0000000000000000000000000000000000000000..bf4909aaefec7baa64e1f2d4b8b3a06743dff253 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/jit_type_base.h @@ -0,0 +1,719 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +#define C10_FORALL_TYPES(_) \ + _(AnyType) \ + _(EnumType) \ + _(AnyEnumType) \ + _(TensorType) \ + _(StorageType) \ + _(TupleType) \ + _(ListType) \ + _(DictType) \ + _(NumberType) \ + _(FloatType) \ + _(ComplexType) \ + _(FutureType) \ + _(AwaitType) \ + _(RRefType) \ + _(IntType) \ + _(NoneType) \ + _(StringType) \ + _(GeneratorType) \ + _(QuantizerType) \ + _(BoolType) \ + _(OptionalType) \ + _(VarType) \ + _(DeviceObjType) \ + _(StreamObjType) \ + _(FunctionType) \ + _(ClassType) \ + _(PyObjectType) \ + _(CapsuleType) \ + _(InterfaceType) \ + _(QSchemeType) \ + _(ScalarTypeType) \ + _(LayoutType) \ + _(MemoryFormatType) \ + _(AnyListType) \ + _(AnyTupleType) \ + _(AnyClassType) \ + _(SymIntType) \ + _(SymFloatType) \ + _(SymBoolType) \ + _(UnionType) \ + _(DynamicType) + +enum class TypeKind { +#define DEFINE_TYPE(T) T, + C10_FORALL_TYPES(DEFINE_TYPE) +#undef DEFINE_TYPE +}; + +TORCH_API const char* typeKindToString(TypeKind kind); + +struct Type; +struct SharedType; + +// Use this to customize how a Type is printed using `annotation_str()`. If +// c10::nullopt is returned, `annotation_str()` falls through to its default +// implementation. 
+using TypePrinter = std::function(const Type&)>; + +namespace detail { +template +struct IsSingletonType : public std::integral_constant {}; +} // namespace detail +#define TORCH_DECLARE_SINGLETON(Type) \ + struct Type; \ + namespace detail { \ + template <> struct IsSingletonType : public std::integral_constant {}; \ + } + +TORCH_DECLARE_SINGLETON(AnyType); +TORCH_DECLARE_SINGLETON(AnyEnumType); +TORCH_DECLARE_SINGLETON(NumberType); +TORCH_DECLARE_SINGLETON(FloatType); +TORCH_DECLARE_SINGLETON(ComplexType); +TORCH_DECLARE_SINGLETON(IntType); +TORCH_DECLARE_SINGLETON(BoolType); +TORCH_DECLARE_SINGLETON(StringType); +TORCH_DECLARE_SINGLETON(StorageType); +TORCH_DECLARE_SINGLETON(NoneType); +TORCH_DECLARE_SINGLETON(GeneratorType); +TORCH_DECLARE_SINGLETON(QuantizerType); +TORCH_DECLARE_SINGLETON(QSchemeType); +TORCH_DECLARE_SINGLETON(DeviceObjType); +TORCH_DECLARE_SINGLETON(StreamObjType); +TORCH_DECLARE_SINGLETON(CapsuleType); +TORCH_DECLARE_SINGLETON(PyObjectType); +TORCH_DECLARE_SINGLETON(ScalarTypeType); +TORCH_DECLARE_SINGLETON(LayoutType); +TORCH_DECLARE_SINGLETON(MemoryFormatType); +TORCH_DECLARE_SINGLETON(AnyListType); +TORCH_DECLARE_SINGLETON(AnyTupleType); +TORCH_DECLARE_SINGLETON(AnyClassType); + +namespace detail { +template +struct CastReturnType { + using type = std::shared_ptr; +}; + +template +struct CastReturnType::value>::type> { + using type = SingletonTypePtr; +}; + +template +struct CastConstReturnType { + using type = std::shared_ptr; +}; + +template +struct CastConstReturnType::value>::type> { + using type = SingletonTypePtr; +}; + +template +struct as_shared_type { + using type = SharedType*; +}; + +template +struct as_shared_type { + using type = const SharedType *; +}; +} // namespace detail + +struct TORCH_API Type { + friend TORCH_API bool operator==(const Type& lhs, const Type& rhs); + private: + TypeKind kind_; + + protected: + Type(TypeKind kind) : kind_(kind) {} + + Type(const Type&) = default; + Type& operator=(const Type&) = default; + Type(Type&&) noexcept = default; + Type& operator=(Type&&) noexcept = default; + + virtual std::string annotation_str_impl(TypePrinter /*printer*/) const { + return str(); + } + // a == b + virtual bool equals(const Type& rhs) const = 0; + // a == b <=> b == a + virtual bool symmetric() const { + return true; + } + + public: + template + class SingletonOrSharedTypePtr { + public: + using element_type = typename std::shared_ptr::element_type; + + SingletonOrSharedTypePtr() = default; + + /* implicit */ SingletonOrSharedTypePtr(std::shared_ptr x) + : repr_(std::move(x)) {} + + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(std::shared_ptr x) + : repr_(std::move(x)) {} + + /* implicit */ SingletonOrSharedTypePtr(std::nullptr_t) + : repr_(nullptr) {} + + /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr p) + : repr_(p) {} + + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr p) + : repr_(SingletonTypePtr(p.get())) {} + + + // We need to support construction from T* for pybind. The problem + // is that it's not clear if we are supposed to be taking shared + // ownership or not. + // + // Case 1: if T is known statically to derive from SharedType, we should use + // shared_from_this() and take shared_ownership. + // + // Case 2: if T is exactly Type, we need to do a dynamic_cast to + // check if it's a SharedType and do the right thing. + // + // Case 3: Otherwise, T is not a SharedType. (debug-check this + // assumption!) Use a singleton pointer. 
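// Illustrative breakdown of the three cases, with hypothetical raw pointers:
//
//   SingletonOrSharedTypePtr<TupleType> a(tuple_raw);  // Case 1: TupleType is a
//                                                      // SharedType, so shared
//                                                      // ownership is taken
//   SingletonOrSharedTypePtr<Type>      b(type_raw);   // Case 2: exactly Type, so
//                                                      // dynamic_cast decides
//   SingletonOrSharedTypePtr<IntType>   c(int_raw);    // Case 3: singleton type,
//                                                      // only the raw pointer
//                                                      // is stored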
+ + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(T* p) : SingletonOrSharedTypePtr(static_cast::type>(p)->shared_from_this()) {} + + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(T* p) { + if (auto* shared_p = dynamic_cast::type>(p)) { + repr_ = Repr(shared_p->shared_from_this()); + } else { + repr_ = Repr(p); + } + } + + template ::value && !std::is_base_of::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(T* p) + : repr_(p) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(dynamic_cast::type>(p) == nullptr); + } + + SingletonOrSharedTypePtr(const SingletonOrSharedTypePtr&) = default; + SingletonOrSharedTypePtr(SingletonOrSharedTypePtr&&) noexcept = default; + SingletonOrSharedTypePtr& operator=(const SingletonOrSharedTypePtr&) = default; + SingletonOrSharedTypePtr& operator=(SingletonOrSharedTypePtr&&) noexcept = default; + + T* get() const { + return repr_.isSharedAndNonNull() ? repr_.shared_.repr_.get() : static_cast(repr_.rawRepr().first); + } + + operator bool() const { + return repr_.isNonNull(); + } + + bool operator==(std::nullptr_t) const { + return !repr_.isNonNull(); + } + + bool operator!=(std::nullptr_t) const { + return repr_.isNonNull(); + } + + template , void>::value, bool> = true> + U& operator*() const { + return *get(); + } + + T* operator->() const { + return get(); + } + + private: + // NOTE: SharedPtrWrapper exists to work around a baffling bug in + // nvcc; see comment in destroy() below. + struct SharedPtrWrapper { + SharedPtrWrapper(std::shared_ptr &&x) + : repr_(std::move(x)) {} + std::shared_ptr repr_; + }; + union Repr { + Repr() : Repr(nullptr) {} + + explicit Repr(std::shared_ptr x) + : shared_(std::move(x)) {} + + explicit Repr(std::nullptr_t) + : singletonRepr_(nullptr) {} + + explicit Repr(SingletonTypePtr p) + : singletonRepr_(p.get()) {} + + ~Repr() { + destroy(); + } + + // NOTE: the only non-UB way to access our null state is through + // rawRepr(), because our copy operation doesn't preserve which + // union member is active for null pointers. 
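// Illustrative summary of the states rawRepr() (defined below) can observe,
// assuming the shared_ptr layout this class already relies on, where the
// managed pointer is the first word and the control block the second:
//
//   shared, non-null: { first = managed object, nullIfSingleton_ = control block }
//   singleton:        { first = singleton ptr,  nullIfSingleton_ = nullptr }
//   null:             { first = nullptr,        nullIfSingleton_ = nullptr }
//
// isNonNull() and isSharedAndNonNull() below test exactly these two words.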
+ Repr(const Repr& rhs) { + if (rhs.isSharedAndNonNull()) { + new (&shared_) SharedPtrWrapper(rhs.shared_); + } else { + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + } + + Repr(Repr&& rhs) noexcept { + if (rhs.isSharedAndNonNull()) { + new (&shared_) SharedPtrWrapper(std::move(rhs.shared_)); + } else { + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + } + + Repr& operator=(const Repr& rhs) { + if (&rhs == this) { + return *this; + } + if (rhs.isSharedAndNonNull()) { + if (isSharedAndNonNull()) { + shared_ = rhs.shared_; + } else { + new (&shared_) SharedPtrWrapper(rhs.shared_); + } + } else { + if (isSharedAndNonNull()) { + destroy(); + } + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + return *this; + } + + Repr& operator=(Repr&& rhs) noexcept { + if (&rhs == this) { + return *this; + } + if (rhs.isSharedAndNonNull()) { + if (isSharedAndNonNull()) { + shared_ = std::move(rhs.shared_); + } else { + new (&shared_) SharedPtrWrapper(std::move(rhs.shared_)); + } + } else { + if (isSharedAndNonNull()) { + destroy(); + } + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + return *this; + } + + SharedPtrWrapper shared_; + + struct SingletonRepr { + explicit SingletonRepr(T* s) : singleton_(s) {} + T* singleton_; + void* unused_ = nullptr; + } singletonRepr_; + struct RawRepr { + void* first; + void* nullIfSingleton_; + }; + + // It is UB to read the singleton part of Repr if it was + // constructed as a shared_ptr and vice versa, but memcpying out + // the representation is always OK, so here's an accessor to obey + // the letter of the law. + RawRepr rawRepr() const { + RawRepr repr{}; + memcpy(&repr, reinterpret_cast(this), sizeof(RawRepr)); + return repr; + } + + bool isNonNull() const { + auto repr = rawRepr(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(repr.nullIfSingleton_ == nullptr || repr.first != nullptr); + return repr.first != nullptr; + } + + bool isSharedAndNonNull() const { + return rawRepr().nullIfSingleton_ != nullptr; + } + + private: + void destroy() { + if (isSharedAndNonNull()) { + // Without SharedPtrWrapper, this line would read + // `shared_.~shared_ptr()` and nvcc would complain with + // "error: expected primary-expression before '>' token" + // referring to the "t" in "shared_ptr". SharedPtrWrapper + // exists to work around this compiler bug. + shared_.~SharedPtrWrapper(); + } + } + } repr_; + }; + + using TypePtr = SingletonOrSharedTypePtr; + using Ptr = TypePtr; + using ElementType = Type; + + // subtyping relation. By default, we return true for the case + // when the type is exactly equal or if this <: T where rhs = Optional[T] + + // if this returns false and the why_not stream is non-null, it contains + // additional details that describe why this is not a subtype of 'rhs'. + // This additional information should only contain details that are not + // obvious from the annotation_str() that describes the type. For instance it + // is clear that `int <: str` is false but not clear why `Foo <: InterfaceBar` + // might be false. 
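// Illustrative sketch, assuming TypePtrs `actual` and `expected` are in scope
// (both names are hypothetical): callers that want the diagnostic typically
// pass a string stream.
//
//   std::ostringstream why_not;
//   if (!actual->isSubtypeOfExt(*expected, &why_not)) {
//     TORCH_CHECK(false, "expected ", expected->repr_str(),
//                 " but got ", actual->repr_str(), "\n", why_not.str());
//   }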
+ virtual bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const; + virtual bool is_module() const; + bool isSubtypeOf(const Type& rhs) const { + return isSubtypeOfExt(rhs, nullptr); + } + // Compatibility shims to accommodate existing code that passes shared_ptrs + // around. Ideally, we would just delete this, but it should be harmless. + template + typename std::enable_if::value, bool>::type + isSubtypeOf(const std::shared_ptr& rhs) const { + return isSubtypeOf(*rhs); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOf(const SingletonOrSharedTypePtr& rhs) const { + return isSubtypeOf(*rhs); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOf(SingletonTypePtr rhs) const { + return isSubtypeOf(*rhs); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOfExt(const SingletonOrSharedTypePtr& rhs, std::ostream* why_not) const { + return isSubtypeOfExt(*rhs, why_not); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOfExt(const std::shared_ptr& rhs, std::ostream* why_not) const { + return isSubtypeOfExt(*rhs, why_not); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOfExt(SingletonTypePtr rhs, std::ostream* why_not) const { + return isSubtypeOfExt(*rhs, why_not); + } + + // How this type will appear in FunctionSchema declarations + virtual std::string str() const = 0; + + // How this type will appear as if it were a type annotation in Python + // which is sometimes different than how it appears in declarations (e.g. + // int[] vs List[int]) + // + // Takes a custom printer that users can pass in to customize the output of + // this method. + std::string annotation_str(TypePrinter printer) const { + if (printer) { + // the printer can return nullopt to fall through to the default impl + if (auto renamed = printer(*this)) { + return *renamed; + } + } + return annotation_str_impl(std::move(printer)); + } + std::string annotation_str() const { + // Overload instead of define a default value for `printer` to help + // debuggers out. + return annotation_str(nullptr); + } + + // Returns a human readable string that includes additional information like + // "type is inferred rather than explicitly defined" to help construct more + // user-friendly messages. + virtual std::string repr_str() const { + return annotation_str(); + } + + TypeKind kind() const { + return kind_; + } + + virtual bool isUnionType() const { + return false; + } + + virtual bool requires_grad() const { + for (const auto& ct : containedTypes()) { + if (ct->requires_grad()) { + return true; + } + } + return false; + } + + // Dynamically cast this object to the subclass indicated by the + // template variable, returning nullptr if the cast is invalid. 
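// Illustrative sketch, assuming a TypePtr `type` is in scope (the name is
// hypothetical): the helpers declared below are used along these lines.
//
//   if (auto list = type->cast<c10::ListType>()) {   // nullptr on mismatch
//     c10::TypePtr elem = list->getElementType();
//   }
//   auto& tup = type->expectRef<c10::TupleType>();    // asserts on mismatch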
+ template ::value, bool> = true> + typename detail::CastReturnType::type cast() { + if (T::Kind == kind()) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return nullptr; + } + template ::value, bool> = true> + typename detail::CastReturnType::type cast() { + if (T::Kind == kind()) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(this == T::get().get()); + return typename detail::CastReturnType::type(static_cast(this)); + } + return nullptr; + } + template ::value, bool> = true> + typename detail::CastConstReturnType::type cast() const { + if (T::Kind == kind()) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return nullptr; + } + template ::value, bool> = true> + typename detail::CastConstReturnType::type cast() const { + if (T::Kind == kind()) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(this == T::get().get()); + return typename detail::CastConstReturnType::type(static_cast(this)); + } + return nullptr; + } + template + T* castRaw() { + if (T::Kind == kind()) { + return static_cast(this); + } + return nullptr; + } + template + const T* castRaw() const { + if (T::Kind == kind()) { + return static_cast(this); + } + return nullptr; + } + template + auto expect() { + auto r = cast(); + AT_ASSERT(r); + return r; + } + template + auto expect() const { + auto r = cast(); + AT_ASSERT(r); + return r; + } + template + T& expectRef() { + auto* r = castRaw(); + AT_ASSERT(r); + return *r; + } + template + const T& expectRef() const { + auto* r = castRaw(); + AT_ASSERT(r); + return *r; + } + virtual ~Type() = default; + virtual bool hasFreeVariables() const { + return false; + } + // list of types this type contains, e.g. for a List then element type of a + // list for a tuple, the types of the tuple elements + virtual at::ArrayRef containedTypes() const { + return {}; + } + virtual TypePtr containedType(size_t i) const { + return containedTypes().at(i); + } + virtual size_t containedTypeSize() const { + return containedTypes().size(); + } + // create a new version of this type, replacing its contained types with + // contained_types + TypePtr withContained(std::vector contained_types); + // per-type constructor, you only need to override this if the + // containedTypes() is not empty + virtual TypePtr createWithContained( + std::vector /*contained_types*/) const { + AT_ERROR( + "type with contained types did not overload createWithContained: ", + str()); + } + +}; + +template +using SingletonOrSharedTypePtr = Type::SingletonOrSharedTypePtr; + + +template +bool operator==(const SingletonOrSharedTypePtr& x, const SingletonOrSharedTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const SingletonOrSharedTypePtr& x, const std::shared_ptr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const std::shared_ptr& x, const SingletonOrSharedTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const SingletonOrSharedTypePtr& x, const SingletonTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const SingletonTypePtr& x, const SingletonOrSharedTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator!=(const SingletonOrSharedTypePtr& x, const SingletonOrSharedTypePtr& y) { + return !(x == y); +} + +template +bool operator!=(const SingletonOrSharedTypePtr& x, const std::shared_ptr& y) { + return !(x == y); +} + +template +bool operator!=(const std::shared_ptr& x, const SingletonOrSharedTypePtr& 
y) { + return !(x == y); +} + +template +bool operator!=(const SingletonOrSharedTypePtr& x, const SingletonTypePtr& y) { + return !(x == y); +} + +template +bool operator!=(const SingletonTypePtr& x, const SingletonOrSharedTypePtr& y) { + return !(x == y); +} + +using TypePtr = SingletonOrSharedTypePtr; +using ConstTypePtr = SingletonOrSharedTypePtr; + +// Explicitly enable MaybeOwned>, rather than allowing +// MaybeOwned to be used for any type right away. +template +struct MaybeOwnedTraits> + : public MaybeOwnedTraitsGenericImpl> {}; + +// Base class for Types that are guaranteed to be owned by std::shared_ptr. +struct TORCH_API SharedType : public Type, public std::enable_shared_from_this { + using Type::Type; +}; + +inline TypePtr Type::withContained(std::vector contained_types) { + auto current_contained = containedTypes(); + // Types with no contained_types don't need this call. Check before calling! + // + // (We can't support this efficiently because types without + // contained types may be singletons, in which case + // shared_from_this will crash; we would have to provide a virtual + // typeptr_from_this or isSingleton.) + TORCH_INTERNAL_ASSERT(!current_contained.empty() && current_contained.size() == contained_types.size()); + if (current_contained.equals(contained_types)) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return createWithContained(std::move(contained_types)); +} + + +TORCH_API inline bool operator==(const Type& lhs, const Type& rhs) { + if (C10_UNLIKELY(!rhs.symmetric())) { + return rhs.equals(lhs); + } + return lhs.equals(rhs); +} + +struct NamedType; +using NamedTypePtr = std::shared_ptr; +using ConstNamedTypePtr = std::shared_ptr; + +struct TORCH_API NamedType : public SharedType { + NamedType(TypeKind tk, c10::optional name) + : SharedType(tk), name_(std::move(name)) { + TORCH_INTERNAL_ASSERT( + tk == TypeKind::TupleType || tk == TypeKind::FunctionType || + tk == TypeKind::ClassType || tk == TypeKind::InterfaceType || + tk == TypeKind::EnumType, + "If you add a new kind of NamedType, ", + "please update the cast specialization and this assert"); + } + + // Fully qualified name of type + // Looks like: "foo.bar.Baz". + const c10::optional& name() const { + return name_; + } + + private: + c10::optional name_; +}; + +} // namespace c10 + +namespace std { +template +struct hash> { + size_t operator()(const c10::SingletonOrSharedTypePtr& x) const { + return std::hash()(x.get()); + } +}; +} // namespace std diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/operator_name.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/operator_name.h new file mode 100644 index 0000000000000000000000000000000000000000..6440a695b55ec45e8d2346911fed2dc767e0afa3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/operator_name.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +// TODO: consider storing namespace separately too +struct OperatorName final { + std::string name; + std::string overload_name; + OperatorName(std::string name, std::string overload_name) + : name(std::move(name)), overload_name(std::move(overload_name)) {} + + // TODO: These two functions below are slow! Fix internal data structures so + // I don't have to manually reconstruct the namespaces! + + // Return the namespace of this OperatorName, if it exists. 
The + // returned string_view is only live as long as the OperatorName + // exists and name is not mutated + c10::optional getNamespace() const { + auto pos = name.find("::"); + if (pos == std::string::npos) { + return c10::nullopt; + } else { + return c10::make_optional(c10::string_view(name.data(), pos)); + } + } + + // Returns true if we successfully set the namespace + bool setNamespaceIfNotSet(const char* ns) { + if (!getNamespace().has_value()) { + const auto ns_len = strlen(ns); + const auto old_name_size = name.size(); + name.resize(ns_len + 2 + old_name_size); + // Shift current value of name to the end of the new space. + name.replace(name.size() - old_name_size, old_name_size, name, 0, old_name_size); + name.replace(0, ns_len, ns, ns_len); + name[ns_len] = ':'; + name[ns_len + 1] = ':'; + return true; + } else { + return false; + } + } +}; + +// Non-owning view of an OperatorName. Unlike OperatorName, most of +// its functions are constexpr, so it can be used for compile time +// computations +struct OperatorNameView final { + c10::string_view name; + c10::string_view overload_name; + constexpr OperatorNameView(c10::string_view name, c10::string_view overload_name) + : name(name), overload_name(overload_name) {} + // Parses strings like "foo.overload" and also "foo" + constexpr static OperatorNameView parse(c10::string_view full_name) { + auto i = full_name.find('.'); + if (i == c10::string_view::npos) { + return OperatorNameView(full_name, c10::string_view()); + } else { + return OperatorNameView(full_name.substr(0, i), full_name.substr(i + 1)); + } + } +}; + +inline bool operator==(const OperatorName& lhs, const OperatorName& rhs) { + return lhs.name == rhs.name && lhs.overload_name == rhs.overload_name; +} + +inline bool operator!=(const OperatorName& lhs, const OperatorName& rhs) { + return !operator==(lhs, rhs); +} + +TORCH_API std::string toString(const OperatorName& opName); +TORCH_API std::ostream& operator<<(std::ostream&, const OperatorName&); + +} // namespace c10 + +namespace std { + template <> + struct hash<::c10::OperatorName> { + size_t operator()(const ::c10::OperatorName& x) const { + return std::hash()(x.name) ^ (~ std::hash()(x.overload_name)); + } + }; +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/qualified_name.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/qualified_name.h new file mode 100644 index 0000000000000000000000000000000000000000..22fd8b8b857d7c1b60fbd4c5033a7765e281cab6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/qualified_name.h @@ -0,0 +1,161 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { + +// Represents a name of the form "foo.bar.baz" +struct QualifiedName { + QualifiedName() = default; + + // `name` can be a dotted string, like "foo.bar.baz", or just a bare name. + /* implicit */ QualifiedName(const std::string& name) { + TORCH_CHECK(!name.empty()); + // split the string into its atoms. 
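// Illustrative result of the split below: "foo.bar.baz" yields
// atoms_ == {"foo", "bar", "baz"}, and cacheAccessors() then derives the
// cached strings, e.g.
//
//   c10::QualifiedName qn("foo.bar.baz");
//   qn.qualifiedName();  // "foo.bar.baz"
//   qn.prefix();         // "foo.bar"
//   qn.name();           // "baz"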
+ size_t startSearchFrom = 0; + size_t pos = name.find(delimiter_, startSearchFrom); + + while (pos != std::string::npos) { + auto atom = name.substr(startSearchFrom, pos - startSearchFrom); + TORCH_INTERNAL_ASSERT( + !atom.empty(), "Invalid name for qualified name: '", name, "'"); + atoms_.push_back(std::move(atom)); + startSearchFrom = pos + 1; + pos = name.find(delimiter_, startSearchFrom); + } + + auto finalAtom = name.substr(startSearchFrom); + TORCH_INTERNAL_ASSERT( + !finalAtom.empty(), "Invalid name for qualified name: '", name, "'"); + atoms_.emplace_back(std::move(finalAtom)); + + cacheAccessors(); + } + + explicit QualifiedName(std::vector atoms) : atoms_(std::move(atoms)) { + for (const auto& atom : atoms_) { + TORCH_CHECK(!atom.empty(), "Atom cannot be empty"); + TORCH_CHECK( + atom.find(delimiter_) == std::string::npos, + "Delimiter not allowed in atom"); + } + + cacheAccessors(); + } + // Unnecessary copy. Ideally we'd use something like std::string_view. + /* implicit */ QualifiedName(const char* name) + : QualifiedName(std::string(name)) {} + + // `name` must be a bare name (no dots!) + explicit QualifiedName(const QualifiedName& prefix, std::string name) { + TORCH_INTERNAL_ASSERT(!name.empty()); + TORCH_INTERNAL_ASSERT(name.find(delimiter_) == std::string::npos); + atoms_.insert(atoms_.begin(), prefix.atoms_.begin(), prefix.atoms_.end()); + atoms_.push_back(std::move(name)); + + cacheAccessors(); + } + + // Is `this` a prefix of `other`? + // For example, "foo.bar" is a prefix of "foo.bar.baz" + bool isPrefixOf(const QualifiedName& other) const { + const auto& thisAtoms = atoms_; + const auto& otherAtoms = other.atoms_; + + if (thisAtoms.size() > otherAtoms.size()) { + // Can't be a prefix if it's bigger + return false; + } + for (const auto i : c10::irange(thisAtoms.size())) { + if (thisAtoms[i] != otherAtoms[i]) { + return false; + } + } + return true; + } + + // The fully qualified name, like "foo.bar.baz" + const std::string& qualifiedName() const { + return qualifiedName_; + } + + // The leading qualifier, like "foo.bar" + const std::string& prefix() const { + return prefix_; + } + + // The base name, like "baz" + const std::string& name() const { + return name_; + } + + const std::vector& atoms() const { + return atoms_; + } + + bool operator==(const QualifiedName& other) const { + return this->qualifiedName_ == other.qualifiedName_; + } + + bool operator!=(const QualifiedName& other) const { + return !(*this == other); + } + + private: + static constexpr char delimiter_ = '.'; + + // Helper for cacheAccessors() below. + template + std::string join(char delimiter, const T& v) { + std::string out; + size_t reserve = 0; + for (const auto& e : v) { + reserve += e.size() + 1; + } + out.reserve(reserve); + for (const auto i : c10::irange(v.size())) { + if (i != 0) { + out.push_back(delimiter); + } + out.append(v[i]); + } + return out; + } + + void cacheAccessors() { + qualifiedName_ = join(delimiter_, atoms_); + if (atoms_.size() > 1) { + ArrayRef view(atoms_); + const auto prefixView = view.slice(0, view.size() - 1); + prefix_ = join(delimiter_, prefixView); + } + + if (!atoms_.empty()) { + name_ = atoms_.back(); + } + } + + // The actual list of names, like "{foo, bar, baz}" + std::vector atoms_; + + /* + * Cached accessors, derived from `atoms_`. 
+ */ + std::string qualifiedName_; + std::string prefix_; + std::string name_; +}; +} // namespace c10 + +namespace std { +template <> +struct hash { + size_t operator()(const c10::QualifiedName& n) const noexcept { + return std::hash()(n.qualifiedName()); + } +}; +} // namespace std diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/rref_interface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/rref_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..cefb29c08ddc6a6b8280d31ca0ff53c912ab9fd3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/rref_interface.h @@ -0,0 +1,40 @@ +#pragma once + +#include +#include + +namespace c10 { + +struct Type; +using worker_id_t = int16_t; + +// This abstract class contains only user-facing APIs, and will be shared +// between jit and distributed to implement TorchScript support. +class C10_EXPORT RRefInterface : public c10::intrusive_ptr_target { + public: + RRefInterface() = default; + // RRef is made NOT copyable NOT movable to prevent messing up reference + // counting. + RRefInterface(const RRefInterface& other) = delete; + RRefInterface(RRefInterface&& other) = delete; + RRefInterface& operator=(RRefInterface&& other) = delete; + + ~RRefInterface() override = default; + + // returns the worker id of the owner + virtual worker_id_t owner() const = 0; + + // returns the worker name of the owner + virtual std::string ownerName() const = 0; + + // Returns true if this is the ``OwnerRRef`` + virtual bool isOwner() const = 0; + + // Returns true if this is an ``OwnerRRef`` or if this ``UserRRef`` has been + // confirmed by its owner. + virtual bool confirmedByOwner() const = 0; + + virtual const TypePtr type() const = 0; +}; + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/stack.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/stack.h new file mode 100644 index 0000000000000000000000000000000000000000..1695e5995ab6922e37e6c1aba8fda811c0b528ad --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/stack.h @@ -0,0 +1,200 @@ +#pragma once + +#include + +#include +#include +#include + +// TODO move this to c10 namespace + +namespace torch { +namespace jit { + +using c10::IValue; +using Stack = std::vector; + +class Operation { + template + using accepts = std::is_constructible, F&&>; + + public: + template ::value, int> = 0> + C10_DEPRECATED_MESSAGE("Please use void(Stack&) to register operator instead.") + Operation(F&& raw): op_([raw = std::forward(raw)](Stack& stack) { + raw(&stack); + }) {} + + template ::value && + !std::is_same, Operation>::value, int> = 0> + Operation(F&& op): op_(std::forward(op)) {} + + Operation(std::nullptr_t) noexcept {} + + explicit operator bool() const noexcept { + return op_ ? true : false; + } + + void operator()(Stack& stack) { + op_(stack); + } + + template + T* target() noexcept { + return op_.target(); + } + + private: + std::function op_; +}; + +// An operation with N inputs and M outputs pops the last N inputs off +// the stack and pushes its M inputs onto the stack +// before: I0, I1, ... IN <- stack.back() +// after: O0, O1, ... OM +// operations are defined this way so that ownership of inputs can be +// transferred to the operation and it can incrementally drop ownership of +// tensors when they become unneeded. 
For large operations, like 'run an entire +// subgraph', this functionality is very important for minimizing gpu memory +// usage return value is the relative 'offset' to jump to for the next +// operation: +// pc += 1 + offset +// so a return value of 0 goes to the next instruction + +// treat the last N elements of the stack as a list, looking up +// element i +static inline IValue& peek(Stack& stack, size_t i, size_t N) { + return *(stack.end() - N + i); +} +static inline IValue& peek(Stack* stack, size_t i, size_t N) { + return peek(*stack, i, N); +} +static inline const IValue& peek(const Stack& stack, size_t i, size_t N) { + return *(stack.end() - N + i); +} +static inline const IValue& peek(const Stack* stack, size_t i, size_t N) { + return peek(*stack, i, N); +} +// treat the last N elements of the stack as a list, looking up the +// slice starting at index i and having length len +static inline at::ArrayRef peekSlice( + const Stack& stack, + size_t i, + size_t len, + size_t N) { + return at::ArrayRef(stack).slice(stack.size() - N + i, len); +} +static inline at::ArrayRef last(const Stack& stack, size_t N) { + return peekSlice(stack, 0, N, N); +} +static inline at::ArrayRef last(const Stack* stack, size_t N) { + return last(*stack, N); +} +static inline void drop(Stack& stack, size_t n) { + stack.erase(stack.end() - n, stack.end()); +} +static inline void drop(Stack* stack, size_t n) { + drop(*stack, n); +} +static inline IValue pop(Stack& stack) { + auto r = std::move(stack.back()); + stack.pop_back(); + return r; +} +static inline IValue pop(Stack* stack) { + return pop(*stack); +} +static inline std::vector pop(Stack& stack, size_t n) { + std::vector result; + result.reserve(n); + for (const auto i : c10::irange(n)) { + result.push_back(std::move(peek(stack, i, n))); + } + drop(stack, n); + return result; +} + +// variadic pop: +// int64_t a; at::Tensor b; +// pop(stack, a, b); +// equivalent to: +// b = pop(stack).toTensor(); +// a = pop(stack).toInt(); +template +static inline void pop(Stack& stack, Types&... args) { + size_t i = 0; + constexpr size_t N = sizeof...(args); + (void)std::initializer_list{ + (args = std::move(peek(stack, i++, N)).template to(), 0)...}; + drop(stack, N); +} +template +static inline void pop(Stack* stack, Types&... args) { + pop(*stack, args...); +} +template +static inline void push_one(Stack& stack, Type&& arg) { + stack.emplace_back(std::forward(arg)); +} + +static inline void push_one(Stack& stack, c10::TensorOptions options) { + stack.emplace_back(c10::typeMetaToScalarType(options.dtype())); + stack.emplace_back(options.layout()); + stack.emplace_back(options.device()); + stack.emplace_back(options.pinned_memory()); +} + +template +static inline void push(Stack& stack, Types&&... args) { + (void)std::initializer_list{(push_one(stack, std::forward(args)), 0)...}; +} +template +static inline void push(Stack* stack, Types&&... args) { + return push(*stack, std::forward(args)...); +} +template +static inline void push_list_elements(Stack& stack, const c10::List& elements) { + for (T elem : elements) { + stack.push_back(std::move(elem)); + } +} + +// The packer here is carefully written not to make any unnecessary +// copies. 
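+// A minimal usage sketch of the helpers above (illustrative only; `stack`,
+// `a` and `b` are hypothetical locals, not part of this header):
+//   Stack stack;
+//   push(stack, 1, 2.0);                 // stack now holds [1, 2.0]
+//   IValue& second = peek(stack, 1, 2);  // refers to 2.0
+//   int64_t a; double b;
+//   pop(stack, a, b);                    // a == 1, b == 2.0, stack is empty again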
+ +// pack takes the return values of aten functions pushes them onto the stack +template +inline void pack(Stack& stack, T&& v) { + stack.emplace_back(std::forward(v)); +} +template +inline void pack(Stack* stack, T&& v) { + pack(*stack, std::forward(v)); +} + +template +struct TuplePacker { + // NB: *Not* a universal reference. + static void execute(Stack& stack, std::tuple&& t) { + // NB: The move here does not "destroy" the entire tuple, that is + // not what std::move does; only the particular tuple index + // processed here gets stolen. + pack(stack, std::get(std::move(t))); + TuplePacker::execute(stack, std::move(t)); + } +}; + +template +struct TuplePacker<0, Args...> { + static void execute(Stack& /*stack*/, std::tuple&& /*t*/){}; +}; + +template +inline void pack(Stack& stack, std::tuple&& t) { + TuplePacker::execute(stack, std::move(t)); +} + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/symbol.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/symbol.h new file mode 100644 index 0000000000000000000000000000000000000000..04d480b51e3170e54db1df7050e23fceecd114f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/symbol.h @@ -0,0 +1,147 @@ +#pragma once +#include +#include +#include // For std::hash +#include + + +namespace c10 { + +// 'prim' symbols are synthetic operators that occur only in the IR +// and don't have corresponding implementations in ATen. + +// 'onnx' symbols correspond to ONNX operators. Their semantics +// are defined in https://github.com/onnx/onnx/blob/master/docs/Operators.md +// The particular version we are targeting is specified by '_onnx_opset_version' +// in torch.onnx.symbolic_helper +// +// In general, most ONNX operators won't get an entry here, because they +// are handled from the Python end. However, you may occasionally need +// to intern an ONNX symbol here so that you can conveniently write an +// optimization on ONNX operations. + +// 'attr' symbols are attribute keys. They are shared between both ONNX and ATen +// operators (you disambiguate their meaning by looking at the operator itself). +// In general, you only need to define attribute keys that are used by +// onnx or prim; ATen attributes are automatically generated in FORALL_ATTR_BASE_SYMBOLS. + +// Note [Symbol allocation] +// ~~~~~~~~~~~~~~~~~~~~~~~~ +// +// 1. Symbol namespace is split up into namespaces. +// +// 2. The intended access pattern for built-in symbols is onnx::MatMul +// in the c10 namespace (this is a Symbol). +// + +// Built-in constant definition strategy: +// - Enum is the most convenient way to generate a contiguous sequence +// of numbers for an identifier. +// - However, an enum gives you a fresh type. We want onnx::MatMul to +// be type Symbol, not some random enum type! +// - Therefore, after using enums to generate the sequence of integers, +// we then declare constexpr Symbols to get everything the actual Symbol +// type we want. Symbols must be constexpr to be valid to be "case"ed on. + +using unique_t = uint32_t; + +const std::string& domain_prefix(); + +// A Symbol is like an interned string, but with a little extra +// structure; it is namespaced via SymbolNamespace and the resulting +// intern pointers support efficient namespace testing. 
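+//
+// A brief usage sketch (illustrative; assumes the qualified string can be
+// interned at runtime):
+//   Symbol s = Symbol::fromQualString("aten::mm");
+//   s.is_aten();          // true
+//   s.toUnqualString();   // "mm"
+//   s.toQualString();     // "aten::mm"
+//   s.ns();               // the namespace Symbol corresponding to "aten"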
+struct TORCH_API Symbol { + explicit constexpr Symbol() : value(0) {}; + explicit constexpr Symbol(unique_t uniq) + : value(uniq) {} + + // Get a Symbol for a qualified string like "attr::bar" + static Symbol fromQualString(const std::string & s); + + // Get a Symbol from a domain and an unqualified string like "org.pytorch.attr" and "bar" + static Symbol fromDomainAndUnqualString(const std::string & d, const std::string & s); + + // Constructors for our various namespaced strings. This will construct + // the appropriate namespaced string, e.g., "attr::foo" for the + // argument "foo", and then attempt to intern it. DO NOT USE THIS + // with a string literal; attr::foo should be available in that case + // (and if it's not, you should add it to the built-ins list above.) + static Symbol attr(const std::string & s); + static Symbol aten(const std::string & s); + static Symbol cuda(const std::string & s); + static Symbol onnx(const std::string & s); + static Symbol prim(const std::string & s); + static Symbol user(const std::string & s); + static Symbol caffe2(const std::string & s); + static Symbol dimname(const std::string & s); + // TODO: eliminate me + static Symbol scope(const std::string & s); + + bool is_attr() const; + bool is_aten() const; + bool is_cuda() const; + bool is_prim() const; + bool is_prims() const; + bool is_nvprims() const; + bool is_onnx() const; + bool is_user() const; + bool is_caffe2() const; + bool is_dimname() const; + + // So we can switch on this + constexpr operator unique_t() const { + return value; + } + + Symbol ns() const; + + // Give a string corresponding to the unqualified version of this name, e.g., + // "mm". Use this in a context where the intended namespace of the string is + // obvious; this is a *lossy* conversion. + const char * toUnqualString() const; + + // Give a string corresponding to the qualified version of this name, + // e.g., "aten::mm". This string format is made available to Python bindings + // (so we know how to parse it.) + const char * toQualString() const; + + // This describes a symbol in a case where humans read it. At the moment it's + // the same as toQualString. This has to be a const char* returned because + // a lot of printf style macros use it. + const char * toDisplayString() const; + + // Give a string corresponding to the domain name for the symbol, + // e.g., "org.pytorch.aten". 
+ std::string domainString() const; + +private: + + explicit Symbol(Symbol ns, const std::string & s); + unique_t value; +}; + +static inline bool operator==(Symbol lhs, Symbol rhs) { + return static_cast(lhs) == static_cast(rhs); +} + +inline Symbol Symbol::attr(const std::string & s) { return Symbol::fromQualString("attr::" + s); } +inline Symbol Symbol::aten(const std::string & s) { return Symbol::fromQualString("aten::" + s); } +inline Symbol Symbol::cuda(const std::string & s) { return Symbol::fromQualString("cuda::" + s); } +inline Symbol Symbol::onnx(const std::string & s) { return Symbol::fromQualString("onnx::" + s); } +inline Symbol Symbol::prim(const std::string & s) { return Symbol::fromQualString("prim::" + s); } +inline Symbol Symbol::scope(const std::string & s) { return Symbol::fromQualString("scope::" + s); } +inline Symbol Symbol::user(const std::string & s) { return Symbol::fromQualString("user::" + s); } +inline Symbol Symbol::caffe2(const std::string & s) { return Symbol::fromQualString("_caffe2::" + s); } +inline Symbol Symbol::dimname(const std::string & s) { return Symbol::fromQualString("dimname::" + s); } + +} // namespace c10 + +// make symbol behave like an integer in hash tables +namespace std { +template <> +struct hash { + size_t operator()(c10::Symbol s) const { + return std::hash()(static_cast(s)); + } +}; +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/type_factory.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/type_factory.h new file mode 100644 index 0000000000000000000000000000000000000000..8592a8864d64e4db14a5eefda85a9e4ad02dc27c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/type_factory.h @@ -0,0 +1,108 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace c10 { + +template +struct TORCH_API TypeFactoryBase {}; + +template <> +struct TORCH_API TypeFactoryBase { + template + static c10::DynamicTypePtr create(TypePtr ty, Args&&... args) { + return std::make_shared( + c10::DynamicTypeTrait::tagValue(), + c10::DynamicType::Arguments(c10::ArrayRef( + {std::move(ty), std::forward(args)...}))); + } + template + static c10::DynamicTypePtr create(const std::vector& types) { + return std::make_shared( + c10::DynamicTypeTrait::tagValue(), + c10::DynamicType::Arguments(types)); + } + static c10::DynamicTypePtr createNamedTuple( + const std::string& name, + const std::vector& fields, + const std::vector& types) { + return std::make_shared( + c10::DynamicType::Tag::Tuple, + name, + c10::DynamicType::Arguments(fields, types)); + } + template + C10_ERASE static c10::DynamicTypePtr createNamed(const std::string& name) { + return std::make_shared( + c10::DynamicTypeTrait::tagValue(), + name, + c10::DynamicType::Arguments{}); + } + template + C10_ERASE static c10::DynamicTypePtr get() { + return DynamicTypeTrait::getBaseType(); + } + static const std::unordered_map& basePythonTypes(); +}; + +using DynamicTypeFactory = TypeFactoryBase; + +// Helper functions for constructing DynamicTypes inline. +template < + typename T, + std::enable_if_t::isBaseType, int> = 0> +C10_ERASE DynamicTypePtr dynT() { + return DynamicTypeFactory::get(); +} + +template < + typename T, + typename... Args, + std::enable_if_t::isBaseType, int> = 0> +C10_ERASE DynamicTypePtr dynT(Args&&... args) { + return DynamicTypeFactory::create(std::forward(args)...); +} + +template <> +struct TORCH_API TypeFactoryBase { + template + static c10::TypePtr create(TypePtr ty, Args&&... 
args) { + return T::create(std::move(ty), std::forward(args)...); + } + template + static c10::TypePtr create(std::vector types) { + return T::create(std::move(types)); + } + static c10::TypePtr createNamedTuple( + const std::string& name, + const std::vector& fields, + const std::vector& types); + template + C10_ERASE static c10::TypePtr createNamed(const std::string& name) { + return T::create(name); + } + static const std::unordered_map& basePythonTypes(); + template + C10_ERASE static c10::TypePtr get() { + return T::get(); + } +}; + +using DefaultTypeFactory = TypeFactoryBase; + +using PlatformType = +#ifdef C10_MOBILE + c10::DynamicType +#else + c10::Type +#endif + ; + +using TypeFactory = TypeFactoryBase; + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/type_ptr.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/type_ptr.h new file mode 100644 index 0000000000000000000000000000000000000000..d14c3b8a45641b8584054e1b797a2a6f330f1ac7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/type_ptr.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include + +#include +#include + +namespace c10 { + +// Compatibility wrapper around a raw pointer so that existing code +// written to deal with a shared_ptr can keep working. +template +class SingletonTypePtr { + public: + /* implicit */ SingletonTypePtr(T* p) : repr_(p) {} + + // We need this to satisfy Pybind11, but it shouldn't be hit. + explicit SingletonTypePtr(std::shared_ptr) { TORCH_CHECK(false); } + + using element_type = typename std::shared_ptr::element_type; + + template , void>::value, bool> = true> + T& operator*() const { + return *repr_; + } + + T* get() const { + return repr_; + } + + T* operator->() const { + return repr_; + } + + operator bool() const { + return repr_ != nullptr; + } + + private: + T* repr_{nullptr}; +}; + +template +bool operator==(SingletonTypePtr lhs, SingletonTypePtr rhs) { + return (void*)lhs.get() == (void*)rhs.get(); +} + +template +bool operator!=(SingletonTypePtr lhs, SingletonTypePtr rhs) { + return !(lhs == rhs); +} + +} // namespace c10 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/typeid.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/typeid.h new file mode 100644 index 0000000000000000000000000000000000000000..5967c0a1659aadd9225d3f16f13275879c8bdbc9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/typeid.h @@ -0,0 +1 @@ +#include diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/CUDAHooksInterface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/CUDAHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..981fd1227a81d6cb21d24d014649377af35ab744 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/CUDAHooksInterface.h @@ -0,0 +1,199 @@ +#pragma once + +#include +#include +#include + +// Forward-declares at::Generator and at::cuda::NVRTC +namespace at { +struct Generator; +namespace cuda { +struct NVRTC; +} // namespace cuda +} // namespace at + +// NB: Class must live in `at` due to limitations of Registry.h. 
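+// A registration sketch (illustrative; `MyCUDAHooks` is a hypothetical
+// implementation that would live in the CUDA library):
+//   struct MyCUDAHooks : public at::CUDAHooksInterface {
+//     MyCUDAHooks(at::CUDAHooksArgs) {}
+//     bool hasCUDA() const override { return true; }
+//   };
+//   REGISTER_CUDA_HOOKS(MyCUDAHooks);
+// CPU-only code then reaches the implementation dynamically via
+//   at::detail::getCUDAHooks().hasCUDA();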
+namespace at { + +#ifdef _MSC_VER +constexpr const char* CUDA_HELP = + "PyTorch splits its backend into two shared libraries: a CPU library " + "and a CUDA library; this error has occurred because you are trying " + "to use some CUDA functionality, but the CUDA library has not been " + "loaded by the dynamic linker for some reason. The CUDA library MUST " + "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! " + "One common culprit is a lack of -INCLUDE:?warp_size@cuda@at@@YAHXZ " + "in your link arguments; many dynamic linkers will delete dynamic library " + "dependencies if you don't depend on any of their symbols. You can check " + "if this has occurred by using link on your binary to see if there is a " + "dependency on *_cuda.dll library."; +#else +constexpr const char* CUDA_HELP = + "PyTorch splits its backend into two shared libraries: a CPU library " + "and a CUDA library; this error has occurred because you are trying " + "to use some CUDA functionality, but the CUDA library has not been " + "loaded by the dynamic linker for some reason. The CUDA library MUST " + "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! " + "One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many " + "dynamic linkers will delete dynamic library dependencies if you don't " + "depend on any of their symbols. You can check if this has occurred by " + "using ldd on your binary to see if there is a dependency on *_cuda.so " + "library."; +#endif + +// The CUDAHooksInterface is an omnibus interface for any CUDA functionality +// which we may want to call into from CPU code (and thus must be dynamically +// dispatched, to allow for separate compilation of CUDA code). How do I +// decide if a function should live in this class? There are two tests: +// +// 1. Does the *implementation* of this function require linking against +// CUDA libraries? +// +// 2. Is this function *called* from non-CUDA ATen code? +// +// (2) should filter out many ostensible use-cases, since many times a CUDA +// function provided by ATen is only really ever used by actual CUDA code. +// +// TODO: Consider putting the stub definitions in another class, so that one +// never forgets to implement each virtual function in the real implementation +// in CUDAHooks. This probably doesn't buy us much though. +struct TORCH_API CUDAHooksInterface { + // This should never actually be implemented, but it is used to + // squelch -Werror=non-virtual-dtor + virtual ~CUDAHooksInterface() = default; + + // Initialize THCState and, transitively, the CUDA state + virtual void initCUDA() const { + TORCH_CHECK(false, "Cannot initialize CUDA without ATen_cuda library. ", CUDA_HELP); + } + + virtual const Generator& getDefaultCUDAGenerator(C10_UNUSED DeviceIndex device_index = -1) const { + TORCH_CHECK(false, "Cannot get default CUDA generator without ATen_cuda library. ", CUDA_HELP); + } + + virtual Device getDeviceFromPtr(void* /*data*/) const { + TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. 
", CUDA_HELP); + } + + virtual bool isPinnedPtr(const void* /*data*/) const { + return false; + } + + virtual bool hasCUDA() const { + return false; + } + + virtual bool hasCUDART() const { + return false; + } + + virtual bool hasMAGMA() const { + return false; + } + + virtual bool hasCuDNN() const { + return false; + } + + virtual bool hasCuSOLVER() const { + return false; + } + + virtual bool hasROCM() const { + return false; + } + + virtual const at::cuda::NVRTC& nvrtc() const { + TORCH_CHECK(false, "NVRTC requires CUDA. ", CUDA_HELP); + } + + virtual bool hasPrimaryContext(DeviceIndex device_index) const { + TORCH_CHECK(false, "Cannot call hasPrimaryContext(", device_index, ") without ATen_cuda library. ", CUDA_HELP); + } + + virtual DeviceIndex current_device() const { + return -1; + } + + virtual Allocator* getPinnedMemoryAllocator() const { + TORCH_CHECK(false, "Pinned memory requires CUDA. ", CUDA_HELP); + } + + virtual Allocator* getCUDADeviceAllocator() const { + TORCH_CHECK(false, "CUDADeviceAllocator requires CUDA. ", CUDA_HELP); + } + + virtual bool compiledWithCuDNN() const { + return false; + } + + virtual bool compiledWithMIOpen() const { + return false; + } + + virtual bool supportsDilatedConvolutionWithCuDNN() const { + return false; + } + + virtual bool supportsDepthwiseConvolutionWithCuDNN() const { + return false; + } + + virtual bool supportsBFloat16ConvolutionWithCuDNNv8() const { + return false; + } + + virtual long versionCuDNN() const { + TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP); + } + + virtual long versionCUDART() const { + TORCH_CHECK(false, "Cannot query CUDART version without ATen_cuda library. ", CUDA_HELP); + } + + virtual std::string showConfig() const { + TORCH_CHECK(false, "Cannot query detailed CUDA version without ATen_cuda library. ", CUDA_HELP); + } + + virtual double batchnormMinEpsilonCuDNN() const { + TORCH_CHECK(false, + "Cannot query batchnormMinEpsilonCuDNN() without ATen_cuda library. ", CUDA_HELP); + } + + virtual int64_t cuFFTGetPlanCacheMaxSize(DeviceIndex /*device_index*/) const { + TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); + } + + virtual void cuFFTSetPlanCacheMaxSize(DeviceIndex /*device_index*/, int64_t /*max_size*/) const { + TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); + } + + virtual int64_t cuFFTGetPlanCacheSize(DeviceIndex /*device_index*/) const { + TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); + } + + virtual void cuFFTClearPlanCache(DeviceIndex /*device_index*/) const { + TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); + } + + virtual int getNumGPUs() const { + return 0; + } + + virtual void deviceSynchronize(DeviceIndex /*device_index*/) const { + TORCH_CHECK(false, "Cannot synchronize CUDA device without ATen_cuda library. ", CUDA_HELP); + } +}; + +// NB: dummy argument to suppress "ISO C++11 requires at least one argument +// for the "..." 
in a variadic macro" +struct TORCH_API CUDAHooksArgs {}; + +TORCH_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs); +#define REGISTER_CUDA_HOOKS(clsname) \ + C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const CUDAHooksInterface& getCUDAHooks(); +} // namespace detail +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/FunctionTraits.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/FunctionTraits.h new file mode 100644 index 0000000000000000000000000000000000000000..f49a55e1326d5ca3f1ce26363eda81a80b574cf5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/FunctionTraits.h @@ -0,0 +1,102 @@ +#pragma once + +#include + +// Modified from https://stackoverflow.com/questions/7943525/is-it-possible-to-figure-out-the-parameter-type-and-return-type-of-a-lambda + +// Fallback, anything with an operator() +template +struct function_traits : public function_traits { +}; + +// Pointers to class members that are themselves functors. +// For example, in the following code: +// template +// struct S { +// func_t f; +// }; +// template +// S make_s(func_t f) { +// return S { .f = f }; +// } +// +// auto s = make_s([] (int, float) -> double { /* ... */ }); +// +// function_traits traits; +template +struct function_traits : public function_traits { +}; + +// Const class member functions +template +struct function_traits : public function_traits { +}; + +// Reference types +template +struct function_traits : public function_traits {}; +template +struct function_traits : public function_traits {}; + +// Free functions +template +struct function_traits { + // arity is the number of arguments. + enum { arity = sizeof...(Args) }; + + typedef std::tuple ArgsTuple; + typedef ReturnType result_type; + + template + struct arg + { + typedef typename std::tuple_element>::type type; + // the i-th argument is equivalent to the i-th tuple element of a tuple + // composed of those arguments. 
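+    // e.g. for function_traits<double(int, float)>:
+    //   arity == 2, result_type == double,
+    //   arg<0>::type == int, arg<1>::type == float.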
+ }; +}; + +template +struct nullary_function_traits { + using traits = function_traits; + using result_type = typename traits::result_type; +}; + +template +struct unary_function_traits { + using traits = function_traits; + using result_type = typename traits::result_type; + using arg1_t = typename traits::template arg<0>::type; +}; + +template +struct binary_function_traits { + using traits = function_traits; + using result_type = typename traits::result_type; + using arg1_t = typename traits::template arg<0>::type; + using arg2_t = typename traits::template arg<1>::type; +}; + + +// Traits for calling with c10::guts::invoke, where member_functions have a first argument of ClassType +template +struct invoke_traits : public function_traits{ +}; + +template +struct invoke_traits : public invoke_traits{ +}; + +template +struct invoke_traits : public invoke_traits{ +}; + +template +struct invoke_traits : + public function_traits { +}; + +template +struct invoke_traits : + public function_traits { +}; diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/HIPHooksInterface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/HIPHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..3ce351b623908af529425318c2be68fc07a6609d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/HIPHooksInterface.h @@ -0,0 +1,70 @@ +#pragma once + +#include +#include +#include + +#include + +#include +#include + +namespace at { +class Context; +} + +// NB: Class must live in `at` due to limitations of Registry.h. +namespace at { + +// The HIPHooksInterface is an omnibus interface for any HIP functionality +// which we may want to call into from CPU code (and thus must be dynamically +// dispatched, to allow for separate compilation of HIP code). See +// CUDAHooksInterface for more detailed motivation. +struct TORCH_API HIPHooksInterface { + // This should never actually be implemented, but it is used to + // squelch -Werror=non-virtual-dtor + virtual ~HIPHooksInterface() = default; + + // Initialize the HIP library state + virtual void initHIP() const { + AT_ERROR("Cannot initialize HIP without ATen_hip library."); + } + + virtual std::unique_ptr initHIPGenerator(Context*) const { + AT_ERROR("Cannot initialize HIP generator without ATen_hip library."); + } + + virtual bool hasHIP() const { + return false; + } + + virtual int64_t current_device() const { + return -1; + } + + virtual Allocator* getPinnedMemoryAllocator() const { + AT_ERROR("Pinned memory requires HIP."); + } + + virtual void registerHIPTypes(Context*) const { + AT_ERROR("Cannot registerHIPTypes() without ATen_hip library."); + } + + virtual int getNumGPUs() const { + return 0; + } +}; + +// NB: dummy argument to suppress "ISO C++11 requires at least one argument +// for the "..." 
in a variadic macro" +struct TORCH_API HIPHooksArgs {}; + +TORCH_DECLARE_REGISTRY(HIPHooksRegistry, HIPHooksInterface, HIPHooksArgs); +#define REGISTER_HIP_HOOKS(clsname) \ + C10_REGISTER_CLASS(HIPHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const HIPHooksInterface& getHIPHooks(); + +} // namespace detail +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/IPUHooksInterface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/IPUHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..593987cd23e693a60240953cf6c01c6478213c87 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/IPUHooksInterface.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { + +struct TORCH_API IPUHooksInterface { + virtual ~IPUHooksInterface() = default; + + virtual const Generator& getDefaultIPUGenerator( + DeviceIndex device_index = -1) const { + AT_ERROR( + "Cannot get the default IPU generator: the IPU backend is not " + "available."); + } + + virtual Generator newIPUGenerator(DeviceIndex device_index = -1) const { + AT_ERROR( + "Cannot create a new IPU generator: the IPU backend is not available."); + } +}; + +struct TORCH_API IPUHooksArgs {}; + +TORCH_DECLARE_REGISTRY(IPUHooksRegistry, IPUHooksInterface, IPUHooksArgs); +#define REGISTER_IPU_HOOKS(clsname) \ + C10_REGISTER_CLASS(IPUHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const IPUHooksInterface& getIPUHooks(); +} // namespace detail +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/MPSHooksInterface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/MPSHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..29b357f13d483e56645484e2fd952737843ec43b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/MPSHooksInterface.h @@ -0,0 +1,103 @@ +// Copyright © 2022 Apple Inc. + +#pragma once + +#include +#include +#include +#include + +#include + +namespace at { + +struct TORCH_API MPSHooksInterface { + // this fails the implementation if MPSHooks functions are called, but + // MPS backend is not present. 
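+  // For example, calling initMPS() on this base interface expands the macro
+  // below to
+  //   TORCH_CHECK(false, "Cannot execute ", __func__, "() without MPS backend.");
+  // where __func__ evaluates to "initMPS" at that call site.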
+ #define FAIL_MPSHOOKS_FUNC(func) \ + TORCH_CHECK(false, "Cannot execute ", func, "() without MPS backend."); + + virtual ~MPSHooksInterface() = default; + + // Initialize the MPS library state + virtual void initMPS() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual bool hasMPS() const { + return false; + } + virtual bool isOnMacOS13orNewer(unsigned minor = 0) const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual const Generator& getDefaultMPSGenerator() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual Allocator* getMPSDeviceAllocator() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void deviceSynchronize() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void commitStream() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void* getCommandBuffer() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void* getDispatchQueue() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void emptyCache() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual size_t getCurrentAllocatedMemory() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual size_t getDriverAllocatedMemory() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void setMemoryFraction(double /*ratio*/) const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void profilerStartTrace(const std::string& mode, bool waitUntilCompleted) const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void profilerStopTrace() const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual uint32_t acquireEvent(bool enable_timing) const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void releaseEvent(uint32_t event_id) const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void recordEvent(uint32_t event_id) const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void waitForEvent(uint32_t event_id) const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual void synchronizeEvent(uint32_t event_id) const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual bool queryEvent(uint32_t event_id) const { + FAIL_MPSHOOKS_FUNC(__func__); + } + virtual double elapsedTimeOfEvents(uint32_t start_event_id, uint32_t end_event_id) const { + FAIL_MPSHOOKS_FUNC(__func__); + } + + #undef FAIL_MPSHOOKS_FUNC +}; + +struct TORCH_API MPSHooksArgs {}; + +TORCH_DECLARE_REGISTRY(MPSHooksRegistry, MPSHooksInterface, MPSHooksArgs); +#define REGISTER_MPS_HOOKS(clsname) \ + C10_REGISTER_CLASS(MPSHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const MPSHooksInterface& getMPSHooks(); + +} // namespace detail +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/MTIAHooksInterface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/MTIAHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..f969beef7a36e59a12b745f7f974323a654e4808 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/MTIAHooksInterface.h @@ -0,0 +1,51 @@ +#pragma once + +#include + +#include + +#include + +namespace at { +class Context; +} + +namespace at { + +constexpr const char* MTIA_HELP = + "The MTIA backend requires MTIA extension for PyTorch;" + "this error has occurred because you are trying " + "to use some MTIA's functionality without MTIA extension included."; + +struct TORCH_API MTIAHooksInterface { + virtual ~MTIAHooksInterface() = default; + + virtual void initMTIA() const { + TORCH_CHECK( + false, + "Cannot initialize MTIA without MTIA Extension for PyTorch.", + MTIA_HELP); + } + + virtual bool hasMTIA() const { + return false; + } + + virtual 
std::string showConfig() const { + TORCH_CHECK( + false, + "Cannot query detailed MTIA version without MTIA Extension for PyTorch.", + MTIA_HELP); + } +}; + +struct TORCH_API MTIAHooksArgs {}; + +C10_DECLARE_REGISTRY(MTIAHooksRegistry, MTIAHooksInterface, MTIAHooksArgs); +#define REGISTER_MTIA_HOOKS(clsname) \ + C10_REGISTER_CLASS(MTIAHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const MTIAHooksInterface& getMTIAHooks(); +} // namespace detail +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/ORTHooksInterface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/ORTHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..f49969ec66a5bd47407090454a4ca671eda66cea --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/ORTHooksInterface.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include + +constexpr const char* ORT_HELP = + " You need to 'import torch_ort' to use the 'ort' device in PyTorch. " + "The 'torch_ort' module is provided by the ONNX Runtime itself " + "(https://onnxruntime.ai)."; + +// NB: Class must live in `at` due to limitations of Registry.h. +namespace at { + +struct TORCH_API ORTHooksInterface { + // This should never actually be implemented, but it is used to + // squelch -Werror=non-virtual-dtor + virtual ~ORTHooksInterface() = default; + + virtual std::string showConfig() const { + TORCH_CHECK(false, "Cannot query detailed ORT version information.", ORT_HELP); + } +}; + +// NB: dummy argument to suppress "ISO C++11 requires at least one argument +// for the "..." in a variadic macro" +struct TORCH_API ORTHooksArgs {}; + +TORCH_DECLARE_REGISTRY(ORTHooksRegistry, ORTHooksInterface, ORTHooksArgs); +#define REGISTER_ORT_HOOKS(clsname) \ + C10_REGISTER_CLASS(ORTHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const ORTHooksInterface& getORTHooks(); +} // namespace detail + +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/PrivateUse1HooksInterface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/PrivateUse1HooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..91b197daeff2a9b8f0618912582ddc3ba807cce2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/PrivateUse1HooksInterface.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include +namespace at { + +struct TORCH_API PrivateUse1HooksInterface { + virtual ~PrivateUse1HooksInterface() = default; + virtual const at::Generator& getDefaultGenerator(c10::DeviceIndex device_index) { + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `getDefaultGenerator`."); + } + + virtual at::Device getDeviceFromPtr(void* data) const { + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "You should register `PrivateUse1HooksInterface` for PrivateUse1 before call `getDeviceFromPtr`."); + } +}; + +struct TORCH_API PrivateUse1HooksArgs {}; + +TORCH_API void RegisterPrivateUse1HooksInterface(at::PrivateUse1HooksInterface* hook_); + +TORCH_API at::PrivateUse1HooksInterface* GetPrivateUse1HooksInterface(); + +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/XPUHooksInterface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/XPUHooksInterface.h new file mode 100644 index 
0000000000000000000000000000000000000000..9a3836dbcc78e074af30659d59983adafc208e0c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/detail/XPUHooksInterface.h @@ -0,0 +1,88 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +// We use forward declaration here instead of #include to avoid +// leaking DLPack implementation detail to every project that includes `ATen/Context.h`, which in turn +// would lead to a conflict when linked with another project using DLPack (for example TVM) +struct DLDevice_; + +namespace at { + +constexpr const char* XPU_HELP = + "The XPU backend requires Intel Extension for Pytorch;" + "this error has occurred because you are trying " + "to use some XPU's functionality, but the Intel Extension for Pytorch has not been " + "loaded for some reason. The Intel Extension for Pytorch MUST " + "be loaded, EVEN IF you don't directly use any symbols from that!"; + +struct TORCH_API XPUHooksInterface { + virtual ~XPUHooksInterface() {} + + virtual void initXPU() const { + TORCH_CHECK( + false, + "Cannot initialize XPU without Intel Extension for Pytorch.", + XPU_HELP); + } + + virtual bool hasXPU() const { + return false; + } + + virtual std::string showConfig() const { + TORCH_CHECK( + false, + "Cannot query detailed XPU version without Intel Extension for Pytorch. ", + XPU_HELP); + } + + virtual Device getATenDeviceFromDLPackDevice( + const DLDevice_& dl_device, + void* data) const { + TORCH_CHECK( + false, + "Cannot get XPU device without Intel Extension for Pytorch. ", + XPU_HELP); + } + + virtual DLDevice_& getDLPackDeviceFromATenDevice( + DLDevice_& dl_device, + const Device& aten_device, + void* data) const { + TORCH_CHECK( + false, + "Cannot get XPU DL device without Intel Extension for Pytorch. ", + XPU_HELP); + } + + virtual Generator getXPUGenerator(C10_UNUSED DeviceIndex device_index = -1) const { + TORCH_CHECK(false, "Cannot get XPU generator without Intel Extension for Pytorch. ", XPU_HELP); + } + + virtual const Generator& getDefaultXPUGenerator(C10_UNUSED DeviceIndex device_index = -1) const { + TORCH_CHECK(false, "Cannot get default XPU generator without Intel Extension for Pytorch. ", XPU_HELP); + } + + virtual int getNumGPUs() const { + return 0; + } +}; + +struct TORCH_API XPUHooksArgs {}; + +C10_DECLARE_REGISTRY(XPUHooksRegistry, XPUHooksInterface, XPUHooksArgs); +#define REGISTER_XPU_HOOKS(clsname) \ + C10_REGISTER_CLASS(XPUHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const XPUHooksInterface& getXPUHooks(); +} // namespace detail +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..e0e1aa603be317bc5167b80d049e0eb54919f96c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_renorm : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm); +}; + +} // namespace native +} // namespace at
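+// Usage note (illustrative, not part of the generated header): this meta
+// function backs the structured kernel for at::renorm, e.g.
+//   auto out = at::renorm(self, /*p=*/2, /*dim=*/0, /*maxnorm=*/1.0);
+// meta() checks the arguments and determines the output before the
+// device-specific implementation runs.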