diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h
new file mode 100644
index 0000000000000000000000000000000000000000..2bbf43975eacc4453dc33ae561b1f5ff71bd36a1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h
@@ -0,0 +1,631 @@
+#pragma once
+
+#include <ATen/core/ivalue_to.h>
+#include <c10/util/ArrayRef.h>
+#include <c10/util/Exception.h>
+
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <type_traits>
+
+/*
+ * [Note: IListRef]
+ * Wrapper around different API containers (e.g. boxed and unboxed).
+ *
+ * What is it?
+ * ===========
+ * It is a tagged union of both boxed and unboxed API containers.
+ * Working implementations:
+ *
+ * - `IListRef<at::Tensor>`
+ * - `IListRef<at::OptionalTensorRef>`
+ *
+ * Note that `IListRef` is a view type, i.e. it does not own the
+ * tensors it holds. It is intended to be used only as a parameter type,
+ * specifically where these 2 worlds overlap.
+ *
+ * What is this for?
+ * =================
+ * Historically, PyTorch has maintained 2 different APIs: the unboxed API
+ * (called from the C++ API and Python eager mode) and the boxed API
+ * (called from the TorchScript JIT, the mobile interpreter, and boxed
+ * fallbacks).
+ *
+ * Calling unboxed kernels from the boxed "world" and vice-versa may
+ * result in non-negligible overhead. Lists are one of those types:
+ *
+ * - Boxed world: `c10::List<at::Tensor>`
+ * - Unboxed world: `c10::ArrayRef<at::Tensor>`
+ *
+ * In this context, `c10::IListRef` solves this problem by wrapping those
+ * 2 container types, so that we don't need to convert from one to
+ * the other.
+ *
+ * (see https://github.com/pytorch/pytorch/issues/66328)
+ *
+ * What does it do?
+ * ================
+ * This container wraps around the different tagged containers
+ * (currently, only boxed and unboxed), without incurring extra
+ * overhead for converting from one to another. It does so while
+ * exposing the usual container methods, which dispatch to the
+ * corresponding implementations.
+ *
+ * While it works with different container types, it introduces
+ * overhead for repeatedly calling member functions (since those will
+ * get dispatched, again). Therefore, you should only use it to iterate
+ * through the list at most once. If you need to do anything more complex,
+ * call `materialize()` first.
+ *
+ * Adding support for a new Tag
+ * ============================
+ * Suppose we want to add a new tag: `Chest`. Here are the steps
+ * we would have to go through:
+ *
+ * 1. Add a line for it in the macro `TORCH_ILISTREF_FORALL_TAGS`.
+ *
+ *   #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \
+ *     ...
+ *     _(Chest, ##__VA_ARGS__)
+ *
+ * 2. Add type aliases, union members, and constructors.
+ *
+ *   template <typename T>
+ *   class IListRef {
+ *     ...
+ *     using chest_type =
+ *       typename detail::IListRefTagImpl<IListRefTag::Chest, T>::list_type;
+ *     ...
+ *     IListRef(...) : tag_(IListRefTag::Chest) {
+ *       ...
+ *     }
+ *     ...
+ *     union Payload {
+ *       ...
+ *       chest_type chest;
+ *       ...
+ *     };
+ *     ...
+ *   };
+ *
+ * 3. Add a default implementation for it (in 'IListRef_inl.h'). It's
+ *    preferable to make the default implementation work for `T = Tensor`
+ *    (both `Unboxed` and `Boxed` do it).
+ *
+ *   template <typename T, typename ListElemT>
+ *   class IListRefTagImplBase<IListRefTag::Chest, T, ListElemT> {
+ *    public:
+ *     using elem_type = ListElemT;
+ *     using list_type = ChestContainer<elem_type>;
+ *
+ *     static const list_type& unwrap(const IListRef<T>& ilist) { ... }
+ *
+ *     static typename list_type::const_iterator& unwrap(
+ *         IListRefIterator<T>& it) { ... }
+ *
+ *     static const typename list_type::const_iterator& unwrap(
+ *         const IListRefIterator<T>& it) { ...
+ *     }
+ *
+ *     static IListRefConstRef<T> iterator_get(
+ *         const typename list_type::const_iterator& it) { ... }
+ *   }
+ *
+ * 4. Add a specialization for each of the already supported types.
+ *    Finally, for consistency, add them to the tracking list.
+ *    (see [Note: IListRefTagImpl Specializations])
+ *
+ *   template <>
+ *   class IListRefTagImpl<IListRefTag::Chest, at::Tensor>
+ *       : public IListRefTagImplBase<IListRefTag::Chest, at::Tensor> {};
+ *
+ * Adding support for a new Type
+ * =============================
+ * Suppose we want to add support for a new type: `Matrix`.
+ * Here are the steps we would have to go through:
+ *
+ * 1. Add a specialization for each of the existing tags.
+ *    For consistency, add them to the tracking list.
+ *    (see [Note: IListRefTagImpl Specializations])
+ *
+ *   template <>
+ *   class IListRefTagImpl<IListRefTag::Unboxed, Matrix>
+ *       : public IListRefTagImplBase<IListRefTag::Unboxed, Matrix> {};
+ *
+ *   template <>
+ *   class IListRefTagImpl<IListRefTag::Boxed, Matrix>
+ *       : public IListRefTagImplBase<IListRefTag::Boxed, Matrix> {};
+ *
+ * Common Problems
+ * ===============
+ * 1. One of the `IListRef(Iterator)` methods fails to compile.
+ *
+ *    That may be happening because the container type you added
+ *    is not compatible with the code written for that method. If
+ *    that's the case, you might have to transform that code into
+ *    a static method call (see the `List::operator[]` method).
+ *
+ * 2. Can't make `IListRefIterator::operator*` return a const-reference.
+ *
+ *    First, keep in mind that we assume that boxed containers will
+ *    have to deal with `IValue` (e.g. `c10::List`). In this context,
+ *    what may be happening is that `IValue` doesn't store your type
+ *    `T` internally. Instead, it constructs a new `T` every time
+ *    you try to get `T` from it (see `IListRef<at::OptionalTensorRef>`).
+ */
+
+namespace c10 {
+template <typename T>
+class IListRef;
+
+/*
+ * Applies arbitrary macros to each `IListRefTag`.
+ */
+#define TORCH_ILISTREF_FORALL_TAGS(_, ...) \
+  _(Unboxed, ##__VA_ARGS__)                \
+  _(Boxed, ##__VA_ARGS__)                  \
+  _(Materialized, ##__VA_ARGS__)
+
+/*
+ * Defines a "switch-case" for `TAG`. Inside, it executes `BODY`,
+ * while bringing to scope:
+ *
+ * - `ImplT`: the implementation class for `TAG`
+ * - `this_`: the result of unwrapping `this`
+ */
+#define TORCH_ILISTREF_UNWRAP_CASE(TAG, BODY)                             \
+  case c10::IListRefTag::TAG: {                                           \
+    using ImplT = c10::detail::IListRefTagImpl<c10::IListRefTag::TAG, T>; \
+    auto& this_ = ImplT::unwrap(*this);                                   \
+    BODY                                                                  \
+  } break;
+
+/*
+ * Dispatches the unwrap call, depending on `TAG`, followed by
+ * the execution of `BODY`. It aborts if `TAG` is not an `IListRefTag`.
+ *
+ * This macro is useful because it allows us to handle different
+ * types (that correspond to different tags) to be implemented
+ * only once. We can do it even when the implementations of the
+ * different tags aren't syntactically the same, by dispatching
+ * it to a function (e.g. `ImplT::fn(this_)`).
+ */
+#define TORCH_ILISTREF_UNWRAP(TAG, BODY)                         \
+  switch (TAG) {                                                 \
+    TORCH_ILISTREF_FORALL_TAGS(TORCH_ILISTREF_UNWRAP_CASE, BODY) \
+    break;                                                       \
+    default:                                                     \
+      TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag.");     \
+  }
+
+enum class IListRefTag {
+#define DEFINE_TAG(tag, ...) tag,
+  TORCH_ILISTREF_FORALL_TAGS(DEFINE_TAG)
+#undef DEFINE_TAG
+      None
+};
+
+namespace detail {
+/*
+ * Type alias that specifies whether we return a reference or a copy of `T`.
+ *
+ * What is this for?
+ * =================
+ * Since values in the boxed world are represented by an `IValue`, we also
+ * depend on whether it can be converted to a const-reference (`Tensor`) or
+ * has to create a new copy of `T` (`OptionalTensorRef`).
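+ *
+ * For instance (illustrative summary, derived from the two working
+ * instantiations listed in [Note: IListRef]):
+ *
+ * - `IListRefConstRef<at::Tensor>` is a `const at::Tensor&`
+ * - `IListRefConstRef<at::OptionalTensorRef>` is a by-value
+ *   `at::OptionalTensorRef`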
+ */
+template <typename T>
+using IListRefConstRef = typename ivalue_to_const_ref_overload_return<T>::type;
+
+/*
+ * Interface that implements key functions for each `IListRefTag` type.
+ *
+ * What is this for?
+ * =================
+ * Given an `IListRef(Iterator)<T>`, some methods have to be implemented
+ * differently for each `TAG`. Therefore, the methods inside this class
+ * are used as dispatch targets for the different `IListRefTag` values.
+ *
+ * You should create a specialization of this class for each possible
+ * combination of `IListRefTag` type (except `None`) and element type
+ * (e.g. `Tensor`).
+ *
+ * What does it do?
+ * ================
+ * 1. defines static methods to be used as dispatch targets by both
+ *    `IListRef` and `IListRefIterator` (see the implementation of
+ *    `IListRefTagImplBase`).
+ *
+ * 2. defines the `elem_type` and `list_type` aliases that will be
+ *    used in the definition of `IListRef`. In general, we should do
+ *    so by inheriting from `IListRefTagImplBase`.
+ *
+ * [Note: IListRefTagImpl Specializations]
+ * =======================================
+ * For `IListRef(Iterator)<at::Tensor>`:
+ * - `<IListRefTag::Unboxed, at::Tensor>`
+ * - `<IListRefTag::Boxed, at::Tensor>`
+ * - `<IListRefTag::Materialized, at::Tensor>`
+ *
+ * For `IListRef(Iterator)<at::OptionalTensorRef>`:
+ * - `<IListRefTag::Unboxed, at::OptionalTensorRef>`
+ * - `<IListRefTag::Boxed, at::OptionalTensorRef>`
+ * - `<IListRefTag::Materialized, at::OptionalTensorRef>`
+ */
+template <IListRefTag TAG, typename T>
+class IListRefTagImpl {};
+
+/*
+ * Base implementation of `IListRefTagImpl` methods.
+ *
+ * What is this for?
+ * =================
+ * This should make adding specializations for new types easier. For
+ * example, one should be able to add a new type just by making its
+ * `IListRefTagImpl` specialization inherit from `IListRefTagImplBase`.
+ *
+ * You should create a partial specialization for this class only if
+ * you introduce a new `IListRefTag`. The idea is that there is one
+ * default implementation for each possible value of `IListRefTag`.
+ *
+ * What does it do?
+ * ================
+ * 1. defines `elem_type` as an alias to `ListElemT`.
+ *
+ * 2. defines `list_type` as an alias to the default container type
+ *    that will hold a collection of `elem_type`. The idea is that
+ *    all types tagged as `TAG` will have `list_type` as their container,
+ *    with different `elem_type`.
+ *
+ * 3. defines the default implementation for each of the methods that
+ *    are supposed to be defined on `IListRefTagImpl` specializations.
+ *
+ * 4. inheriting from `IListRefTagImplBase<TAG, T, ListElemT>` also means
+ *    that the payload of the type `IListRef<T>` will be of type `list_type`
+ *    when it is tagged as `TAG`.
+ */
+template <IListRefTag TAG, typename T, typename ListElemT>
+class IListRefTagImplBase {};
+
+/*
+ * Materialized container for `IListRef<T>`.
+ *
+ * What is this for?
+ * =================
+ * Container that groups `T` references together. This exchanges the
+ * overhead of every method call from `IListRef<T>` for a dynamic allocation.
+ *
+ * You should use this container instead of `IListRef<T>` if:
+ *
+ * - You are going to iterate the list more than once
+ * - You need to repeatedly access arbitrary elements (using `operator[]`)
+ *
+ * What does it do?
+ * ================
+ * Removes the reference (&) from the type, and wraps it into a
+ * `std::reference_wrapper`. If `IListRefConstRef<T>` is not a
+ * reference type, then it's left unchanged.
+ */
+template <typename T>
+using _MaterializedIListRefElem = typename std::conditional<
+    std::is_reference<T>::value,
+    typename std::reference_wrapper<typename std::remove_reference<T>::type>,
+    T>::type;
+
+template <typename T>
+using MaterializedIListRefElem = _MaterializedIListRefElem<IListRefConstRef<T>>;
+
+template <typename T>
+using MaterializedIListRef = std::vector<MaterializedIListRefElem<T>>;
+
+} // namespace detail
+
+/*
+ * Iterator for `IListRef<T>`.
+ *
+ * What is it?
+ * ===========
+ * Currently, it is a `std::bidirectional_iterator` that wraps the iterator
+ * types defined for each of the `IListRefTag`s.
+ *
+ * One should be able to use it as if it were the unwrapped
+ * iterator itself.
+ *
+ * What does it do?
+ * ================
+ * Similarly to `IListRef<T>`, this is a wrapper class. Specifically, it
+ * wraps each container's `const_iterator` type alias. So, for example,
+ * given that the container for `IListRefTag::Boxed` is `c10::List<T>`, this
+ * iterator will wrap a `c10::List<T>::const_iterator`.
+ *
+ * [Note: MSVC Iterator Debug]
+ * ===========================
+ * MSVC's `vector<T>::iterator` implementation (used in the boxed variant)
+ * makes it so this union's destructor, copy-constructor (assignment), and
+ * move-constructor (assignment) are implicitly deleted.
+ *
+ * Therefore, we need to explicitly define them as needed. What follows is
+ * a list of places where these are needed and the reason why:
+ *
+ * - `Payload` destructor:
+ *   it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is set to 2.
+ *
+ * - `IListRefIterator` destructor:
+ *   same as above. However, we also need to explicitly call the variant
+ *   destructor.
+ *
+ * - `IListRefIterator` copy-constructor:
+ *   it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is different
+ *   from 0.
+ */
+template <typename T>
+class IListRefIterator {
+ private:
+#define DEFINE_FRIEND_CLASS(TAG, ...)                        \
+  friend class detail::IListRefTagImpl<IListRefTag::TAG, T>; \
+  friend class detail::IListRefTagImplBase<                  \
+      IListRefTag::TAG,                                      \
+      T,                                                     \
+      typename detail::IListRefTagImpl<IListRefTag::TAG, T>::elem_type>;
+  TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS)
+#undef DEFINE_FRIEND_CLASS
+
+ public:
+  // C++17 friendly std::iterator implementation
+  using iterator_category = std::bidirectional_iterator_tag;
+  using value_type = T;
+  using difference_type = std::ptrdiff_t;
+  using pointer = T*;
+  using reference = T&;
+
+  using unboxed_iterator_type = typename detail::
+      IListRefTagImpl<IListRefTag::Unboxed, T>::list_type::const_iterator;
+  using boxed_iterator_type = typename detail::
+      IListRefTagImpl<IListRefTag::Boxed, T>::list_type::const_iterator;
+  using materialized_iterator_type =
+      typename detail::MaterializedIListRef<T>::const_iterator;
+
+  IListRefIterator() : tag_(IListRefTag::None) {}
+
+#if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL != 0
+  // See [Note: MSVC Iterator Debug]
+  IListRefIterator(const IListRefIterator& iterator)
+      : tag_(iterator.tag_) {
+    switch (tag_) {
+      case IListRefTag::Boxed:
+        payload_.boxed_iterator = iterator.payload_.boxed_iterator;
+        break;
+      case IListRefTag::Unboxed:
+        payload_.unboxed_iterator = iterator.payload_.unboxed_iterator;
+        break;
+      case IListRefTag::Materialized:
+        payload_.materialized_iterator = iterator.payload_.materialized_iterator;
+        break;
+      default:
+        TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag.");
+    }
+  }
+#endif
+
+#if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL == 2
+  // See [Note: MSVC Iterator Debug]
+  ~IListRefIterator() noexcept(false) {
+    switch (tag_) {
+      case IListRefTag::Boxed:
+        payload_.boxed_iterator.~boxed_iterator_type();
+        break;
+      case IListRefTag::Unboxed:
+        payload_.unboxed_iterator.~unboxed_iterator_type();
+        break;
+      case IListRefTag::Materialized:
+        payload_.materialized_iterator.~materialized_iterator_type();
+        break;
+      default:
+        TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag.");
+    }
+  }
+#endif
+
+  IListRefIterator(boxed_iterator_type boxed) : tag_(IListRefTag::Boxed) {
+    payload_.boxed_iterator = boxed;
+  }
+
+  IListRefIterator(unboxed_iterator_type unboxed) : tag_(IListRefTag::Unboxed) {
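+    // (Editorial note: stores the wrapped iterator in the union; `tag_`,
+    // set above, records which union member is active so that later
+    // member calls dispatch to the unboxed implementation.)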
+    payload_.unboxed_iterator = unboxed;
+  }
+
+  IListRefIterator(materialized_iterator_type materialized) : tag_(IListRefTag::Materialized) {
+    payload_.materialized_iterator = materialized;
+  }
+
+  detail::IListRefConstRef<T> operator*() const {
+    TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::iterator_get(this_); });
+  }
+
+  IListRefIterator& operator++() {
+    TORCH_ILISTREF_UNWRAP(tag_, { ++this_; });
+    return *this;
+  }
+
+  IListRefIterator operator++(int) {
+    auto old = *this;
+    TORCH_ILISTREF_UNWRAP(tag_, { ++this_; });
+    return old;
+  }
+
+  IListRefIterator& operator--() {
+    TORCH_ILISTREF_UNWRAP(tag_, { --this_; });
+    return *this;
+  }
+
+  IListRefIterator operator--(int) {
+    auto old = *this;
+    TORCH_ILISTREF_UNWRAP(tag_, { --this_; });
+    return old;
+  }
+
+  bool operator==(const IListRefIterator& rhs) const {
+    if (tag_ != rhs.tag_) {
+      return false;
+    }
+    TORCH_ILISTREF_UNWRAP(tag_, {
+      auto& rhs_it = ImplT::unwrap(rhs);
+      return this_ == rhs_it;
+    });
+  }
+
+  bool operator!=(const IListRefIterator& rhs) const {
+    return !(*this == rhs);
+  }
+
+ private:
+  union Payload {
+    boxed_iterator_type boxed_iterator;
+    unboxed_iterator_type unboxed_iterator;
+    materialized_iterator_type materialized_iterator;
+    void* _init_ptr;
+    Payload() : _init_ptr(nullptr) {}
+#if defined(_MSC_VER)
+    // See [Note: MSVC Iterator Debug]
+    ~Payload() {}
+#endif
+  };
+
+  Payload payload_;
+  IListRefTag tag_;
+};
+
+/*
+ * See [Note: IListRef]
+ */
+template <typename T>
+class IListRef {
+ private:
+#define DEFINE_FRIEND_CLASS(TAG, ...)                        \
+  friend class detail::IListRefTagImpl<IListRefTag::TAG, T>; \
+  friend class detail::IListRefTagImplBase<                  \
+      IListRefTag::TAG,                                      \
+      T,                                                     \
+      typename detail::IListRefTagImpl<IListRefTag::TAG, T>::elem_type>;
+  TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS)
+#undef DEFINE_FRIEND_CLASS
+
+ public:
+  using unboxed_type =
+      typename detail::IListRefTagImpl<IListRefTag::Unboxed, T>::list_type;
+  using boxed_type =
+      typename detail::IListRefTagImpl<IListRefTag::Boxed, T>::list_type;
+  using materialized_type =
+      typename detail::MaterializedIListRef<T>;
+
+  using iterator = IListRefIterator<T>;
+  using const_iterator = IListRefIterator<T>;
+  using reverse_iterator = std::reverse_iterator<iterator>;
+  using value_type = typename iterator::value_type;
+
+  IListRef() : tag_(IListRefTag::None) {}
+
+  IListRef(const boxed_type& boxed) : tag_(IListRefTag::Boxed) {
+    payload_.boxed = &boxed;
+  }
+
+  IListRef(const unboxed_type& unboxed) : tag_(IListRefTag::Unboxed) {
+    payload_.unboxed = unboxed;
+  }
+
+  IListRef(const std::initializer_list<T>& list) : tag_(IListRefTag::Unboxed) {
+    payload_.unboxed = at::ArrayRef<T>(list);
+  }
+
+  template <
+      typename... UnboxedConstructorArgs,
+      typename = std::enable_if_t<
+          std::is_constructible<unboxed_type, UnboxedConstructorArgs...>::value>>
+  IListRef(UnboxedConstructorArgs&&... args) : tag_(IListRefTag::Unboxed) {
+    payload_.unboxed = unboxed_type(std::forward<UnboxedConstructorArgs>(args)...);
+  }
+
+  IListRef(const materialized_type& materialized) : tag_(IListRefTag::Materialized) {
+    payload_.materialized = &materialized;
+  }
+
+  size_t size() const {
+    TORCH_ILISTREF_UNWRAP(tag_, { return this_.size(); });
+  }
+
+  bool empty() const {
+    return size() == 0;
+  }
+
+  iterator begin() const {
+    TORCH_ILISTREF_UNWRAP(tag_, { return this_.begin(); });
+  }
+
+  iterator end() const {
+    TORCH_ILISTREF_UNWRAP(tag_, { return this_.end(); });
+  }
+
+  detail::IListRefConstRef<T> front() const {
+    TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::front(this_); });
+  }
+
+  /*
+   * Materializes the `IListRef<T>` into a `std::vector`.
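+   *
+   * (Editorial sketch — `list` is a hypothetical `IListRef<at::Tensor>`,
+   * not part of this header:
+   *
+   *   auto materialized = list.materialize();
+   *   for (size_t pass = 0; pass < n_passes; pass++) {
+   *     for (const at::Tensor& t : materialized) { ... }
+   *   }
+   *
+   * Only the materialize() call pays the dispatch and allocation cost;
+   * the inner loops iterate a plain std::vector.)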
+   *
+   * This should be used when one wishes to either:
+   *
+   * - iterate over the list more than once: each `IListRefIterator`
+   *   member function call has to go through a switch, introducing
+   *   non-negligible overhead
+   *
+   * - randomly access an arbitrary element using `operator[]`:
+   *   same reason as above
+   */
+  detail::MaterializedIListRef<T> materialize() const {
+    if (isMaterialized()) {
+      return toMaterialized();
+    }
+
+    detail::MaterializedIListRef<T> materialized;
+    materialized.reserve(size());
+    for (const auto& t : *this) {
+      materialized.emplace_back(t);
+    }
+    return materialized;
+  }
+
+#define DEFINE_CHECK(TAG, ...)       \
+  bool is##TAG() const {             \
+    return tag_ == IListRefTag::TAG; \
+  }
+  TORCH_ILISTREF_FORALL_TAGS(DEFINE_CHECK);
+#undef DEFINE_CHECK
+
+  bool isNone() const {
+    return tag_ == IListRefTag::None;
+  }
+
+#define DEFINE_CASTING(TAG, ...)                                          \
+  const typename detail::IListRefTagImpl<IListRefTag::TAG, T>::list_type& \
+      to##TAG() const {                                                   \
+    TORCH_INTERNAL_ASSERT(is##TAG());                                     \
+    return detail::IListRefTagImpl<IListRefTag::TAG, T>::unwrap(*this);   \
+  }
+  TORCH_ILISTREF_FORALL_TAGS(DEFINE_CASTING);
+#undef DEFINE_CASTING
+
+ private:
+  union Payload {
+    const boxed_type* boxed;
+    unboxed_type unboxed;
+    const materialized_type* materialized;
+    Payload() : boxed(nullptr) {}
+  };
+
+  Payload payload_;
+  IListRefTag tag_;
+};
+
+} // namespace c10
+
+#include <ATen/core/IListRef_inl.h>
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/class_type.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/class_type.h
new file mode 100644
index 0000000000000000000000000000000000000000..310ed332aec77b95fb18b2150cc0c5414b1f0853
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/class_type.h
@@ -0,0 +1,441 @@
+#pragma once
+
+#include <memory>
+
+#include <ATen/core/jit_type_base.h>
+#include <ATen/core/ivalue.h>
+#include <c10/util/Optional.h>
+
+namespace torch {
+namespace jit {
+struct CompilationUnit;
+struct Function;
+} // namespace jit
+} // namespace torch
+
+namespace c10 {
+
+struct FunctionSchema;
+
+// This enumerator represents the 'kind' of an attribute - a buffer, a
+// parameter, or neither. These kinds are mutually exclusive. Buffers and
+// Parameters can only appear on modules.
+enum class AttributeKind {
+  BUFFER,
+  PARAMETER,
+  REGULAR_ATTRIBUTE
+};
+
+// This structure represents all of the bookkeeping information for a class
+// attribute: name, kind (see: AttributeKind), and type (see: TypePtr).
+// Note: This structure does not represent the value of the attribute.
+struct TORCH_API ClassAttribute {
+ public:
+  ClassAttribute(AttributeKind kind,
+                 TypePtr attributeType,
+                 std::string attributeName) :
+      kind_(kind),
+      attributeType_(std::move(attributeType)),
+      attributeName_(std::move(attributeName)) {}
+
+  AttributeKind getKind() const {
+    return kind_;
+  }
+
+  const TypePtr& getType() const {
+    return attributeType_;
+  }
+
+  const std::string& getName() const {
+    return attributeName_;
+  }
+
+ private:
+  AttributeKind kind_;
+  TypePtr attributeType_;
+  std::string attributeName_;
+};
+
+/**
+ * User Defined Types
+ */
+
+struct ClassType;
+using ClassTypePtr = std::shared_ptr<ClassType>;
+using ::torch::jit::CompilationUnit;
+
+// This represents a class in TorchScript.
+struct TORCH_API ClassType : public NamedType {
+  // This represents an attribute of a class: a name associated with the
+  // attribute, plus a getter and an (optional) setter for it.
+  struct Property {
+    std::string name;
+    torch::jit::Function* getter;
+    torch::jit::Function* setter;
+  };
+
+  // Create a class type with name `name` and its methods stored in `cu`.
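+  // (Editorial illustration — names are made up, not part of this header:
+  //  a module type might be created as
+  //  `ClassType::create(QualifiedName("__torch__.MyModule"), cu,
+  //                     /*is_module=*/true)`.)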
+  static ClassTypePtr create(
+      c10::optional<QualifiedName> qualifiedName,
+      std::weak_ptr<CompilationUnit> cu,
+      bool is_module = false,
+      std::string doc_string = "",
+      std::vector<std::string> unresolved_class_attributes = {});
+
+  bool equals(const Type& rhs) const override {
+    if (this == &rhs) {
+      return true;
+    }
+    if (auto user_rhs = rhs.castRaw<ClassType>()) {
+      const auto& lhs_name = name().value();
+      const auto& rhs_name = user_rhs->name().value();
+
+      return lhs_name == rhs_name &&
+          this->compilation_unit() == user_rhs->compilation_unit();
+    }
+    return false;
+  }
+
+  std::string str() const override {
+    return annotation_str();
+  }
+
+  std::string repr_str() const override {
+    std::stringstream ss;
+    ss << str()
+       << " (of Python compilation unit at: " << compilation_unit().get() << ")";
+    return ss.str();
+  }
+
+  const std::vector<torch::jit::Function*>& methods() const;
+
+  TypePtr findAttribute(const std::string& name) const {
+    size_t pos = 0;
+    for (const auto& attr : attributes_) {
+      if (name == attr.getName()) {
+        break;
+      }
+      ++pos;
+    }
+
+    if (pos >= attributes_.size()) {
+      return nullptr;
+    }
+    return attributes_[pos].getType();
+  }
+
+  const TypePtr& getAttribute(const std::string& name) const {
+    auto slot = findAttributeSlot(name);
+    TORCH_CHECK(
+        slot,
+        repr_str(),
+        " does not have an attribute with name '",
+        name,
+        "'");
+    return attributes_[*slot].getType();
+  }
+
+  size_t numAttributes() const {
+    return attributes_.size();
+  }
+
+  const TypePtr& getAttribute(size_t slot) const {
+    AT_ASSERT(slot < attributes_.size());
+    return attributes_.at(slot).getType();
+  }
+
+  const std::string getAttributeName(size_t slot) const {
+    AT_ASSERT(slot < attributes_.size());
+    return attributes_[slot].getName();
+  }
+
+  void checkNotExist(const std::string& name, const std::string& what) const;
+
+  // Attributes are stored in a specific slot at runtime for efficiency.
+  // When emitting instructions we specify the slot so that attribute access is
+  // a constant lookup.
+  c10::optional<size_t> findAttributeSlot(const std::string& name) const {
+    size_t slot = 0;
+    for (const auto& attr : attributes_) {
+      if (name == attr.getName()) {
+        return slot;
+      }
+      slot++;
+    }
+    return c10::nullopt;
+  }
+  size_t getAttributeSlot(const std::string& name) const {
+    if (auto r = findAttributeSlot(name)) {
+      return *r;
+    }
+    TORCH_CHECK(
+        false,
+        repr_str(),
+        " does not have an attribute with name '",
+        name,
+        "'");
+  }
+
+  bool hasAttribute(const std::string& name) const {
+    return std::find_if(
+               attributes_.cbegin(),
+               attributes_.cend(),
+               [&](const ClassAttribute& attr) { return attr.getName() == name; }) !=
+        attributes_.cend();
+  }
+
+  bool isUnresolvedClassAttribute(const std::string& name) const;
+
+  at::ArrayRef<TypePtr> containedTypes() const override {
+    return attributeTypes_;
+  }
+
+  size_t addAttribute(
+      const std::string& name,
+      TypePtr type,
+      bool is_parameter = false,
+      bool is_buffer = false);
+
+  // [Internal Only] Remove an attribute from the ClassType.
+  // The caller is responsible for making sure the modification is safe:
+  // it is unsafe to keep using existing instances of this type after the
+  // change, and any code that works on the attribute is now invalid.
+  // Only newly created code is valid again.
+  void unsafeRemoveAttribute(const std::string& name);
+
+  // [Internal Only] Change the type of an attribute of the ClassType.
+  // The caller is responsible for making sure the modification is safe:
+  // it is unsafe to maintain uses of the old type of the attribute,
+  // and any code that works on the attribute is now invalid.
+  // Only newly created code is valid again.
+  void unsafeChangeAttributeType(const std::string& name, const TypePtr& new_ty);
+
+  // Add attribute \p NAME if it doesn't exist, or verify that it has a
+  // compatible type otherwise.
+  size_t addOrCheckAttribute(
+      const std::string& name,
+      TypePtr ty,
+      bool is_parameter = false,
+      bool is_buffer = false) {
+    auto slot_idx = findAttributeSlot(name);
+    if (!slot_idx) {
+      return addAttribute(name, std::move(ty), is_parameter, is_buffer);
+    }
+
+    TORCH_CHECK(
+        is_parameter == this->is_parameter(*slot_idx),
+        "Parameter field mismatch for the field '",
+        name,
+        "'");
+    const TypePtr& atype = getAttribute(*slot_idx);
+    TORCH_CHECK(
+        ty->isSubtypeOf(*atype),
+        ty->repr_str(),
+        " is not compatible with the type ",
+        atype->repr_str(),
+        " for the field '",
+        name,
+        "'");
+    return *slot_idx;
+  }
+
+  // Get the property with the given \p name, if it exists on the class.
+  c10::optional<ClassType::Property> getProperty(const std::string& name);
+  // Add a property named \p name with \p getter and \p setter as its getter and setter.
+  void addProperty(const std::string& name, torch::jit::Function* getter, torch::jit::Function* setter);
+  // Get a list of all properties.
+  const std::vector<Property>& properties() const {
+    return properties_;
+  }
+
+  bool hasConstant(const std::string& name) const {
+    return std::find_if(
+               constantNames_.cbegin(),
+               constantNames_.cend(),
+               [&](const std::string& constant) { return constant == name; }) !=
+        constantNames_.cend();
+  }
+
+  size_t addConstant(const std::string& name, const IValue& value);
+
+  c10::optional<size_t> findConstantSlot(const std::string& name) const;
+
+  size_t getConstantSlot(const std::string& name) const {
+    if (auto r = findConstantSlot(name)) {
+      return *r;
+    }
+    TORCH_CHECK(
+        false,
+        repr_str(),
+        " does not have a constant field with the name '",
+        name,
+        "'");
+  }
+
+  const std::string& getConstantName(size_t slot) const;
+
+  const std::string& doc_string() const {
+    return doc_string_;
+  }
+
+  IValue getConstant(const std::string& name) const;
+
+  IValue getConstant(size_t slot) const;
+
+  c10::optional<IValue> findConstant(const std::string& name) const;
+
+  size_t numConstants() const;
+
+  at::ArrayRef<std::string> constantNames() const {
+    return constantNames_;
+  }
+
+  at::ArrayRef<IValue> constantValues() const;
+
+  // [Internal Only] Remove a constant from the ClassType.
+  // The caller is responsible for making sure the modification is safe:
+  // it is unsafe to keep using existing instances of this type after the
+  // change, and any code that works on the constant is now invalid.
+  // Only newly created code is valid again.
+  void unsafeRemoveConstant(const std::string& name);
+
+  TypePtr createWithContained(std::vector<TypePtr> contained_types) const override {
+    auto ptr = ClassType::create(name(), compilation_unit_, is_module());
+    AT_ASSERT(numAttributes() == contained_types.size());
+    for (size_t i = 0; i < attributes_.size(); ++i) {
+      AT_ASSERT(attributes_[i].getType()->isSubtypeOf(*contained_types[i]));
+      ptr->addAttribute(attributes_[i].getName(), std::move(contained_types[i]));
+    }
+    // Copy methods over
+    for (const auto& method : methods()) {
+      ptr->addMethod(method);
+    }
+    return ptr;
+  }
+
+  bool is_module() const override {
+    return isModule_;
+  }
+
+  const std::vector<ClassAttribute>& getAttributes() const {
+    return attributes_;
+  }
+
+  bool is_parameter(size_t slot) const {
+    TORCH_INTERNAL_ASSERT(
+        is_module(), "asking for parameterSlots of non-Module");
+    return attributes_.at(slot).getKind() == AttributeKind::PARAMETER;
+  }
+
+  bool is_buffer(size_t slot) const {
+    TORCH_INTERNAL_ASSERT(
+        is_module(), "asking for bufferWrittenSlots of non-Module");
+    return attributes_.at(slot).getKind() == AttributeKind::BUFFER;
+  }
+
+  void addForwardPreHook(torch::jit::Function* pre_hook_ptr);
+  void addForwardHook(torch::jit::Function* hook_ptr);
+  torch::jit::Function* findForwardPreHook(const std::string& name) const;
+  torch::jit::Function* findForwardHook(const std::string& name) const;
+  const std::vector<torch::jit::Function*>& getForwardHooks() const;
+  const std::vector<torch::jit::Function*>& getForwardPreHooks() const;
+
+  void checkForwardPreHookSchema(
+      int pre_hook_idx,
+      const FunctionSchema& pre_hook_schema) const;
+  void checkForwardHookSchema(
+      int hook_idx,
+      const FunctionSchema& hook_schema) const;
+
+  void addMethod(torch::jit::Function* method);
+  torch::jit::Function* findMethod(const std::string& name) const;
+  torch::jit::Function& getMethod(const std::string& name) const;
+  torch::jit::Function* findHook(const std::string& name) const;
+  torch::jit::Function& getHook(const std::string& name) const;
+  bool hasMethod(const std::string& name) const;
+
+  torch::jit::Function* findStaticMethod(const std::string& name) const;
+  void addStaticMethod(torch::jit::Function* method);
+
+  // [Internal Only] Remove a method from the ClassType.
+  // The caller is responsible for making sure the modification is safe:
+  // it is unsafe to keep using existing instances of this type after the
+  // change, and any code that works on the method is now invalid.
+  // Only newly created code is valid again.
+  // Note this method is intended for freezing only.
+  void unsafeRemoveMethod(const std::string& name);
+
+  std::shared_ptr<CompilationUnit> compilation_unit();
+
+  std::shared_ptr<const CompilationUnit> compilation_unit() const;
+
+  // Generate a refined version of this class.
+  // It has the same name, but the slot Types are subtypes of
+  // the original slots. It is only valid to refine a class type in a context
+  // where it is known that there are no assignments to the object's slots
+  // that would invalidate the refinement.
+  // These variants are not registered in the global class table.
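+  // (Editorial illustration: passing `refined_slots` whose entries are
+  //  subtypes of the declared slot types yields a twin class type that is
+  //  only meant for local type refinement.)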
+  ClassTypePtr refine(at::ArrayRef<TypePtr> refined_slots) const;
+
+  bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
+
+  static const TypeKind Kind = TypeKind::ClassType;
+
+ private:
+  ClassType(
+      c10::optional<QualifiedName> name,
+      std::weak_ptr<CompilationUnit> cu,
+      bool is_module = false,
+      std::string doc_string = "",
+      std::vector<std::string> unresolved_class_attributes = {});
+
+  std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
+    const auto& n = name().value();
+    return n.qualifiedName();
+  }
+
+  void addAttribute(ClassAttribute classAttribute);
+  std::string getForwardPreHookErrorMessage(int pre_hook_idx) const;
+  std::string getForwardHookErrorMessage(int hook_idx) const;
+
+  // Mapping of attribute names -> their type.
+  // NOTE: this does not contain methods, which are stored in the module.
+  // TODO: once modules support arbitrary ivalue attributes, we don't need this
+  // anymore.
+  // TODO: This is better represented as an OrderedDict, but alas it is not yet
+  // available from c10.
+
+  // Mapping of constant names -> their value.
+  std::vector<std::string> constantNames_;
+  std::vector<IValue> constantValues_;
+  // Holds method attributes
+  std::weak_ptr<CompilationUnit> compilation_unit_;
+
+  // Holds all attributes; attribute details are found on ClassAttribute.
+  std::vector<ClassAttribute> attributes_;
+  // Mirrors attributes_; it only exists because the `containedTypes()` method
+  // returns an ArrayRef.
+  // Never fill this without using the appropriate provideNewClassAttribute method.
+  std::vector<TypePtr> attributeTypes_;
+
+  // List of methods associated with this class.
+  std::vector<torch::jit::Function*> methods_;
+  std::vector<torch::jit::Function*> staticmethods_;
+
+  // List of hooks to be run before/after forward.
+  std::vector<torch::jit::Function*> forward_hooks_;
+  std::vector<torch::jit::Function*> forward_pre_hooks_;
+
+  // List of properties exposed by this class.
+  std::vector<Property> properties_;
+
+  bool isModule_ = false;
+
+  // Doc string of class.
+  std::string doc_string_ = "";
+
+  // For error reporting accesses to class level attributes.
+  std::vector<std::string> unresolved_class_attributes_;
+};
+
+} // namespace c10
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function_schema.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function_schema.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3818784561f45dda9b8f92c250c630ac5ef78ba
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/core/function_schema.h
@@ -0,0 +1,687 @@
+#pragma once
+
+#include <c10/util/StringUtil.h>
+#include <c10/util/string_view.h>
+#include <c10/util/irange.h>
+#include <ATen/core/jit_type.h>
+#include <ATen/core/symbol.h>
+#include <ATen/core/ivalue.h>
+#include <ATen/core/alias_info.h>
+#include <ATen/core/operator_name.h>
+#include <ATen/core/dispatch/OperatorOptions.h>
+#include <unordered_map>
+#include <utility>
+
+namespace c10 {
+
+// schema as used in the compiler for resolving function calls and reporting
+// errors. These objects should be constructed from C10 schema once those
+// are available.
+
+struct Argument;
+struct FunctionSchema;
+
+using AliasTypeSet = std::vector<TypePtr>;
+
+bool operator==(const Argument& lhs, const Argument& rhs);
+
+struct Argument {
+  Argument(
+      std::string name = "",
+      const TypePtr& type = nullptr,
+      c10::optional<int32_t> N = c10::nullopt,
+      c10::optional<IValue> default_value = c10::nullopt,
+      bool kwarg_only = false,
+      c10::optional<AliasInfo> alias_info = c10::nullopt)
+    : Argument(std::move(name), type, type, N, std::move(default_value), kwarg_only, std::move(alias_info)) {}
+
+  Argument(
+      std::string name,
+      TypePtr fake_type,
+      TypePtr real_type,
+      c10::optional<int32_t> N = c10::nullopt,
+      c10::optional<IValue> default_value = c10::nullopt,
+      bool kwarg_only = false,
+      c10::optional<AliasInfo> alias_info = c10::nullopt)
+    : name_(std::move(name)),
+      type_(fake_type ?
+          std::move(fake_type) : TensorType::get()),
+      real_type_(real_type ? std::move(real_type) : type_),
+      N_(N),
+      default_value_(std::move(default_value)),
+      alias_info_(alias_info ? std::make_unique<AliasInfo>(std::move(*alias_info)) : nullptr),
+      kwarg_only_(kwarg_only) {
+    // this is a softly-enforced invariant for out arguments.
+    bool is_alias = alias_info_ != nullptr && alias_info_->isWrite();
+    is_out_ = kwarg_only_ && is_alias;
+  }
+
+  Argument(Argument&& rhs) noexcept = default;
+
+  Argument(const Argument& rhs)
+      : name_(rhs.name_),
+        type_(rhs.type_),
+        real_type_(rhs.real_type_),
+        N_(rhs.N_),
+        default_value_(rhs.default_value_),
+        alias_info_(rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr),
+        kwarg_only_(rhs.kwarg_only_),
+        is_out_(rhs.is_out_) {}
+
+  Argument& operator=(Argument&& rhs) = default;
+
+  Argument& operator=(const Argument& rhs) {
+    if (this != &rhs) {
+      name_ = rhs.name_;
+      type_ = rhs.type_;
+      real_type_ = rhs.real_type_;
+      N_ = rhs.N_;
+      default_value_ = rhs.default_value_;
+      alias_info_ = rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr;
+      kwarg_only_ = rhs.kwarg_only_;
+      is_out_ = rhs.is_out_;
+    }
+    return *this;
+  }
+
+  const std::string& name() const {
+    return name_;
+  }
+  const TypePtr& type() const {
+    return type_;
+  }
+  // if type() is non-null, this is guaranteed to be non-null (if no real
+  // type was provided, this takes on type()'s value)
+  const TypePtr& real_type() const {
+    return real_type_;
+  }
+  c10::optional<int32_t> N() const {
+    return N_;
+  }
+  const c10::optional<IValue>& default_value() const {
+    return default_value_;
+  }
+  bool kwarg_only() const {
+    return kwarg_only_;
+  }
+
+  bool is_out() const {
+    return is_out_;
+  }
+
+  C10_NODISCARD const AliasInfo* alias_info() const {
+    return alias_info_.get();
+  }
+
+  bool is_inferred_type() const {
+    bool is_inferred_type = false;
+    TORCH_INTERNAL_ASSERT(type_);
+    if (auto pt = type_->cast<TensorType>()) {
+      if (pt->isInferredType()) {
+        is_inferred_type = true;
+      }
+    }
+    return is_inferred_type;
+  }
+
+  std::string formatTypeMismatchMsg(const std::string& actual_type) const {
+    std::string inferred_type_hint;
+    if (is_inferred_type()) {
+      inferred_type_hint = c10::str(
+          "Inferred '",
+          name(),
+          "' to be of type 'Tensor' ",
+          "because it was not annotated with an explicit type.\n");
+    }
+    return c10::str(
+        "Expected a value of type '",
+        type()->repr_str(),
+        "' for argument '",
+        name(),
+        "' but instead found type '",
+        actual_type,
+        "'.\n",
+        inferred_type_hint);
+  }
+
+  Argument cloneWithType(TypePtr new_type) const {
+    return Argument(
+        name_,
+        std::move(new_type),
+        N_,
+        default_value_,
+        kwarg_only_,
+        alias_info_ ? c10::optional<AliasInfo>(*alias_info_) : c10::nullopt);
+  }
+
+  // this function checks whether this Argument is backward compatible with
+  // the old one. We consider the following cases to be backward compatible:
+  //   1) the two arguments are equal
+  //   2) this arg's type is a subtype of the old arg's type
+  //   3) this arg provides the same default value as the old arg, if the
+  //      old arg has one
+  bool isBackwardCompatibleWith(
+      const Argument& old,
+      std::ostream* why_not=nullptr) const;
+
+  // this function checks whether this Argument is forward compatible with
+  // the old one.
+  // We consider the following cases to be forward compatible:
+  //   1) the two arguments are equal
+  //   2) this arg's type is a subtype of the old arg's type
+  //   3) this arg provides the same default value as the old arg, if the
+  //      old arg has one
+  bool isForwardCompatibleWith(
+      const Argument& old,
+      std::ostream* why_not = nullptr) const;
+
+ private:
+  std::string name_;
+  TypePtr type_;
+  TypePtr real_type_; // this is ScalarType, not int, e.g.
+  // for list types, an optional statically known length for the list
+  // e.g. for int[3]: type = ListType::ofInts(), N = 3
+  // If present, this will allow scalars to be broadcast to this length to
+  // become a list.
+  c10::optional<int32_t> N_;
+
+  c10::optional<IValue> default_value_;
+  // AliasInfo is huge, so let's only allocate memory for it if
+  // necessary (which it isn't during schema parsing on startup, to
+  // give a pertinent example).
+  std::unique_ptr<AliasInfo> alias_info_;
+  // is this only specifiable as a keyword argument?
+  bool kwarg_only_;
+  // marks if the argument is an out variant of the schema
+  bool is_out_;
+};
+
+inline bool operator==(const Argument& lhs, const Argument& rhs) {
+  return lhs.name() == rhs.name()
+      && *lhs.type() == *rhs.type()
+      && lhs.N() == rhs.N()
+      && lhs.default_value() == rhs.default_value()
+      && lhs.kwarg_only() == rhs.kwarg_only()
+      && (lhs.alias_info() == rhs.alias_info()
+          || (lhs.alias_info() != nullptr && rhs.alias_info() != nullptr
+              && *lhs.alias_info() == *rhs.alias_info()));
+}
+
+inline bool operator!=(const Argument& lhs, const Argument& rhs) {
+  return !(lhs == rhs);
+}
+
+enum struct TORCH_API SchemaArgType { input, output };
+
+/**
+ * struct SchemaArgument
+ *
+ * Structure used to represent arguments or returns for a schema.
+ */
+struct TORCH_API SchemaArgument {
+  SchemaArgType type;
+  size_t index;
+  SchemaArgument(SchemaArgType tpe, size_t idx) : type(tpe), index(idx) {}
+  bool operator==(const SchemaArgument& rhs) const {
+    return type == rhs.type && index == rhs.index;
+  }
+};
+
+bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs);
+
+struct TORCH_API FunctionSchema {
+  FunctionSchema(
+      std::string name,
+      std::string overload_name,
+      std::vector<Argument> arguments,
+      std::vector<Argument> returns,
+      bool is_vararg = false,
+      bool is_varret = false)
+      : name_({std::move(name), std::move(overload_name)}),
+        arguments_(std::move(arguments)),
+        returns_(std::move(returns)),
+        is_vararg_(is_vararg),
+        is_varret_(is_varret) {
+    checkSchema();
+  }
+
+  FunctionSchema(
+      Symbol name,
+      std::string overload_name,
+      std::vector<Argument> arguments,
+      std::vector<Argument> returns,
+      bool is_vararg = false,
+      bool is_varret = false)
+      : FunctionSchema(
+            name.toQualString(),
+            std::move(overload_name),
+            std::move(arguments),
+            std::move(returns),
+            is_vararg,
+            is_varret) {
+    checkSchema();
+  }
+
+  // Checks whether this schema is backward compatible with the old one.
+  // The following conditions must be true:
+  // [Function structure] The new schema's name, overload-name, varargs, and
+  //      return arity are the same.
+  // [Output Narrowing] The new schema's output type must be the same class
+  //      or inherit from the old schema's output type.
+  // [Argument count] The new schema must have at least as many arguments as
+  //      the old schema (considering the list of positional and kwargs).
+  // [Arg Compatibility] Every argument in the old schema has a corresponding
+  //      argument in the new schema that:
+  //        * is at the same position.
+  //        * has the same name.
+  //        * is either positional, or kwarg and the old argument was kwarg.
+  //        * has the same type, or the old argument's type inherits from the
+  //          new argument's type.
+  // [Default Values] Every new argument must have a default value.
+  // E.g.
+  //   OK    f_new(a, b, c=1) => f_old(a, b)
+  //   NOK   f_new(a, c=1, *, b) => f_old(a, *, b)
+  //   OK    f_new(a, b, *, c) => f_old(a, *, b, c)
+  //   NOK   f_new(a, *, b, c) => f_old(a, b, *, c)
+  //   NOK   f_new(a, *, c, b) => f_old(a, *, b, c)
+  //   OK    f_new(a, *, b, c, d=1) => f_old(a, *, b, c)
+  bool isBackwardCompatibleWith(
+      const FunctionSchema& old,
+      std::ostream* why_not = nullptr) const;
+
+  // Checks whether this schema is forward compatible with the old one.
+  // The following conditions must be true:
+  // [Function structure] The new schema's name, overload-name, varargs, and
+  //      return arity are the same.
+  // [Output Narrowing] The new schema's output type must be the same class
+  //      or inherit from the old schema's output type.
+  // [Arg Compatibility] Every argument in the old schema has a corresponding
+  //      argument in the new schema that:
+  //        * is at the same position.
+  //        * has the same name.
+  //        * is either positional, or kwarg and the old argument was kwarg.
+  //        * has the same type, or the old argument's type inherits from the
+  //          new argument's type.
+  // [Default Values] Every new argument must have a default value.
+  //      Each default value type should NOT be a container type.
+  // [Positioning] All default arguments MUST go after either old
+  //      default arguments or the end of positional arguments,
+  //      and right BEFORE all out arguments.
+  bool isForwardCompatibleWith(
+      const FunctionSchema& old,
+      std::ostringstream& why_not) const;
+
+ private:
+  OperatorName name_;
+  std::vector<Argument> arguments_;
+  std::vector<Argument> returns_;
+  // if true then this schema takes an arbitrary number of additional arguments
+  // after the arguments specified in `arguments`.
+  // currently this is used primarily to represent 'primitive' operators whose
+  // arguments are not checked by the schema
+  bool is_vararg_;
+  bool is_varret_;
+
+  // if no alias information is directly specified, what kind of "default"
+  // alias information should we infer?
+  // NB: due to alias analysis kind merging, this may be nullopt. Eventually
+  // this should always be set, no matter what.
+  c10::optional<AliasAnalysisKind> alias_kind_;
+
+  template <typename T>
+  void checkArg(const IValue& value, const Argument& argument, optional<size_t> pos) const;
+
+  void checkSchema() const {
+    bool seen_default_arg = false;
+    for (const auto& arg : arguments()) {
+      if (arg.default_value()) {
+        seen_default_arg = true;
+      } else {
+        // we have historically serialized broadcasting lists without default
+        // values, so to not break backward compatibility, allow lists here
+        if (arg.type()->kind() == ListType::Kind) {
+          continue;
+        }
+        TORCH_INTERNAL_ASSERT(
+            !seen_default_arg || arg.kwarg_only(),
Parameter ", + arg.name(), + " in ", + *this); + } + } + } + + public: + + void dump() const; + + const OperatorName& operator_name() const { + return name_; + } + const std::string& name() const { + return name_.name; + } + const std::string& overload_name() const { + return name_.overload_name; + } + const std::vector& arguments() const { + return arguments_; + } + const std::vector& returns() const { + return returns_; + } + bool is_vararg() const { + return is_vararg_; + } + bool is_varret() const { + return is_varret_; + } + bool is_aliasing(const c10::SchemaArgument &argument) const { + TORCH_INTERNAL_ASSERT( + argument.index < getCorrectList(argument.type).size(), + "Invalid index for schema."); + const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info(); + return aliasInfo; + } + bool is_mutable() const { + return std::any_of( + arguments_.cbegin(), arguments_.cend(), [](const Argument& arg) { + const AliasInfo* aliasInfo = arg.alias_info(); + return aliasInfo && aliasInfo->isWrite(); + }); + } + bool is_mutable(const c10::SchemaArgument &argument) const { + TORCH_INTERNAL_ASSERT( + argument.index < getCorrectList(argument.type).size(), + "Invalid index for schema."); + const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info(); + return aliasInfo && aliasInfo->isWrite(); + } + bool is_mutable(c10::string_view name) const { + c10::optional index = argumentIndexWithName(name); + TORCH_INTERNAL_ASSERT( + index != c10::nullopt, "Schema has no argument named ", name); + + return is_mutable({c10::SchemaArgType::input, static_cast(*index)}); + } + + // Returns whether lhs and rhs may alias directly. + // This does not account for cases where lhs or rhs are a container that + // may contain elements that alias the other argument. + // FunctionSchema::may_contain_alias will include that functionality. + bool may_alias(const SchemaArgument& lhs, const SchemaArgument& rhs) const; + + // Returns whether lhs and rhs may alias directly or whether lhs/rhs are a container + // that may contain elements that alias the other argument. + // bidirectional = false only returns whether lhs may contain an alias of rhs + // while bidirectional = true returns both directions. + bool may_contain_alias(const SchemaArgument& lhs, const SchemaArgument& rhs, bool bidirectional = true) const; + + // Returns whether the two AliasTypeSets contain any similarities + // ie: whether the two type sets can alias. + bool canAliasTypeSetsAlias(const c10::optional &lhs, const c10::optional &rhs) const; + + // Recursively Finds all contained types within the AliasTypeSet. + c10::optional getAliasTypeSetContainedTypes(const c10::optional &aliasTypeSet) const; + + // Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp. + // Used to map types to a type such that all types that can alias will be mapped to the same type. + // For example, calling this method on 'Optional[List[int]]' is the same as calling this method + // on 'List[int]'. 
+  c10::optional<AliasTypeSet> mapTypeToAliasTypeSet(const TypePtr& type) const;
+
+  // Returns either arguments() or returns() depending on the SchemaArgType:
+  // output => returns(), input => arguments()
+  const std::vector<Argument>& getCorrectList(SchemaArgType type) const;
+
+  c10::optional<int> argumentIndexWithName(c10::string_view name) const {
+    for (const auto i : c10::irange(arguments().size())) {
+      if (name == arguments()[i].name())
+        return i;
+    }
+    return c10::nullopt;
+  }
+  FunctionSchema cloneWithName(std::string name, std::string overload_name) const {
+    return FunctionSchema(
+        std::move(name),
+        std::move(overload_name),
+        arguments(),
+        returns(),
+        is_vararg(),
+        is_varret()
+        );
+  }
+  FunctionSchema cloneWithArguments(std::vector<Argument> new_arguments) const {
+    return FunctionSchema(
+        name(),
+        overload_name(),
+        std::move(new_arguments),
+        returns(),
+        is_vararg(),
+        is_varret());
+  }
+  FunctionSchema cloneWithReturns(std::vector<Argument> new_returns) const {
+    return FunctionSchema(
+        name(),
+        overload_name(),
+        arguments(),
+        std::move(new_returns),
+        is_vararg(),
+        is_varret());
+  }
+
+  std::string formatTypeMismatchMsg(
+      const Argument& expected,
+      const std::string& actual_type,
+      c10::optional<size_t> position = c10::nullopt,
+      c10::optional<std::string> value = c10::nullopt) const;
+
+  FunctionSchema cloneWithRemappedTypes(
+      const std::function<TypePtr(TypePtr)> type_map) const;
+
+  FunctionSchema cloneWithRealTypes(bool with_symint = true) const;
+
+  // Check that inputs have the correct types and appends any missing default
+  // values.
+  template <typename T>
+  void checkAndNormalizeInputs(
+      std::vector<IValue>& inputs,
+      const std::unordered_map<std::string, IValue>& kwargs =
+          std::unordered_map<std::string, IValue>{}) const;
+
+  std::string findErrorInKwargs(const std::vector<std::string>& kwargs) const;
+
+  bool hasAnyAliasInfo() const {
+    for (const auto& arg : arguments_) {
+      if (arg.alias_info() != nullptr) {
+        return true;
+      }
+    }
+    for (const auto& ret : returns_) {
+      if (ret.alias_info() != nullptr) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+
+  // TODO remove the mutation here
+  bool isDefaultAliasAnalysisKind() const {
+    return !alias_kind_;
+  }
+  AliasAnalysisKind aliasAnalysis() const {
+    return alias_kind_.value_or(AliasAnalysisKind::CONSERVATIVE);
+  }
+  void setAliasAnalysis(AliasAnalysisKind v) {
+    alias_kind_ = v;
+  }
+
+  c10::optional<c10::string_view> getNamespace() const {
+    return name_.getNamespace();
+  }
+
+  // Returns true if we successfully set the namespace (i.e. there was none
+  // set previously); returns false otherwise.
+  bool setNamespaceIfNotSet(const char* ns) {
+    return name_.setNamespaceIfNotSet(ns);
+  }
+
+  // can a function with this schema be substituted for a function of rhs's
+  // schema and have the program typecheck?
+  // as_method - if true, treat this schema as a method and ignore
+  // the first argument, which will be the object in both cases
+  bool isSubtypeOf(const FunctionSchema& rhs, bool as_method, std::ostream* why_not = nullptr) const;
+};
+
+inline bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs) {
+  return lhs.name() == rhs.name()
+      && lhs.overload_name() == rhs.overload_name()
+      && lhs.arguments() == rhs.arguments()
+      && lhs.returns() == rhs.returns()
+      && lhs.is_vararg() == rhs.is_vararg()
+      && lhs.is_varret() == rhs.is_varret();
+}
+
+inline bool operator!=(const FunctionSchema& lhs, const FunctionSchema& rhs) {
+  return !(lhs == rhs);
+}
+
+// print out Argument, which is compatible with the FunctionSchema parser
+// full format: Type(alias)? name=default_value
+inline std::ostream& operator<<(std::ostream& out, const Argument& arg) {
+
+  // for adjusting the ? position.
+  // in the schema, we have Tensor?(a!) input, and t(a!)?.
+  // however, t?(a!) doesn't work with the schema parser,
+  // so we always use the Type(alias)? format.
+  // real_type versus fake_type: in order to be compatible with the
+  // FunctionSchema parser, printing an argument with either MemoryFormat or
+  // Layout type should give us the original schema string; hence we print
+  // the real_type.
+  auto type = arg.real_type();
+  bool is_opt = type->kind() == OptionalType::Kind;
+  auto unopt_type = is_opt ? type->castRaw<OptionalType>()->getElementType() : type;
+
+  if (unopt_type->kind() == ListType::Kind) {
+    // sized lists get size N from arg, not type
+    auto list = unopt_type->cast<c10::ListType>();
+    out << list->getElementType()->str();
+    if (arg.alias_info() && !arg.alias_info()->containedTypes().empty()) {
+      out << arg.alias_info()->containedTypes()[0];
+    }
+    std::string N = "";
+    if (arg.N()) {
+      N = std::to_string(*arg.N());
+    }
+    out << "[" << N << "]";
+  } else {
+    out << unopt_type->str();
+  }
+
+  // print alias info if it has beforeSets.
+  if (arg.alias_info() && !arg.alias_info()->beforeSets().empty()) {
+    out << *arg.alias_info();
+  }
+
+  if (is_opt) {
+    out << "?";
+  }
+
+  if (!arg.name().empty()) {
+    out << " " << arg.name();
+  }
+
+  if (arg.default_value()) {
+    out << "=";
+    if ((type->kind() == c10::TypeKind::StringType ||
+         unopt_type->kind() == c10::TypeKind::StringType) &&
+        arg.default_value().value().isString()) {
+      printQuotedString(out, arg.default_value().value().toStringRef());
+    } else if (type->kind() == TypeKind::ListType && type->castRaw<ListType>()->getElementType()->kind() == c10::TypeKind::IntType) {
+      // We want to faithfully replicate the JIT schema.
+      // in native_functions.yaml, defaults for int arrays with a single value
+      // always look like
+      //   int[2] stride=1
+      // instead of
+      //   int[2] stride=[1, 1]
+      auto default_val = arg.default_value().value().toIntList();
+      if (default_val.size() > 1) {
+        auto all_defaults_the_same = true;
+        for (const auto i : c10::irange(1, default_val.size())) {
+          if (default_val[0] != default_val[i]) all_defaults_the_same = false;
+        }
+        if (all_defaults_the_same) {
+          out << default_val[0];
+        } else {
+          out << arg.default_value().value();
+        }
+      } else {
+        out << arg.default_value().value();
+      }
+    } else {
+      out << arg.default_value().value();
+    }
+  }
+
+  return out;
+}
+
+inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema);
+
+inline std::string toString(const FunctionSchema& schema) {
+  std::ostringstream str;
+  str << schema;
+  return str.str();
+}
+
+} // namespace c10
+
+namespace std {
+template <>
+struct hash<c10::SchemaArgument> {
+  size_t operator()(const c10::SchemaArgument& arg) const {
+    return c10::hash_combine(std::hash<size_t>()(arg.index), std::hash<size_t>()(static_cast<size_t>(arg.type)));
+  }
+};
+template <>
+struct hash<c10::Argument> {
+  size_t operator()(const c10::Argument& arg) const {
+    auto hash = std::hash<std::string>{}(arg.name());
+    auto type_hash = std::hash<c10::TypePtr>{}(arg.type());
+    auto kwarg_only_hash = std::hash<bool>{}(arg.kwarg_only());
+    hash = c10::hash_combine(hash, type_hash);
+    hash = c10::hash_combine(hash, kwarg_only_hash);
+    // hashing optional fields if they exist
+    if (arg.default_value()) {
+      auto default_value_hash = c10::hash<c10::IValue>{}(arg.default_value().value());
+      hash = c10::hash_combine(hash, default_value_hash);
+    }
+    if (arg.N()) {
+      auto N_hash = std::hash<int64_t>{}(*arg.N());
+      hash = c10::hash_combine(hash, N_hash);
+    }
+    if (arg.alias_info()) {
+      auto alias_info_hash = std::hash<c10::AliasInfo>{}(*arg.alias_info());
+      hash = c10::hash_combine(hash, alias_info_hash);
+    }
+    return hash;
+  }
+};
+template <>
+struct hash<c10::FunctionSchema> {
+  size_t operator()(const c10::FunctionSchema& schema) const {
+    auto hash = std::hash<c10::OperatorName>{}(schema.operator_name());
+    auto args_hash = c10::hash<std::vector<c10::Argument>>{}(schema.arguments());
+    auto returns_hash = c10::hash<std::vector<c10::Argument>>{}(schema.returns());
+    auto is_vararg_hash = std::hash<bool>{}(schema.is_vararg());
+    auto is_varret_hash = std::hash<bool>{}(schema.is_varret());
+    hash = c10::hash_combine(hash, args_hash);
+    hash = c10::hash_combine(hash, returns_hash);
+    hash = c10::hash_combine(hash, is_vararg_hash);
+    hash = c10::hash_combine(hash, is_varret_hash);
+    return hash;
+  }
+};
+} // namespace std
+
+
+#include <ATen/core/function_schema_inl.h> // IWYU pragma: keep
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..769ba38dc3770fc2d98e307b466f17cc4615fad2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _adaptive_avg_pool2d_out_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor adaptive_avg_pool2d_cpu(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor adaptive_avg_pool2d_cuda(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor adaptive_avg_pool2d_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor adaptive_avg_pool2d_quantized_cuda(const at::Tensor & self, at::IntArrayRef output_size);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e2829eaa3dfe255d555694ab36f29dbc5243296
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_batch_norm_impl_index_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_batch_norm_impl_index_backward_ops.h>
+
+namespace at {
+
+
+// aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
+    return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b0bacdad27ef87e0fe1075ba30d097d7ccc3209
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _conj(const at::Tensor & self);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5bba09c28ab025abe774d899e68f19924a7f914
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_dirichlet_grad_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_dense_backward_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_dense_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..590c77ac08dbdd1a6afdb0ef0f1bfb579b454d48
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_dense_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _embedding_bag_dense_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymInt, bool, int64_t, const c10::optional<at::Tensor> &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag_dense_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx);
+};
+
+struct TORCH_API _embedding_bag_dense_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymInt, bool, int64_t, const c10::optional<at::Tensor> &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_embedding_bag_dense_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_embedding_bag_dense_backward.out(Tensor grad, Tensor indices, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx, at::Tensor & out);
+};
+
+}} // namespace at::_ops
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional & per_sample_weights, int64_t padding_idx, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add.h new file mode 100644 index 0000000000000000000000000000000000000000..c721860797c2dbc9b7b2170fab0d1b896bf5666d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_add.h @@ -0,0 +1,101 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_add.Scalar(Tensor[] self, Scalar scalar) -> Tensor[] +inline ::std::vector _foreach_add(at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add_Scalar::call(self, scalar); +} + +// aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () +inline void _foreach_add_(at::TensorList self, const at::Scalar & scalar) { + return at::_ops::_foreach_add__Scalar::call(self, scalar); +} + +// aten::_foreach_add.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[] +inline ::std::vector _foreach_add(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_List::call(self, other, alpha); +} + +// aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () +inline void _foreach_add_(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add__List::call(self, other, alpha); +} + +// aten::_foreach_add.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[] +inline ::std::vector _foreach_add(at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_add_ScalarList::call(self, scalars); +} + +// aten::_foreach_add_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> () +inline void _foreach_add_(at::TensorList self, at::ArrayRef scalars) { + return at::_ops::_foreach_add__ScalarList::call(self, scalars); +} + +// aten::_foreach_add.Tensor(Tensor[] self, Tensor other, *, Scalar alpha=1) -> Tensor[] +inline ::std::vector _foreach_add(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add_Tensor::call(self, other, alpha); +} + +// aten::_foreach_add_.Tensor(Tensor(a!)[] self, Tensor other, *, Scalar alpha=1) -> () +inline void _foreach_add_(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) { + return at::_ops::_foreach_add__Tensor::call(self, other, alpha); +} + +// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> () +inline void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) { + return 
+
+// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
+}
+// aten::_foreach_add.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_add_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+    return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
+}
+
+// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
+inline void _foreach_add_out(at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
+}
+// aten::_foreach_add.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
+inline void _foreach_add_outf(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
+    return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
+}
+
+// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_add_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
+}
+// aten::_foreach_add.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_add_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+    return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
+}
+
+// aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
+inline void _foreach_add_out(at::TensorList out, at::TensorList self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out);
+}
+// aten::_foreach_add.Tensor_out(Tensor[] self, Tensor other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
+inline void _foreach_add_outf(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) {
+    return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out);
+}
+
+}
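The _foreach_* family applies one elementwise op across a whole list of tensors per call; the CUDA kernels batch the work into far fewer launches than a loop of single-tensor adds, which is why optimizers use them. Usage sketch with placeholder tensors:

std::vector<at::Tensor> params = {at::zeros({2}), at::zeros({3})};
std::vector<at::Tensor> grads  = {at::ones({2}), at::ones({3})};
// Functional form: returns a new vector of results.
std::vector<at::Tensor> stepped = at::_foreach_add(params, grads, /*alpha=*/-0.1);
// In-place form (trailing underscore): mutates `params` directly.
at::_foreach_add_(params, grads, /*alpha=*/-0.1);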
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..13b1f5ff77cf54134b9298ac643d456a71523926
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_exp_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_exp_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_exp_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b2f6197eee49d9f28c3b81431eef18699efbdaf
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, at::TensorList weights);
+TORCH_API void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, at::TensorList weights);
+TORCH_API ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
+TORCH_API void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_bin_edges_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_bin_edges_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a03f50ccf20b2fefcc7f3b6172a1b477240fc67
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_histogramdd_bin_edges_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _histogramdd_bin_edges_out(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> histogramdd_bin_edges(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1bd3e5d06d8ef622644e261b6fc91f18d3f338f1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _make_dual_copy_out(const at::Tensor & primal, const at::Tensor & tangent, int64_t level, at::Tensor & out);
+TORCH_API at::Tensor _make_dual_copy(const at::Tensor & primal, const at::Tensor & tangent, int64_t level);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose.h
new file mode 100644
index 0000000000000000000000000000000000000000..5d6950760d7e34622dc4fd1139ace0f23f501c9b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_mkldnn_transpose.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_mkldnn_transpose_ops.h>
+
+namespace at {
+
+
+// aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
+inline at::Tensor _mkldnn_transpose(const at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::_mkldnn_transpose::call(self, dim0, dim1);
+}
+
+// aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
+inline at::Tensor & _mkldnn_transpose_(at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1);
+}
+
+// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mkldnn_transpose_out(at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::_mkldnn_transpose_out::call(self, dim0, dim1, out);
+}
+// aten::_mkldnn_transpose.out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mkldnn_transpose_outf(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
+    return at::_ops::_mkldnn_transpose_out::call(self, dim0, dim1, out);
+}
+
+}
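Every Function.h header follows this shape: thin inline wrappers (functional, in-place `_`, and `_out`/`_outf`) that all forward to a single at::_ops record. These are therefore two routes to the same dispatcher entry (illustrative only; _mkldnn_transpose additionally requires an MKL-DNN-backed input at runtime):

at::Tensor y1 = at::_mkldnn_transpose(t, 0, 1);              // inline wrapper from this header
at::Tensor y2 = at::_ops::_mkldnn_transpose::call(t, 0, 1);  // what the wrapper expands to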
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a4b019d6267307307b9e95d5e1b2508384e6d23
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _nested_tensor_from_tensor_list(at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt);
+TORCH_API at::Tensor & _nested_tensor_from_tensor_list_out(at::Tensor & out, at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt);
+TORCH_API at::Tensor & _nested_tensor_from_tensor_list_outf(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..aded764013047f3244c255d21c8cd4581020a0fe
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _nested_tensor_from_tensor_list(at::TensorList list, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<at::Layout> layout=c10::nullopt, c10::optional<at::Device> device=c10::nullopt, c10::optional<bool> pin_memory=c10::nullopt);
+TORCH_API at::Tensor & _nested_tensor_from_tensor_list_out(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, at::Tensor & out);
+} // namespace native
+} // namespace at
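Factory-style operators spell out the four TensorOptions fields as separate c10::optional parameters; passing c10::nullopt leaves a field at its default. A sketch with placeholder inputs:

std::vector<at::Tensor> ts = {at::rand({2, 3}), at::rand({4, 3})};
// Fix only the dtype; layout, device, and pin_memory stay defaulted.
at::Tensor nested = at::_nested_tensor_from_tensor_list(
    ts, at::kFloat, c10::nullopt, c10::nullopt, c10::nullopt);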
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5e60b7ffb3ce4d6995b796085283fb862a4e780
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dd2795ecb91fc8a99375fbb77cd256229c478a5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor _softmax(const at::Tensor & self, int64_t dim, bool half_to_float);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..97d6f8e304d423c9728fe445951e1b99f5755871
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _test_string_default {
+  using schema = at::Tensor (const at::Tensor &, c10::string_view, c10::string_view);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_string_default")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_string_default(Tensor dummy, str a=\"\\\"'\\\\\", str b='\"\\'\\\\') -> Tensor")
+  static at::Tensor call(const at::Tensor & dummy, c10::string_view a, c10::string_view b);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, c10::string_view a, c10::string_view b);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..08f6bda35740425be370ca46f6e0058089037563
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_backward_impl_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _thnn_fused_lstm_cell_backward_impl_out(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl_cuda(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_view_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_view_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..08ebff8f8d120a1c702011dee27079572c3dfaec
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_view_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _unsafe_view {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unsafe_view")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unsafe_view(Tensor self, SymInt[] size) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size);
+};
+
+struct TORCH_API _unsafe_view_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unsafe_view")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
+};
+
+}} // namespace at::_ops
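Operator.h structs are the lowest-level C++ surface of an operator: `call` runs full dispatch, `redispatch` takes an explicit DispatchKeySet (used by kernels to forward past their own key), and `name`/`overload_name`/`schema_str` carry the registration metadata. Sketch of a direct `call`:

at::Tensor t = at::rand({2, 6});
std::vector<c10::SymInt> size = {3, 4};
// Equivalent to at::_unsafe_view(t, {3, 4}), minus the convenience wrapper.
at::Tensor v = at::_ops::_unsafe_view::call(t, size);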
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..030f463b2347802cb42dd2feeba397c77cc42666
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bicubic2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
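Size-taking ops come in paired int/SymInt flavors: the `_symint` form is what symbolic tracing and torch.compile use, while the IntArrayRef form is sugar for concrete sizes. A sketch on the meta device, where kernels only propagate metadata (the behavior of direct at::meta:: calls is an assumption here, not documented by this header):

at::Tensor img = at::empty({1, 3, 8, 8}, at::device(at::kMeta));
auto a = at::meta::_upsample_bicubic2d_aa(img, {16, 16}, /*align_corners=*/false);
std::vector<c10::SymInt> sym = {16, 16};
auto b = at::meta::_upsample_bicubic2d_aa_symint(img, sym, /*align_corners=*/false);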
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_differentiable_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_differentiable_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..13fd0436792b73a50d6dfc6e55031571a2c72086
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_differentiable_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f38ef3f53c5cdf8ee361c43897e2ca5aa78c9c6a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> adaptive_max_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out, at::Tensor & indices);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c70954c94dbbb9e2793741fefaf1919b021dd8c3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/alias_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API alias {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::alias")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "alias(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors.h
new file mode 100644
index 0000000000000000000000000000000000000000..83b77239b765f7b1c81b4d422e5abe1fb9367d7e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/align_tensors.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/align_tensors_ops.h>
+
+namespace at {
+
+
+// aten::align_tensors(Tensor[] tensors) -> Tensor[]
+inline ::std::vector<at::Tensor> align_tensors(at::TensorList tensors) {
+    return at::_ops::align_tensors::call(tensors);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c527d5337556e55f4c3eaf915756df260a8cfda2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API arccosh {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arccosh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arccosh(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API arccosh_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arccosh_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arccosh_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API arccosh_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arccosh")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts.h
new file mode 100644
index 0000000000000000000000000000000000000000..12f9459dbfbbfb35554f05babcb35ddcde0b5efb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_gather_stats_with_counts.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/batch_norm_gather_stats_with_counts_ops.h>
+
+namespace at {
+
+
+// aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
+    return at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
+}
+
+// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
+    return at::_ops::batch_norm_gather_stats_with_counts_out::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
+}
+// aten::batch_norm_gather_stats_with_counts.out(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_gather_stats_with_counts_outf(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::batch_norm_gather_stats_with_counts_out::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts, out0, out1);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b39f4565d6f2198bdc2b2cd168055fce1091e3d7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_outf(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..74958804b3329861f71999c15279ea78ab449482
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_or_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor bitwise_or(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_or_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
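The at::meta:: namespace holds the shape/dtype-inference halves of structured kernels: on meta-device tensors they produce outputs with correct metadata and no storage. A hedged sketch of the behavior these declarations back:

at::Tensor a = at::empty({4, 1}, at::device(at::kMeta).dtype(at::kInt));
at::Tensor b = at::empty({1, 5}, at::device(at::kMeta).dtype(at::kInt));
// Dispatches to the meta kernel declared above: broadcasts to {4, 5}, computes nothing.
at::Tensor c = at::bitwise_or(a, b);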
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag.h
new file mode 100644
index 0000000000000000000000000000000000000000..44347049d91a9c3446c4927ce2dfcc57b5b96f4d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/block_diag_ops.h>
+
+namespace at {
+
+
+// aten::block_diag(Tensor[] tensors) -> Tensor
+inline at::Tensor block_diag(at::TensorList tensors) {
+    return at::_ops::block_diag::call(tensors);
+}
+
+// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & block_diag_out(at::Tensor & out, at::TensorList tensors) {
+    return at::_ops::block_diag_out::call(tensors, out);
+}
+// aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & block_diag_outf(at::TensorList tensors, at::Tensor & out) {
+    return at::_ops::block_diag_out::call(tensors, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bfc3a49df65860e9c885dc2a3a451e1c4ff73aac
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor coalesce(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..53ebe877957183a540ca9b833f0a27e460f3b903
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/copysign_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor copysign(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & copysign_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & copysign_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & copysign_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..def87f8d171f5096fcb5989ac8fed2a60a0d6068
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/dsplit_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::vector<at::Tensor> dsplit(const at::Tensor & self, int64_t sections);
+TORCH_API ::std::vector<at::Tensor> dsplit(const at::Tensor & self, at::IntArrayRef indices);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6846fbf7213f906f92e013fccd278e02d20408c8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/empty_quantized_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & empty_quantized_out(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+TORCH_API at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5de55711b372a748f094271b769672b46a4500f2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor exp2(const at::Tensor & self);
+TORCH_API at::Tensor & exp2_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & exp2_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & exp2_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d4755b1a20f34e35b0efff99d5878c735fb4030f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor expm1(const at::Tensor & self);
+TORCH_API at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & expm1_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ccec06658d996ab665d2ffbe4b1e6e146eca30a1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e3465a8e6f977747e7299f5f680d3293d52b7586
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fbgemm_linear_fp16_weight {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fbgemm_linear_fp16_weight")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias);
+};
+
+}} // namespace at::_ops
norm=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm); +}; + +struct TORCH_API fft_irfft2_out { + using schema = at::Tensor & (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_irfft2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5615ab89f3eaf2e83c745e344afeb74c8bd4ffd4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, c10::optional generator=c10::nullopt); + +} // namespace cpu +} // namespace at diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f21daeb1bfe50734f5646f253d62d86a33406c04 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f21daeb1bfe50734f5646f253d62d86a33406c04
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/i0_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor i0(const at::Tensor & self);
+TORCH_API at::Tensor & i0_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & i0_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & i0_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8840e6f40e210e20efc16443bc127ae901101f5a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor index_copy(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source);
+TORCH_API at::Tensor & index_copy_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6327ea0294cb9d1834af9ac0b19a6f522fab99b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_inv_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API linalg_inv {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_inv")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_inv(Tensor A) -> Tensor")
+  static at::Tensor call(const at::Tensor & A);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A);
+};
+
+struct TORCH_API linalg_inv_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_inv")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & A, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & A, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c9fc3c9067ac995e6eafe0a9077bf871d331c6a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_solve_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_linalg_lu_solve : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint);
+};
+
+} // namespace native
+} // namespace at
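
Each operator overload in these *_ops.h headers becomes one struct in at::_ops: call() enters the dispatcher from the top, while redispatch() re-enters it below the keys recorded in the supplied DispatchKeySet (used by kernels that need to fall through to the next backend). A minimal sketch of the call() path, which is what the public at::linalg_inv wrapper reduces to:

    #include <ATen/ATen.h>
    #include <ATen/ops/linalg_inv_ops.h>

    at::Tensor invert(const at::Tensor& A) {
      // Full dispatch on A's dispatch key set; equivalent to at::linalg_inv(A).
      return at::_ops::linalg_inv::call(A);
    }
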
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..31eeef428664ed3588c8f0cb22aea6f5ef8e60bc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..9b72584abe9c4c4ce17c78d504d813f79c581212
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & mkldnn_max_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+TORCH_API at::Tensor mkldnn_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f92ccf86e7112333ece9581eedf6dce66b943303
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/new_empty_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor new_empty(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={});
+TORCH_API at::Tensor new_empty(const at::Tensor & self, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor new_empty_symint(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={});
+TORCH_API at::Tensor new_empty_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor & new_empty_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size);
+TORCH_API at::Tensor & new_empty_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out);
+TORCH_API at::Tensor & new_empty_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size);
+TORCH_API at::Tensor & new_empty_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
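
The two new_empty overload families above differ only in how tensor options travel: one packs dtype/layout/device/pin_memory into a single at::TensorOptions, the other spells the four optionals out (the form the dispatcher actually registers). A usage sketch of the packed form (shape and dtype are illustrative):

    #include <ATen/ATen.h>

    at::Tensor scratch_like(const at::Tensor& self) {
      // Same device as self unless overridden; here only the dtype is pinned down.
      return self.new_empty({2, 3}, at::TensorOptions().dtype(at::kFloat));
    }
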
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_backward_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f06a25e37c7a1f0a09312ae28ce101e64d79cdd6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_backward_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
+TORCH_API at::Tensor nll_loss2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight);
+TORCH_API at::Tensor & nll_loss2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
+TORCH_API at::Tensor & nll_loss2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input);
+TORCH_API at::Tensor & nll_loss2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight);
+TORCH_API at::Tensor & nll_loss2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
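
Operators whose schema mentions SymInt are emitted twice: a plain overload taking concrete int64_t values and a _symint overload taking c10::SymInt, which may carry a symbolic expression under tracing or torch.compile. A small sketch of moving between the two representations:

    #include <c10/core/SymInt.h>

    void symint_roundtrip() {
      c10::SymInt ignore_index{255};                 // implicit from int64_t
      int64_t concrete = ignore_index.expect_int();  // asserts it is not symbolic
      (void)concrete;
    }
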
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..873278408f909df0623739937d9a66bb7d1c1244
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor poisson(const at::Tensor & self, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/put_compositeexplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/put_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb27527d7cbe742badf62d884dc10a5842dfac88
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/put_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor put(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false);
+TORCH_API at::Tensor & put_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false);
+TORCH_API at::Tensor & put_outf(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..29e84eacb48864b14007685ce56ecdafeaa08d7c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/reflection_pad1d_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_reflection_pad1d_backward_out_cpu : public at::meta::structured_reflection_pad1d_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_reflection_pad1d_backward_out_cuda : public at::meta::structured_reflection_pad1d_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
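
The *_native.h headers for structured operators declare per-device impl classes that subclass a meta class such as structured_reflection_pad1d_backward above: meta() validates inputs and declares the output, impl() fills it in. A schematic of the pattern only, using a hypothetical operator name (my_op is not a real ATen op):

    #include <ATen/TensorMeta.h>

    // Hypothetical illustration of the structured-kernel split.
    struct structured_my_op : public at::impl::MetaBase {
      void meta(const at::Tensor& self);  // shape checks, declares the output
    };
    struct structured_my_op_out_cpu : structured_my_op {
      void impl(const at::Tensor& self, const at::Tensor& out);  // CPU kernel body
    };
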
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fefdb41e28e893b394bcc3bd5145148e29dd3e76
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_backward_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor reflection_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor reflection_pad2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
+TORCH_API at::Tensor & reflection_pad2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/remainder_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/remainder_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3c6bcbfc97fb839cb850562c37df29668341c78c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/remainder_cpu_dispatch.h
@@ -0,0 +1,27 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor remainder(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & remainder_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & remainder_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & remainder_(at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor remainder(const at::Scalar & self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..38f4925d4e1b591f7596a9eff97b97c3c2e4327c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor rrelu_with_noise(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & rrelu_with_noise_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & rrelu_with_noise_outf(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor & rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..92d2feae0e4b84e1f5e95eddbe7e18ea78c3ed57
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API scaled_dot_product_attention {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, double, bool, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::scaled_dot_product_attention")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, c10::optional<double> scale);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, c10::optional<double> scale);
+};
+
+}} // namespace at::_ops
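
The schema string above is the source of truth for the public C++ wrapper, so its defaults carry over directly. A minimal sketch (q, k, v shapes are up to the caller; all schema defaults accepted):

    #include <ATen/ATen.h>

    at::Tensor attend(const at::Tensor& q, const at::Tensor& k, const at::Tensor& v) {
      // No mask, dropout_p=0.0, is_causal=false, scale unset, as in the schema.
      return at::scaled_dot_product_attention(q, k, v);
    }
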
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_compositeexplicitautogradnonfunctional_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4561c1f978ecb362ec541c70cddda43556fcfe87
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce);
+TORCH_API at::Tensor scatter(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce);
+TORCH_API at::Tensor & scatter_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/select_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/select_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b0b86a0b83348bbe186cdbad1017e8364ce32c24
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/select_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API select_Dimname {
+  using schema = at::Tensor (const at::Tensor &, at::Dimname, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::select")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, at::Dimname dim, int64_t index);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, int64_t index);
+};
+
+struct TORCH_API select_int {
+  using schema = at::Tensor (const at::Tensor &, int64_t, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::select")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, c10::SymInt index);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f2806edb8eda61529f9f3311d43f98a76c16c41
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_bessel_j1(const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_j1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_j1_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
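
The overload_name constant is what turns one schema name into distinct structs: aten::select with overload "int" becomes at::_ops::select_int, while the Dimname overload becomes at::_ops::select_Dimname. A sketch of addressing one overload explicitly:

    #include <ATen/ATen.h>
    #include <ATen/ops/select_ops.h>

    at::Tensor first_row(const at::Tensor& t) {
      // "aten::select.int" resolved by hand; at::select(t, 0, 0) does the same.
      return at::_ops::select_int::call(t, /*dim=*/0, /*index=*/c10::SymInt(0));
    }
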
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5d3dfa23d14597b21155b15f3f962addd085d9df
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_bessel_y1(const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_y1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_y1_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..77b0df6f8a77bb21916b9bcf060e9623b85c91e1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_log_ndtr {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_log_ndtr")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_log_ndtr(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API special_log_ndtr_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_log_ndtr")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_log_ndtr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..94e2978df3a17f278bf40e63c79e54b0bcc10fd7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/special_modified_bessel_i0_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_special_modified_bessel_i0_out : public at::meta::structured_special_modified_bessel_i0 {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k1_cpu_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k1_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..37874557bb2d137e985714ec722e6e041e6816bd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k1_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_modified_bessel_k1(const at::Tensor & self);
+TORCH_API at::Tensor & special_modified_bessel_k1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_modified_bessel_k1_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_copy.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..e470c71b9b423add52c06a514530969aee54a200
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/squeeze_copy.h
@@ -0,0 +1,67 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/squeeze_copy_ops.h>
+
+namespace at {
+
+
+// aten::squeeze_copy(Tensor self) -> Tensor
+inline at::Tensor squeeze_copy(const at::Tensor & self) {
+    return at::_ops::squeeze_copy::call(self);
+}
+
+// aten::squeeze_copy.dim(Tensor self, int dim) -> Tensor
+inline at::Tensor squeeze_copy(const at::Tensor & self, int64_t dim) {
+    return at::_ops::squeeze_copy_dim::call(self, dim);
+}
+
+// aten::squeeze_copy.dims(Tensor self, int[] dim) -> Tensor
+inline at::Tensor squeeze_copy(const at::Tensor & self, at::IntArrayRef dim) {
+    return at::_ops::squeeze_copy_dims::call(self, dim);
+}
+
+// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & squeeze_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::squeeze_copy_out::call(self, out);
+}
+// aten::squeeze_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & squeeze_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::squeeze_copy_out::call(self, out);
+}
+
+// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & squeeze_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim) {
+    return at::_ops::squeeze_copy_dim_out::call(self, dim, out);
+}
+// aten::squeeze_copy.dim_out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & squeeze_copy_outf(const at::Tensor & self, int64_t dim, at::Tensor & out) {
+    return at::_ops::squeeze_copy_dim_out::call(self, dim, out);
+}
+
+// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & squeeze_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim) {
+    return at::_ops::squeeze_copy_dims_out::call(self, dim, out);
+}
+// aten::squeeze_copy.dims_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & squeeze_copy_outf(const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) {
+    return at::_ops::squeeze_copy_dims_out::call(self, dim, out);
+}
+
+}
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d6bfb5751adb98ef6a18cb7ea2aef898e6ecf16
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> std_mean(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f05972c32a4a7aa87f61f55cf2c2bbbd8d7301a2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/swapaxes_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor swapaxes(const at::Tensor & self, int64_t axis0, int64_t axis1);
+TORCH_API at::Tensor & swapaxes_(at::Tensor & self, int64_t axis0, int64_t axis1);
+
+} // namespace compositeimplicitautograd
+} // namespace at
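
The squeeze_copy family mirrors squeeze but always materializes a fresh tensor instead of returning a view, which is what the *_copy suffix means throughout these headers. A usage sketch:

    #include <ATen/ATen.h>

    at::Tensor drop_unit_dims(const at::Tensor& t) {
      // Safe to mutate afterwards: the result never aliases t's storage.
      return at::squeeze_copy(t);
    }
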
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/swapaxes_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/swapaxes_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..234e116ef1d9038331211c49a8d133e82fee0a19
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/swapaxes_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API swapaxes {
+  using schema = at::Tensor (const at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::swapaxes")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, int64_t axis0, int64_t axis1);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t axis0, int64_t axis1);
+};
+
+struct TORCH_API swapaxes_ {
+  using schema = at::Tensor & (at::Tensor &, int64_t, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::swapaxes_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, int64_t axis0, int64_t axis1);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, int64_t axis0, int64_t axis1);
+};
+
+}} // namespace at::_ops
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/triu_cuda_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/triu_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a1d23e298a1571c122ee3400c39e54d17f55167
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/triu_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor triu(const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & triu_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & triu_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out);
+TORCH_API at::Tensor & triu_(at::Tensor & self, int64_t diagonal=0);
+
+} // namespace cuda
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unique_consecutive_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unique_consecutive_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cb8b2291e20b8b75ff3dbf01e0cef04686ac413c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unique_consecutive_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API unique_consecutive {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, bool, bool, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unique_consecutive")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim);
+};
+
+struct TORCH_API unique_consecutive_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, bool, bool, c10::optional<int64_t>, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::unique_consecutive")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+};
+
+}} // namespace at::_ops
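
Both unique_consecutive overloads return all three tensors regardless of the flags; the flags only control whether inverse indices and counts are actually computed. A sketch of consuming the tuple:

    #include <ATen/ATen.h>

    void run_lengths(const at::Tensor& t) {
      auto [values, inverse, counts] = at::unique_consecutive(
          t, /*return_inverse=*/false, /*return_counts=*/true);
      // values: deduplicated runs; counts: length of each run; inverse is
      // an empty tensor here because return_inverse is false.
    }
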
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_compositeimplicitautograd_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ead0429070f1744dca3fdce954e8dc2ab07d1eb0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unsafe_chunk_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::vector<at::Tensor> unsafe_chunk(const at::Tensor & self, int64_t chunks, int64_t dim=0);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy_compositeexplicitautogradnonfunctional_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1cca8aab39827c6524f4742bb07c0c9a378848b0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor unsqueeze_copy(const at::Tensor & self, int64_t dim);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_native.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a632a6a45aa97db338f13afb38cd7a121f894b8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_linear1d_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_upsample_linear1d_backward_out_cpu : public at::meta::structured_upsample_linear1d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, c10::optional<double> scales, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_upsample_linear1d_backward_out_cuda : public at::meta::structured_upsample_linear1d_backward {
+void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, bool align_corners, c10::optional<double> scales, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_compositeexplicitautogradnonfunctional_dispatch.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae102620ef89f1da3f70198b4ec304d6cb0d6606
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor upsample_nearest3d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_nearest3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2527686e79c8bc37a87b8f93d57b771262e0098
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/value_selecting_reduction_backward.h
@@ -0,0 +1,47 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/value_selecting_reduction_backward_ops.h>
+
+namespace at {
+
+
+// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
+inline at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, c10::fromIntArrayRefSlow(sizes), keepdim);
+  }
+}
+
+// aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, SymInt[] sizes, bool keepdim) -> Tensor
+inline at::Tensor value_selecting_reduction_backward_symint(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
+    return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
+  }
+}
+
+}
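
The at::symint wrappers in the file above use an enable_if template parameter so the caller chooses the concrete or symbolic flavor explicitly rather than relying on implicit conversions. A usage sketch (dim and sizes values are illustrative):

    #include <ATen/ATen.h>

    at::Tensor vsr_backward(const at::Tensor& grad, const at::Tensor& idx) {
      // int64_t selects the at::IntArrayRef signature; c10::SymInt would
      // select the c10::SymIntArrayRef one.
      return at::symint::value_selecting_reduction_backward<int64_t>(
          grad, /*dim=*/0, idx, /*sizes=*/{4, 4}, /*keepdim=*/false);
    }
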
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_ops.h b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..874e79c85c675eb3cb79f2259e11bfb2c983f7b0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API vstack {
+  using schema = at::Tensor (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::vstack")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "vstack(Tensor[] tensors) -> Tensor")
+  static at::Tensor call(at::TensorList tensors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+struct TORCH_API vstack_out {
+  using schema = at::Tensor & (at::TensorList, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::vstack")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(at::TensorList tensors, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors, at::Tensor & out);
+};
+
+}} // namespace at::_ops