diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h
new file mode 100644
index 0000000000000000000000000000000000000000..380943635ea352693a4b2e19e81f6a25143e3c36
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/alias_analysis.h
@@ -0,0 +1,322 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace torch {
+namespace jit {
+
+/**
+ * Alias analysis pass.
+ *
+ * This pass produces an AliasDb that contains aliasing and mutation
+ * information about the graph. Users can use this information to determine
+ * whether mutations to the graph are safe, i.e. they don't reorder/change
+ * nodes in a way that affects output.
+ *
+ * Every value with a mutable type (Tensors, Lists, Tuples, etc.) will be
+ * associated with one or more "alias sets". If two values share an alias set,
+ * that means they may alias, implying that a mutation to one value cannot be
+ * reordered past a use of the other. Only reordering two reads of an alias
+ * set is considered safe.
+ *
+ * There is a special alias set called the "wildcard set", which indicates
+ * that we're not sure what this value may alias. To be conservative, we
+ * consider the wildcard alias set as potentially aliasing any other wildcard
+ * value within the same type class. Whenever a value becomes contained by
+ * another value, such as when a Tensor is appended to a List[Tensor], the
+ * contained element becomes part of the wildcard set.
+ *
+ * Values that contain other mutable types, such as List[Tensor], are
+ * initialized as containing the wildcard set for all contained mutable types.
+ *
+ * The AliasDb API references the idea of "mutable" vs "immutable"
+ * types. "Mutable" means that the object's value can change, while
+ * "immutable" means that the value is fixed. (For example, `List` is
+ * mutable, so you can add and delete elements from it. On the other
+ * hand, you can't modify a Tuple once you create it, making `Tuple` an
+ * immutable container.)
+ *
+ * `isFrozen` - if the Module is frozen, then attributes are considered
+ * freshly created objects. The freezing API invokes alias analysis to check
+ * whether they are mutated internally.
+ *
+ * `descendFunctionCalls` - recursively analyze function and method calls
+ * instead of performing a conservative analysis. Generally, analysis should
+ * be done after inlining, so the implementation for recursive analysis is
+ * unoptimized.
+ */
+class AliasDb {
+ public:
+  TORCH_API explicit AliasDb(
+      std::shared_ptr<Graph> graphi,
+      bool isFrozen = false,
+      bool descendFunctionCalls = false);
+  TORCH_API ~AliasDb();
+
+  // There are limitations to what effects the alias analysis can track. Two
+  // kinds of nodes may have untracked effects:
+  // 1. Nodes that write to a value that may alias the graph inputs (since
+  //    the inputs can be used outside the graph).
+  // 2. Nodes that write to something in the wildcard set.
+  //
+  // These nodes are considered not safe to eliminate or mutate under any
+  // circumstances.
+  bool writesToWildcard(Node* n) const;
+
+  // Does `n` write to an alias of one of the values in `vs`?
+  // If `recurseBlocks` is true, consider writes on the nodes in `n`'s
+  // sub-blocks.
+  TORCH_API bool writesToAlias(Node* n, const ValueSet& vs) const;
+
+  // Do `a` and `b` potentially share a memory location, or does either
+  // hold in memory any element that exists in the other?
+  TORCH_API bool mayContainAlias(Value* a, Value* b) const;
+
+  TORCH_API bool mayContainAlias(Value* a, const at::ArrayRef<Value*> b) const;
+
+  // Do any values in group `a` share a memory location or hold in memory
+  // any element that exists in group `b`?
+  TORCH_API bool mayContainAlias(
+      const at::ArrayRef<Value*> a,
+      const at::ArrayRef<Value*> b) const;
+
+  // Do `a` and `b` potentially share a memory location?
+  TORCH_API bool mayAlias(const Value* a, const Value* b) const;
+  // Do any values in group `a` potentially share a memory location with any
+  // value in group `b`? i.e. may they overlap?
+  TORCH_API bool mayAlias(const ValueSet& a, const ValueSet& b) const;
+
+  // Do any nodes write to an alias set input to `n`?
+  TORCH_API bool hasInputWriters(const Node* n) const;
+
+  // Do any nodes write to an alias set output by `n`?
+  TORCH_API bool hasOutputWriters(const Node* n) const;
+
+  // Do any nodes write to an alias set input to or output by `n`?
+  TORCH_API bool hasWriters(const Node* n) const;
+
+  // Do any nodes write to `v`'s memory location?
+  TORCH_API bool hasWriters(const Value* v) const;
+
+  // Is the operation in-place? i.e. doesn't write anywhere but locations it
+  // reads from.
+  TORCH_API bool isMutable(Node* n) const;
+
+  TORCH_API bool escapesScope(const at::ArrayRef<Value*>& vs) const;
+
+  // Is it safe to change whether `a` and `b` alias each other?
+  TORCH_API bool safeToChangeAliasingRelationship(
+      const at::ArrayRef<Value*>& a,
+      const at::ArrayRef<Value*>& b) const;
+
+  // Move `n` (already in the graph) after `movePoint` in the topological
+  // order.
+  //
+  // Tries to preserve value dependencies, so other nodes might be moved. We
+  // make two guarantees about the postcondition of the node list:
+  //   - `n` is directly after `movePoint`.
+  //   - only nodes between `n` and `movePoint` have been moved.
+  //
+  // Returns `false` if it's impossible to move `n` after `movePoint` without
+  // violating dependencies, otherwise executes the move and returns `true`.
+  TORCH_API bool moveAfterTopologicallyValid(Node* n, Node* movePoint);
+  TORCH_API bool moveBeforeTopologicallyValid(Node* n, Node* movePoint);
+
+  bool couldMoveAfterTopologically(Node* n, Node* movePoint);
+  bool couldMoveBeforeTopologically(Node* n, Node* movePoint);
+
+  // For debugging: print alias db state to stdout
+  TORCH_API void dump() const;
+  TORCH_API std::string toString() const;
+
+  // Generates a DOT (www.graphviz.org) graph representation.
+  //
+  // Returns `true` if the output file was successfully generated.
+  //
+  // WARNING: The output dot file path can't include shell-specific notations;
+  // for example you can't use "~/temp/aliasdb.dot"
+  // (instead, use "/home/user/temp/aliasdb.dot").
+  TORCH_API bool dumpToGraphvizFile(const char* filename) const;
+  TORCH_API std::string toGraphviz() const;
+
+  // Returns `true` if the given element is mutable or if it is a
+  // container type with an internal mutable element (e.g.
+  // `Tuple[int, Tensor]` has an internal mutable type `Tensor`, so
+  // it would be considered a "mutable type" in AliasDb).
+  static bool isMutableType(const Value* v);
+  static bool isMutableType(const TypePtr& type);
+
+  /**
+   * Mutation API
+   *
+   * These methods allow you to update AliasDb in-place if you are performing
+   * graph mutation.
+   *
+   * WARNING: These methods should be considered INTERNAL. They do not perform
+   * very many correctness checks; the user is responsible for making sure
+   * they are updating AliasDb correctly. `Lint()`ing the AliasDb can help
+   * with this.
+   */
+  // Copy `existing`'s aliasing info to `new_value`, and remove `existing`.
+  TORCH_API void replaceWithNewValue(Value* existing, Value* new_value);
+  // Copy `from`'s aliasing info to `to`.
+  TORCH_API void copyValue(Value* from, Value* to);
+  // Create a new `value` that does not alias anything else.
+  TORCH_API void createValue(const Value* value);
+
+  // Enable more precise treatment of prim::TupleConstruct.
+  void enablePreciseTupleContainerAnalysis();
+
+  friend struct MutationRemover;
+
+ private:
+  // Helper for topologically-safe node moves.
+  class WorkingSet;
+  enum class MoveSide { BEFORE, AFTER };
+  bool tryMove(Node* toMove, Node* movePoint, MoveSide moveSide, bool dryRun);
+  void move(Node* toMove, Node* movePoint, MoveSide moveSide);
+  bool isBeforeOrAfter(const Node* n, MoveSide moveSide) const;
+
+  bool isMutableTypeInternal(const Value* v) const;
+  bool isMutableTypeInternal(const TypePtr& type) const;
+
+  /**
+   * Write and read internal API
+   */
+  // Get all the values that `n` writes to.
+  // NOTE: this only returns values directly written to, not aliases thereof.
+  //
+  // If `recurseBlocks` is true, gather writes on the nodes in `n`'s
+  // sub-blocks.
+  MemoryLocations getWrites(Node* n) const;
+  void getWritesImpl(Node* n, MemoryLocations& ret) const;
+  // Register the fact that `n` writes to `v`.
+  void registerWrite(const Value* v, Node* n, bool writeToContained = false);
+  // Get all the values that `n` reads from.
+  // If `recurseBlocks` is true, gather reads on the nodes in `n`'s
+  // sub-blocks.
+  MemoryLocations getReads(Node* n) const;
+  void getReadsImpl(Node* n, MemoryLocations& ret) const;
+
+  /**
+   * Wildcard methods
+   */
+  // Register `v` as a wildcard value.
+  c10::optional<Element*> setWildcard(const Value* v);
+
+  // Is this a value which will not alias?
+  bool nonAliasingValue(const Value* elem) const;
+
+  /**
+   * Special analysis methods
+   */
+  void analyze(const std::shared_ptr<Graph>& graph);
+  void analyze(Block* block);
+  void analyze(Node* node);
+  void analyzeImpl(Node* node);
+  void analyzeIf(Node* node);
+  void analyzeLoop(Node* node);
+  void analyzeSubgraph(Node* node, std::shared_ptr<Graph> subgraph);
+  void analyzeSubgraph(Node* node);
+  void analyzeCreator(Node* node);
+  void analyzeExtractor(Node* node);
+  void analyzeChunk(Node* node);
+  void analyzeBroadcastingChunk(Node* node);
+  void analyzeFork(Node* node);
+  void analyzeWait(Node* node);
+  void analyzeAwaitable(Node* node);
+  void analyzeAwaitableWait(Node* node);
+  void analyzeRpcAsync(Node* node);
+  void analyzeBatchNorm(Node* node);
+  void analyzeInstanceNorm(Node* node);
+  void analyzeGradOf(Node* node);
+  void analyzeSetAttr(Node* node);
+  void analyzeConservative(Node* node);
+  void analyzeContainerConstruct(Node* node);
+  bool tryRegisteredAnalysis(Node* node);
+
+  /**
+   * Alias manipulation methods
+   */
+  void makeAllAlias(const std::vector<Value*>& values);
+  void makePointerTo(const Value* value, const Value* to);
+  TORCH_API void addToContainedElements(
+      const Value* element,
+      const Value* container);
+  void mapAliases(at::ArrayRef<Value*> to, at::ArrayRef<Value*> from);
+  void giveFreshAlias(
+      const Value* value,
+      bool add_wildcard_to_contained_elems = true);
+  Element* getOrCreateElement(const Value* value);
+
+  const AliasTypeSet* mapTypeToAliasTypeSetPtr(const TypePtr& type) const;
+  bool functionalNonEscapingListUse(const Use& use) const;
+  bool functionalNonEscapingTupleUse(const Use& use) const;
+
+  std::shared_ptr<Graph> graph_;
+
+  // If the Module is frozen, then attributes are considered freshly created
+  // objects. The freezing API invokes alias analysis to check whether they
+  // are mutated internally.
+  bool isFrozen_;
+
+  bool descend_function_calls_;
+  std::unordered_map<Graph*, std::vector<std::shared_ptr<Graph>>>
+      function_call_copies_;
+
+  // The points-to graph that stores aliasing relationships
+  std::unique_ptr<MemoryDAGBuilder> memoryDAGBuilder_;
+  std::unique_ptr<MemoryDAG> memoryDAG_;
+
+  // Mapping of values to MemoryDAG elements
+  ska::flat_hash_map<const Value*, Element*> elementMap_;
+  // All wildcard Elements (one for each unique mutable type)
+  ska::flat_hash_map<TypePtr, Element*, HashType, EqualType> wildcardIndex_;
+  Element* getWildcard(const TypePtr& type) const;
+  c10::optional<Element*> tryGetOrCreateWildcard(const TypePtr& type);
+  void addContainedTypesToFreshElement(
+      Element* container_elem,
+      const AliasTypeSet& mut_types);
+  void pointUnionTypeElementToAllContainedTypes(
+      Element* container_elem,
+      const AliasTypeSet& mut_types);
+
+  std::vector<Element*> getElements(at::ArrayRef<Value*> vs) const;
+  bool mayAliasWildcard(const Value* v) const;
+  bool mayAliasWildcard(const at::ArrayRef<Value*> vs) const;
+  bool hasWriters(const at::ArrayRef<Value*>& values) const;
+
+  // Cached mapping of type ptrs to their mutable types
+  mutable ska::flat_hash_map<TypePtr, AliasTypeSet> mapped_mutable_types_;
+
+  /**
+   * State for tracking write info.
+   */
+  // Write registry where the analysis can record the writes as it sees them.
+  // This information is later denormalized into various caches to improve
+  // query efficiency.
+  struct WriteRegistry;
+  std::unique_ptr<WriteRegistry> writeRegistry_;
+
+  // Map of nodes to the memory locations that they write to
+  using TWriteIndex = ska::flat_hash_map<Node*, MemoryLocations>;
+  c10::optional<TWriteIndex> writeIndex_;
+  // Collection of all memory locations that are written to.
+  c10::optional<MemoryLocations> writtenToLocationsIndex_;
+  void buildWrittenToLocationsIndex();
+
+  std::unordered_set<const Value*> wildcards_;
+
+  std::string getElementName(const Element* e) const;
+
+  friend void Lint(const AliasDb* db);
+};
+
+// Helper check that invariants over AliasDb are maintained.
+// Useful if you are using the AliasDb mutation API and want to check you did
+// the right thing.
+TORCH_API void Lint(const AliasDb* db);
+
+} // namespace jit
+} // namespace torch
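From a pass author's point of view, the usual pattern is to construct an AliasDb once and consult it before reordering or deleting nodes. A minimal sketch of that pattern (not part of this header; the `canSwap` helper and its arguments are hypothetical):

```cpp
#include <torch/csrc/jit/ir/alias_analysis.h>

// Ask AliasDb whether `n` can safely be moved directly after `movePoint`.
bool canSwap(std::shared_ptr<torch::jit::Graph> graph,
             torch::jit::Node* n,
             torch::jit::Node* movePoint) {
  torch::jit::AliasDb db(std::move(graph));
  // Refuse to touch nodes whose outputs may be mutated elsewhere.
  if (db.hasWriters(n)) {
    return false;
  }
  // Performs the move only if value dependencies allow it.
  return db.moveAfterTopologicallyValid(n, movePoint);
}
```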
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/attributes.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/attributes.h
new file mode 100644
index 0000000000000000000000000000000000000000..c053fc9d8d9d231425c20b389c71559b87a422d8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/attributes.h
@@ -0,0 +1,184 @@
+#pragma once
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+
+namespace torch {
+namespace jit {
+
+using ::c10::Symbol;
+
+constexpr int max_tensor_display_size = 10;
+
+enum class AttributeKind {
+  f,
+  fs,
+  c,
+  cs,
+  i,
+  is,
+  s,
+  ss,
+  t,
+  ts,
+  g,
+  gs,
+  ty,
+  tys,
+  ival
+};
+static inline const char* toString(AttributeKind kind) {
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+  // Note: entries must stay in the same order as the AttributeKind enum.
+  static const char* names[] = {
+      "f",
+      "fs",
+      "c",
+      "cs",
+      "i",
+      "is",
+      "s",
+      "ss",
+      "t",
+      "ts",
+      "g",
+      "gs",
+      "ty",
+      "tys",
+      "ival"};
+  AT_ASSERT(size_t(kind) < sizeof(names) / sizeof(*names));
+  return names[int(kind)];
+}
+
+struct AttributeValue {
+  AttributeValue(Symbol name) : name(name) {}
+  using Ptr = std::unique_ptr<AttributeValue>;
+  Symbol name;
+  virtual AttributeKind kind() const = 0;
+  virtual Ptr clone() const = 0;
+  virtual ~AttributeValue() = default;
+};
+
+template <typename T, AttributeKind Kind>
+struct ScalarAttributeValue : public AttributeValue {
+  using ConstructorType = T;
+  using ValueType = T;
+  ScalarAttributeValue(Symbol name, ConstructorType value_)
+      : AttributeValue(name), value_(std::move(value_)) {}
+  ValueType& value() {
+    return value_;
+  }
+  Ptr clone() const override {
+    return Ptr(new ScalarAttributeValue(name, value_));
+  }
+  AttributeKind kind() const override {
+    return Kind;
+  }
+
+ private:
+  ValueType value_;
+};
+
+template <typename T, AttributeKind Kind>
+struct VectorAttributeValue : public AttributeValue {
+  using ConstructorType = std::vector<T>;
+  using ValueType = std::vector<T>;
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+  VectorAttributeValue(Symbol name, ConstructorType value_)
+      : AttributeValue(name), value_(std::move(value_)) {}
+  ValueType& value() {
+    return value_;
+  }
+  AttributeKind kind() const override {
+    return Kind;
+  }
+  std::unique_ptr<AttributeValue> clone() const override {
+    auto copy = value_;
+    return Ptr(new VectorAttributeValue(name, std::move(copy)));
+  }
+
+ private:
+  ValueType value_;
+};
+
+using ComplexAttr =
+    ScalarAttributeValue<c10::complex<double>, AttributeKind::c>;
+using ComplexValsAttr =
+    VectorAttributeValue<c10::complex<double>, AttributeKind::cs>;
+using FloatAttr = ScalarAttributeValue<double, AttributeKind::f>;
+using FloatsAttr = VectorAttributeValue<double, AttributeKind::fs>;
+using IntAttr = ScalarAttributeValue<int64_t, AttributeKind::i>;
+using IntsAttr = VectorAttributeValue<int64_t, AttributeKind::is>;
+using StringAttr = ScalarAttributeValue<std::string, AttributeKind::s>;
+using StringsAttr = VectorAttributeValue<std::string, AttributeKind::ss>;
+using TensorAttr = ScalarAttributeValue<at::Tensor, AttributeKind::t>;
+using TensorsAttr = VectorAttributeValue<at::Tensor, AttributeKind::ts>;
+using TypeAttr = ScalarAttributeValue<c10::TypePtr, AttributeKind::ty>;
+using TypesAttr = VectorAttributeValue<c10::TypePtr, AttributeKind::tys>;
+using IValueAttr = ScalarAttributeValue<at::IValue, AttributeKind::ival>;
+
+struct Graph;
+
+// We special case Graph attributes like this because we want to ensure that
+// Graph::copy() is called when we clone() these attributes.
+struct TORCH_API GraphAttr : public AttributeValue {
+  using ConstructorType = std::shared_ptr<Graph>;
+  using ValueType = std::shared_ptr<Graph>;
+  GraphAttr(Symbol name, ConstructorType value_)
+      : AttributeValue(name), value_(std::move(value_)) {}
+  ValueType& value() {
+    return value_;
+  }
+  Ptr clone() const override;
+  AttributeKind kind() const override {
+    return AttributeKind::g;
+  }
+
+ private:
+  std::shared_ptr<Graph> value_;
+};
+
+struct TORCH_API GraphsAttr : public AttributeValue {
+  using ConstructorType = std::vector<std::shared_ptr<Graph>>;
+  using ValueType = std::vector<std::shared_ptr<Graph>>;
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+  GraphsAttr(Symbol name, ConstructorType value_)
+      : AttributeValue(name), value_(std::move(value_)) {}
+  ValueType& value() {
+    return value_;
+  }
+  AttributeKind kind() const override {
+    return AttributeKind::gs;
+  }
+  std::unique_ptr<AttributeValue> clone() const override;
+
+ private:
+  ValueType value_;
+};
+
+struct IRAttributeError : public std::exception {
+  IRAttributeError(Symbol name, bool defined) {
+    std::stringstream ss;
+    // NOLINTNEXTLINE(bugprone-branch-clone)
+    if (!defined) {
+      ss << "required keyword attribute '" << name.toUnqualString()
+         << "' is undefined";
+    } else {
+      ss << "required keyword attribute '" << name.toUnqualString()
+         << "' has the wrong type";
+    }
+    msg = ss.str();
+  }
+  const char* what() const noexcept override {
+    return msg.c_str();
+  }
+
+ private:
+  std::string msg;
+};
+} // namespace jit
+} // namespace torch
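Because toString() indexes `names[]` by the numeric value of the enum, the array entries must stay in exact enum order (the `fs`/`c`/`cs` entries above are listed in that order for this reason). A quick sanity check, as a hypothetical illustration rather than part of the header:

```cpp
#include <cassert>
#include <cstring>

// Verify a couple of enum-to-name mappings stay aligned.
void checkAttributeKindNames() {
  using torch::jit::AttributeKind;
  using torch::jit::toString;
  assert(std::strcmp(toString(AttributeKind::fs), "fs") == 0);
  assert(std::strcmp(toString(AttributeKind::ival), "ival") == 0);
}
```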
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h
new file mode 100644
index 0000000000000000000000000000000000000000..d9d11075dd20425db8d6b5981901250157a00d8e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/constants.h
@@ -0,0 +1,61 @@
+#pragma once
+#include
+#include
+#include
+#include
+#include
+
+// Helpers for handling constants in the IR:
+// - create constant nodes from ints, floats, complex, intlist, Tensors, and
+//   other types
+// - implement primitive constant ops.
+namespace torch {
+namespace jit {
+
+using ::c10::IValue;
+
+struct Graph;
+struct Value;
+
+// Thrown when insertConstant cannot encode the IValue into a graph.
+struct TORCH_API constant_not_supported_error : public std::runtime_error {
+  using runtime_error::runtime_error;
+};
+
+TORCH_API Value* insertConstant(
+    Graph& g,
+    const IValue& val,
+    c10::optional<SourceRange> loc = c10::nullopt,
+    c10::optional<ScopePtr> scope = c10::nullopt);
+
+// Note: prefer g.insertConstant(val, loc), which does exactly the same thing.
+// This function is only declared/defined here because its implementation is
+// closely related to the implementation of prim::Constant, which is also in
+// constants.cpp.
+//
+// Returns c10::nullopt if the IValue kind cannot be inserted as a constant.
+TORCH_API c10::optional<Value*> tryInsertConstant(
+    Graph& g,
+    const IValue& val,
+    c10::optional<SourceRange> loc = c10::nullopt,
+    c10::optional<ScopePtr> scope = c10::nullopt);
+
+////////////////////////////////////////////////////////////////////////////////
+// Helper for retrieving constants
+////////////////////////////////////////////////////////////////////////////////
+
+// Attempt to convert a (possibly constant) Value* into an interpreter value
+// (IValue). Returns c10::nullopt if the Value* was not constant.
+TORCH_API c10::optional<IValue> toIValue(const Value* v);
+
+// If a value is a constant, then try to turn it into type T using the
+// same rules as the interpreter.
+template <typename T>
+c10::optional<T> constant_as(const Value* v) {
+  if (auto ivalue = toIValue(v)) {
+    return ivalue->to<T>();
+  }
+  return c10::nullopt;
+}
+} // namespace jit
+} // namespace torch
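A minimal round-trip sketch of this API (not part of the header; the function and its names are illustrative):

```cpp
#include <torch/csrc/jit/ir/constants.h>

// Insert an integer constant into `g` and read it back.
void constantRoundTrip(torch::jit::Graph& g) {
  using namespace torch::jit;
  // Encodes the IValue as a prim::Constant node in the graph.
  Value* c = insertConstant(g, IValue(static_cast<int64_t>(42)));
  // Read it back with the same conversion rules the interpreter uses.
  if (auto v = constant_as<int64_t>(c)) {
    AT_ASSERT(*v == 42);
  }
}
```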
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee24ccae35bea518bc6f912a96e22c4bd961e9d4
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_node_list.h
@@ -0,0 +1,201 @@
+#pragma once
+
+#include
+
+namespace torch {
+namespace jit {
+
+// Intrusive doubly linked lists with sane reverse iterators.
+// This header was originally named generic_graph_node_list.h because it is
+// ONLY used for Graph's Node lists, and if you want to use it for other
+// things, you will have to do some refactoring.
+//
+// At the moment, the templated type T must support a few operations:
+//
+// - It must have a field: T* next_in_graph[2] = { nullptr, nullptr };
+//   which are used for the intrusive linked list pointers.
+//
+// - It must have a method 'destroy()', which removes T from the
+//   list and frees a T.
+//
+// In practice, we are only using it with Node and const Node. 'destroy()'
+// needs to be renegotiated if you want to use this somewhere else.
+//
+// Regardless of the iteration direction, iterators always physically point
+// to the element they logically point to, rather than
+// the off-by-one behavior for all standard library reverse iterators like
+// std::list.
+
+// The list includes two sentinel nodes, one at the beginning and one at the
+// end, with a circular link between them. It is an error to insert nodes
+// after the end sentinel node but before the beginning node:
+
+// Visualization showing only the next() links:
+//  HEAD -> first -> second  -> ... -> last -> TAIL
+//   ^------------------------------------------
+
+// Visualization showing only the prev() links:
+//  HEAD <- first <- second  <- ... <- last <- TAIL
+//  ------------------------------------------^
+
+static constexpr int kNextDirection = 0;
+static constexpr int kPrevDirection = 1;
+
+template <typename T>
+struct generic_graph_node_list;
+
+template <typename T>
+struct generic_graph_node_list_iterator;
+
+struct Node;
+using graph_node_list = generic_graph_node_list<Node>;
+using const_graph_node_list = generic_graph_node_list<const Node>;
+using graph_node_list_iterator = generic_graph_node_list_iterator<Node>;
+using const_graph_node_list_iterator =
+    generic_graph_node_list_iterator<const Node>;
+
+template <typename T>
+struct generic_graph_node_list_iterator {
+  generic_graph_node_list_iterator() : cur(nullptr), d(kNextDirection) {}
+  generic_graph_node_list_iterator(T* cur, int d) : cur(cur), d(d) {}
+  generic_graph_node_list_iterator(
+      const generic_graph_node_list_iterator& rhs) = default;
+  generic_graph_node_list_iterator(
+      generic_graph_node_list_iterator&& rhs) noexcept = default;
+  generic_graph_node_list_iterator& operator=(
+      const generic_graph_node_list_iterator& rhs) = default;
+  generic_graph_node_list_iterator& operator=(
+      generic_graph_node_list_iterator&& rhs) noexcept = default;
+  T* operator*() const {
+    return cur;
+  }
+  T* operator->() const {
+    return cur;
+  }
+  generic_graph_node_list_iterator& operator++() {
+    AT_ASSERT(cur);
+    cur = cur->next_in_graph[d];
+    return *this;
+  }
+  generic_graph_node_list_iterator operator++(int) {
+    generic_graph_node_list_iterator old = *this;
+    ++(*this);
+    return old;
+  }
+  generic_graph_node_list_iterator& operator--() {
+    AT_ASSERT(cur);
+    cur = cur->next_in_graph[reverseDir()];
+    return *this;
+  }
+  generic_graph_node_list_iterator operator--(int) {
+    generic_graph_node_list_iterator old = *this;
+    --(*this);
+    return old;
+  }
+
+  // erase cur without invalidating this iterator
+  // named differently from destroy so that ->/. bugs do not
+  // silently cause the wrong one to be called.
+  // iterator will point to the previous entry after call
+  void destroyCurrent() {
+    T* n = cur;
+    cur = cur->next_in_graph[reverseDir()];
+    n->destroy();
+  }
+  generic_graph_node_list_iterator reverse() {
+    return generic_graph_node_list_iterator(cur, reverseDir());
+  }
+
+ private:
+  int reverseDir() {
+    return d == kNextDirection ? kPrevDirection : kNextDirection;
+  }
+  T* cur;
+  int d; // direction; 0 is forward, 1 is reverse, see next_in_graph
+};
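destroyCurrent() is what makes erase-while-iterating safe with this iterator. A short sketch of the idiom (illustrative only; assumes a `Block*` whose `nodes()` returns a graph_node_list as defined below):

```cpp
#include <torch/csrc/jit/ir/ir.h>

// Remove dead prim::Constant nodes from a block.
void eraseDeadConstants(torch::jit::Block* b) {
  using namespace torch::jit;
  for (auto it = b->nodes().begin(); it != b->nodes().end(); ++it) {
    if (it->kind() == prim::Constant && !it->hasUses()) {
      // After this call the iterator points at the previous node, so the
      // loop's ++ resumes at the node that followed the destroyed one.
      it.destroyCurrent();
    }
  }
}
```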
+
+template <typename T>
+struct generic_graph_node_list {
+  using iterator = generic_graph_node_list_iterator<T>;
+  using const_iterator = generic_graph_node_list_iterator<const T>;
+  generic_graph_node_list_iterator<T> begin() {
+    return generic_graph_node_list_iterator<T>(head->next_in_graph[d], d);
+  }
+  generic_graph_node_list_iterator<const T> begin() const {
+    return generic_graph_node_list_iterator<const T>(
+        head->next_in_graph[d], d);
+  }
+  generic_graph_node_list_iterator<T> end() {
+    return generic_graph_node_list_iterator<T>(head->next_in_graph[!d], d);
+  }
+  generic_graph_node_list_iterator<const T> end() const {
+    return generic_graph_node_list_iterator<const T>(
+        head->next_in_graph[!d], d);
+  }
+  generic_graph_node_list_iterator<T> rbegin() {
+    return reverse().begin();
+  }
+  generic_graph_node_list_iterator<const T> rbegin() const {
+    return reverse().begin();
+  }
+  generic_graph_node_list_iterator<T> rend() {
+    return reverse().end();
+  }
+  generic_graph_node_list_iterator<const T> rend() const {
+    return reverse().end();
+  }
+  generic_graph_node_list reverse() {
+    return generic_graph_node_list(head->next_in_graph[!d], !d);
+  }
+  const generic_graph_node_list reverse() const {
+    return generic_graph_node_list(head->next_in_graph[!d], !d);
+  }
+  T* front() {
+    return head->next_in_graph[d];
+  }
+  const T* front() const {
+    return head->next_in_graph[d];
+  }
+  T* back() {
+    return head->next_in_graph[!d];
+  }
+  const T* back() const {
+    return head->next_in_graph[!d];
+  }
+  generic_graph_node_list(T* head, int d) : head(head), d(d) {}
+
+ private:
+  T* head; // both head and tail are sentinel nodes
+           // the first real node is head->next_in_graph[d]
+           // the tail sentinel is head->next_in_graph[!d]
+  int d;
+};
+
+template <typename T>
+static inline bool operator==(
+    generic_graph_node_list_iterator<T> a,
+    generic_graph_node_list_iterator<T> b) {
+  return *a == *b;
+}
+
+template <typename T>
+static inline bool operator!=(
+    generic_graph_node_list_iterator<T> a,
+    generic_graph_node_list_iterator<T> b) {
+  return *a != *b;
+}
+
+} // namespace jit
+} // namespace torch
+
+namespace std {
+
+template <typename T>
+struct iterator_traits<torch::jit::generic_graph_node_list_iterator<T>> {
+  using difference_type = int64_t;
+  using value_type = T*;
+  using pointer = T**;
+  using reference = T*&;
+  using iterator_category = bidirectional_iterator_tag;
+};
+
+} // namespace std
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_utils.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..6d4f296fb1327ef47c1ad236cad4635b059a0f11
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/graph_utils.h
@@ -0,0 +1,25 @@
+#pragma once
+
+#include
+
+#include
+
+namespace torch {
+namespace jit {
+
+TORCH_API TypePtr getTensorType(const at::Tensor& t, bool complete);
+
+TORCH_API TypePtr inferShapeAndTypeForInput(
+    TypePtr input_type,
+    Stack::const_iterator& s_iter,
+    const Stack::const_iterator& s_iter_end,
+    bool complete);
+
+TORCH_API void setInputTensorTypes(
+    Graph& g,
+    const Stack& stack,
+    bool complete,
+    const std::vector<int>& param_count_list = {});
+
+} // namespace jit
+} // namespace torch
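A small sketch of how setInputTensorTypes might be driven from example inputs before running shape-dependent passes (illustrative; the wrapper function is hypothetical, and `complete` is assumed to control whether exact sizes and strides are recorded):

```cpp
#include <torch/csrc/jit/ir/graph_utils.h>

// Specialize a graph's input types from one example tensor.
void specializeInputs(torch::jit::Graph& g, const at::Tensor& example) {
  torch::jit::Stack stack;
  stack.emplace_back(example); // Stack is a vector of IValues
  torch::jit::setInputTensorTypes(g, stack, /*complete=*/false);
}
```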
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h
new file mode 100644
index 0000000000000000000000000000000000000000..4781b15229cbb6eb2c0181051ccfcb12f4fed33b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/ir.h
@@ -0,0 +1,1841 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+// Forward declare, the real meat is in python_ir.cpp
+template <typename T>
+class THPPointer;
+using THPObjectPtr = THPPointer<PyObject>;
+using pyobj_list = std::vector<THPObjectPtr>;
+
+namespace torch {
+namespace jit {
+namespace utils {
+TORCH_API std::string getNodesModuleHierarchy(const Node& n);
+} // namespace utils
+class AliasDb;
+
+using ::c10::Argument;
+using ::c10::FunctionSchema;
+using ::c10::Symbol;
+
+using ::c10::ivalue::Shared;
+
+using ::c10::IValue;
+using ::c10::ivalue::Future;
+
+using ::c10::ivalue::ConstantString;
+
+#define C10_USING(T) using ::c10::T;
+C10_FORALL_TYPES(C10_USING)
+#undef C10_USING
+
+#define C10_USING(T) using ::c10::T##Ptr;
+C10_FORALL_TYPES(C10_USING)
+#undef C10_USING
+
+using ::c10::Type;
+using ::c10::TypeEnv;
+using ::c10::TypePtr;
+
+using ::c10::getTypePtr;
+using ::c10::MatchTypeReturn;
+using ::c10::TypeKind;
+
+using ::c10::fmap;
+
+namespace prim {
+using namespace ::c10::prim;
+}
+namespace attr {
+using namespace ::c10::attr;
+}
+namespace aten {
+using namespace ::c10::aten;
+}
+namespace cuda {
+#if !defined(USE_ROCM)
+using namespace ::c10::cuda;
+#endif
+} // namespace cuda
+
+struct Function;
+struct GraphFunction;
+struct MatchedSchema;
+
+// A Graph represents one "function" of computation.
+// It uses a simple ownership model where the graph owns all the nodes inside
+// it. All references inside the graph are raw pointers. Destroying the Graph
+// will invalidate any pointers to nodes in the graph.
+struct Graph;
+
+// Node is the base class of the IR graph. It represents one computation
+// and dependencies on a list of Values. The "prim-ops", so to speak.
+struct Node;
+
+// A Value represents an input or output to a node that is either a
+// Tensor or an opaque Handle object, as determined by type().
+struct Value;
+
+TORCH_API std::ostream& operator<<(std::ostream& out, const Graph& g);
+TORCH_API std::ostream& operator<<(std::ostream& out, const Node& n);
+
+// A list of nodes, with inputs and outputs
+struct Block;
+
+// Each use is represented by this type; see 'Node::uses()'.
+// 'user' is the consumer of the value, and 'offset' is the index into
+// 'user's input list where the produced value will be found.
+struct Use {
+  Use(Node* user, size_t offset) : user(user), offset(offset) {}
+  Node* user;
+  size_t offset;
+
+  bool operator==(const Use& b) {
+    return user == b.user && offset == b.offset;
+  }
+};
+
+// Note [User node does not uniquely identify use]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// A while back, we wrote some code manipulating uses that looked like this:
+//
+//    for (auto& use : used_val->uses_) {
+//      if (use.user == this_node) {
+//        use.offset += 1;
+//        break;
+//      }
+//    }
+//
+// This code is trying to find a particular use (our node's use) to update it.
+// However, it's wrong: there may be *multiple* uses of a value %x in a node,
+// as might be the case in this IR:
+//
+//    %y = Add %x %x
+//
+// In this case, there are two uses of %x whose user is the node 'Add %x %x'.
+// So, "use induced by this node" is not a well-formed concept.
+//
+// If you are looking for "use induced by an input", it's best to use
+// findUseForInput() to get it.
+
+// The list types are intentionally simple, but we typedef
+// them here so if we need to change them, refactoring will be easier.
+using node_list = std::vector<Node*>;
+using value_list = std::vector<Value*>;
+using use_list = std::vector<Use>;
+template <typename T>
+using ArrayRef = at::ArrayRef<T>;
+using NodeKind = Symbol;
+using topo_position_t = int64_t;
+using ValueSet = std::unordered_set<const Value*>;
+
+struct OperatorSet;
+template <typename T>
+struct OperatorMap;
+
+// This is a wrapper to allow invalidating the Python object
+// safely when the C++ object for a Node/Value/Block is deleted.
+// Like much of the graph, it isn't safe for different threads to
+// access the same graph.
+template <typename T>
+struct Wrap {
+  explicit Wrap(T* p) : elem(p), clear_cb(nullptr) {}
+  void clear() {
+    if (clear_cb) {
+      clear_cb(elem);
+    }
+    elem = nullptr;
+  }
+  T* elem;
+  void (*clear_cb)(void*);
+};
+
+struct Value {
+  AT_DISALLOW_COPY_AND_ASSIGN(Value);
+  Value(Node* node_, size_t offset_);
+
+ private:
+  friend struct Node;
+  friend struct Graph;
+  Node* node_;
+  size_t offset_;
+  size_t unique_ = 0; // unique id
+  use_list uses_;
+  std::string unique_name_;
+  TypePtr type_;
+  // a managing wrapper for Python to allow invalidation
+  std::shared_ptr<Wrap<Value>> wrap_;
+
+ public:
+  Value* setType(TypePtr type);
+  TORCH_API void inferTypeFrom(const at::Tensor& output);
+  TORCH_API void inferTypeFrom(
+      const c10::intrusive_ptr<c10::ivalue::Object>& output);
+  const TypePtr& type() const {
+    AT_ASSERT(type_ != nullptr);
+    return type_;
+  }
+  bool requires_grad() const {
+    return type()->requires_grad();
+  }
+  bool isCompleteTensor() const {
+    if (auto pt = type()->cast<TensorType>()) {
+      return pt->isComplete();
+    }
+    return false;
+  }
+  TORCH_API bool mustBeNone() const;
+  TORCH_API bool mustNotBeNone() const;
+  size_t unique() const {
+    return unique_;
+  }
+  bool hasDebugName() const {
+    return !unique_name_.empty();
+  }
+  static bool isValidName(const std::string& name);
+  TORCH_API Value* setDebugName(const std::string& name);
+  std::string debugName() const {
+    if (hasDebugName()) {
+      return unique_name_;
+    }
+    return c10::to_string(unique());
+  }
+  TORCH_API std::string debugNameBase() const;
+  Node* node() {
+    return node_;
+  }
+  size_t offset() const {
+    return offset_;
+  }
+  void setOffset(size_t offset) {
+    offset_ = offset;
+  }
+  const Node* node() const {
+    return node_;
+  }
+
+  /**
+   * @warning NEVER pass the raw pointer of a smart-pointer-managed Graph to
+   * Python. Check #87343 for details.
+   */
+  Graph* owningGraph();
+  const Graph* owningGraph() const;
+  // TODO: make this more const correct
+  const use_list& uses() const {
+    return uses_;
+  }
+
+  bool hasUses() const {
+    return !uses().empty();
+  }
+
+  TORCH_API void replaceFirstUseWith(Value* newValue);
+
+  // Replaces all uses of this value with 'newValue'.
+  //
+  // Given:   %3 = f(%1, %2)
+  //          %4 = g(%3)
+  //          %5 = h(%3, %3)
+  // Execute: %3.replaceAllUsesWith(%6)
+  // Result:  %3 = f(%1, %2)
+  //          %4 = g(%6)
+  //          %5 = h(%6, %6)
+  TORCH_API void replaceAllUsesWith(Value* newValue);
+
+  // Replaces all uses of this value with 'newValue' after 'node'.
+  // Given:   %3 = f(%1, %2)
+  //          %4 = g(%3)
+  //          %5 = inplace_(%3)
+  //          %6 = h(%3, %3)
+  // Execute: %3.replaceAllUsesAfterNodeWith(%5.node(), %5)
+  // Result:  %3 = f(%1, %2)
+  //          %4 = g(%3)
+  //          %5 = inplace_(%3)
+  //          %6 = h(%5, %5)
+  // XXX: does not check scoping legality; consider using
+  // replaceAllUsesDominatedByNodeWith
+  TORCH_API void replaceAllUsesAfterNodeWith(const Node* node, Value* newValue);
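A typical rewrite built on replaceAllUsesWith, as an illustrative sketch (the helper and its assumptions, matching output counts and an already-inserted replacement node, are hypothetical):

```cpp
#include <torch/csrc/jit/ir/ir.h>

// Redirect all consumers of old_node's outputs to new_node, then delete it.
void rewrite(torch::jit::Node* old_node, torch::jit::Node* new_node) {
  AT_ASSERT(old_node->outputs().size() == new_node->outputs().size());
  for (size_t i = 0; i < old_node->outputs().size(); ++i) {
    old_node->output(i)->replaceAllUsesWith(new_node->output(i));
  }
  old_node->destroy(); // safe now: the old outputs have no uses
}
```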
+
+  // Replaces all uses of this value with 'newValue' that are dominated by
+  // 'node'. Given:
+  //     x = op(...)
+  //     if cond:
+  //        z = foo(..)
+  //        bar(x)
+  //     else:
+  //        print(x)
+  // x.replaceAllUsesDominatedByNodeWith(foo, z) would replace bar(x)
+  // but not print(x), because print is not dominated by foo.
+  // replaceAllUsesAfterNodeWith does not check domination, so in this
+  // example it would produce invalid IR.
+  TORCH_API void replaceAllUsesDominatedByNodeWith(
+      const Node* node,
+      Value* newValue);
+
+  TORCH_API Value* copyMetadata(Value* from);
+
+  TORCH_API std::shared_ptr<Wrap<Value>> wrap() {
+    if (!wrap_) {
+      wrap_ = std::make_shared<Wrap<Value>>(this);
+    }
+    return wrap_;
+  }
+
+  virtual ~Value() {
+    if (wrap_) {
+      wrap_->clear();
+    }
+  }
+};
+
+struct TORCH_API Node {
+  AT_DISALLOW_COPY_AND_ASSIGN(Node);
+  friend struct Graph;
+  friend struct Block;
+  friend struct Value;
+  friend graph_node_list;
+  friend const_graph_node_list;
+  friend graph_node_list_iterator;
+  friend const_graph_node_list_iterator;
+
+ private:
+  const NodeKind kind_;
+  std::vector<Value*> inputs_;
+  std::vector<Value*> outputs_;
+  // subblocks
+  std::vector<Block*> blocks_;
+  Graph* graph_;
+  Block* owning_block_;
+  c10::optional<SourceRange> source_range_;
+  ScopePtr scope_;
+  c10::optional<InlinedCallStackPtr> callstack_;
+  // Assumes FunctionSchemas are persistent, so we don't manage their
+  // lifetime. This field is effectively a cache that's populated on
+  // attribute lookups and invalidated every time we perform an operation
+  // that could potentially change the schema.
+  // note: mutable because schema_ is effectively a cache
+  mutable const Operator* op_;
+  topo_position_t topo_position_ = 0;
+  // a managing wrapper for Python to allow invalidation
+  std::shared_ptr<Wrap<Node>> wrap_;
+  // Stores the full schema name if the operator is historic.
+  // When the operator is deprecated or the name of the operator
+  // is changed, we need to rely on this name
+  // to retrieve old schemas to successfully apply upgraders
+  // for this operator.
+  c10::optional<std::string> historic_schema_name_ = c10::nullopt;
+
+ protected:
+  Node(Graph* graph_, NodeKind kind_); // defined after graph
+
+ public:
+  // Every Node except the Return/Param Nodes is associated with exactly one
+  // place in the Node list of the Graph. The Graph itself is a circular
+  // doubly-linked list. The Return Node is used as the sentinel for the
+  // "beginning"/"end" of the list. This means that you can tell when
+  // you've traversed the entire list without worrying about null
+  // pointers. `next_in_graph[0]` is the pointer to the next Node, while
+  // `next_in_graph[1]` is the pointer to the previous Node. The
+  // linked list is implemented as an array to allow the same iterator
+  // class for forward and reversed Node lists. Taken together, this
+  // list also represents a topological sort of the Nodes in the Graph.
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-non-private-member-variables-in-classes,modernize-avoid-c-arrays)
+  Node* next_in_graph[2] = {nullptr, nullptr};
+
+  std::shared_ptr<Wrap<Node>> wrap() {
+    if (!wrap_) {
+      wrap_ = std::make_shared<Wrap<Node>>(this);
+    }
+    return wrap_;
+  }
+
+  const c10::optional<std::string> getHistoricSchemaName() {
+    return historic_schema_name_;
+  }
+
+  void setHistoricSchemaName(const std::string& name) {
+    historic_schema_name_ = name;
+  }
+
+  Node*& next() {
+    return next_in_graph[kNextDirection];
+  }
+  Node*& prev() {
+    return next_in_graph[kPrevDirection];
+  }
+  Node* const& next() const {
+    return next_in_graph[kNextDirection];
+  }
+  Node* const& prev() const {
+    return next_in_graph[kPrevDirection];
+  }
+
+  NodeKind kind() const {
+    return kind_;
+  }
+  Node* setSourceRange(SourceRange r) {
+    source_range_ = std::move(r);
+    return this;
+  }
+  SourceRange sourceRange() const;
+
+  /**
+   * @warning NEVER pass the raw pointer of a smart-pointer-managed Graph to
+   * Python. Check #87343 for details.
+   */
+  Graph* owningGraph() {
+    return graph_;
+  }
+  const Graph* owningGraph() const {
+    return graph_;
+  }
+  Block* owningBlock() {
+    return owning_block_;
+  }
+  const Block* owningBlock() const {
+    return owning_block_;
+  }
+  ScopePtr scope() {
+    return scope_;
+  }
+  void setScope(ScopePtr scope) {
+    scope_ = std::move(scope);
+  }
+  std::string scopeName() const {
+    if (!scope_) {
+      return "";
+    }
+    return scope_->namesFromRoot();
+  }
+
+  // Copies the source range, scope and callstack from another node.
+  Node* copyMetadata(Node* from) {
+    this->setSourceRange(from->sourceRange());
+    this->setScope(from->scope());
+    if (auto cs = from->callstack()) {
+      this->setCallStack(*cs);
+    }
+    return this;
+  }
+
+  c10::optional<InlinedCallStackPtr> callstack() const {
+    return callstack_;
+  }
+  void setCallStack(InlinedCallStackPtr cs) {
+    callstack_ = std::move(cs);
+  }
+
+  // NB: This returns an ArrayRef; that means that it will
+  // get invalidated if you resize inputs (e.g., using addInput).
+  // We can't return a std::vector<const Value*>& because there's no
+  // way to soundly cast to it (an insane
+  // implementation of std::vector could make this representationally
+  // different.)
+  at::ArrayRef<Value*> inputs() {
+    return inputs_;
+  }
+  at::ArrayRef<const Value*> inputs() const {
+    // Vectors are not convertible in const-ness of elements, but
+    // raw pointers are.
+    return {inputs_.data(), inputs_.size()};
+  }
+  // NB: This returns an ArrayRef; that means that it will
+  // get invalidated if you resize outputs (e.g., using addOutput).
+  // We can't return a std::vector<const Value*>& because there's no
+  // way to soundly cast to it (an insane
+  // implementation of std::vector could make this representationally
+  // different.)
+  at::ArrayRef<Value*> outputs() {
+    return outputs_;
+  }
+  at::ArrayRef<const Value*> outputs() const {
+    // Vectors are not convertible in const-ness of elements, but
+    // raw pointers are.
+    return {outputs_.data(), outputs_.size()};
+  }
+  Value* output(size_t i) const {
+    return outputs_.at(i);
+  }
+  bool hasUses() const {
+    for (auto o : outputs()) {
+      if (!o->uses().empty()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  void replaceAllUsesWith(Node* n);
+
+  // Replaces `this` with a new node with the same inputs and outputs
+  // but a new node symbol; does not destroy `this`.
+  Node* replaceWithNewSymbol(Symbol new_symbol);
+
+  // Checks if this node is dominated by `dominator`, which means that
+  // `dominator` will always be executed before `this` and `dominator`
+  // is in scope of `this`.
+  bool isDominatedBy(const Node* dominator) const;
+
+  // Lots of things like chunk have a single input or single output, so we
+  // have a helper to make accessing it easier.
+  Value* input() {
+    AT_ASSERT(inputs_.size() == 1);
+    return inputs_.at(0);
+  }
+  Value* output() {
+    AT_ASSERT(outputs_.size() == 1);
+    return outputs_.at(0);
+  }
+  const Value* output() const {
+    AT_ASSERT(outputs_.size() == 1);
+    return outputs_.at(0);
+  }
+  const Value* input() const {
+    AT_ASSERT(inputs_.size() == 1);
+    return inputs_.at(0);
+  }
+  // Access a particular input. This is a checked index.
+  Value* input(size_t i) const {
+    return inputs_.at(i);
+  }
+
+  bool hasNamedInput(const std::string& unqualName) const;
+  Value* namedInput(const std::string& unqualName) const;
+  Value* namedInput(Symbol name) const;
+
+  c10::optional<IValue> get(Symbol name) const;
+
+  template <typename T>
+  c10::optional<T> get(Symbol name) const {
+    if (auto v = get(name)) {
+      return v->template to<T>();
+    }
+    return c10::nullopt;
+  }
+
+  // Returns true if the value of input name is statically known.
+  bool is_constant(Symbol name) const {
+    return static_cast<bool>(get(name));
+  }
+  bool mustBeNone() const;
+
+  bool isNondeterministic() const;
+  bool hasSideEffects() const;
+
+  // Instructions lowered by the interpreter and not run in the optimized
+  // graph.
+  bool notExecutedOp() const {
+    return kind_ == prim::Constant || kind_ == prim::profile ||
+        kind_ == prim::profile_ivalue;
+  }
+
+  // Graphs
+
+  // Note [Topological invariant]
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  // We always maintain an up-to-date topological ordering of all nodes via
+  // the next()/prev() links. All transformations to graphs must preserve
+  // this topological ordering: for example, it is only valid to 'addInput'
+  // with an input which is topologically before the current node.
+  //
+  // Usually, it is obvious whether or not topological order is maintained;
+  // for example, if you are adding nodes to the end of the topsort, it's
+  // impossible for them to refer to inputs that are not in the topsort.
+  // If it is not obvious, please comment accordingly.
+
+  // Add 'node' as an input to 'this' at the end of existing
+  // arguments. Returns the added node for ease of chaining.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.addInput(%4)
+  // Result:  %3 = f(%1, %2, %4)
+  Value* addInput(Value* value);
+
+  // Add 'value' as an input to 'this' at the specified position in the
+  // arguments. Returns the added value for ease of chaining.
+  Value* insertInput(size_t i, Value* value);
+
+  // Replace the input of 'this' at position 'i' with
+  // 'newValue', returning the old node.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.replaceInput(1, %4)
+  // Result:  %3 = f(%1, %4)
+  Value* replaceInput(size_t i, Value* newValue);
+
+  // Replace all occurrences of 'from' in the inputs of this
+  // node with 'to'. Corresponds to llvm's replaceUsesOfWith.
+  //
+  // Given:   %3 = f(%1, %2, %1)
+  // Execute: %3.replaceInputWith(%1, %4)
+  // Result:  %3 = f(%4, %2, %4)
+  void replaceInputWith(Value* from, Value* to);
+
+  Value* addOutput();
+
+  Value* insertOutput(size_t i);
+
+  void eraseOutput(size_t i);
+
+  Block* addBlock();
+  void eraseBlock(size_t i);
+
+  // Each Node can have a list of subblocks. These are used to define
+  // structured nested control flow operators such as If and Loop.
+  // The meaning of a block is specific to the kind of node it is in, but
+  // all blocks share these semantics:
+  // * Nested lexical scoping: If a node 'Parent' has a subblock which
+  //   contains a node 'Child', Child can use any value that was in scope
+  //   for the Parent node in addition to any values defined before 'Child'
+  //   in the subblock.
+  // * The list of inputs to the block is in scope for the duration of the
+  //   block.
+  // * The outputs of the Parent node are not in scope for the subblocks.
+  // Typically the inputs to a block that represents control flow act as
+  // the equivalent of phi-nodes in standard SSA form,
+  // defining a new Value to represent any term that has multiple
+  // definitions depending on how control flowed. Outputs of the node
+  // containing control flow serve a similar purpose, defining new values
+  // for variables that would have different definitions depending on which
+  // way control flowed.
+
+  at::ArrayRef<Block*> blocks() {
+    return blocks_;
+  }
+  at::ArrayRef<const Block*> blocks() const {
+    // Vectors are not convertible in const-ness of elements, but
+    // raw pointers are.
+    return {blocks_.data(), blocks_.size()};
+  }
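An illustrative sketch of building an If node with two subblocks (hypothetical helper; `registerOutput` and `insertNode` come from Block and Graph, which are declared elsewhere in this header):

```cpp
#include <torch/csrc/jit/ir/ir.h>

// Build `if cond then then_val else else_val` and return the If's output.
torch::jit::Value* buildIf(
    torch::jit::Graph& g,
    torch::jit::Value* cond,
    torch::jit::Value* then_val,
    torch::jit::Value* else_val) {
  using namespace torch::jit;
  Node* if_node = g.insertNode(g.create(prim::If, {cond}, /*num_outputs=*/1));
  // One block per branch; each block's output feeds the If node's output.
  Block* then_block = if_node->addBlock();
  Block* else_block = if_node->addBlock();
  then_block->registerOutput(then_val);
  else_block->registerOutput(else_val);
  return if_node->output();
}
```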
+
+  // Is 'this' before 'n' in the topological order?
+  bool isBefore(const Node* n) const;
+
+  // Is 'this' after 'n' in the topological order?
+  bool isAfter(const Node* n) const;
+
+  // Insert unattached 'this' node before 'n' in the topological order.
+  // Returns this (for chaining).
+  //
+  // Given:   %3 = f(%1, %2)
+  //          %4 = g(%3)
+  // and unattached: %5 = h(%1)
+  // Execute: %5.insertBefore(%4)
+  // Result:  %3 = f(%1, %2)
+  //          %5 = h(%1)
+  //          %4 = g(%3)
+  Node* insertBefore(Node* n);
+
+  // Insert unattached 'this' node after 'n' in the topological order.
+  // Returns this (for chaining).
+  //
+  // Given:   %3 = f(%1, %2)
+  //          %4 = g(%3)
+  // and unattached: %5 = h(%1)
+  // Execute: %5.insertAfter(%4)
+  // Result:  %3 = f(%1, %2)
+  //          %4 = g(%3)
+  //          %5 = h(%1)
+  Node* insertAfter(Node* n);
+
+  // Move 'this' (already in the graph) after 'n' in the topological order.
+  //
+  // NOTE: Does not check that value dependencies are preserved, see
+  // AliasDb::moveAfterTopologicallyValid
+  //
+  // Given:   %2 = f(%1)
+  //          %3 = g(%1)
+  // Execute: %2.moveAfter(%3)
+  // Result:  %3 = g(%1)
+  //          %2 = f(%1)
+  void moveAfter(Node* n);
+
+  // Move 'this' (already in the graph) before 'n' in the topological
+  // order.
+  //
+  // NOTE: Does not check that value dependencies are preserved, see
+  // AliasDb::moveBeforeTopologicallyValid
+  //
+  // Given:   %2 = f(%1)
+  //          %3 = g(%1)
+  // Execute: %3.moveBefore(%2)
+  // Result:  %3 = g(%1)
+  //          %2 = f(%1)
+  void moveBefore(Node* n);
+
+  // Remove the input at 'i' from this node.
+  //
+  // WARNING: This is O(n) in the number of inputs, so avoid repeatedly
+  // calling removeInput.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.removeInput(1)
+  // Result:  %3 = f(%1)
+  void removeInput(size_t i);
+
+  // Remove all inputs from a node.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.removeAllInputs()
+  // Result:  %3 = f()
+  void removeAllInputs();
+
+  // Remove all outputs from a node.
+  //
+  // Given:   %1, %2 = f()
+  // Execute: removeAllOutputs()
+  // Result:  = f()
+  void removeAllOutputs();
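An illustrative sketch combining create/insertBefore/moveAfter (hypothetical helper; prim::Print is used only as a convenient zero-output op, and the unchecked moveAfter is shown deliberately, see AliasDb for checked moves):

```cpp
#include <torch/csrc/jit/ir/ir.h>

// Create an unattached node, splice it in before `n`, then reorder it.
void spliceNoop(torch::jit::Graph& g, torch::jit::Node* n) {
  using namespace torch::jit;
  // create() leaves the node unattached until an insert call places it.
  Node* p = g.create(prim::Print, {n->input(0)}, /*num_outputs=*/0);
  p->insertBefore(n); // now topologically ordered before `n`
  p->moveAfter(n); // unchecked reorder; value dependencies are not verified
}
```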
+
+  // Rearrange the ordering of inputs or outputs of a node.
+  //
+  // Given:   %3 = f(%1, %2)
+  // Execute: %3.permuteInputs({1, 0})
+  // Result:  %3 = f(%2, %1)
+  // Each index must appear exactly once.
+  void permuteInputs(const std::vector<size_t>& new_inputs);
+  void permuteOutputs(const std::vector<size_t>& new_outputs);
+
+  // Iterators of the node list starting at this node;
+  // useful for resuming a search starting at this node.
+  inline graph_node_list_iterator iterator() {
+    return {this, 0};
+  }
+  inline graph_node_list_iterator reverseIterator() {
+    return iterator().reverse();
+  }
+  inline const_graph_node_list_iterator iterator() const {
+    return {this, 0};
+  }
+  inline const_graph_node_list_iterator reverseIterator() const {
+    return iterator().reverse();
+  }
+
+  // Remove 'this' from the instruction list and deallocate it.
+  //
+  // Invariant: no outputs of 'this' may have any uses.
+  //
+  // Given:   %2 = f(%1)
+  //          %3 = g(%1)
+  // Execute: %2.destroy()
+  // Result:  %3 = g(%1)
+  void destroy();
+
+  // Dynamically cast this node to the subclass indicated by the
+  // template variable, returning nullptr if the cast is invalid.
+  //
+  // Example usage: if(auto s = n.cast