diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h
new file mode 100644
index 0000000000000000000000000000000000000000..6203905732667776ed9646d4ff3b4fa0ea2458de
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h
@@ -0,0 +1,351 @@
+#pragma once
+#include <ATen/core/function.h>
+#include <c10/util/Exception.h>
+#include <torch/csrc/jit/api/function_impl.h>
+#include <torch/csrc/jit/frontend/name_mangler.h>
+#include <torch/csrc/jit/frontend/source_range.h>
+#include <torch/csrc/jit/ir/ir.h>
+#include <torch/csrc/jit/runtime/graph_executor.h>
+
+#include <torch/csrc/Export.h>
+
+#include <ATen/core/function_schema.h>
+#include <ATen/core/qualified_name.h>
+#include <c10/util/ArrayRef.h>
+#include <c10/util/Optional.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <ostream>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace torch::jit {
+
+struct Def;
+struct Property;
+struct ClassDef;
+struct SugaredValue;
+struct Resolver;
+
+using ResolverPtr = std::shared_ptr<Resolver>;
+struct Self {
+  virtual ~Self() = default;
+  virtual std::shared_ptr<SugaredValue> makeSugared(Value* v) const = 0;
+  virtual ClassTypePtr getClassType() const = 0;
+};
+
+// A CompilationUnit is a list of named Functions
+// with helper methods to iterate the list or invoke the function.
+// Classes have a CompilationUnit holding the class methods,
+// and Modules have a CompilationUnit holding the Functions that
+// are used to implement their Methods
+
+struct TORCH_API CompilationUnit {
+  enum class FunctionType { Method, Hook, PreHook };
+  // constructor that takes a set of functions to compile using the native
+  // resolver
+  explicit CompilationUnit(const std::string& source);
+  CompilationUnit() = default;
+
+  CompilationUnit& operator=(CompilationUnit&&) = default;
+  CompilationUnit(CompilationUnit&&) = default;
+  CompilationUnit& operator=(const CompilationUnit&) = delete;
+  CompilationUnit(const CompilationUnit&) = delete;
+
+  Function* find_function(const c10::QualifiedName& name) const {
+    auto it = dict_.find(name);
+    if (it == dict_.end()) {
+      return nullptr;
+    }
+    return functions_[it->second].get();
+  }
+
+  Function& get_function(const c10::QualifiedName& name) const {
+    if (auto r = find_function(name)) {
+      return *r;
+    }
+    TORCH_CHECK(false, "attempted to get undefined function ", name.name());
+  }
+
+  void set_optimized(bool o) {
+    TORCH_WARN(
+        "CompilationUnit::set_optimized() is deprecated and has no effect. "
+        "Please use setGraphExecutorOptimize()");
+  }
+
+  bool is_optimized() const {
+    TORCH_WARN(
+        "CompilationUnit::is_optimized() is deprecated and always returns true. "
+        "Please use getGraphExecutorOptimize()");
+    return true;
+  }
+
+  // for historic reasons, these are defined in ir_emitter.cpp
+  // Returns the list of Functions just defined.
+  std::vector<Function*> define(
+      const c10::optional<c10::QualifiedName>& prefix,
+      const std::vector<Property>& properties,
+      const std::vector<ResolverPtr>& propResolvers,
+      const std::vector<Def>& definitions,
+      const std::vector<ResolverPtr>&
+          defResolvers, /* determines how we handle free
+                           variables in each definition */
+      // if non-null, the first argument to each def is bound to this value
+      const Self* self,
+      // see [name mangling]
+      bool shouldMangle = false,
+      c10::optional<size_t> operator_set_version = c10::nullopt);
+
+  void define_hooks(
+      const c10::optional<c10::QualifiedName>& prefix,
+      const std::vector<Def>& hookDefs,
+      const std::vector<ResolverPtr>& hookResolvers,
+      const std::vector<Def>& preHookDefs,
+      const std::vector<ResolverPtr>& preHookResolvers,
+      const Self* self,
+      bool shouldMangle = false);
+
+  // same as above but parse the definitions from source
+  // Returns the list of Functions just defined.
+  std::vector<Function*> define(
+      // prefix namespace to put all the defined functions into
+      const c10::optional<c10::QualifiedName>& prefix,
+      const std::string& source,
+      const ResolverPtr& resolver,
+      const Self* self);
+
+  void define_interface(
+      const c10::QualifiedName& qualifiedName,
+      const ClassDef& classDef,
+      ResolverPtr rcb,
+      bool is_module = false);
+
+  Function* create_function(
+      c10::QualifiedName name,
+      std::shared_ptr<Graph> graph,
+      bool shouldMangle = false) {
+    if (shouldMangle) {
+      name = mangle(name);
+    }
+    auto fn = std::make_unique<GraphFunction>(
+        std::move(name), std::move(graph), nullptr);
+    auto ret = fn.get();
+    register_function(std::move(fn));
+    return ret;
+  }
+
+  std::vector<Function*> get_functions() const {
+    return fmap(functions_, [](const std::unique_ptr<Function>& fn) {
+      return fn.get();
+    });
+  }
+
+  /// Run a method from this compilation.
+  ///
+  /// For example:
+  /// @code
+  ///   IValue output = module->run("relu_script", a, b);
+  /// @endcode
+  ///
+  /// To compile a module from a source string, see torch::jit::compile
+  ///
+  /// @param method_name The name of the method to run
+  /// @param args Arguments to be passed to the method
+  /// @return An IValue containing the return value (or values if it is a
+  /// tuple) from the method
+  template <typename... Types>
+  IValue run_method(const c10::QualifiedName& method_name, Types&&... args) {
+    return get_function(method_name)({IValue(std::forward<Types>(args))...});
+  }
+
+  void drop_all_functions() {
+    dict_.clear();
+    functions_.clear();
+  }
+
+  /**
+   * Register a class as being owned by this compilation unit.
+   */
+  void register_type(c10::NamedTypePtr namedType) {
+    // TODO: class types cannot be redefined because we have no way right now
+    // of invalidating their methods. NamedTuples are fine though, since they
+    // don't have methods.
+    TORCH_CHECK(
+        0 == classDict_.count(*namedType->name()),
+        "class '",
+        namedType->name()->qualifiedName(),
+        "' already defined.");
+    classes_.push_back(std::move(namedType));
+    classDict_[*classes_.back()->name()] = classes_.size() - 1;
+  };
+
+  c10::ClassTypePtr get_class(const c10::QualifiedName& name) const {
+    auto type = get_type(name);
+    if (!type) {
+      return nullptr;
+    }
+    return type->cast<c10::ClassType>();
+  }
+
+  c10::InterfaceTypePtr get_interface(const c10::QualifiedName& name) const {
+    auto type = get_type(name);
+    if (!type) {
+      return nullptr;
+    }
+    return type->cast<c10::InterfaceType>();
+  }
+
+  c10::TupleTypePtr get_named_tuple(const c10::QualifiedName& name) const {
+    for (const auto& cls : classes_) {
+      if (cls->name()->qualifiedName() == name.qualifiedName()) {
+        return cls->expect<c10::TupleType>();
+      }
+    }
+    return nullptr;
+  }
+
+  c10::NamedTypePtr get_type(const c10::QualifiedName& name) const {
+    auto it = classDict_.find(name);
+    if (it == classDict_.end()) {
+      return nullptr;
+    }
+    return classes_[it->second];
+  }
+
+  // For testing: clear all Python-defined classes to ensure that unit tests
+  // have isolation.
+  void _clear_python_cu() {
+    // Delete all the associated class methods
+    for (const auto& type : classes_) {
+      if (auto cls = type->cast<c10::ClassType>()) {
+        for (auto method : cls->methods()) {
+          // Tombstone the method in the compilation unit: null out its slot
+          // in functions_ (indices into functions_ must stay stable) and
+          // erase it from the name lookup table.
+          auto it = dict_.find(method->qualname());
+          if (it != dict_.end()) {
+            functions_[it->second] = nullptr;
+            // Erase in our big lookup table
+            dict_.erase(it);
+          }
+        }
+        // Classes can have multiple pointers to the same hook,
+        // need to make sure to not delete it twice
+        std::unordered_set<Function*> hooks_to_delete;
+        for (const auto& hook : cls->getForwardHooks()) {
+          hooks_to_delete.insert(hook);
+        }
+        for (const auto& pre_hook : cls->getForwardPreHooks()) {
+          hooks_to_delete.insert(pre_hook);
+        }
+        for (const auto& hook : hooks_to_delete) {
+          // Tombstone the hook in the compilation unit.
+          auto it = dict_.find(hook->qualname());
+          if (it != dict_.end()) {
+            functions_[it->second] = nullptr;
+            // Erase in our big lookup table
+            dict_.erase(it);
+          }
+        }
+      }
+    }
+    classes_.clear();
+    classDict_.clear();
+  }
+
+  // [Internal Only] Remove method.
+  // Note: used for freezing.
+  void unsafeRemoveMethod(const c10::QualifiedName& method_name) {
+    auto it = dict_.find(method_name);
+    TORCH_CHECK(
+        it != dict_.end(),
+        "method '",
+        method_name.qualifiedName(),
+        "' does not exist.");
+    functions_[it->second] = nullptr;
+    dict_.erase(it);
+  }
+
+  // [name mangling] All code objects must have a unique qualified name in a
+  // CompilationUnit. In Python, sometimes functions won't have unique
+  // qualified names (for example, nested functions). So we mangle Python
+  // functions to ensure that they are uniquely named.
+  //
+  // We also use mangling to distinguish different Module instances. Since each
+  // Module is a singleton class instance, different instances of the same
+  // Python Module will have different types but the same qualified name.
+  c10::QualifiedName mangle(const c10::QualifiedName& name) const {
+    auto mangled = name;
+    while (get_type(mangled) || find_function(mangled)) {
+      mangled = mangler_.mangle(mangled);
+    }
+    return mangled;
+  }
+
+ private:
+  std::unique_ptr<Function> define(
+      const c10::optional<c10::QualifiedName>& prefix,
+      const Def& def,
+      const ResolverPtr& resolver,
+      const Self* self,
+      const std::unordered_map<std::string, Function*>& function_table,
+      bool shouldMangle = false,
+      FunctionType type = FunctionType::Method,
+      c10::optional<size_t> version = c10::nullopt) const;
+
+  // Define a property on \p self.
+  struct PropertyPair;
+  PropertyPair define_property(
+      const c10::optional<c10::QualifiedName>& prefix,
+      const Property& prop,
+      const ResolverPtr& resolver,
+      const Self* self,
+      const std::unordered_map<std::string, Function*>& function_table,
+      bool shouldMangle = false) const;
+
+  Function& register_function(std::unique_ptr<Function> fn) {
+    TORCH_CHECK(
+        0 == dict_.count(fn->qualname().qualifiedName()),
+        "method '",
+        fn->qualname().qualifiedName(),
+        "' already defined.");
+    functions_.emplace_back(std::move(fn));
+    dict_[functions_.back()->qualname()] = functions_.size() - 1;
+    return *functions_.back();
+  }
+  std::vector<std::unique_ptr<Function>> functions_;
+  // for fast lookup
+  std::unordered_map<c10::QualifiedName, size_t> dict_;
+  std::unordered_map<c10::QualifiedName, size_t> classDict_;
+
+  // [class ownership] Right now there are two relationships between classes
+  // and compilation units:
+  // 1. Classes have compilation units internally that hold their methods.
+  // 2. On load, the TypePtrs of any imported classes are owned by the main
+  // module's compilation unit.
+  std::vector<c10::NamedTypePtr> classes_;
+
+  mutable NameMangler mangler_;
+};
+
+// An owning pointer to a Function. Just a pair of a raw Function ptr and its
+// owning CU. We need this because pybind requires a ref-counted way to refer
+// to Functions.
+struct StrongFunctionPtr {
+  StrongFunctionPtr(std::shared_ptr<CompilationUnit> cu, Function* function)
+      : cu_(std::move(cu)), function_(function) {
+    TORCH_INTERNAL_ASSERT(cu_);
+    TORCH_INTERNAL_ASSERT(function_);
+  }
+  std::shared_ptr<CompilationUnit> cu_;
+  Function* function_;
+};
+
+namespace script {
+// We once had a `script::` namespace that was deleted. This is for backcompat
+// of the public API; new code should not use this type alias.
+using CompilationUnit = ::torch::jit::CompilationUnit;
+} // namespace script
+} // namespace torch::jit
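Usage sketch for the API above (not part of the vendored header): torch::jit::compile, declared in torch/jit.h, parses TorchScript source into a CompilationUnit whose functions can then be invoked via run_method(). The relu_script function below is a made-up example.

#include <torch/jit.h>   // torch::jit::compile
#include <torch/torch.h> // torch::ones

void compile_and_run() {
  std::shared_ptr<torch::jit::CompilationUnit> cu = torch::jit::compile(R"JIT(
    def relu_script(a, b):
        return torch.relu(a + b)
  )JIT");
  // run_method looks the function up by qualified name and invokes it.
  c10::IValue out = cu->run_method(
      c10::QualifiedName("relu_script"),
      torch::ones({2, 2}),
      torch::ones({2, 2}));
}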
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..51a8c0c9ea6780e3b4f3334663dff69d5651ff6f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h
@@ -0,0 +1,194 @@
+#pragma once
+
+#include <ATen/core/function.h>
+#include <torch/csrc/jit/ir/ir.h>
+#include <torch/csrc/jit/runtime/graph_executor.h>
+
+namespace torch::jit {
+
+struct TORCH_API GraphFunction : public Function {
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+  GraphFunction(
+      c10::QualifiedName name,
+      std::shared_ptr<Graph> graph,
+      std::function<void(GraphFunction&)> function_creator,
+      c10::optional<ExecutorExecutionMode> executor_execution_mode =
+          c10::nullopt)
+      : name_(std::move(name)),
+        graph_(std::move(graph)),
+        executor_execution_mode_(executor_execution_mode),
+        function_creator_(std::move(function_creator)) {}
+
+  bool isGraphFunction() const override {
+    return true;
+  }
+
+  void run(Stack& stack) override;
+
+  std::function<void(GraphFunction&)> function_creator() const {
+    return function_creator_;
+  }
+
+  c10::intrusive_ptr<c10::ivalue::Future> runAsync(
+      Stack& stack,
+      TaskLauncher taskLauncher = at::launch) override;
+
+  std::shared_ptr<Graph> graph() const {
+    return graph_;
+  }
+
+  std::shared_ptr<Graph> optimized_graph() const {
+    std::lock_guard<std::recursive_mutex> lock(compile_mutex);
+    auto& optimized_graph = optimized_graphs_[currentSpecialization()];
+    if (optimized_graph) {
+      return *optimized_graph;
+    }
+    optimized_graph = graph_->copy();
+    if (getGraphExecutorOptimize()) {
+      preoptimizeGraph(*optimized_graph, force_no_amp_);
+    }
+    return *optimized_graph;
+  }
+
+  const c10::QualifiedName& qualname() const override {
+    return name_;
+  }
+
+  // private/unstable api. sets the initial execution mode;
+  // will not affect the executor if there is an existing executor
+  // created for this function
+  void _set_initial_executor_execution_mode(ExecutorExecutionMode mode) {
+    executor_execution_mode_ = mode;
+  }
+  // private/unstable api. sets the flag of whether or not to ignore amp;
+  // will not affect the executor if there is an existing executor
+  // created for this function
+  void _set_ignore_amp(bool ignore_amp) {
+    force_no_amp_ = ignore_amp;
+  }
+
+  // if this isn't yet defined, run its method_creator function
+  void ensure_defined() override;
+
+  size_t num_inputs() const override {
+    return graph()->inputs().size();
+  }
+
+  Function& setSchema(FunctionSchema schema) override {
+    schema_ = std::make_unique<FunctionSchema>(std::move(schema));
+    return *this;
+  }
+
+  const FunctionSchema& getSchema() const override;
+
+  GraphExecutorState getDebugState() {
+    return get_executor().getDebugState();
+  }
+
+  bool is_optimized() const {
+    TORCH_WARN(
+        "GraphFunction::is_optimized() is deprecated and always returns true. "
+        "Please use getGraphExecutorOptimize()");
+    return true;
+  }
+
+  void check_single_output() {
+    TORCH_CHECK(
+        graph()->outputs().size() == 1,
+        "Methods (but not graphs in general) require a single output. "
+        "Use None/Tuple for 0 or 2+ outputs");
+  }
+
+  GraphExecutor& get_executor() {
+    ensure_defined();
+    std::lock_guard<std::recursive_mutex> lock(compile_mutex);
+    auto& executor = executors_[currentSpecialization()];
+    if (executor) {
+      return *executor;
+    }
+    check_single_output();
+    const std::string& name = name_.name();
+    std::shared_ptr<Graph> opt_graph = optimized_graph();
+    if (!executor_execution_mode_) {
+      executor = GraphExecutor(opt_graph, name);
+    } else {
+      executor = GraphExecutor(opt_graph, name, *executor_execution_mode_);
+    }
+    return *executor;
+  }
+
+  using Function::call;
+  bool call(
+      Stack& stack,
+      c10::optional<size_t> bailOut,
+      c10::function_ref<void(const Code&)> f) override {
+    f(get_executor().getPlanFor(stack, bailOut).code);
+    return true;
+  }
+
+  void clear_optimized_graphs() {
+    optimized_graphs_.fill(c10::nullopt);
+  }
+
+ private:
+  enum SpecializationKey {
+    AutocastOff,
+    CpuAutocastOn,
+    GpuAutocastOn,
+    CpuGpuAutocastOn,
+
+    // This provides the number of specializations
+    // (Must be last entry)
+    TotalCount
+  };
+
+  SpecializationKey currentSpecialization() const;
+
+ private:
+  c10::QualifiedName name_;
+  // The original, non-optimized graph
+  std::shared_ptr<Graph> graph_; // for debugging and for inlining
+
+  // allows users to specify Simple/Profiling Executor for function
+  // TODO: add more executors
+  mutable c10::optional<ExecutorExecutionMode> executor_execution_mode_;
+
+  // if invoked on a graph that has already traced through amp
+  // don't invoke amp pass
+  mutable bool force_no_amp_ = false;
+  // Optimized graph, computed lazily. Used for inlining.
+  mutable std::array<
+      c10::optional<std::shared_ptr<Graph>>,
+      SpecializationKey::TotalCount>
+      optimized_graphs_;
+
+  // GraphFunctions are invokable from multiple threads, so this lock needs to
+  // be held when we're initializing the graph executor for the first time or
+  // computing the optimized graph. We're using a reentrant mutex so that we
+  // don't need to worry about causing a deadlock by calling one method from
+  // another (e.g. optimized_graph() from get_executor()).
+  mutable std::recursive_mutex compile_mutex;
+
+  // executors_[0] - autocast off
+  // executors_[1] - autocast cpu on
+  // executors_[2] - autocast gpu on
+  // executors_[3] - autocast cpu & gpu on
+  std::array<c10::optional<GraphExecutor>, SpecializationKey::TotalCount>
+      executors_;
+
+  // an optional function that actually creates the method when
+  // ensure_defined() is called. This is used by the compiler so
+  // that it can construct methods out of order
+  std::function<void(GraphFunction&)> function_creator_;
+
+  // if absent, then we generate a default schema based on the graph
+  // mutable because getSchema caches the default schema if one is requested
+  // before a call to setSchema
+  mutable std::unique_ptr<FunctionSchema> schema_;
+};
+
+// Shorthands for dynamic_cast<GraphFunction*>.
+TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept;
+TORCH_API GraphFunction& toGraphFunction(Function&);
+TORCH_API const GraphFunction& toGraphFunction(const Function&);
+
+} // namespace torch::jit
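A small sketch of the casting helpers declared above (illustrative, not part of the header; the dump_graph wrapper is invented, and the Function& would typically come from CompilationUnit::get_function or Module::get_method().function()):

#include <torch/csrc/jit/api/function_impl.h>

void dump_graph(torch::jit::Function& f) {
  // tryToGraphFunction returns nullptr for functions that are not backed by a
  // graph (e.g. builtin operator functions), so check before dereferencing.
  if (auto* gf = torch::jit::tryToGraphFunction(f)) {
    gf->graph()->dump(); // print the original, non-optimized IR
  }
}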
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h
new file mode 100644
index 0000000000000000000000000000000000000000..afbbbfe0f4135167e1e3c09127a3bed9774f7e25
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h
@@ -0,0 +1,81 @@
+#pragma once
+
+#include <ATen/core/function.h>
+#include <ATen/core/ivalue.h>
+#include <ATen/core/stack.h>
+#include <torch/csrc/api/include/torch/imethod.h>
+#include <torch/csrc/jit/api/function_impl.h>
+
+namespace torch::jit {
+
+using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>;
+
+// A method in a module, e.g. f in:
+//
+// class M(ScriptModule):
+//   @script_method
+//   def f(self, x):
+//     ...
+// Note: because Method/Module are exposed to python these
+// classes use python method naming conventions
+struct TORCH_API Method : public torch::IMethod {
+  Method(ObjectPtr owner, Function* function);
+
+  // the module that contains this method.
+  Module owner() const;
+  void run(Stack& stack);
+  void run(Stack&& stack) {
+    run(stack);
+  }
+
+  c10::IValue operator()(
+      std::vector<c10::IValue> stack,
+      const Kwargs& kwargs = Kwargs()) const override;
+
+  // Run the method asynchronously. Invoking this function starts a JIT
+  // interpreter that executes ops inline, one by one, on the caller's thread.
+  // A model can use an async op, e.g. `fork`, to launch an asynchronous task,
+  // which will be run on the provided `taskLauncher`.
+  c10::intrusive_ptr<c10::ivalue::Future> run_async(
+      std::vector<c10::IValue> stack,
+      const Kwargs& kwargs = Kwargs(),
+      TaskLauncher taskLauncher = at::launch);
+
+  std::shared_ptr<Graph> graph() const {
+    return toGraphFunction(*function_).graph();
+  }
+
+  const std::string& name() const override {
+    return function_->name();
+  }
+
+  size_t num_inputs() const {
+    return function_->num_inputs();
+  }
+
+  GraphExecutor& get_executor() {
+    return toGraphFunction(*function_).get_executor();
+  }
+
+  Function& function() const {
+    return *function_;
+  }
+
+ private:
+  void setArgumentNames(std::vector<std::string>&) const override;
+
+  // Methods are uniquely owned by a single module. This raw pointer allows
+  // looking up the module.
+  ObjectPtr owner_;
+
+  // Underlying unbound function
+  Function* function_;
+};
+
+namespace script {
+// We once had a `script::` namespace that was deleted. This is for backcompat
+// of the public API; new code should not use this type alias.
+using Method = ::torch::jit::Method;
+} // namespace script
+
+} // namespace torch::jit
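Sketch of calling a Method (illustrative only; `module` is assumed to be a scripted module, e.g. obtained from torch::jit::load, whose forward takes a single tensor):

#include <torch/script.h>

c10::IValue call_forward(torch::jit::Module& module) {
  torch::jit::Method forward = module.get_method("forward");
  // operator() takes the arguments by value; `self` is bound automatically.
  return forward({torch::ones({2, 2})});
}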
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c49b695cb6b5dec57e45f851f5db5b82533e4af
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h
@@ -0,0 +1,685 @@
+#pragma once
+#include <c10/util/Exception.h>
+#include <torch/csrc/autograd/variable.h>
+#include <torch/csrc/jit/api/object.h>
+#include <torch/csrc/jit/frontend/source_range.h>
+#include <torch/csrc/jit/ir/ir.h>
+#include <torch/csrc/jit/ir/named_value.h>
+#include <torch/csrc/jit/runtime/argument_spec.h>
+#include <torch/csrc/jit/runtime/graph_executor.h>
+
+#include <torch/csrc/Export.h>
+#include <torch/csrc/api/include/torch/ordered_dict.h>
+#include <torch/csrc/jit/api/compilation_unit.h>
+
+#include <ATen/core/function_schema.h>
+#include <ATen/core/qualified_name.h>
+#include <c10/util/ArrayRef.h>
+#include <c10/util/Optional.h>
+#include <c10/util/irange.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <ostream>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+// This file contains classes which assist in desugaring Python style
+// modules and their methods into flattened graphs which don't have any
+// function calls.
+
+namespace torch::jit {
+
+using ::c10::Argument;
+using ::c10::FunctionSchema;
+using ::c10::QualifiedName;
+// Map which stores filename to content.
+using ExtraFilesMap = std::unordered_map<std::string, std::string>;
+
+using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
+
+struct Module;
+
+template <typename T>
+struct slot_list_impl;
+
+template <typename T>
+struct Named {
+  std::string name;
+  T value;
+};
+
+using NameModule = Named<Module>;
+using NameValue = Named<IValue>;
+using NameTensor = Named<at::Tensor>;
+
+namespace detail {
+struct TORCH_API ModulePolicy;
+struct TORCH_API ParameterPolicy;
+struct TORCH_API AttributePolicy;
+struct TORCH_API BufferPolicy;
+template <typename P>
+struct NamedPolicy;
+} // namespace detail
+
+using module_list = slot_list_impl<detail::ModulePolicy>;
+using named_module_list =
+    slot_list_impl<detail::NamedPolicy<detail::ModulePolicy>>;
+
+using parameter_list = slot_list_impl<detail::ParameterPolicy>;
+using named_parameter_list =
+    slot_list_impl<detail::NamedPolicy<detail::ParameterPolicy>>;
+
+using attribute_list = slot_list_impl<detail::AttributePolicy>;
+using named_attribute_list =
+    slot_list_impl<detail::NamedPolicy<detail::AttributePolicy>>;
+
+using buffer_list = slot_list_impl<detail::BufferPolicy>;
+using named_buffer_list =
+    slot_list_impl<detail::NamedPolicy<detail::BufferPolicy>>;
+
+using ModuleLookup = std::function<Module(const std::vector<std::string>&)>;
+
+struct TORCH_API Module : public Object {
+  explicit Module(c10::QualifiedName class_name);
+  Module(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
+  Module() = default;
+  Module(const Module&) = default;
+  Module& operator=(const Module&) = default;
+  Module(Module&&) noexcept = default;
+  Module& operator=(Module&&) noexcept = default;
+  Module(
+      c10::QualifiedName,
+      std::shared_ptr<CompilationUnit> cu,
+      bool shouldMangle = false);
+  Module(ModulePtr module_value) : Object(std::move(module_value)) {}
+  ~Module() = default;
+
+  void set_optimized(bool o) {
+    TORCH_WARN(
+        "Module::set_optimized() is deprecated and has no effect. "
+        "Please use setGraphExecutorOptimize()");
+  }
+
+  bool is_optimized() const {
+    TORCH_WARN(
+        "Module::is_optimized() is deprecated and always returns true. "
+        "Please use getGraphExecutorOptimize()");
+    return true;
+  }
+
+  IValue forward(std::vector<IValue> inputs, const Kwargs& kwargs = Kwargs()) {
+    return get_method("forward")(std::move(inputs), kwargs);
+  }
+
+  // In script modules, buffers are Tensor attributes that are _not_ registered
+  // as parameters. This is different from nn.Module, where there is a special
+  // register_buffer method. With this simplification, we only need to track
+  // whether a slot is a parameter to be able to classify it.
+  void register_buffer(const std::string& name, at::Tensor v) {
+    bool is_param = false;
+    bool is_buffer = true;
+    std::lock_guard<std::mutex> lock(*register_mutex_);
+    type()->addOrCheckAttribute(name, TensorType::get(), is_param, is_buffer);
+    _ivalue()->setAttr(name, std::move(v));
+  }
+
+  void register_parameter(
+      const std::string& name,
+      at::Tensor v,
+      bool is_buffer) {
+    std::lock_guard<std::mutex> lock(*register_mutex_);
+    type()->addOrCheckAttribute(name, TensorType::get(), !is_buffer, is_buffer);
+    _ivalue()->setAttr(name, std::move(v));
+  }
+
+  void register_attribute(
+      const std::string& name,
+      const TypePtr& t,
+      IValue v,
+      bool is_param = false,
+      bool is_buffer = false) {
+    type()->addOrCheckAttribute(name, t, is_param, is_buffer);
+    _ivalue()->setAttr(name, std::move(v));
+  }
+
+  void register_module(const std::string& name, const Module& module) {
+    type()->addOrCheckAttribute(name, module.type());
+    _ivalue()->setAttr(name, module._ivalue());
+  }
+
+  void apply(const std::function<void(Module&)>& fn);
+
+  buffer_list buffers(bool recurse = true) const;
+  named_buffer_list named_buffers(bool recurse = true) const;
+
+  module_list children() const; // direct modules
+  named_module_list named_children() const;
+  module_list modules() const; // all modules, including this one, recursively
+  named_module_list named_modules() const;
+
+  // all tensors involved in gradient optimization
+  parameter_list parameters(bool recurse = true) const;
+  named_parameter_list named_parameters(bool recurse = true) const;
+
+  // all members of the object, similar to iterating over dir(obj) in python
+  attribute_list attributes(bool recurse = true) const;
+  named_attribute_list named_attributes(bool recurse = true) const;
+
+  void dump(
+      bool print_method_bodies,
+      bool print_attr_values,
+      bool print_param_values) const;
+
+  std::string dump_to_str(
+      bool print_method_bodies,
+      bool print_attr_values,
+      bool print_param_values) const;
+
+  /// Enables "training" mode.
+  void train(bool on = true);
+  /// Calls train(false) to enable "eval" mode.
+  /// Do not override this method, override `train()` instead.
+  void eval() {
+    train(/*on=*/false);
+  }
+  /// True if the module is in training mode.
+  bool is_training() const {
+    return attr("training", true).toBool();
+  }
+
+  /// Recursively casts all parameters to the given `dtype` and `device`.
+  ///
+  /// If `non_blocking` is true and the source is in pinned memory and
+  /// destination is on the GPU or vice versa, the copy is performed
+  /// asynchronously with respect to the host. Otherwise, the argument has no
+  /// effect.
+  void to(at::Device device, at::ScalarType dtype, bool non_blocking = false);
+
+  /// Recursively casts all parameters to the given dtype.
+  ///
+  /// If `non_blocking` is true and the source is in pinned memory and
+  /// destination is on the GPU or vice versa, the copy is performed
+  /// asynchronously with respect to the host. Otherwise, the argument has no
+  /// effect.
+  void to(at::ScalarType dtype, bool non_blocking = false);
+
+  /// Recursively moves all parameters to the given device.
+  ///
+  /// If `non_blocking` is true and the source is in pinned memory and
+  /// destination is on the GPU or vice versa, the copy is performed
+  /// asynchronously with respect to the host. Otherwise, the argument has no
+  /// effect.
+  void to(at::Device device, bool non_blocking = false);
+
+  void save(
+      std::ostream& out,
+      const ExtraFilesMap& extra_files = ExtraFilesMap()) const;
+
+  void save(
+      const std::string& filename,
+      const ExtraFilesMap& extra_files = ExtraFilesMap()) const;
+
+  void _save_for_mobile(
+      std::ostream& out,
+      const ExtraFilesMap& extra_files = ExtraFilesMap(),
+      bool save_mobile_debug_info = false,
+      bool use_flatbuffer = false) const;
+
+  void _save_for_mobile(
+      const std::string& filename,
+      const ExtraFilesMap& extra_files = ExtraFilesMap(),
+      bool save_mobile_debug_info = false,
+      bool use_flatbuffer = false) const;
+
+  Module copy() const;
+
+  Module deepcopy(c10::optional<at::Device> device = c10::nullopt) const;
+
+  // Clones both the underlying `ClassType` and the module instance (data);
+  // this function creates a new `ClassType` and returns a new instance that
+  // has the same data as the current instance but with the new type. Shared
+  // ClassTypes will be preserved as well.
+  Module clone(bool inplace = false) const;
+
+  // Clones both the underlying `ClassType` and the module instance (data);
+  // this function creates a new `ClassType` and returns a new instance that
+  // has the same data as the current instance but with the new type. Shared
+  // ClassTypes will be preserved as well. Also allows the caller to specify a
+  // set of method and attribute names to not clone.
+  Module clone(
+      bool inplace,
+      const std::unordered_set<std::string>& ignored_method,
+      const std::unordered_set<std::string>& ignored_attributes) const;
+
+  void clone_method(const Module& orig, const std::string& name);
+
+  IValue operator()(std::vector<IValue> inputs);
+
+  template <typename... Types>
+  IValue create_class(const c10::QualifiedName& name, Types&&... args) const {
+    return create_class(name, {IValue(std::forward<Types>(args))...});
+  }
+
+  IValue create_class(const c10::QualifiedName& name, Stack stack) const;
+
+  inline bool operator==(const Module& y) const noexcept {
+    return _ivalue() == y._ivalue();
+  }
+
+  void set_delete_memory(std::shared_ptr<char> delete_mem) {
+    mem_to_delete_ = std::move(delete_mem);
+  }
+
+  // A set of functions to maintain input shapes through torch.jit.save and
+  // torch.jit.load. It only works on tensors and lists/dicts of tensors
+  // because tracing is only supported by these types.
+  void store_traced_inputs(std::string func_name, std::vector<IValue> inputs) {
+    if (inputs.size() == 0) {
+      return;
+    }
+    auto c10_inputs = c10::impl::GenericList(AnyType::get());
+    for (IValue& value : inputs) {
+      // Not checking whether this is a traceable type, as that is already
+      // checked higher up in the stack, and changing that would require a
+      // larger restructuring.
+      c10_inputs.emplace_back(std::move(value));
+    }
+    traced_inputs_.insert_or_assign(func_name, c10_inputs);
+  }
+
+  c10::Dict<std::string, c10::impl::GenericList> retrieve_traced_inputs()
+      const {
+    return traced_inputs_;
+  }
+
+ private:
+  Module clone_impl(
+      std::unordered_map<TypePtr, TypePtr>& type_remap,
+      bool inplace,
+      IValue::HashAliasedIValueMap memo,
+      const std::unordered_set<std::string>& ignored_methods,
+      const std::unordered_set<std::string>& ignored_attributes) const;
+
+  void clone_method(
+      const Module& orig,
+      const Function& method,
+      const std::unordered_map<TypePtr, TypePtr>& type_remap);
+
+  c10::QualifiedName getNameForMethod(std::string basename) const {
+    return QualifiedName(*type()->name(), std::move(basename));
+  }
+
+  void to_impl(
+      const c10::optional<at::Device>& device,
+      const c10::optional<at::ScalarType>& dtype,
+      bool non_blocking);
+
+  // Extra handle for the module to delete when itself is deleted
+  std::shared_ptr<char> mem_to_delete_;
+
+  // Map of function names to the traced inputs that they have been traced with
+  c10::Dict<std::string, c10::impl::GenericList> traced_inputs_;
+
+  // Mutex to keep registering a buffer or parameter thread safe.
+  std::shared_ptr<std::mutex> register_mutex_ = std::make_shared<std::mutex>();
+};
+
+// C++ equivalent api of `torch.jit.freeze`. See documentation there for
+// details.
+TORCH_API Module freeze(
+    const Module& module,
+    const c10::optional<std::vector<std::string>>& preserved_attrs =
+        c10::nullopt,
+    bool optimize_numerics = true);
+
+// C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation
+// there for details.
+TORCH_API Module optimize_for_inference(
+    Module& module,
+    const std::vector<std::string>& other_methods = {});
+
+enum class FusionBehavior { STATIC, DYNAMIC };
+
+using FusionStrategy = std::vector<std::pair<FusionBehavior, size_t>>;
+// clang-format off
+/*
+Sets the type and number of specializations that can occur during fusion.
+
+Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC
+and depth is an integer.
+
+Behavior - static vs dynamic:
+  In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
+  based on some initial profiling runs.
+  In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
+  shapes are possible.
+
+In both cases, we also recompile on new striding behavior, device, or dtype.
+
+Behavior - fallback functions & depth:
+  When an input doesn't match the format required by the specialized compiled op, it will run
+  a fallback function. Fallback functions are recursively compiled and specialized based
+  on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided
+  to limit the number of specializations that can be compiled, before giving up on recompiling
+  and falling back to a completely un-fused, un-specialized implementation.
+
+The list of (type, depth) pairs controls the type of specializations and the number of
+specializations. For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first
+two specializations will use static fusion, the following two specializations will use
+dynamic fusion, and any inputs that satisfy none of the 4 options will run an
+unfused implementation.
+
+NB: in the future, as more fusion backends are added, there may be more granular
+apis for specific fusers.
+*/
+// clang-format on
+TORCH_API FusionStrategy getFusionStrategy();
+// returns previous strategy
+TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy);
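A short usage sketch for the fusion-strategy API declared just above (illustrative, not part of the header; the depths are arbitrary — note that setFusionStrategy takes its argument by non-const reference and returns the previously active strategy):

#include <torch/csrc/jit/api/module.h>

void configure_fusion() {
  torch::jit::FusionStrategy strategy = {
      {torch::jit::FusionBehavior::STATIC, 2},   // first two specializations: fixed shapes
      {torch::jit::FusionBehavior::DYNAMIC, 2}}; // next two: variable shapes
  // Returns the strategy that was active before this call.
  torch::jit::FusionStrategy previous = torch::jit::setFusionStrategy(strategy);
}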
+
+namespace detail {
+
+struct TORCH_API SlotCursor {
+  Module module_;
+  int64_t i_; // slot offset, -1 indicates the module itself
+};
+
+} // namespace detail
+
+// This iterator allows the (optionally recursive) enumeration of
+// the members of a Module. It performs a depth-first pre-order
+// traversal of the module. The Policy template parameter determines
+// which slots of the object should be included. For instance,
+// when iterating parameters, we return the parameter tensors,
+// but skip modules, buffers, and other attributes.
+// See ModulePolicy for comments about the Policy object's API.
+template <typename Policy>
+struct slot_iterator_impl {
+  using SlotCursor = detail::SlotCursor;
+  using value_type = typename Policy::value_type;
+  slot_iterator_impl(
+      Module root,
+      bool recurse, // if true, do a depth-first search, otherwise, just look
+                    // at slots of root
+      bool return_module) // if true include root itself as the first thing
+                          // visited (used in modules())
+      : cursors_({SlotCursor{std::move(root), return_module ? -1 : 0}}),
+        recurse_(recurse) {
+    // advance iterator to first valid element (or the end, if empty)
+    while_not_valid_next();
+  }
+  // empty cursors_, represents end of iteration
+  slot_iterator_impl() : recurse_(false) {}
+  value_type operator*() const {
+    return Policy::create(cursors_, cur());
+  }
+  value_type operator->() const {
+    return **this;
+  }
+  slot_iterator_impl& operator++() {
+    next_valid();
+    return *this;
+  }
+  slot_iterator_impl operator++(int) {
+    // this is really expensive, should we delete it so people don't use it
+    // instead of prefix?
+    slot_iterator_impl old = *this;
+    ++(*this);
+    return old;
+  }
+
+ private:
+  // return_module() is a corner case where instead of returning a submodule
+  // of root, we are returning root itself, because we are iterating modules(),
+  // which contains the root module itself.
+  // It is represented with a single SlotCursor whose index is -1.
+  bool return_module() const {
+    return top().i_ == -1;
+  }
+  const SlotCursor& top() const {
+    return cursors_.back();
+  }
+  SlotCursor& top() {
+    return cursors_.back();
+  }
+  IValue cur() const {
+    return return_module() ? top().module_._ivalue()
+                           : top().module_._ivalue()->getSlot(top().i_);
+  }
+
+  // advance to the next slot in a depth first pre-order traversal of the
+  // module's slots. This function does not guarantee the next slot is a
+  // valid element of the iteration. That is done by valid().
+  // invariant: !cursors_.empty()
+  void next() {
+    // we just returned the module itself, advance i_ to 0 so we are now
+    // at the first slot of the module.
+    if (return_module()) {
+      ++top().i_;
+      return;
+    }
+    // the last traversal action advanced beyond the number of slots in the
+    // module so continue the iteration in the parent.
+    if (top().i_ >= int64_t(top().module_._ivalue()->type()->numAttributes())) {
+      cursors_.pop_back();
+      if (!cursors_.empty()) {
+        ++top().i_;
+      }
+      return;
+    }
+    // if the current thing is a module, we have to scan it for recursive
+    // traversals. We do this by adding a new SlotCursor to track the
+    // traversal.
+    if (recurse_ &&
+        top().module_._ivalue()->type()->getAttribute(top().i_)->is_module()) {
+      cursors_.emplace_back(SlotCursor{cur().toModule(), 0});
+      return;
+    }
+    // common case: advance to the next slot.
+    ++top().i_;
+  }
+  // is the current position of the iterator a valid one?
+  // otherwise, we have to continue advancing.
+  bool valid() const {
+    return top().i_ <
+        int64_t(top().module_._ivalue()->type()->numAttributes()) &&
+        Policy::valid(
+            top().module_._ivalue()->type(),
+            top().i_,
+            top().module_._ivalue()->getSlot(top().i_));
+  }
+  void while_not_valid_next() {
+    // advance iteration until we are either at the end (cursors_.empty())
+    // or in a valid state. return_module() is a special case,
+    // and is always considered valid, regardless of Policy, because it is
+    // only true when we are iterating modules.
+    while (!cursors_.empty() && !return_module() && !valid()) {
+      next();
+    }
+  }
+  void next_valid() {
+    // avoid crashing if this is empty
+    if (cursors_.empty()) {
+      return;
+    }
+    // advance to next element, which is maybe not valid
+    next();
+    while_not_valid_next();
+  }
+
+  std::vector<SlotCursor> cursors_;
+  bool recurse_;
+
+  friend inline bool operator!=(
+      const slot_iterator_impl<Policy>& a,
+      const slot_iterator_impl<Policy>& b) {
+    // we are finished iteration when we have no more iteration SlotCursors.
+    // end is always an empty iterator with no cursors.
+    return (a.cursors_.empty() != b.cursors_.empty());
+  }
+};
+
+// This type represents lists of parameters, attributes, and
+// submodules contained in the module. It is abstract because
+// they are not stored directly in std::vectors but inside the
+// module's IValue object itself.
+template <typename Policy>
+struct slot_list_impl {
+  using iterator = slot_iterator_impl<Policy>;
+  using const_iterator = slot_iterator_impl<Policy>;
+  using value_type = typename iterator::value_type;
+  slot_iterator_impl<Policy> begin() const {
+    return slot_iterator_impl<Policy>(module_, recurse_, return_module_);
+  }
+  slot_iterator_impl<Policy> end() const {
+    return slot_iterator_impl<Policy>();
+  }
+  size_t size() const {
+    if (!size_) {
+      size_ = size_t(0);
+      // NOLINTNEXTLINE(clang-diagnostic-unused-variable)
+      for (const value_type& s : *(this)) {
+        (void)s; // Suppress unused variable warning
+        ++*size_;
+      }
+    }
+    return *size_;
+  }
+
+  slot_list_impl(Module module, bool recurse, bool return_module)
+      : module_(std::move(module)),
+        recurse_(recurse),
+        return_module_(return_module),
+        size_(c10::nullopt) {
+    if (!recurse && !return_module && Policy::all_slots) {
+      size_ = module_.num_slots();
+    }
+  }
+
+ private:
+  Module module_;
+  bool recurse_;
+  bool return_module_;
+  // size of this list, cached on first request
+  // when we need to filter the slot list
+  mutable c10::optional<size_t> size_;
+  friend struct Module;
+};
+
+namespace detail {
+
+// slot_iterator_impl always iterates over all the slots in a module;
+// the Policy template argument determines which slots should be returned
+// and their types
+struct TORCH_API ModulePolicy {
+  // the type of the value being returned
+  using value_type = Module;
+
+  // the logic for creating the type being returned, given the raw IValue
+  // of that object.
+  static value_type create(
+      const std::vector<detail::SlotCursor>& cursors,
+      IValue v) {
+    return Module(std::move(v).toObject());
+  }
+  // is slot i in typ something that this iterator should return, otherwise,
+  // we skip it.
+  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
+    return typ->getAttribute(i)->is_module();
+  }
+  // are we going to return everything? If so, we can optimize the calculation
+  // of the size of the list.
+  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
+};
+
+struct TORCH_API ParameterPolicy {
+  using value_type = at::Tensor;
+  static value_type create(
+      const std::vector<detail::SlotCursor>& cursors,
+      IValue v) {
+    return std::move(v).toTensor();
+  }
+  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
+    return typ->is_parameter(i) && v.isTensor();
+  }
+  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
+};
+
+struct TORCH_API BufferPolicy {
+  using value_type = at::Tensor;
+  static value_type create(
+      const std::vector<detail::SlotCursor>& cursors,
+      IValue v) {
+    return std::move(v).toTensor();
+  }
+  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
+    return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) &&
+        typ->is_buffer(i);
+  }
+  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false;
+};
+
+struct TORCH_API AttributePolicy {
+  using value_type = IValue;
+  static value_type create(
+      const std::vector<detail::SlotCursor>& cursors,
+      IValue v) {
+    return v;
+  }
+  static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) {
+    return true;
+  }
+  static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = true;
+};
+
+// take a Policy object, and make a version of it that returns the slot
+// along with the fully qualified name of that slot. This is used for the
+// named_ variants like named_parameters().
+template <typename Policy>
+struct NamedPolicy {
+  using value_type = Named<typename Policy::value_type>;
+  static value_type create(
+      const std::vector<detail::SlotCursor>& cursors,
+      IValue v) {
+    std::string name;
+    if (cursors.size() == 1) {
+      name = (cursors.back().i_ == -1) ? "" : nameFragment(cursors.back());
+    } else {
+      std::ostringstream ss;
+      for (const auto i : c10::irange(cursors.size())) {
+        if (i > 0) {
+          ss << ".";
+        }
+        ss << nameFragment(cursors[i]);
+      }
+      name = ss.str();
+    }
+    return value_type{std::move(name), Policy::create(cursors, std::move(v))};
+  }
+  static bool valid(const ClassTypePtr& t, size_t i, const IValue& v) {
+    return Policy::valid(t, i, v);
+  }
+  static constexpr bool all_slots = Policy::all_slots;
+
+ private:
+  static std::string nameFragment(const detail::SlotCursor& f) {
+    return f.module_.type()->getAttributeName(f.i_);
+  }
+};
+
+} // namespace detail
+
+TORCH_API bool& getInlineEverythingMode();
+
+namespace script {
+// We once had a `script::` namespace that was deleted. This is for backcompat
+// of the public API; new code should not use this type alias.
+using Module = ::torch::jit::Module;
+using ExtraFilesMap = ::torch::jit::ExtraFilesMap;
+} // namespace script
+
+} // namespace torch::jit
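To make the registration and iteration APIs above concrete, a hand-built module (sketch only; the class name "demo" and attribute names are arbitrary, and a module built this way has attributes but no compiled methods):

#include <torch/script.h>
#include <iostream>

void build_and_inspect() {
  torch::jit::Module m(c10::QualifiedName("demo"));
  m.register_parameter("weight", torch::randn({4, 4}), /*is_buffer=*/false);
  m.register_buffer("running_mean", torch::zeros({4}));
  // named_parameters() is driven by the NamedPolicy machinery defined above.
  for (const torch::jit::NameTensor& p : m.named_parameters(/*recurse=*/true)) {
    std::cout << p.name << ": " << p.value.sizes() << '\n';
  }
}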
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h
new file mode 100644
index 0000000000000000000000000000000000000000..7ccacf385be538f8f8e2ad738745e6874ce9ea62
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h
@@ -0,0 +1,200 @@
+#pragma once
+
+#include <ATen/core/functional.h>
+#include <ATen/core/ivalue.h>
+#include <c10/util/Optional.h>
+#include <torch/csrc/jit/api/method.h>
+
+#include <utility>
+
+namespace torch::jit {
+
+struct Resolver;
+using ResolverPtr = std::shared_ptr<Resolver>;
+
+using ObjectPtr = c10::intrusive_ptr<c10::ivalue::Object>;
+
+// Throw this in C++ land if `attr` fails. This will be converted to a Python
+// AttributeError by the Python binding code
+class ObjectAttributeError : public std::runtime_error {
+ public:
+  ObjectAttributeError(const std::string& what) : std::runtime_error(what) {}
+};
+
+struct TORCH_API Object {
+  Object() = default;
+  Object(const Object&) = default;
+  Object& operator=(const Object&) = default;
+  Object(Object&&) noexcept = default;
+  Object& operator=(Object&&) noexcept = default;
+  Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {}
+  Object(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
+  Object(
+      c10::QualifiedName,
+      std::shared_ptr<CompilationUnit> cu,
+      bool shouldMangle = false);
+
+  ObjectPtr _ivalue() const {
+    TORCH_INTERNAL_ASSERT(_ivalue_);
+    return _ivalue_;
+  }
+
+  c10::ClassTypePtr type() const {
+    return _ivalue()->type();
+  }
+
+  struct Property {
+    std::string name;
+    Method getter_func;
+    c10::optional<Method> setter_func;
+  };
+
+  void setattr(const std::string& name, c10::IValue v) {
+    if (_ivalue()->type()->hasConstant(name)) {
+      TORCH_CHECK(
+          false,
+          "Can't set constant '",
+          name,
+          "' which has value:",
+          _ivalue()->type()->getConstant(name));
+    } else if (auto slot = _ivalue()->type()->findAttributeSlot(name)) {
+      const c10::TypePtr& expected = _ivalue()->type()->getAttribute(*slot);
+      TORCH_CHECK(
+          v.type()->isSubtypeOf(*expected),
+          "Expected a value of type '",
+          expected->repr_str(),
+          "' for field '",
+          name,
+          "', but found '",
+          v.type()->repr_str(),
+          "'");
+      _ivalue()->setSlot(*slot, std::move(v));
+    } else {
+      TORCH_CHECK(false, "Module has no attribute '", name, "'");
+    }
+  }
+
+  c10::IValue attr(const std::string& name) const {
+    if (auto r = _ivalue()->type()->findAttributeSlot(name)) {
+      return _ivalue()->getSlot(*r);
+    }
+    if (auto r = _ivalue()->type()->findConstantSlot(name)) {
+      return _ivalue()->type()->getConstant(*r);
+    }
+    std::stringstream err;
+    err << _ivalue()->type()->repr_str() << " does not have a field with name '"
+        << name.c_str() << "'";
+    throw ObjectAttributeError(err.str());
+  }
+
+  c10::IValue attr(const std::string& name, c10::IValue or_else) const {
+    if (auto r = _ivalue()->type()->findAttributeSlot(name)) {
+      return _ivalue()->getSlot(*r);
+    }
+    if (auto r = _ivalue()->type()->findConstantSlot(name)) {
+      return _ivalue()->type()->getConstant(*r);
+    }
+    return or_else;
+  }
+
+  bool hasattr(const std::string& name) const {
+    return _ivalue()->type()->hasAttribute(name) ||
+        _ivalue()->type()->hasConstant(name);
+  }
+
+  // each object owns its methods. The reference returned here
+  // is guaranteed to stay valid until this module has been destroyed
+  Method get_method(const std::string& name) const {
+    if (auto method = find_method(name)) {
+      return *method;
+    }
+    AT_ERROR("Method '", name, "' is not defined.");
+  }
+
+  const std::vector<Method> get_methods() const {
+    return c10::fmap(type()->methods(), [&](Function* func) {
+      return Method(_ivalue(), func);
+    });
+  }
+
+  bool has_property(const std::string& name) const {
+    for (const auto& prop : type()->properties()) {
+      if (prop.name == name) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  const Property get_property(const std::string& name) const {
+    for (const auto& prop : type()->properties()) {
+      if (prop.name == name) {
+        c10::optional<Method> setter = c10::nullopt;
+        if (prop.setter) {
+          setter = Method(_ivalue(), prop.setter);
+        }
+        return Property{
+            prop.name, Method(_ivalue(), prop.getter), std::move(setter)};
+      }
+    }
+    AT_ERROR("Property '", name, "' is not defined.");
+  }
+
+  const std::vector<Property> get_properties() const {
+    return c10::fmap(type()->properties(), [&](ClassType::Property prop) {
+      c10::optional<Method> setter = c10::nullopt;
+      if (prop.setter) {
+        setter = Method(_ivalue(), prop.setter);
+      }
+      return Property{
+          std::move(prop.name),
+          Method(_ivalue(), prop.getter),
+          std::move(setter)};
+    });
+  }
+
+  c10::optional<Method> find_method(const std::string& basename) const;
+
+  /// Run a method from this module.
+  ///
+  /// For example:
+  /// @code
+  ///   IValue output = module->run("relu_script", a, b);
+  /// @endcode
+  ///
+  /// To compile a module from a source string, see torch::jit::compile
+  ///
+  /// @param method_name The name of the method to run
+  /// @param args Arguments to be passed to the method
+  /// @return An IValue containing the return value (or values if it is a
+  /// tuple) from the method
+  template <typename... Types>
+  IValue run_method(const std::string& method_name, Types&&... args) {
+    return get_method(method_name)({IValue(std::forward<Types>(args))...});
+  }
+
+  // so that C++ users can easily add methods
+  void define(const std::string& src, const ResolverPtr& resolver = nullptr);
+
+  size_t num_slots() const {
+    return _ivalue()->slots().size();
+  }
+
+  // shallow copy the object
+  Object copy() const;
+
+  // Copies all the attributes of the object recursively without creating a new
+  // `ClassType`, including deepcopy of Tensors
+  Object deepcopy() const;
+
+ private:
+  // mutable because we lazily initialize in module_object.
+  mutable ObjectPtr _ivalue_;
+};
+
+namespace script {
+// We once had a `script::` namespace that was deleted. This is for backcompat
+// of the public API; new code should not use this type alias.
+using Object = ::torch::jit::Object;
+} // namespace script
+} // namespace torch::jit
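Sketch of the attribute accessors above (illustrative; "training" is present on script modules, while the "version" attribute is invented here to show the defaulted overload):

#include <torch/script.h>

void toggle_training(torch::jit::Module& m) {
  if (m.hasattr("training")) {
    bool training = m.attr("training").toBool();
    m.setattr("training", !training); // type-checked against the slot's declared type
  }
  // The two-argument attr() returns the fallback instead of throwing
  // ObjectAttributeError when the attribute is missing.
  c10::IValue version = m.attr("version", /*or_else=*/c10::IValue(0));
}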
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h
new file mode 100644
index 0000000000000000000000000000000000000000..5aae642fa5517b8dd518117682734f24404c4ee7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h
@@ -0,0 +1,119 @@
+#pragma once
+
+#include <ATen/core/builtin_function.h>
+#include <ATen/core/stack.h>
+#include <torch/csrc/jit/backends/backend_interface.h>
+#include <torch/custom_class.h>
+
+namespace torch {
+namespace jit {
+namespace {
+// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
+inline c10::FunctionSchema getIsAvailableSchema() {
+  c10::Argument self("self", c10::AnyType::get());
+  c10::Argument available("available", c10::BoolType::get());
+  c10::FunctionSchema preprocessor_schema(
+      "is_available",
+      /*overload_name=*/"",
+      /*arguments=*/{self},
+      /*returns=*/{available});
+  return preprocessor_schema;
+}
+
+constexpr static auto kBackendsNamespace = "__backends__";
+
+// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
+inline c10::FunctionSchema getCompileSchema() {
+  c10::Argument self("self", c10::AnyType::get());
+  c10::Argument mod("processed", c10::AnyType::get());
+  auto any_dict_ty =
+      c10::DictType::create(c10::StringType::get(), c10::AnyType::get());
+  c10::Argument method_compile_spec("method_compile_spec", any_dict_ty);
+  c10::Argument handles("handles", any_dict_ty);
+
+  c10::FunctionSchema compile_schema(
+      "compile",
+      /*overload_name=*/"",
+      /*arguments=*/{self, mod, method_compile_spec},
+      /*returns=*/{handles});
+  return compile_schema;
+}
+
+// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
+inline c10::FunctionSchema getExecuteSchema() {
+  auto any_list_ty = c10::ListType::create(c10::AnyType::get());
+  c10::Argument self("self", c10::AnyType::get());
+  c10::Argument handle("handle", c10::AnyType::get());
+  c10::Argument input("input", any_list_ty);
+  c10::Argument output("output", any_list_ty);
+  return c10::FunctionSchema(
+      "execute",
+      /*overload_name=*/"",
+      /*arguments=*/{self, handle, input},
+      /*returns=*/{output});
+}
+
+template <typename TBackendInterface>
+std::function<void(Stack&)> getIsAvailableFunc() {
+  return [](Stack& stack) {
+    auto self = pop(stack).toCustomClass<TBackendInterface>();
+    auto ret = self->is_available();
+    push(stack, ret);
+  };
+}
+
+template <typename TBackendInterface>
+std::function<void(Stack&)> getCompileFunc() {
+  return [](Stack& stack) {
+    auto method_compile_spec = pop(stack).toGenericDict();
+    auto processed = pop(stack);
+    auto self = pop(stack).toCustomClass<TBackendInterface>();
+    auto ret = self->compile(processed, method_compile_spec);
+    push(stack, ret);
+  };
+}
+
+template <typename TBackendInterface>
+std::function<void(Stack&)> getExecuteFunc() {
+  return [](Stack& stack) {
+    auto args = pop(stack);
+    auto handle = pop(stack);
+    auto self = pop(stack);
+    auto backend = self.toCustomClass<TBackendInterface>();
+    auto res = backend->execute(handle, args.toList());
+    push(stack, res);
+  };
+}
+} // namespace
+
+// Static registration API for backends.
+template <class TBackendInterface>
+class backend {
+  static_assert(
+      std::is_base_of<PyTorchBackendInterface, TBackendInterface>::value,
+      "torch::jit::backend<T> requires T to inherit from PyTorchBackendInterface");
+  std::string backend_name_;
+
+ public:
+  // Registers a new backend with \p name. (The backend's preprocessing
+  // function is registered separately; see backend_preprocess_register.)
+  backend(const std::string& name) : backend_name_(name) {
+    static auto cls =
+        torch::class_<TBackendInterface>(kBackendsNamespace, name)
+            .def(torch::init<>())
+            ._def_unboxed(
+                "is_available",
+                getIsAvailableFunc<TBackendInterface>(),
+                getIsAvailableSchema())
+            ._def_unboxed(
+                "compile",
+                getCompileFunc<TBackendInterface>(),
+                getCompileSchema())
+            ._def_unboxed(
+                "execute",
+                getExecuteFunc<TBackendInterface>(),
+                getExecuteSchema());
+  }
+};
+
+} // namespace jit
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_info.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d07beb6bdb3c0007d00dfb1719a138ae3bc63f1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_debug_info.h
@@ -0,0 +1,65 @@
+#pragma once
+
+#ifndef BUILD_LITE_INTERPRETER
+#include <torch/csrc/jit/backends/backend_debug_handler.h>
+#endif
+#include <torch/custom_class.h>
+
+namespace torch {
+namespace jit {
+
+constexpr static auto kBackendUtilsNamespace = "backendutils";
+constexpr static auto kBackendDebugInfoClass = "BackendDebugInfo";
+
+#ifndef BUILD_LITE_INTERPRETER
+/*
+ * Custom class for holding debug information in lowered modules, intended
+ * purely for keeping this information to be later serialized outside of the
+ * lowered module itself.
+ * Its usage pattern is:
+ * 1. LoweredModule declares an instance of this class in __backend_debug_info
+ * 2. During serialization, __backend_debug_info is used to obtain the debug
+ *    information.
+ * 3. The contents of LoweredModule.__backend_debug_info are not serialized
+ *    within the LoweredModule itself.
+ */
+class TORCH_API PyTorchBackendDebugInfo : public torch::CustomClassHolder {
+ public:
+  PyTorchBackendDebugInfo() = default;
+
+  c10::optional<BackendDebugInfoMapType>& getDebugInfoMap() {
+    return debug_info_map_;
+  }
+
+  void setDebugInfoMap(BackendDebugInfoMapType&& debug_info_map) {
+    debug_info_map_ = std::move(debug_info_map);
+  }
+
+ private:
+  c10::optional<BackendDebugInfoMapType> debug_info_map_;
+};
+
+#else
+
+/*
+ * This dummy class exists for the following reason:
+ * __backend_debug_info is of type BackendDebugInfo, a torchbind class backed
+ * by the C++ class PyTorchBackendDebugInfo.
+ * PyTorchBackendDebugInfo depends on ir.h, scope.h, source_range.h, etc.,
+ * which we don't include on the lite interpreter side. Thus, on the lite
+ * interpreter side we cannot have a valid definition of
+ * PyTorchBackendDebugInfo. However, we do not need a valid instance of
+ * __backend_debug_info in the lite interpreter anyway, since we don't
+ * serialize this info as part of LoweredModule, as mentioned earlier.
+ * Still, since LoweredModule has a registered attribute __backend_debug_info,
+ * we need to make sure that BackendDebugInfo is registered with TorchScript.
+ * In this instance it does not have to be backed by PyTorchBackendDebugInfo,
+ * so we create a dummy PyTorchBackendDebugInfoDummy just for this purpose.
+ */
+class PyTorchBackendDebugInfoDummy : public torch::CustomClassHolder {
+ public:
+  PyTorchBackendDebugInfoDummy() = default;
+};
+#endif
+} // namespace jit
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h
new file mode 100644
index 0000000000000000000000000000000000000000..7299ce259bc8f9c3dd8437abe939089c89dddf45
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_detail.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#include <torch/csrc/jit/api/module.h>
+
+#include <ATen/core/jit_type.h>
+
+#include <functional>
+
+namespace torch {
+namespace jit {
+
+using DebugHandleType = int64_t;
+
+using NodeToDebugHandle = std::unordered_map<Node*, DebugHandleType>;
+
+using BackendDebugHandleGenerator =
+    std::function<NodeToDebugHandle(const std::shared_ptr<Graph>&)>;
+
+namespace detail {
+
+using BackendPreprocessFunction = std::function<c10::IValue(
+    const Module&,
+    const c10::Dict<c10::IValue, c10::IValue>&,
+    const BackendDebugHandleGenerator& generate_debug_handles)>;
+
+TORCH_API void registerBackendPreprocessFunction(
+    const std::string& name,
+    const BackendPreprocessFunction& preprocess);
+
+bool hasBackendPreprocessFunction(const std::string& name);
+
+BackendPreprocessFunction getBackendPreprocessFunction(const std::string& name);
+
+TORCH_API Module codegen_backend_module(
+    const std::string& backend_name,
+    const Module& orig_module,
+    const c10::Dict<c10::IValue, c10::IValue>& method_compile_spec,
+    const c10::DictTypePtr& any_dict_ty);
+} // namespace detail
+} // namespace jit
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e100a60bdae150d0913819a8e343abe91f5d8b7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h
@@ -0,0 +1,54 @@
+#pragma once
+#include <c10/util/Exception.h>
+
+namespace c10 {
+class TORCH_API BackendRuntimeException : public c10::Error {
+ public:
+  // Use debug_handle to throw exception
+  BackendRuntimeException(
+      SourceLocation loc,
+      std::string msg,
+      int64_t debug_handle)
+      : c10::Error(loc, msg) {
+    debug_handles.push_back(debug_handle);
+  }
+  // If rethrowing, we can push another debug_handle.
+  // This is useful in a couple of scenarios:
+  // 1. A submodule is lowered and the lite interpreter has a CallMethod to the
+  //    lowered module's method. In this case the lowered module will throw
+  //    with a handle, plus there will be another debug handle corresponding
+  //    to the CallMethod node in the lite interpreter. Both together give the
+  //    complete trace. This function allows the lite interpreter to rethrow
+  //    with the debug handle it has for CallMethod.
+  // 2. Another scenario is when the lite interpreter can make function calls,
+  //    or the lowered backend also has function call ability. Thus we have
+  //    multiple function frames. Now we need a stack of handles to
+  //    symbolicate the entire stack trace.
+  void pushDebugHandle(int64_t debug_handle) {
+    debug_handles.push_back(debug_handle);
+  }
+  const std::vector<int64_t>& getDebugHandles() {
+    return debug_handles;
+  }
+
+ private:
+  // Stores the stack of debug handles.
+  std::vector<int64_t> debug_handles;
+};
+
+} // namespace c10
+#define TORCH_DELEGATED_BACKEND_THROW(cond, msg, debug_handle) \
+  if (C10_UNLIKELY_OR_CONST(!(cond))) {                        \
+    throw ::c10::BackendRuntimeException(                      \
+        {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
+        msg,                                                   \
+        debug_handle);                                         \
+  }
+
+#define TORCH_DELEGATED_BACKEND_RETHROW(e, debug_handle) \
+  do {                                                   \
+    e.pushDebugHandle(debug_handle);                     \
+    throw;                                               \
+  } while (false)
+
+#define DEBUG_HANDLE_UNKNOWN -1
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7be08c765953e4a59fd9f3bb106efb195866128
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_init.h
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <torch/csrc/jit/python/pybind.h>
+#include <torch/csrc/utils/pybind.h>
+
+namespace torch {
+namespace jit {
+// Initialize Python bindings for JIT to_<backend> functions.
+void initJitBackendBindings(PyObject* module);
+} // namespace jit
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h
new file mode 100644
index 0000000000000000000000000000000000000000..099575da5285920c9bb3a1441e1721e32de0f6b5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_interface.h
@@ -0,0 +1,34 @@
+#pragma once
+
+#include <torch/custom_class.h>
+
+namespace torch {
+namespace jit {
+
+// Interface for a JIT backend.
+class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder {
+ public:
+  PyTorchBackendInterface() noexcept;
+  ~PyTorchBackendInterface() override;
+
+  // Returns true if the backend is available to process delegation calls.
+  virtual bool is_available() = 0;
+
+  // Compile the module contained in \p processed using the details provided in
+  // \p method_compile_spec for each module method that should be compiled for
+  // the backend. \p method_compile_spec should be of type Dict<string, Any>.
+  // \returns a dictionary of type Dict<string, Any> that contains a backend
+  // handle for each method that can run on the backend (i.e. each key in \p
+  // method_compile_spec).
+  virtual c10::impl::GenericDict compile(
+      c10::IValue processed,
+      c10::impl::GenericDict method_compile_spec) = 0;
+
+  // Execute the method specified by \p handle using \p inputs. \returns the
+  // outputs as a tuple.
+  virtual c10::impl::GenericList execute(
+      c10::IValue handle,
+      c10::impl::GenericList inputs) = 0;
+};
+} // namespace jit
+} // namespace torch
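Putting backend.h and this interface together, a minimal sketch of a custom backend (illustrative only; the trivial "echo" backend and its name are invented — a real backend would compile `processed` into backend-specific blobs, and would also register a preprocess step via backend_preprocess_register, declared in backend_preprocess.h below):

#include <torch/csrc/jit/backends/backend.h>

namespace {

// Trivial backend that "compiles" nothing and echoes its inputs back.
class EchoBackend : public torch::jit::PyTorchBackendInterface {
 public:
  bool is_available() override {
    return true;
  }

  c10::impl::GenericDict compile(
      c10::IValue processed,
      c10::impl::GenericDict method_compile_spec) override {
    // Hand back one opaque handle per requested method.
    auto handles =
        c10::impl::GenericDict(c10::StringType::get(), c10::AnyType::get());
    for (const auto& entry : method_compile_spec) {
      handles.insert(entry.key(), entry.key());
    }
    return handles;
  }

  c10::impl::GenericList execute(
      c10::IValue handle,
      c10::impl::GenericList inputs) override {
    return inputs; // a real backend would run the compiled blob here
  }
};

// Static registration; exposes the class under __backends__.echo.
constexpr auto kBackendName = "echo";
static auto cls = torch::jit::backend<EchoBackend>(kBackendName);

} // namespace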
+ virtual c10::impl::GenericList execute( + c10::IValue handle, + c10::impl::GenericList inputs) = 0; +}; +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h new file mode 100644 index 0000000000000000000000000000000000000000..0a256134aa96ece80442b43b238b80af556e1062 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h @@ -0,0 +1,18 @@ +#pragma once + +#include +namespace torch { +namespace jit { +class backend_preprocess_register { + std::string backend_name_; + + public: + backend_preprocess_register( + const std::string& name, + const detail::BackendPreprocessFunction& preprocess) + : backend_name_(name) { + detail::registerBackendPreprocessFunction(name, preprocess); + } +}; +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h new file mode 100644 index 0000000000000000000000000000000000000000..b0d5727d9d958fcd9ead129cb6bceda91e17fbcb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_resolver.h @@ -0,0 +1,10 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +// Create a Resolver for use in generating LoweredModules for specific backends. +TORCH_API std::shared_ptr loweredModuleResolver(); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..2ff8d13758541e24bdf0ace84b14ff89d51d54e0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API const std::vector& getAllBuiltinFunctionsFor(Symbol name); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h new file mode 100644 index 0000000000000000000000000000000000000000..f8a3a4ecc2690138fc3b2c836b2a56f4bee46215 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h @@ -0,0 +1,16 @@ +#pragma once +#include + +#include + +namespace torch { +namespace jit { + +struct Graph; + +// Transforms loops so that they can be represented as python +// for or while loops +TORCH_API void CanonicalizeModifiedLoops(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h new file mode 100644 index 0000000000000000000000000000000000000000..22349936687ce8a317e7bf7e6d54911487b87646 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h @@ -0,0 +1,241 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +enum class IterableModuleKind { NONE, LIST, DICT, PARAMLIST, PARAMDICT }; +class ConcreteModuleType; + +// You can think of an nn.Module as a template that corresponds to a family of +// JIT types. The template "arguments" are things like the constant values. +// e.g. +// class M(nn.Module): +// __constants__ = ["const"] +// ... +// +// Is similar to writing the following in C++: +// +// template +// class M { +// ... +// } +// +// We need to consider each different member of the type family a different JIT +// type because, e.g. different constant values lead to different versions of +// the same method. +// +// ConcreteModuleType corresponds to a single member of the type family, with +// all template arguments fully specified. Two Modules that share a +// ConcreteModuleType can share a JIT type, and vice versa. +// +// Why not just use a JIT type to represent concrete types? Because constants, +// function attributes, etc. are currently not representable in the type system, +// so this acts as a non-first-class way of tracking concrete types. +// +// ConcreteModuleType is also the source of truth for servicing all +// ModuleValue::attr calls. This is so we can guarantee that if two Modules +// share a JIT type (and thus a ConcreteModuleType), then they behave the same +// way when you access attributes on them. + +// ConcreteModuleType has two phases. +// 1. Creation: First we build it up, during the ScriptModule conversion +// process. This is represented by ConcreteModuleTypeBuilder. +// ...then the converter calls ConcreteModuleTypeBuilder::build(), producing +// a +// ConcreteModuleType ready for querying. +// 2. Querying: We use ConcreteModuleType as a source of truth for +// ModuleValue::attr calls during method compilation. + +// Represents a concrete type during the process of construction. We use +// this to decide whether we can share types between modules. +class VISIBILITY_HIDDEN ConcreteModuleTypeBuilder { + public: + explicit ConcreteModuleTypeBuilder(py::object pyClass) { + TORCH_INTERNAL_ASSERT(pyClass); + pyClass_ = std::move(pyClass); + } + + void addConstant(std::string name, py::object value); + void addConstant(std::string name, IValue value); + void addAttribute( + std::string name, + const TypePtr& type, + bool isParameter, + bool isBuffer); + void addFunctionAttribute( + std::string name, + const TypePtr& type, + py::object pyFunction); + + void addModule(std::string name, std::shared_ptr meta); + + void addForwardHook(py::object hook); + void addForwardPreHook(py::object pre_hook); + + void addOverload( + std::string methodName, + std::vector overloadedMethodNames); + void addBuiltinFunction(std::string name, const std::string& symbol_name); + void addFailedAttribute(std::string name, std::string failureReason); + void addIgnoredAttribute(std::string name); + void setIterableModuleKind(IterableModuleKind kind); + + // If a ConcreteModuleType is poisoned, it will never compare equal to any + // other concrete type. + void setPoisoned(); + + std::shared_ptr build() const { + return std::make_shared(*this); + } + + // This determines whether two modules can share a type. The container structs + // used by ConcreteModuleType have been defined such that operator== + // implements a meaningful comparison in that context.
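+ // A hedged sketch of the sharing decision (`pyClass` stands for the
+ // py::object wrapping the Python class above):
+ //
+ //   ConcreteModuleTypeBuilder a(pyClass), b(pyClass);
+ //   a.addConstant("const", IValue(1));
+ //   b.addConstant("const", IValue(2));
+ //   bool shared = a.equals(b); // false: different constants, distinct types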
+ bool equals(const ConcreteModuleTypeBuilder& other) const; + + struct FunctionAttribute { + FunctionTypePtr function_; + py::object pyFunction_; + + friend bool operator==( + const FunctionAttribute& lhs, + const FunctionAttribute& rhs) { + // Functions are not first class, so we can't do type comparison like a + // regular attribute. So we do a pointer equality check on the actual + // Python function object. + return lhs.pyFunction_.is(rhs.pyFunction_); + } + }; + + struct Attribute { + Attribute(TypePtr type, bool isParam, bool isBuffer) + : type_(std::move(type)), isParam_(isParam), isBuffer_(isBuffer) {} + + friend bool operator==(const Attribute& lhs, const Attribute& rhs) { + return *(lhs.type_) == *(rhs.type_) && lhs.isParam_ == rhs.isParam_; + } + TypePtr type_; + bool isParam_; + bool isBuffer_; + }; + + struct ModuleInfo { + ModuleInfo(std::string name, std::shared_ptr meta) + : name_(std::move(name)), meta_(std::move(meta)) {} + + friend bool operator==(const ModuleInfo& lhs, const ModuleInfo& rhs); + + std::string name_; + std::shared_ptr meta_; + }; + + private: + ConcreteModuleTypeBuilder() = default; + ClassTypePtr createTypeFromThis() const; + + // If true, this type will never compare equally to anything else. This is + // used if we want to ensure that this type is not shared (for example, if it + // came from a traced module) + bool isPoisoned_ = false; + + // The value of any constants defined by the module. + std::unordered_map constants_; + // The types of any attributes + OrderedDict attributes_; + // Overloads, in the same format as `__overloads__` in Python + std::unordered_map> overloads_; + // Any attributes we failed to convert to TorchScript, along with a hint as to + // why + std::unordered_map failedAttributes_; + // Any attributes that were marked as ignored. They cannot be used in + // TorchScript but can still be used in ignored functions in Python. + std::unordered_set ignoredAttributes_; + // Any function attributes. These are special right now because functions are + // not first-class in the type system. + std::unordered_map functionAttributes_; + // Function attributes that are calls to builtin functions. These get + // de-sugared directly into the corresponding aten:: call. The map is + // attribute name -> aten symbol name + std::unordered_map builtinFunctions_; + // The concrete types of any submodules + std::vector modules_; + // Hooks to be called before/after forward when the module + // is called directly. Used to ensure modules have different types + // when they have different Python hooks. + // Actual hooks are added to ClassType directly during compilation + std::vector forwardHooks_; + std::vector forwardPreHooks_; + + // If something is a ModuleDict/ModuleList, it means: + // 1. The order of the submodules matters for comparing the type + // 2. The compiler is allowed to treat it like a dict/tuple + IterableModuleKind iterableModuleKind_ = IterableModuleKind::NONE; + + // The original `nn.Module` class that we derived this ScriptModule from. + py::object pyClass_; + + // NOTE: If you ever add any more state to this struct, you need to make sure + // operator== still makes sense! + friend ConcreteModuleType; +}; + +// Represents a finalized concrete type, used to service ModuleValue::attr calls +// during method compilation.
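+// A hedged usage sketch for the querying phase (`"training_step"` is a
+// hypothetical attribute name):
+//
+//   auto concrete = builder.build();
+//   if (auto reason = concrete->findFailedAttribute("training_step")) {
+//     // *reason explains why the attribute is unusable in TorchScript
+//   }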
+class VISIBILITY_HIDDEN ConcreteModuleType { + public: + explicit ConcreteModuleType(ConcreteModuleTypeBuilder data); + + static std::shared_ptr fromJitType(TypePtr type); + + TypePtr getJitType() const; + c10::optional getPyClass() const; + IterableModuleKind getIterableModuleKind() const; + c10::optional> findOverloads( + const std::string& name) const; + c10::optional findFunctionAttribute(const std::string& name) const; + c10::optional findBuiltinFunction(const std::string& name) const; + std::shared_ptr findSubmoduleConcreteType( + const std::string& name) const; + c10::optional findFailedAttribute(const std::string& name) const; + bool isIgnoredAttribute(const std::string& name) const; + + // These getters are only here to return things as types that can be + // automatically converted by pybind. + std::unordered_map getConstantsPy() const; + std::unordered_map> getAttributesPy() + const; + std::vector>> + getModulesPy() const; + + bool equals(const ConcreteModuleType& other) const { + if (jitType_ == other.jitType_) { + // If the computed types are the same, these modules can (obviously) share + // a type. + return true; + } + + return data_.equals(other.data_); + } + bool equals(const ConcreteModuleTypeBuilder& other) const { + return data_.equals(other); + } + + void dump() const; + + private: + ConcreteModuleType() = default; + + // The builder data from which this concrete type was created. + ConcreteModuleTypeBuilder data_; + // The JIT type derived from this ConcreteModuleType. + TypePtr jitType_; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h new file mode 100644 index 0000000000000000000000000000000000000000..787eae80578881a29c05d51774927f54213239f0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h @@ -0,0 +1,16 @@ +#pragma once +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +// Convert a graph with Loads & Stores into SSA form +TORCH_API void ConvertToSSA(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h new file mode 100644 index 0000000000000000000000000000000000000000..f0d999e83c1a2d7da8514ca2ad738c57d646df40 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API size_t ComputeEditDistance( + const char* word1, + const char* word2, + size_t maxEditDistance); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h new file mode 100644 index 0000000000000000000000000000000000000000..f3a77c76abcd581feef108eeff492e6f3a0cd1de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +struct Call { + std::string fn_name; + SourceRange caller_range; +}; + +struct TORCH_API ErrorReport : public std::exception {
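+ // The copy constructor is user-defined because the std::stringstream
+ // member below is not copyable.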
ErrorReport(const ErrorReport& e); + + explicit ErrorReport(SourceRange r); + explicit ErrorReport(const TreeRef& tree) : ErrorReport(tree->range()) {} + explicit ErrorReport(const Token& tok) : ErrorReport(tok.range) {} + + const char* what() const noexcept override; + + struct TORCH_API CallStack { + // These functions are used to report why a function was being compiled + // (i.e. what was the call stack of user functions at compilation time that + // led to this error) + CallStack(const std::string& name, const SourceRange& range); + ~CallStack(); + + // Change the range that is relevant for the current function (i.e. after + // each successful expression compilation, change it to the next expression) + static void update_pending_range(const SourceRange& range); + }; + + static std::string current_call_stack(); + + private: + template + friend const ErrorReport& operator<<(const ErrorReport& e, const T& t); + + mutable std::stringstream ss; + OwnedSourceRange context; + mutable std::string the_message; + std::vector error_stack; +}; + +template +const ErrorReport& operator<<(const ErrorReport& e, const T& t) { + e.ss << t; + return e; +} + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h new file mode 100644 index 0000000000000000000000000000000000000000..84910c6bc1e4d6a9625ad7896b58deddfd3a9e64 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void TransformExits(std::shared_ptr& graph); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..a01ca7ad0b17792d927e071ba92ec5e3f113992c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h @@ -0,0 +1,17 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { + +TORCH_API std::variant parseSchemaOrName( + const std::string& schemaOrName); +TORCH_API c10::FunctionSchema parseSchema(const std::string& schema); +TORCH_API c10::OperatorName parseName(const std::string& name); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h new file mode 100644 index 0000000000000000000000000000000000000000..c5efa0b40151ac313b71099a81df26b7a4e12395 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h @@ -0,0 +1,16 @@ +#pragma once +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void InlineLoopCondition(std::shared_ptr& graph); +TORCH_API void InlineBlockBeforeNode(Node* before_node, Block* block); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h new file mode 100644 index 0000000000000000000000000000000000000000..6b27a9a165b22486ab95e0b5d0778065221c08aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h @@ -0,0 +1,21 @@ +#pragma once +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +TORCH_API void runCleanupPasses(std::shared_ptr& to_clean); + +TORCH_API bool meaningfulName(const std::string& name); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h new file mode 100644 index 0000000000000000000000000000000000000000..5ab926993c1f640cc11d47a016d9313eb7b590d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h @@ -0,0 +1,576 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32") +#endif + +namespace torch { +namespace jit { + +// single character tokens are just the character itself '+' +// multi-character tokens need an entry here +// if the third entry is not the empty string, it is used +// in the lexer to match this token. + +// These kinds are also used in Tree.h as the kind of the AST node. +// Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the +// lexer. + +#define TC_FORALL_TOKEN_KINDS(_) \ + _(TK_EOF, "eof", "") \ + _(TK_WHITESPACE, "whitespace", "") \ + _(TK_WHITESPACE_EOF, "whitespace_eof", "") \ + _(TK_NUMBER, "number", "") \ + _(TK_NEWLINE, "newline", "") \ + _(TK_INDENT, "indent", "") \ + _(TK_DEDENT, "dedent", "") \ + _(TK_DEF, "def", "def") \ + _(TK_EQUIVALENT, "equivalent", "<=>") \ + _(TK_IDENT, "ident", "") \ + _(TK_STRING, "string", "") \ + _(TK_STRINGLITERAL, "string_literal", "") \ + _(TK_CONST, "const", "") \ + _(TK_LIST, "list", "") \ + _(TK_DICT, "dict", "") \ + _(TK_OPTION, "option", "") \ + _(TK_APPLY, "apply", "") \ + _(TK_COMPREHENSION, "comprehension", "") \ + _(TK_RANGE_CONSTRAINT, "range_constraint", "") \ + _(TK_PARAM, "param", "") \ + _(TK_INFERRED, "inferred", "") \ + _(TK_ACCESS, "access", "") \ + _(TK_ASSIGN, "assign", "") \ + _(TK_AUG_ASSIGN, "aug_assign", "") \ + _(TK_ATTRIBUTE, "attribute", "") \ + _(TK_IF, "if", "if") \ + _(TK_ELSE, "else", "else") \ + _(TK_ELIF, "elif", "elif") \ + _(TK_WHILE, "while", "while") \ + _(TK_EXPR_STMT, "expression statement", "") \ + _(TK_RETURN, "return", "return") \ + _(TK_IS, "is", "is") \ + _(TK_ISNOT, "is not", "is not") \ + _(TK_NE, "ne", "!=") \ + _(TK_EQ, "eq", "==") \ + _(TK_LE, "le", "<=") \ + _(TK_GE, "ge", ">=") \ + _(TK_FLOOR_DIV, "floordiv", "//") \ + _(TK_IF_EXPR, "if", "") \ + _(TK_TRUE, "True", "True") \ + _(TK_FALSE, "False", "False") \ + _(TK_NONE, "None", "None") \ + _(TK_AND, "and", "and") \ + _(TK_OR, "or", "or") \ + _(TK_NOT, "not", "not") \ + _(TK_LSHIFT, "<<", "<<") \ + _(TK_RSHIFT, ">>", ">>") \ + _(TK_CAST, "cast", "") \ + _(TK_PLUS_EQ, "+=", "+=") \ + _(TK_MINUS_EQ, "-=", "-=") \ + _(TK_TIMES_EQ, "*=", "*=") \ + _(TK_DIV_EQ, "/=", "/=") \ + _(TK_MOD_EQ, "%=", "%=") \ + _(TK_BIT_OR_EQ, "|=", "|=") \ + 
_(TK_BIT_AND_EQ, "&=", "&=") \ + _(TK_BIT_XOR_EQ, "^=", "^=") \ + _(TK_LSHIFT_EQ, "<<=", "<<=") \ + _(TK_RSHIFT_EQ, ">>=", ">>=") \ + _(TK_POW_EQ, "**=", "**=") \ + _(TK_GLOBAL, "global", "global") \ + _(TK_BUILT_IN, "built-in", "") \ + _(TK_SUBSCRIPT, "subscript", "") \ + _(TK_VAR, "variable", "") \ + _(TK_NOTHING, "nothing", "") \ + _(TK_DICT_LITERAL, "dict-literal", "") \ + _(TK_LIST_LITERAL, "list-literal", "") \ + _(TK_TUPLE_LITERAL, "tuple-literal", "") \ + _(TK_FOR, "for", "for") \ + _(TK_IN, "in", "in") \ + _(TK_NOTIN, "not in", "not in") \ + _(TK_STARRED, "starred", "") \ + _(TK_UNARY_MINUS, "unary minus", "") \ + _(TK_POW, "pow operator", "**") \ + _(TK_ARROW, "arrow", "->") \ + _(TK_DECL, "decl", "") \ + _(TK_SLICE_EXPR, "slice expr", "") \ + _(TK_TYPE_COMMENT, "type comment", "# type:") \ + _(TK_RAISE, "raise", "raise") \ + _(TK_ASSERT, "assert", "assert") \ + _(TK_DOTS, "dots", "...") \ + _(TK_LIST_COMP, "list comprehension", "") \ + _(TK_DICT_COMP, "dict comprehension", "") \ + _(TK_BREAK, "break", "break") \ + _(TK_CONTINUE, "continue", "continue") \ + _(TK_DELETE, "del", "del") \ + _(TK_PASS, "pass", "pass") \ + _(TK_CLASS_DEF, "class", "class") \ + _(TK_IMPORT, "import", "import") \ + _(TK_WITH, "with", "with") \ + _(TK_WITH_ITEM, "withitem", "") \ + _(TK_AS, "as", "as") \ + _(TK_PROP, "property", "") \ + _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") \ + _(TK_NONE_TYPE, "NoneType", "NoneType") + +enum TokenKind { + // we use characters to represent themselves so skip all valid characters + // before + // assigning enum values to multi-char tokens. + TK_DUMMY_START = 256, +#define DEFINE_TOKEN(tok, _, _2) tok, + TC_FORALL_TOKEN_KINDS(DEFINE_TOKEN) +#undef DEFINE_TOKEN +}; + +TORCH_API std::string kindToString(int kind); +TORCH_API int stringToKind(const std::string& str); + +// nested hash tables that indicate char-by-char what is a valid token. +struct TokenTrie; +using TokenTrieRef = std::unique_ptr; +struct TokenTrie { + TokenTrie() : kind(0) {} + void insert(const char* str, int tok) { + if (*str == '\0') { + AT_ASSERT(kind == 0); + kind = tok; + return; + } + + for (size_t i = 0, e = child_chars.size(); i < e; ++i) { + if (child_chars[i] == *str) { + child_tries[i]->insert(str + 1, tok); + return; + } + } + + child_chars.emplace_back(*str); + child_tries.emplace_back(std::make_unique()); + child_tries.back()->insert(str + 1, tok); + } + int kind; // 0 == invalid token + + std::vector child_chars; + std::vector child_tries; +}; + +// stuff that is shared against all TC lexers/parsers and is initialized only +// once. +struct TORCH_API SharedParserData { + SharedParserData() : head(new TokenTrie()) { + std::stringstream ss; + for (const char* c = valid_single_char_tokens; *c; c++) { + std::string str(1, *c); + head->insert(str.c_str(), *c); + } + +#define ADD_CASE(tok, _, tokstring) \ + if (*(tokstring) != '\0') { \ + head->insert((tokstring), (tok)); \ + } + TC_FORALL_TOKEN_KINDS(ADD_CASE) +#undef ADD_CASE + } + + bool match( + StringCordView::Iterator pos, + bool continuation, // are we inside a scope where newlines don't count + // (e.g. 
inside parens) + bool whitespace_token, // should we treat whitespace as a token + int* kind, + StringCordView::Iterator* start, + StringCordView::Iterator* end) { + *start = pos; + // skip whitespace + while (pos.has_next() && isblank(*pos)) { + ++pos; + } + + // special handling + if (pos.has_next()) { + if (*pos == '#' && !isTypeComment(pos)) { + // skip comments + while (pos.has_next() && *pos != '\n') + ++pos; + // tail call, handle whitespace and more comments + return match(pos, continuation, whitespace_token, kind, start, end); + } + if (*pos == '\\') { + auto newiter = pos; + ++newiter; + if (newiter.has_next() && *newiter == '\n' && !whitespace_token) { + ++newiter; + return match(newiter, continuation, false, kind, start, end); + } + } + if (*pos == '\n') { + return match(++pos, continuation, !continuation, kind, start, end); + } + } + // we handle whitespace before EOF because in a case like the following we + // still need to generate the dedent token: + //   if foo: + //     ... + //   else: + //     pass + if (whitespace_token) { + *kind = !pos.has_next() ? TK_WHITESPACE_EOF : TK_WHITESPACE; + *end = pos; + return true; + } + if (!pos.has_next()) { + *kind = TK_EOF; + *start = pos; + *end = *start; + return true; + } + // invariant: the next token is not whitespace or newline + *start = pos; + // check for a valid number + size_t len; + if (isNumber(pos.rest_line(), 0, &len)) { + *end = *start; + *end += len; + *kind = TK_NUMBER; + return true; + } + // check for string + if (isString(pos.rest_line(), 0, &len)) { + *kind = TK_STRINGLITERAL; + *end = *start; + *end += len; + return true; + } + + // check for either an ident or a token + // ident tracks whether what we have scanned so far could be an identifier + // matched indicates if we have found any match. + bool matched = false; + bool ident = true; + TokenTrie* cur = head.get(); + // for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr); + // i++) + for (size_t i = 0; pos.has_next() && (ident || cur != nullptr); + ++pos, ++i) { + ident = ident && validIdent(i, *pos); + if (ident) { + matched = true; + *end = pos.next_iter(); + *kind = TK_IDENT; + } + // check for token second, so that e.g. 'max' matches the token TK_MAX + // rather than the + // identifier 'max' + if (cur) { + const auto begin_it = cur->child_chars.begin(); + const auto end_it = cur->child_chars.end(); + const auto ch_it = std::find(begin_it, end_it, *pos); + + cur = (ch_it == end_it) ? nullptr + : cur->child_tries[ch_it - begin_it].get(); + + if (cur && cur->kind != 0) { + matched = true; + *end = pos.next_iter(); + *kind = cur->kind; + } + } + } + return matched; + } + + bool isUnary(int kind, int* prec); + bool isBinary(int kind, int* prec); + bool isRightAssociative(int kind) { + switch (kind) { + case '?': + case TK_POW: + case TK_IF: + return true; + default: + return false; + } + } + + private: + bool validIdent(size_t i, char n) { + return isalpha(n) || n == '_' || (i > 0 && isdigit(n)); + } + + // 1. skip whitespace + // 2.
handle comment or newline + // + bool isNumber(c10::string_view str, size_t start, size_t* len) { + char first = str[start]; + // strtod allows numbers to start with + or - or nan or inf + // http://en.cppreference.com/w/cpp/string/byte/strtof + // but we want only the number part, otherwise 1+3 will turn into two + // adjacent numbers in the lexer + if (first == '-' || first == '+' || isalpha(first)) + return false; + const char* startptr = str.data() + start; + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + char* endptr; + torch::jit::strtod_c(startptr, &endptr); + *len = endptr - startptr; + // check if the number is complex valued + // access is safe because string is assumed to be null terminated + if (endptr != nullptr && *endptr == 'j') { + *len += 1; + } + return *len > 0; + } + + bool isCharCount(char c, c10::string_view str, size_t start, int len) { + // count checks from [start, start + len) + return start + len <= str.size() && + std::count(str.begin() + start, str.begin() + start + len, c) == len; + } + + // Python concatenates all adjacent strings "a" "b" == "ab" + // strings can be enclosed with 1 or 3 single or double quotes + // if enclosed with 3 quotes newlines are valid + // as elsewhere, backslash and newline should be ignored + bool isString(c10::string_view str, size_t start, size_t* len) { + char quote = str[start]; + if (quote != '\"' && quote != '\'') + return false; + int quote_len = isCharCount(quote, str, start, 3) ? 3 : 1; + + // end is now set past the opening quotation marks + size_t end = start + quote_len; + while (end < str.size() && !isCharCount(quote, str, end, quote_len)) { + if (str[end] == '\n' && quote_len != 3) { + return false; + } + // handle escaped characters. advances past escaped quotation marks, + // escaped newlines and escaped backslashes + // multi-char escapes like \x1A are handled fine here because the + // remainder of the escape is made of valid string characters anyway + if (str[end] == '\\') { + end++; + } + end++; + } + // set length equal to the complete string including quotations + *len = end - start + quote_len; + // if end finished without going past the last character of the string then + // there is a match + return end < str.size(); + } + + bool isblank(int n) { + return isspace(n) && n != '\n'; + } + + bool isTypeComment(StringCordView::Iterator str_iter) { + c10::string_view rest_line = str_iter.rest_line(); + const std::string type_string = "# type:"; + if (rest_line.size() < type_string.length()) { + return false; + } + auto match_string = rest_line.substr(0, type_string.size()); + return match_string == type_string; + } + + // Make an exception to comment skipping for type annotation comments + bool isTypeComment(StringCordView str, size_t pos) { + const std::string type_string = "# type:"; + if (str.size() < pos + type_string.length()) { + return false; + } + auto match_string = str.substr(pos, type_string.size()); + return match_string == type_string; + } + + TokenTrieRef head; +}; + +TORCH_API SharedParserData& sharedParserData(); + +struct Token { + int kind; + SourceRange range; + Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {} + std::string text() { + return std::string(range.token_text()); + } + std::string kindString() const { + return kindToString(kind); + } +}; + +struct Lexer { + explicit Lexer(std::shared_ptr source) + : source(std::move(source)), + pos(0), + nesting(0), + indent_stack(), + next_tokens(), + shared(sharedParserData()) { + auto first_indent = lexRaw(true);
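+ // Record the indentation of the first line as the base indent level;
+ // lex() then emits TK_INDENT/TK_DEDENT relative to this stack.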
indent_stack.push_back(first_indent.range.size()); + lex(); + } + // Return the current token, and then move to the next one + Token next() { + if (next_tokens.empty()) + reportError("Lexer invariant violated: empty token queue"); + Token r = std::move(next_tokens.front()); + next_tokens.erase(next_tokens.begin()); + if (next_tokens.empty()) { + lex(); + } + return r; + } + // Skip the current token if it matches the given kind + bool nextIf(int kind) { + if (cur().kind != kind) + return false; + next(); + return true; + } + + [[noreturn]] void reportError(const std::string& what) { + reportError(what, cur()); + } + [[noreturn]] void reportError(const std::string& what, const Token& t) { + std::stringstream ss; + ss << what << ":\n"; + t.range.highlight(ss); + throw std::runtime_error(ss.str()); + } + [[noreturn]] void expected(const std::string& what, const Token& t) { + std::stringstream ss; + ss << "expected " << what << " but found '" << t.kindString() + << "' here:\n"; + t.range.highlight(ss); + throw std::runtime_error(ss.str()); + } + [[noreturn]] void expected(const std::string& what) { + expected(what, cur()); + } + // Check that the current token has a given kind, return the current token, + // and advance to the next one. + Token expect(int kind) { + if (cur().kind != kind) { + expected(kindToString(kind)); + } + return next(); + } + Token& lookahead() { + if (next_tokens.size() < 2) { + lex(); + } + return next_tokens[1]; + } + Token& cur() { + return next_tokens.front(); + } + + private: + void lex() { + auto r = lexRaw(); + switch (r.kind) { + case '(': + case '[': + case '{': + nesting++; + break; + case ')': + case ']': + case '}': + nesting--; + break; + case TK_WHITESPACE: + case TK_WHITESPACE_EOF: { + const auto depth = static_cast( + r.kind == TK_WHITESPACE_EOF ? indent_stack.front() + : r.range.size()); + // note: TK_WHITESPACE_EOF is whitespace right before the EOF token + // just like we allow the code to be indented to a particular initial + // indent level, we allow the final indent to be anything and set + // it back to the initial indent level. 
This allows the code to be + // put into string literals inside code without worrying about final + // whitespace + if (depth > indent_stack.back()) { + indent_stack.push_back(depth); + r.kind = TK_INDENT; + } else if (depth == indent_stack.back()) { + r.kind = TK_NEWLINE; + } else { + next_tokens.emplace_back(TK_NEWLINE, r.range); + while (indent_stack.back() != depth) { + indent_stack.pop_back(); + next_tokens.emplace_back(TK_DEDENT, r.range); + if (indent_stack.empty()) { + reportError("invalid indent level " + std::to_string(depth), r); + } + } + return; // We've already queued the tokens + } + } break; + default: + break; + } + next_tokens.push_back(std::move(r)); + } + Token lexRaw(bool whitespace_token = false) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int kind; + AT_ASSERT(source); + if (current == nullptr) { + AT_ASSERT(pos == 0); + current = std::make_unique( + source->text_str().begin()); + } + + StringCordView::Iterator start_iter = *current; + StringCordView::Iterator end_iter = *current; + if (!shared.match( + *current, + nesting > 0, + whitespace_token, + &kind, + &start_iter, + &end_iter)) { + expected( + "a valid token", + Token( + **current, + SourceRange(source, start_iter, start_iter.pos() + 1))); + } + + auto t = Token(kind, SourceRange(source, start_iter, end_iter.pos())); + pos = end_iter.pos(); + *current = end_iter; + return t; + } + + std::shared_ptr source; + std::unique_ptr current; + size_t pos; + size_t nesting; // depth of ( [ { nesting... + std::vector indent_stack; // stack of indentation level of blocks + // Invariant: this should always contain at least a single element + std::vector next_tokens; + SharedParserData& shared; +}; +} // namespace jit +} // namespace torch + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h new file mode 100644 index 0000000000000000000000000000000000000000..9a56d31537fe2bfcd34079bb79f1df46d6c8d631 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +// Simple data structure for containing a type T in nested control blocks +// Should only be used after initial compilation where type checking and +// loads and stores are emitted + +template +struct MiniEnvironment { + MiniEnvironment(Block* b, std::shared_ptr next = nullptr) + : next(std::move(next)) {} + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::shared_ptr> next; + + T findInThisFrame(const std::string& name) { + auto it = table.find(name); + if (it != table.end()) { + return it->second; + } + return nullptr; + } + + T findInAnyFrame(const std::string& name) { + for (auto runner = this; runner; runner = runner->next.get()) { + if (auto r = runner->findInThisFrame(name)) { + return r; + } + } + return nullptr; + } + + void setVar(const std::string& name, T value) { + table[name] = value; + } + + std::vector definedVariables() { + std::vector result; + result.reserve(table.size()); + for (auto& kv : table) { + result.push_back(kv.first); + } + std::sort(result.begin(), result.end()); + return result; + } + + private: + std::unordered_map table; +}; + +} // namespace jit +} // namespace torch diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h new file mode 100644 index 0000000000000000000000000000000000000000..07a0352bce69102d48d06abcec18b0c7c4ab8988 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +/** + * class NameMangler + * + * Utility to mangle qualified names in order to make them unique. We use this + * in various places where we need to de-duplicate qualified names. + */ +class TORCH_API NameMangler { + public: + // Given a qualified name, return a mangled version that is guaranteed to be + // unique with respect to previous/future calls of `mangle()` on this name + // mangler instance. + c10::QualifiedName mangle(const c10::QualifiedName& name); + + private: + size_t mangleIndex_ = 0; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h new file mode 100644 index 0000000000000000000000000000000000000000..2ca1f150aacddb490bc5e6ed15a6d6c6f1f4089f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h @@ -0,0 +1,87 @@ +#pragma once +#include +#include +#include + +namespace torch { +namespace jit { + +inline bool isCharCount(char c, const std::string& str, size_t start, int len) { + // count checks from [start, start + len) + return start + len <= str.size() && + std::count(str.begin() + start, str.begin() + start + len, c) == len; +} + +inline c10::optional parseOctal(const std::string& str, size_t pos) { + // \xxx where each x is 0-7 + if (pos + 3 >= str.size()) + return c10::nullopt; + size_t c = 0; + for (size_t i = 1, b = 64; i < 4; ++i, b /= 8) { + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + int d = str[pos + i]; + if (d < '0' || d > '7') + return c10::nullopt; + c += b * (d - '0'); + } + if (c >= 256) + return c10::nullopt; + return c; +} + +inline std::string parseStringLiteral( + const SourceRange& range, + const std::string& str) { + int quote_len = isCharCount(str[0], str, 0, 3) ?
3 : 1; + auto ret_str = str.substr(quote_len, str.size() - quote_len * 2); + size_t pos = ret_str.find('\\'); + while (pos != std::string::npos) { + // invariant: pos has to escape a character because it is a valid string + char c = ret_str[pos + 1]; + size_t to_erase = 2; + switch (ret_str[pos + 1]) { + case '\\': + case '\'': + case '\"': + case '\n': + break; + case 'a': + c = '\a'; + break; + case 'b': + c = '\b'; + break; + case 'f': + c = '\f'; + break; + case 'n': + c = '\n'; + break; + case 'v': + c = '\v'; + break; + case 't': + c = '\t'; + break; + case 'x': + throw ErrorReport(range) << "unsupported hex specifier"; + case 'u': + case 'U': + throw ErrorReport(range) << "unsupported unicode specifier"; + default: + // octal value in format \nnn, n is [0-7] + if (auto v = parseOctal(ret_str, pos)) { + to_erase = 4; + c = *v; + } else { + throw ErrorReport(range) << "ill-formed octal specifier"; + } + } + ret_str.replace(pos, to_erase, /* num copies */ 1, c); + pos = ret_str.find('\\', pos + 1); + } + return ret_str; +} + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h new file mode 100644 index 0000000000000000000000000000000000000000..6d856a090854a48947664964bd71f2d985a36832 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h @@ -0,0 +1,33 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct Decl; +struct ParserImpl; +struct Lexer; + +TORCH_API Decl mergeTypesFromTypeComment( + const Decl& decl, + const Decl& type_annotation_decl, + bool is_method); + +struct TORCH_API Parser { + explicit Parser(const std::shared_ptr& src); + TreeRef parseFunction(bool is_method); + TreeRef parseClass(); + Decl parseTypeComment(); + Expr parseExp(); + Lexer& lexer(); + ~Parser(); + + private: + std::unique_ptr pImpl; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..28300611c8751acbe80c4fb539bdfc1397a7bd5d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h @@ -0,0 +1,7 @@ +#pragma once + +namespace torch { +namespace jit { +static const char* valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~"; +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h new file mode 100644 index 0000000000000000000000000000000000000000..dc4ab61f67f7a39ef097cbe37fe8f6d5ec380d5f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h @@ -0,0 +1,68 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +struct Resolver; +using ResolverPtr = std::shared_ptr; + +/** + * class Resolver + * + * Represents an "outer environment" in which we can look up names and return + * a corresponding SugaredValue. This is used during compilation to resolve + * references to names which are not defined internal to the graph.
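+ * (Returning nullptr from either method signals that the resolver cannot
+ * resolve `name`; the compiler then typically falls back to other lookups
+ * or reports the name as undefined.)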
+ * + * Example: PythonResolver looks at the enclosing Python scope for `name`. + * + * NOTE: When adding methods, keep this an abstract class (i.e. all new methods + * should be purely virtual). Resist the urge to provide a default + * implementation; you should explicitly think about how each resolver would + * handle the method. + */ +struct Resolver { + virtual ~Resolver() = default; + + // Resolve a given name to a SugaredValue. This takes the method `m` that the + // caller is currently constructing, since we may need to insert nodes into + // the graph to create a value. + virtual std::shared_ptr resolveValue( + const std::string& name, + GraphFunction& m, + const SourceRange& loc) { + return nullptr; + } + + // Resolve `name` to a TypePtr. + virtual TypePtr resolveType(const std::string& name, const SourceRange& loc) { + return nullptr; + } +}; + +// A resolver that only understands "torch.foo()" lookups. +struct NativeResolver : public Resolver { + std::shared_ptr resolveValue( + const std::string& name, + GraphFunction& m, + const SourceRange& loc) override { + if (name == "torch") { + return std::make_shared("aten"); + } + return nullptr; + } + + TypePtr resolveType(const std::string& name, const SourceRange& loc) + override { + return nullptr; + } +}; + +inline std::shared_ptr nativeResolver() { + return std::make_shared(); +} +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h new file mode 100644 index 0000000000000000000000000000000000000000..754ede24597e5e98c4c90810df9f9cd930941a12 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h @@ -0,0 +1,70 @@ +#pragma once +#include +#include +#include + +#include + +namespace torch { +namespace jit { + +// Try to match a list of inputs and keyword 'attributes' to this +// schema. 
Return the flat list of positional inputs to the call or +// `c10::nullopt` on failure (`failure_messages` contains a good error +// report in this case). + +struct MatchedSchema { + std::vector inputs; + std::vector return_types; + c10::OptNameList return_field_names; + std::string schema_name; +}; + +TORCH_API bool isBlockListedSchema(const FunctionSchema& schema); + +TORCH_API MatchedSchema matchSchema( + const ::c10::FunctionSchema& schema, + const SourceRange& loc, + Graph& graph, + at::ArrayRef args, + at::ArrayRef kwargs, + const c10::optional& self = c10::nullopt); + +TORCH_API std::pair matchSchemas( + const std::vector& schemas, + const SourceRange& loc, + Graph& graph, + at::ArrayRef args, + at::ArrayRef kwargs, + const c10::optional& self = c10::nullopt, + bool render_errors = false); + +TORCH_API bool convertibleToList( + const TypePtr& type, + const TypePtr& list_type_); + +TORCH_API std::string getFullSchemaName(const ::c10::FunctionSchema& schema); + +TORCH_API Value* emitBuiltinCall( + const SourceRange& loc, + Graph& graph, + Symbol name, + at::ArrayRef args, + at::ArrayRef kwargs, + const c10::optional& self = c10::nullopt); + +TORCH_API c10::optional findInputWithName( + const std::string& name, + at::ArrayRef kwargs, + bool is_aten = false); + +// Applies implicit conversions to `value`, trying to turn it into type +// `concrete_type`. It succeeds if `return_value->isSubtypeOf(concrete_type)`. +TORCH_API Value* tryConvertToType( + const SourceRange& loc, + Graph& graph, + const TypePtr& concrete_type, + Value* value, + bool allow_conversions); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..c43e4363da38645ec31cd562a5b8ca8e631c73de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h @@ -0,0 +1,40 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +using TypePtr = c10::TypePtr; + +struct TORCH_API SchemaTypeParser { + TypePtr parseBaseType(); + c10::optional parseAliasAnnotation(); + std::pair> parseType(); + std::tuple> + parseFakeAndRealType(); + c10::optional parseTensorDType(const std::string& dtype); + TypePtr parseRefinedTensor(); + + SchemaTypeParser(Lexer& L, bool parse_complete_tensor_types) + : complete_tensor_types(parse_complete_tensor_types), L(L) {} + + private: + c10::optional tryToParseRequiresGrad(); + c10::optional tryToParseDeviceType(); + void parseList( + int begin, + int sep, + int end, + c10::function_ref callback); + + bool complete_tensor_types; + Lexer& L; + size_t next_id = 0; +}; +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..3a05af9c598abdfa7e71d8ab90d668031023a829 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h @@ -0,0 +1,55 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { + +/** + * class ScriptTypeParser + * + * Parses expressions in our typed AST format
(TreeView) into types and + * typenames. + */ +class TORCH_API ScriptTypeParser { + public: + explicit ScriptTypeParser() = default; + explicit ScriptTypeParser(ResolverPtr resolver) + : resolver_(std::move(resolver)) {} + + c10::TypePtr parseTypeFromExpr(const Expr& expr) const; + + c10::optional> parseBroadcastList( + const Expr& expr) const; + + c10::TypePtr parseType(const std::string& str); + + FunctionSchema parseSchemaFromDef(const Def& def, bool skip_self); + + c10::IValue parseClassConstant(const Assign& assign); + + private: + c10::TypePtr parseTypeFromExprImpl(const Expr& expr) const; + + c10::optional parseBaseTypeName(const Expr& expr) const; + at::TypePtr subscriptToType( + const std::string& typeName, + const Subscript& subscript) const; + std::vector evaluateDefaults( + const SourceRange& r, + const std::vector& default_types, + const std::vector& default_exprs); + std::vector parseArgsFromDecl(const Decl& decl, bool skip_self); + + std::vector parseReturnFromDecl(const Decl& decl); + + ResolverPtr resolver_ = nullptr; + + // Need to use `evaluateDefaults` in serialization + friend struct ConstantTableValue; + friend struct SourceImporterImpl; +}; +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h new file mode 100644 index 0000000000000000000000000000000000000000..7f0e936a1ef895d7098205249faaa8e3f02d1420 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h @@ -0,0 +1,459 @@ +#pragma once +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +class SourceRangeUnpickler; +struct SourceRange; + +// A string-like class backed by a vector of string_view. The string it +// represents is logically the concatenation of the string_views. This has +// the advantage of not needing contiguous memory.
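+// A hedged construction sketch (note: template arguments are shown here even
+// though they appear elided elsewhere in this header dump):
+//
+//   auto owned = std::make_shared<std::string>("world");
+//   StringCordView cord({"hello ", *owned}, {owned});
+//   // cord.size() == 11, cord.at(6) == 'w', cord.str() == "hello world"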
+struct TORCH_API StringCordView { + StringCordView(); + StringCordView(const StringCordView&) = default; + StringCordView(StringCordView&&) noexcept = default; + StringCordView( + std::vector inputs, + std::vector> ownerships); + + StringCordView& operator=(const StringCordView&) = default; + StringCordView& operator=(StringCordView&&) noexcept = default; + + size_t size() const { + return accumulated_sizes_.back(); + } + + size_t find(const std::string& tok, size_t start) const; + size_t find_regex(const std::string& tok, size_t start) const; + StringCordView substr(size_t start, size_t size) const; + + char at(size_t index) const { + return *iter_for_pos(index); + } + char operator[](size_t index) const { + return at(index); + } + + std::string str() const { + std::stringstream ss; + for (auto s : pieces_) { + ss << std::string(s); + } + return ss.str(); + } + + bool operator==(const std::string& rhs) const; + + bool operator==(const StringCordView& rhs) const; + + c10::string_view piece(size_t index) const { + return pieces_[index]; + } + + struct Iterator { + Iterator( + const StringCordView* str, + size_t start_line, + size_t start_pos, + size_t size) + : line_(start_line), pos_(start_pos), str_(str), size_(size) {} + explicit Iterator(const StringCordView* str) + : Iterator(str, 0, 0, str->size()) {} + + Iterator() : Iterator(nullptr, 0, 0, 0) {} + + Iterator(const Iterator&) = default; + Iterator(Iterator&&) = default; + Iterator& operator=(const Iterator&) = default; + Iterator& operator=(Iterator&&) = default; + + Iterator operator++() { + if (size_ == 0) { + return *this; + } + if ((pos_ + 1) < str_->pieces_[line_].size()) { + pos_++; + } else { + line_++; + pos_ = 0; + } + return *this; + } + + Iterator operator++(int) { + Iterator prev(*this); + ++(*this); + return prev; + } + + Iterator next_iter() const { + Iterator next(*this); + ++next; + return next; + } + + Iterator& operator+=(size_t num) { + if (!has_next()) { + return *this; + } + size_t target_pos = pos_ + num; + if (target_pos >= str_->accumulated_sizes_[line_] && + (line_ + 1) < str_->accumulated_sizes_.size() && + target_pos < str_->accumulated_sizes_[line_ + 1]) { + pos_ = target_pos; + return *this; + } + + size_t target_abs_pos = pos() + num; + *this = str_->iter_for_pos(target_abs_pos); + return *this; + } + + bool operator==(const Iterator& rhs) const { + if (!has_next() && !rhs.has_next()) { + return true; + } + return (str_ == rhs.str_) && (line_ == rhs.line_) && (pos_ == rhs.pos_); + } + bool operator!=(const Iterator& rhs) { + return !((*this) == rhs); + } + bool has_next() const { + return size_ > 0 && (line_ < str_->pieces_.size()); + } + + char operator*() const { + TORCH_INTERNAL_ASSERT(line_ < str_->pieces_.size()); + TORCH_INTERNAL_ASSERT(pos_ < str_->pieces_[line_].size()); + return str_->pieces_[line_].at(pos_); + } + + // returns rest of the line of the current iterator + c10::string_view rest_line() const { + if (line_ >= str_->pieces_.size()) { + return ""; + } + + c10::string_view cur_line = str_->pieces_[line_]; + return cur_line.substr(pos_, std::string::npos); + } + + size_t pos() const { + if (size_ == 0) { + return 0; + } + return str_->accumulated_sizes_[line_] + pos_; + } + + private: + size_t line_; + size_t pos_; + const StringCordView* str_; + size_t size_; + friend struct StringCordView; + }; + + Iterator begin() const { + return Iterator(this, 0, 0, size()); + } + Iterator end() const { + return Iterator(this, pieces_.size(), 0, 0); + } + Iterator iter_for_pos(size_t pos) const; + 
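+ // A hedged iteration sketch, continuing the `cord` example above: Iterator
+ // walks characters transparently across piece boundaries.
+ //
+ //   std::string out;
+ //   for (auto it = cord.begin(); it != cord.end(); ++it) {
+ //     out.push_back(*it);
+ //   }
+ //   // out == "hello world"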
+ private: + std::vector pieces_; + std::vector accumulated_sizes_; + std::vector> owned_strings_; +}; + +// Source represents a code segment. It keeps track of: +// - text_view : the view into text of the code segment +// - filename (optional) : if present, represents the name of the file from +// which the code segment originated. +// - starting_line_no : represents the line in the original file where the +// code segment started. +struct TORCH_API Source { + // Whether or not Source should copy the string passed in the constructor. + enum CopiesString { COPIES_STRING, DONT_COPY }; + + explicit Source( + c10::string_view text_view, + c10::optional filename = c10::nullopt, + size_t starting_line_no = 0, + std::shared_ptr gen_ranges = nullptr, + CopiesString copies_str = COPIES_STRING) + : filename_(std::move(filename)), + starting_line_no_(starting_line_no), + gen_ranges_(std::move(gen_ranges)) { + if (copies_str == COPIES_STRING) { + std::shared_ptr allocated_str = + std::make_shared(text_view.data(), text_view.size()); + text_view_ = StringCordView({*allocated_str}, {allocated_str}); + } else { + text_view_ = StringCordView({text_view}, {}); + } + + calc_line_start_offsets(); + } + + explicit Source( + StringCordView str, + c10::optional filename = c10::nullopt, + size_t starting_line_no = 0, + std::shared_ptr gen_ranges = nullptr) + : text_view_(std::move(str)), + filename_(std::move(filename)), + starting_line_no_(starting_line_no), + gen_ranges_(std::move(gen_ranges)) { + calc_line_start_offsets(); + } + // Given a line number (within source_), return the byte offset of the + // beginning of that line. + size_t offset_for_line(size_t line) const { + return line_starting_offsets_.at(line); + } + + // Returns number of lines present. + size_t num_lines() const { + return line_starting_offsets_.size(); + } + + // Calculate the line (within the code segment) on which `offset` resides. + size_t lineno_for_offset(size_t offset) const { + auto iter = std::upper_bound( + line_starting_offsets_.begin(), line_starting_offsets_.end(), offset); + return iter - line_starting_offsets_.begin() - 1; + } + + // Calculate the line (within the original source file, if present) on which + // `lineno` resides. + size_t lineno_to_source_lineno(size_t lineno) const { + if (filename_) { + return lineno + starting_line_no_; + } else { + return lineno; + } + } + + StringCordView get_line(size_t lineno) const { + auto start = offset_for_line(lineno); + auto size = (lineno + 1) < num_lines() ? offset_for_line(lineno + 1) - start + : text_view_.size() - start; + return text_view_.substr(start, size); + } + + const StringCordView& text_str() const { + return text_view_; + } + + char char_at(size_t index) const { + return text_view_.at(index); + } + + size_t size() const { + return text_view_.size(); + } + + c10::optional& filename() { + return filename_; + } + + size_t starting_line_no() const { + return starting_line_no_; + } + + c10::optional findSourceRangeThatGenerated( + const SourceRange& range); + + ~Source() = default; + + private: + void calc_line_start_offsets() { + line_starting_offsets_.clear(); + line_starting_offsets_.push_back(0); + size_t pos = 0; + while ((pos = text_view_.find("\n", pos)) != std::string::npos) { + line_starting_offsets_.push_back(++pos); + } + } + + StringCordView text_view_; + + c10::optional filename_; + // If filename_ is not present, starting_line_no_ is don't care + size_t starting_line_no_; + // Starting offsets for lines into the source. e.g. 
line 0 starts at + // line_starting_offsets_[0], etc. + std::vector line_starting_offsets_; + + std::shared_ptr gen_ranges_; +}; + +// A SourceRange is a reference to subset of a Source, specified by `start` and +// `end` byte offsets into the source text. +struct TORCH_API SourceRange { + SourceRange(std::shared_ptr source_view, size_t start_, size_t end_) + : source_view_(std::move(source_view)), start_(start_), end_(end_) { + if (source_view_) { + start_iter_ = source_view_->text_str().iter_for_pos(start_); + } + } + + SourceRange() : source_view_(nullptr), start_(0), end_(0) {} + + SourceRange( + std::shared_ptr source_view_, + StringCordView::Iterator start_iter, + size_t end_) + : source_view_(std::move(source_view_)), + start_(start_iter.pos()), + end_(end_), + start_iter_(start_iter) {} + + const c10::string_view token_text() const { + size_t size = end() - start(); + return start_iter_.rest_line().substr(0, size); + } + + const StringCordView text() const { + return source_view_->text_str().substr(start(), end() - start()); + } + size_t size() const { + return end() - start(); + } + static const size_t CONTEXT = 3; + void highlight(std::ostream& out) const; + + // Customizable version of 'highlight' method. + void print_with_context( + std::ostream& out, + size_t context, + bool highlight, + const std::string& funcname) const; + + const std::shared_ptr& source() const { + return source_view_; + } + size_t start() const { + return start_; + } + size_t end() const { + return end_; + } + std::string str() const { + std::stringstream ss; + highlight(ss); + return ss.str(); + } + + c10::optional> file_line_col() const { + if (!source_view_ || !source()->filename()) { + return c10::nullopt; + } + + auto lineno = source_view_->lineno_for_offset(start_); + auto col_offset = (int)start_ - (int)source_view_->offset_for_line(lineno); + // TODO: c10::optional<>::value returns an rvalue ref so can't use it here?? + return std::make_tuple( + source_view_->filename().value_or(""), + source_view_->lineno_to_source_lineno(lineno), + (size_t)col_offset); + } + + bool operator==(const SourceRange& rhs) const { + return start() == rhs.start() && end() == rhs.end() && + source() == rhs.source(); + } + + bool operator!=(const SourceRange& rhs) const { + return !(*this == rhs); + } + + c10::optional findSourceRangeThatGenerated() const { + if (!source_view_) { + return c10::nullopt; + } + return source_view_->findSourceRangeThatGenerated(*this); + } + + protected: + std::shared_ptr source_view_; + + private: + size_t start_; + size_t end_; + StringCordView::Iterator start_iter_; +}; + +// OwnedSourceRange is just like a SourceRange except that it owns a `Source` +// instead of `Source`. Thus OwnedSourceRange owns a copy of source text. 
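+// [illustrative worked example, not part of this header] How the Source
+// offset/line helpers above fit together for a Source built from the text
+// "ab\ncd\n", whose line_starting_offsets_ are {0, 3, 6}:
+//   lineno_for_offset(4)  == 1   // upper_bound finds 6, step back one line
+//   offset_for_line(1)    == 3
+//   get_line(1)           == "cd\n"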
+struct OwnedSourceRange : public SourceRange {
+  explicit OwnedSourceRange(const SourceRange& source_range)
+      : SourceRange(source_range) {
+    const auto& source = source_range.source();
+    if (source) {
+      source_view_ = std::make_shared<Source>(
+          source->text_str().str(),
+          source->filename(),
+          source->starting_line_no());
+    }
+  }
+};
+
+struct TORCH_API SourceRangeHasher {
+ public:
+  size_t operator()(const torch::jit::SourceRange& key) const;
+};
+
+struct StackEntry {
+  std::string filename;
+  SourceRange range;
+};
+
+TORCH_API void format_stack_trace(
+    std::ostream& out,
+    const std::vector<StackEntry>& entries);
+
+inline std::ostream& operator<<(std::ostream& out, const SourceRange& range) {
+  range.highlight(out);
+  return out;
+}
+
+// A pair of (byte offset, SourceRange) describing a specific segment
+// of the output stream
+struct TaggedRange {
+  TaggedRange(size_t bytes, SourceRange range)
+      : bytes(bytes), range(std::move(range)) {}
+  size_t bytes;
+  SourceRange range;
+};
+using SourceRangeRecords = std::vector<TaggedRange>;
+using SourceRangeTagMap =
+    std::unordered_map<SourceRange, int64_t, SourceRangeHasher>;
+
+} // namespace jit
+} // namespace torch
+
+namespace std {
+template <>
+struct iterator_traits<torch::jit::StringCordView::Iterator> {
+  using value_type = char;
+  using difference_type = ptrdiff_t;
+  using pointer = char*;
+  using reference = char&;
+  using iterator_category = std::forward_iterator_tag;
+};
+} // namespace std
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h
new file mode 100644
index 0000000000000000000000000000000000000000..185bd3c12684176dbbc2453b47b0e44832196e4c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#include
+#include
+
+#include
+#include
+#include
+
+namespace torch {
+namespace jit {
+
+/**
+ * SourceRef does two things:
+ *   1. Owns a Source object.
+ *   2. Serves as lookup key to the owned Source in associative containers, for
+ *      runtime data aggregation.
+ * We don't want to use std::shared_ptr<Source> directly because we want to
+ * support heterogeneous lookup, and also shared_ptr is an implementation
+ * detail which should be encapsulated.
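+ *
+ * [illustrative sketch, not part of this header] The comparison operators
+ * below enable transparent-comparator lookup with a plain Source, e.g.:
+ *   std::map<SourceRef, int64_t, std::less<>> counts;
+ *   counts.emplace(SourceRef(src), 0);  // src is a std::shared_ptr<Source>
+ *   auto it = counts.find(*src);        // heterogeneous lookup by Source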
+ */ +class TORCH_API SourceRef : public CustomClassHolder { + public: + explicit SourceRef(std::shared_ptr source_view) + : source_view_(std::move(source_view)) {} + bool operator==(const SourceRef& other) const { + return source_view_ == other.source_view_; + } + bool operator<(const Source& other) const { + return source_view_.get() < &other; + } + friend bool operator<(const Source& other, const SourceRef& self) { + return &other < self.source_view_.get(); + } + bool operator<(const SourceRef& other) const { + return *this < *other.source_view_.get(); + } + const Source* operator->() const { + return source_view_.get(); + } + + private: + std::shared_ptr source_view_; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h new file mode 100644 index 0000000000000000000000000000000000000000..dd03c3cdb02dfb47423054443601fb2bcbbbe042 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +TORCH_API double strtod_c(const char* nptr, char** endptr); +TORCH_API float strtof_c(const char* nptr, char** endptr); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h new file mode 100644 index 0000000000000000000000000000000000000000..9bf09f4a56e176f2153ac4ce8226a0b80313ed5a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h @@ -0,0 +1,857 @@ +#pragma once +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +using SugaredValuePtr = std::shared_ptr; + +// The AST can contain nodes like `self`, `self.b` or `python_fn` that +// are not first-class values in the graph representation, but instead +// will be desugared based on how they are used in the AST. + +// SugaredValue is used to temporarily represent these values in a way +// that separates their behavior from the AST -> IR converter itself. +// This allows us to keep dependencies on python minimal. + +struct TORCH_API SugaredValue + : public std::enable_shared_from_this { + // what is this node? for error reporting (e.g. Module, python function) + virtual std::string kind() const = 0; + + // what can we do with this thing? + // use it as a value e.g. `this + 4` + virtual Value* asValue(const SourceRange& loc, GraphFunction& m) { + throw ErrorReport(loc) << kind() << " cannot be used as a value"; + } + + // select an attribute on it, e.g. `this.field` + virtual std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) { + throw ErrorReport(loc) << "attribute lookup is not defined on " << kind(); + } + + virtual bool hasAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) { + throw ErrorReport(loc) << "attribute lookup is not defined on " << kind(); + } + + // assign an attribute on it, e.g. 
`this.field = newValue` + virtual void setAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field, + Value* newValue) { + throw ErrorReport(loc) << "attribute assignment is not defined on " + << kind(); + } + + // use it as a vector of values, e.g. a tuple of values as return value from + // a method invocation + virtual std::vector> asTuple( + const SourceRange& loc, + GraphFunction& m, + const c10::optional& size_hint = {}) { + throw ErrorReport(loc) << kind() << " cannot be used as a tuple"; + } + + // TODO @wconstab refactor to use ModuleValue::asTuple instead of new API + virtual SugaredValuePtr asTupleValue( + const SourceRange& loc, + GraphFunction& m) { + throw ErrorReport(loc) << kind() << " cannot be used as a tuplevalue"; + } + + virtual std::vector> asType( + const SourceRange& loc, + Method& m) { + throw ErrorReport(loc) << kind() << " cannot be used as a type"; + } + + // call it like a function, e.g. `outputs = this(inputs)` + virtual std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + // note: names for args will be 'argument 0', 'argument 1', etc.. + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) { + // n_binders is always set to the number of variables an expression is + // syntactically bound to: + // a = foo() # 1 binder (note in this case the single binder might be a + // tuple) a, * b = foo() # 1 binder a, b = foo() # 2 binders foo() # 0 + // binders + // + // In subexpressions, like bar() in foo(bar()), n_binders is always set to + // 1. n_binders is used as a hint to subexpressions to determine how many + // values they should return when that number is ambiguous statically. In + // particular it is currently used to decide how many tensors a call to a + // python function will return. It is only a hint, functions do not have to + // check that n_binders match the number of things they are returning, the + // assignment logic will do that anyway. + + throw ErrorReport(loc) << "cannot call a " << kind(); + } + + // This function is called when to convert a SugaredValue to its iterator. + // For example, when iterating through a Dict we iterate over its keys + virtual std::shared_ptr iter( + const SourceRange& loc, + GraphFunction& m) { + throw ErrorReport(loc) << kind() << " cannot be used as an iterable"; + } + + // If we are iterating over a Sugared Value and it returns a value from this + // function, then we emit an unrolled loop over the variable. This allows us + // to support containers of Heterogenous types, like Module Containers & + // Tuples + virtual c10::optional staticLen() { + return c10::nullopt; + } + + // When iterating over this SugaredValue, should we emit the for loop as an + // unrolled loop. + bool shouldEmitUnrolled() { + return staticLen() != c10::nullopt; + } + + // return length of this thing, if not then it can't be iterated. + // If it does not have a statically-determinable length, then it cannot + // be iterated over with a modulelist. 
If it does it must return a constant
+  // Value *
+  virtual Value* len(const SourceRange& loc, GraphFunction& m) {
+    throw ErrorReport(loc) << "'" << kind() << "'"
+                           << " object is not iterable";
+  }
+
+  // expression for the ith element of an iterable value
+  virtual std::shared_ptr<SugaredValue> getitem(
+      const SourceRange& loc,
+      GraphFunction& m,
+      Value* idx,
+      TypePtr type_hint = nullptr) {
+    throw ErrorReport(loc) << "'" << kind() << "'"
+                           << " object is not subscriptable";
+  }
+
+  virtual ~SugaredValue() = default;
+};
+
+// most things in the environment are just simple value types
+// and not special python syntax sugar types
+struct TORCH_API SimpleValue : public SugaredValue {
+  SimpleValue(Value* value) : value_(value) {}
+  std::string kind() const override {
+    std::stringstream ss;
+    // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
+    ss << "value of type '" << value_->type()->annotation_str() << "'";
+    return ss.str();
+  }
+  Value* asValue(const SourceRange& range, GraphFunction& m) override {
+    return value_;
+  }
+  std::vector<std::shared_ptr<SugaredValue>> asTuple(
+      const SourceRange& loc,
+      GraphFunction& m,
+      const c10::optional<size_t>& size_hint = {}) override;
+  std::shared_ptr<SugaredValue> attr(
+      const SourceRange& loc,
+      GraphFunction& m,
+      const std::string& field) override;
+
+  bool hasAttr(
+      const SourceRange& loc,
+      GraphFunction& m,
+      const std::string& field) override;
+
+  void setAttr(
+      const SourceRange& loc,
+      GraphFunction& m,
+      const std::string& field,
+      Value* newValue) override;
+
+  std::shared_ptr<SugaredValue> call(
+      const SourceRange& loc,
+      GraphFunction& m,
+      // note: names for args will be 'argument 0', 'argument 1', etc..
+      at::ArrayRef<NamedValue> args,
+      at::ArrayRef<NamedValue> kwargs,
+      size_t n_binders) override;
+
+  std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
+      override;
+
+  Value* getValue() const {
+    return value_;
+  }
+
+  Value* len(const SourceRange& loc, GraphFunction& m) override;
+  SugaredValuePtr getitem(
+      const SourceRange& loc,
+      GraphFunction& m,
+      Value* idx,
+      TypePtr type_hint = nullptr) override;
+
+ private:
+  Value* value_;
+};
+
+struct TORCH_API BuiltinFunction : public SugaredValue {
+  BuiltinFunction(Symbol symbol, c10::optional<NamedValue> self)
+      : symbol(symbol), self(std::move(self)) {}
+
+  // The symbol of the function (e.g. `aten::relu`).
+  Symbol symbol;
+
+  // if this is a method, then this is the self argument.
+  c10::optional<NamedValue> self;
+  std::string kind() const override {
+    return "builtin";
+  }
+  std::shared_ptr<SugaredValue> call(
+      const SourceRange& loc,
+      GraphFunction& m,
+      at::ArrayRef<NamedValue> args,
+      at::ArrayRef<NamedValue> kwargs,
+      size_t n_binders) override;
+
+  // try to create this builtin but if it doesn't exist or the self argument
+  // cannot possibly match, then return nullptr. Use in situations where it is
+  // not clear if it is a valid builtin
+  static std::shared_ptr<BuiltinFunction> tryCreate(
+      Symbol symbol,
+      c10::optional<NamedValue> self);
+};
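+// [illustrative sketch, not part of this header] How BuiltinFunction above
+// and BuiltinModule below cooperate when the compiler sees `torch.relu(x)`:
+// the module name resolves to a BuiltinModule, attribute lookup yields a
+// BuiltinFunction for `aten::relu`, and calling it emits a graph node:
+//   auto mod = std::make_shared<BuiltinModule>("aten");
+//   auto fn = mod->attr(loc, fnGraph, "relu");  // BuiltinFunction(aten::relu)
+//   auto out = fn->call(loc, fnGraph, {x}, {}, /*n_binders=*/1);
+// where `loc`, `fnGraph`, and `x` stand in for an enclosing SourceRange,
+// GraphFunction, and NamedValue.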
+struct TORCH_API SugaredTupleValue : public SugaredValue {
+  explicit SugaredTupleValue(std::vector<std::shared_ptr<SugaredValue>> tup)
+      : tup_(std::move(tup)){};
+
+  std::vector<std::shared_ptr<SugaredValue>> asTuple(
+      const SourceRange& loc,
+      GraphFunction& m,
+      const c10::optional<size_t>& size_hint = {}) override {
+    return tup_;
+  };
+
+  Value* asValue(const SourceRange& loc, GraphFunction& m) override {
+    std::vector<Value*> vec;
+    vec.reserve(tup_.size());
+    for (const auto& sv : tup_) {
+      vec.push_back(sv->asValue(loc, m));
+    }
+    Graph& g = *m.graph();
+    return g.insertNode(g.createTuple(vec))->output();
+  }
+
+  std::string kind() const override {
+    return "Tuple";
+  }
+
+  SugaredValuePtr getitem(
+      const SourceRange& loc,
+      GraphFunction& m,
+      Value* idx,
+      TypePtr type_hint = nullptr) override {
+    if (!(idx->type()->cast<IntType>() && toIValue(idx))) {
+      throw ErrorReport(loc)
+          << "Expected integer literal for index but got a variable or non-integer. "
+          << "ModuleList/Sequential indexing is only supported with integer literals. "
+          << "For example, 'i = 4; self.layers[i](x)' will fail because i is not a literal. "
+          << "Enumeration is supported, e.g. 'for index, v in enumerate(self): out = v(inp)'";
+    }
+    auto index = toIValue(idx)->toInt();
+    int64_t adj_index =
+        (index < 0) ? index + static_cast<int64_t>(tup_.size()) : index;
+    if (!(adj_index >= 0 && adj_index < static_cast<int64_t>(tup_.size()))) {
+      throw ErrorReport(loc)
+          << "Index " << index << " out of range of length " << tup_.size();
+    }
+    return tup_.at(adj_index);
+  }
+
+  // This function is called when a SugaredValue is used as an iterator, e.g.
+  // when iterating through a Dict we iterate over its keys
+  std::shared_ptr<SugaredValue> iter(const SourceRange& loc, GraphFunction& m)
+      override {
+    return shared_from_this();
+  };
+
+  // Because this is used to contain SugaredValues of heterogeneous types,
+  // we define staticLen() so that when this is iterated over it is emitted
+  // as an unrolled loop.
+  c10::optional<int64_t> staticLen() override {
+    return static_cast<int64_t>(tup_.size());
+  }
+
+  std::vector<std::shared_ptr<SugaredValue>> tup_;
+};
+
+struct TORCH_API BuiltinModule : public SugaredValue {
+  BuiltinModule(std::string name, c10::optional<int64_t> version = at::nullopt)
+      : name(std::move(name)), version(version) {}
+
+  std::string kind() const override {
+    return "builtin module";
+  }
+  std::shared_ptr<SugaredValue> attr(
+      const SourceRange& loc,
+      GraphFunction& m,
+      const std::string& field) override {
+    if (field == "autograd") {
+      // When referring to torch.autograd, it is also considered to be a
+      // BuiltinModule and we will dispatch to the aten operators for the
+      // methods under its module.
+      return std::make_shared<BuiltinModule>("aten", version);
+    }
+
+    auto sym = Symbol::fromQualString(name + "::" + field);
+    return std::make_shared<BuiltinFunction>(sym, c10::nullopt);
+  }
+
+ private:
+  std::string name;
+  // when we add operator versioning, emit this op as it existed at 'version';
+  // if not set, use the latest version
+  c10::optional<int64_t> version;
+};
+
+// Represents a class, analogous to `int` or `dict`.
Instances of classes, +// like `1` or `{"foo": 5}`, are represented as SimpleValues +struct TORCH_API ClassValue : public SugaredValue { + explicit ClassValue(ClassTypePtr type) : type_(std::move(type)) {} + + // Call the type's constructor, as in: + // n = Foo(constructor_arg) + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + std::string kind() const override { + return type_->str(); + } + + ClassTypePtr type_; +}; + +struct TORCH_API NamedTupleConstructor : public SugaredValue { + explicit NamedTupleConstructor(TupleTypePtr type) : type_(std::move(type)) {} + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + std::string kind() const override { + return type_->str(); + } + + TupleTypePtr type_; +}; + +struct FunctionValue : public SugaredValue { + FunctionValue(Function* callee) : callees_({callee}) {} + FunctionValue(const StrongFunctionPtr& p) + : callees_({p.function_}), cu_(p.cu_) {} + FunctionValue(const std::vector& callees) { + for (const StrongFunctionPtr& callee : callees) { + cu_ = cu_ ? cu_ : callee.cu_; + TORCH_INTERNAL_ASSERT(callee.cu_ == cu_); + callees_.push_back(callee.function_); + } + } + + std::string kind() const override { + return "function"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& f, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + std::vector schemas; + for (Function* callee : callees_) { + try { + callee->ensure_defined(); + } catch (const RecursiveMethodCallError&) { + throw ErrorReport(loc) + << " function '" << callee->name() << "' is called recursively. 
" + << "Recursive calls are not supported"; + } + schemas.push_back(&callee->getSchema()); + } + auto match = matchSchemas(schemas, loc, *f.graph(), args, kwargs); + Value* output = + f.graph()->insertFunctionCall(callees_[match.first], match.second); + output->node()->setSourceRange(loc); + return std::make_shared(output); + } + + const std::vector& callees() { + return callees_; + } + + private: + std::vector callees_; + // TODO holding this thing is creepy + std::shared_ptr cu_; +}; + +struct TORCH_API ClosureValue : public SugaredValue { + ClosureValue(Value* value) : value_(value) { + TORCH_INTERNAL_ASSERT(value_->node()->kind() == prim::Closure); + } + std::string kind() const override { + return "closure"; + } + Value* asValue(const SourceRange& range, GraphFunction& m) override { + return value_; + } + Value* value_; +}; + +// defines how a method obtained from a module/class/interface behaves in script +struct MethodValue : public SugaredValue { + MethodValue(Value* self, std::vector method_names) + : self_(self), method_names_(std::move(method_names)) {} + MethodValue(Value* self, std::string method_name) + : MethodValue(self, std::vector({std::move(method_name)})) {} + + std::string kind() const override { + return "method"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& f, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + std::vector argsWithSelf = {self_}; + argsWithSelf.insert(argsWithSelf.end(), args.begin(), args.end()); + std::vector schemas; + for (const std::string& method_name : method_names_) { + if (auto class_type = self_->type()->cast()) { + Function& method = class_type->getMethod(method_name); + try { + method.ensure_defined(); + } catch (const RecursiveMethodCallError&) { + throw ErrorReport(loc) + << " method '" << method.name() << "' is called recursively. 
" + << "Recursive calls are not supported"; + } + schemas.push_back(&method.getSchema()); + } else if (auto interface_type = self_->type()->cast()) { + schemas.push_back(interface_type->getMethod(method_name)); + } else { + TORCH_INTERNAL_ASSERT( + false, "method constructed that is not a class or interface"); + } + } + auto match = matchSchemas(schemas, loc, *f.graph(), argsWithSelf, kwargs); + Value* output = + f.graph()->insertMethodCall(method_names_[match.first], match.second); + output->node()->setSourceRange(loc); + return std::make_shared(output); + } + + private: + Value* self_; + std::vector method_names_; +}; + +struct TORCH_API PrintValue : public SugaredValue { + std::string kind() const override { + return "print"; + } + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; +}; + +// expressions like int(x) +// these are the same as call prim::Int or equivalent except it +// is a noop when the input is a subtype of 'type' +struct TORCH_API CastValue : public BuiltinFunction { + CastValue(TypePtr type, c10::Symbol method) + : BuiltinFunction(method, c10::nullopt), type_(std::move(type)) {} + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + if (args.size() == 1 && kwargs.empty()) { + auto len_op = std::make_shared(aten::len, at::nullopt); + auto gt_op = std::make_shared(aten::gt, at::nullopt); + auto zero = m.graph()->insertConstant(0); + + auto v = args[0].value(*m.graph()); + if (v->type()->isSubtypeOf(*type_)) { + return std::make_shared(v); + } else if ( + *type_ == *BoolType::get() && + (v->type()->isSubtypeOf(*AnyListType::get()) || + v->type()->isSubtypeOf(*StringType::get()) || + v->type()->cast())) { + auto len = len_op->call(loc, m, {v}, {}, 1); + return gt_op->call(loc, m, {len->asValue(loc, m), zero}, {}, 1); + } + } + return BuiltinFunction::call(loc, m, args, kwargs, n_binders); + } + + private: + TypePtr type_; +}; + +struct TORCH_API TensorCastValue : public SugaredValue { + TensorCastValue(at::ScalarType type, NamedValue self) + : dtype_(type), self_(std::move(self)) {} + + std::string kind() const override { + return "Cast"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + TORCH_INTERNAL_ASSERT(args.empty() && kwargs.empty()); + Value* dtype_const = m.graph()->insertConstant(dtype_, loc); + std::vector kwargs_{ + self_, NamedValue(loc, "dtype", dtype_const)}; + Value* casted_val = m.graph()->insert( + /*opname=*/Symbol::fromQualString("aten::to"), + /*args=*/args, + /*kwargs=*/kwargs_, + /*range=*/loc); + return std::make_shared(casted_val); + } + + at::ScalarType dtype_; + NamedValue self_; +}; + +// builtins operators and functions that call a method if it exists +// on a class type, like 'len(x)' and 'x + y' +struct TORCH_API MagicMethod : public SugaredValue { + MagicMethod(std::string desugared_name, SugaredValuePtr base) + : base_value_(std::move(base)), + desugared_name_(std::move(desugared_name)) {} + + std::string kind() const override { + return desugared_name_; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + private: + SugaredValuePtr base_value_; + std::string desugared_name_; +}; + +// things that look like function applications, but +// perform non-standard 
evaluation are represented +// with SpecialFormValues, e.g. +// isinstance(x, int) +// fork(fn) +// annotate(int, 3) +// The implementation of each value is handled by a case inside emitApplyExpr +struct TORCH_API SpecialFormValue : public SugaredValue { + SpecialFormValue(Symbol form) : form_(form) {} + std::string kind() const override { + return form_.toUnqualString(); + } + Symbol form() const { + return form_; + } + static std::shared_ptr create(Symbol form) { + return std::make_shared(form); + } + + private: + Symbol form_; +}; + +struct TORCH_API LegacyTensorConstructor : public SpecialFormValue { + LegacyTensorConstructor(Symbol form, at::ScalarType dtype, at::Device device) + : SpecialFormValue(form), device_(device), dtype_(dtype) {} + + static std::shared_ptr create( + Symbol form, + at::ScalarType dtype, + at::Device device) { + return std::make_shared(form, dtype, device); + } + at::ScalarType dtype() const { + return dtype_; + } + + private: + at::Device device_; + at::ScalarType dtype_; +}; + +// matched against for special handling of range expressions +struct TORCH_API RangeValue : SugaredValue { + RangeValue( + const SourceRange& loc, + GraphFunction& m, + std::vector input, + c10::optional static_len = c10::nullopt); + + std::string kind() const override { + return "range"; + } + Value* len(const SourceRange& loc, GraphFunction& m) override; + SugaredValuePtr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) override; + std::shared_ptr iter(const SourceRange& loc, GraphFunction& m) + override; + + // When Range is instantiated via enumerate(iterable_with_static_len), + // then it takes the static length of the iterable + c10::optional staticLen() override { + return static_len_; + } + + private: + Value* start_{}; + Value* end_{}; + Value* step_{}; + // a flag to determine if it's a simple range() call with only end_ from + // arguments If true, we will not insert length calculation and index + // derivation nodes to simplify the graph and enable more possible + // optimizations + bool has_only_end_{}; + c10::optional static_len_; +}; + +// Specialized Tree structure to matched against for special handling +// of builtin functions iterables expressions like zip(), enumerate(), etc. +// zip and enumerate can be modeled as a tree of SimpleValue/RangeValue: +// zip(x, y) -> (x, y) with tuple assignment to each loop target +// enumerate(x) -> (range(0, math.inf, 1), x) +// So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be: +// (a, (range(0, math.inf, 1), b), range(0, 100)) +// We use those base iterables to fill in the loop information like +// max_trip_count and set the value table for loop targets +// Iterables can contain lists of SugaredValues like ModuleLists. If it +// does, then we emit it unrolled and require that all values it contains +// have a statically-determinable length. 
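+// [illustrative worked example, not part of this header] Under the modeling
+// described above, `for i, (x, y) in enumerate(zip(xs, ys))` becomes the
+// nested tree:
+//   (range(0, math.inf, 1), (xs, ys))
+// where the outer range drives `i`, the inner pair is tuple-assigned to
+// (x, y), and the trip count is driven by the shortest leaf, here
+// min(len(xs), len(ys)).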
+struct TORCH_API IterableTree : SugaredValue { + IterableTree() = default; + IterableTree( + const SourceRange& range, + GraphFunction& m, + at::ArrayRef children) { + for (const auto& child : children) { + addChild(range, m, child); + } + } + std::string kind() const override { + return "iterabletree"; + } + + std::shared_ptr iter(const SourceRange& loc, GraphFunction& m) + override { + return shared_from_this(); + } + + void addChild( + const SourceRange& range, + GraphFunction& m, + const SugaredValuePtr& iter_value); + + std::vector get_children() { + return children_; + } + + // If this iterable contains a ModuleList or Tuple, then it will have a + // static length, and we will emit it as an unrolled for loop. + c10::optional staticLen() override { + return unroll_length_; + } + + // given a IterableTree node, get all the base iterables/leaves under the + // IterableTree node. This enables + // us to get all the basic SugaredValues that contains valid loop information + // with len() and getitem() + std::vector get_base_iterables(); + + Value* len(const SourceRange& loc, GraphFunction& m) override; + SugaredValuePtr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) override; + + private: + c10::optional unroll_length_ = c10::nullopt; + std::vector children_; +}; + +static inline std::vector toValues( + Graph& g, + at::ArrayRef nvs) { + return fmap(nvs, [&](const NamedValue& v) { return v.value(g); }); +} + +struct SimpleSelf : public Self { + explicit SimpleSelf(ClassTypePtr classType) + : Self(), classType_(std::move(classType)) {} + std::shared_ptr makeSugared(Value* v) const override { + v->setType(classType_); + return std::make_shared(v); + } + ClassTypePtr getClassType() const override { + return classType_; + } + + private: + ClassTypePtr classType_; +}; + +// This is not a SimpleValue so it can not pass through the code paths that +// expect a SimpleValue as a sugared value. 
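+// [illustrative sketch, not part of this header] For a statement like
+//   raise ValueError("bad value: " + str(x))
+// ExceptionValue::call below builds the final message by str()-ifying each
+// argument if needed and concatenating with aten::add, then wraps the result
+// in an ExceptionMessageValue.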
+struct TORCH_API ExceptionMessageValue : public SugaredValue { + explicit ExceptionMessageValue( + Value* value, + Value* qualified_class_name = nullptr) + : value_(value), qualified_class_name_(qualified_class_name) {} + + std::string kind() const override { + return "exception message"; + } + + Value* getValue() { + return value_; + } + + // qualified python class name + Value* getQualifiedClassName() { + return qualified_class_name_; + } + + private: + Value* value_; + Value* qualified_class_name_; +}; + +struct TORCH_API ExceptionValue : public SugaredValue { + explicit ExceptionValue(std::string message) : message_(std::move(message)) {} + + std::string kind() const override { + return "exception"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef /*attributes*/, + size_t /*n_binders*/) override { + auto exception_message = insertConstant(*m.graph(), message_ + ": ", loc); + for (auto& input : args) { + auto input_str = input.value(*m.graph()); + if (!input_str->type()->isSubtypeOf(*StringType::get())) { + input_str = + emitBuiltinCall(loc, *m.graph(), aten::str, {input_str}, {}); + } + exception_message = emitBuiltinCall( + loc, *m.graph(), aten::add, {exception_message, input_str}, {}); + } + return std::make_shared(exception_message); + } + + std::string message_; +}; + +struct TORCH_API SugaredEnumClass : public SugaredValue { + explicit SugaredEnumClass(EnumTypePtr enum_type) + : enum_type_(std::move(enum_type)) {} + + std::string kind() const override { + return "EnumClass"; + } + + SugaredValuePtr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override; + + private: + EnumTypePtr enum_type_; +}; + +struct TORCH_API SliceValue : public SugaredValue { + explicit SliceValue(Value* start, Value* stop, Value* step) + : start_(start), stop_(stop), step_(step) {} + + std::string kind() const override { + return "Python slice value"; + } + + Value* start() { + return start_; + }; + Value* stop() { + return stop_; + }; + Value* step() { + return step_; + }; + + private: + Value* start_; + Value* stop_; + Value* step_; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h new file mode 100644 index 0000000000000000000000000000000000000000..97cdbb237c641a683527f3074b55a2ba6b814c5e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h @@ -0,0 +1,414 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +struct Node; +struct Value; +struct Graph; +struct Module; + +namespace tracer { + +using ::c10::ivalue::Shared; + +using ::c10::IValue; +using ::c10::ivalue::Future; + +using ::c10::ArrayRef; +using ::c10::TupleType; +using ::c10::TupleTypePtr; +using ::c10::ivalue::ConstantString; + +using torch::autograd::Variable; +using variable_list = std::vector; + +TORCH_API std::atomic& getTracerStateWarnMode(); + +struct TORCH_API TracingState + : public std::enable_shared_from_this { + TracingState(); + ~TracingState(); + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::shared_ptr graph; + // 
NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  bool warn = getTracerStateWarnMode();
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  bool strict = true;
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  bool force_outplace = false;
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  std::function<std::string(const Variable& var)> lookup_var_name_fn =
+      [](const Variable& var) { return ""; };
+
+  void enterFrame() {
+    env_stack.emplace_back();
+  }
+
+  void leaveFrame() {
+    env_stack.pop_back();
+  }
+
+  void setValue(const IValue& v, Value* value);
+  void delValue(const IValue& var);
+  Value* getValue(const IValue& var);
+  Value* getOutput(const IValue& var, size_t i);
+  bool hasValue(const IValue& var) const;
+
+  Node* createNode(c10::Symbol op_name, size_t num_outputs);
+  void insertNode(Node* node);
+
+ private:
+  using WeakIValue = at::WeakIValue;
+
+  struct WeakIValueHasher {
+    size_t operator()(const WeakIValue& t) const {
+      return t.hash();
+    }
+  };
+
+  struct WeakIValueEq {
+    bool operator()(const WeakIValue& t1, const WeakIValue& t2) const {
+      return t1.isSameIdentity(t2);
+    }
+  };
+
+  using Frame =
+      std::unordered_map<WeakIValue, Value*, WeakIValueHasher, WeakIValueEq>;
+  std::vector<Frame> env_stack;
+};
+
+// This is meant to be used as a thread local place, where we can store extra
+// info that gets lost when we call into ATen from Python bindings. One example
+// for when this happens is when we get an IntArrayRef argument with e.g. sizes
+// for view. When tracing, those might be tensors, which let us encode extra
+// data dependencies, but once they get to the ATen call where we actually have
+// the tracing logic, they get converted into a raw IntArrayRef, and we lose
+// all information. To prevent this, we temporarily stash it in here.
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+struct ArgumentStash {
+  struct IntArrayRefTrace : std::vector<Value*> {
+    IntArrayRefTrace(int size) : std::vector<Value*>(size, nullptr) {}
+  };
+
+  static bool empty() {
+    return stash.intlists.empty();
+  }
+
+  TORCH_API static void stashIntArrayRefElem(
+      const std::string& arg_name,
+      size_t size,
+      size_t idx,
+      const Variable& var);
+
+  static bool hasIntArrayRef(const std::string& arg_name) {
+    return stash.intlists.count(arg_name) > 0;
+  }
+
+  static IntArrayRefTrace popIntArrayRef(const std::string& arg_name) {
+    auto info = std::move(stash.intlists.at(arg_name));
+    stash.intlists.erase(arg_name);
+    return info;
+  }
+
+  // Value stashing: Use these methods to stash arguments which correspond
+  // to regular Value*'s in the graph. i.e. they don't require special
+  // handling like in the case of IntArrayRefs
+  TORCH_API static void stashValue(
+      const std::string& arg_name,
+      size_t idx,
+      const Variable& var,
+      const c10::TypePtr& type = nullptr);
+
+  static bool hasValue(const std::string& arg_name) {
+    return stash.values.count(arg_name) > 0;
+  }
+
+  static Value* popValue(const std::string& arg_name) {
+    auto info = stash.values.at(arg_name);
+    stash.values.erase(arg_name);
+    return info;
+  }
+
+ private:
+  static thread_local ArgumentStash stash;
+  std::unordered_map<std::string, IntArrayRefTrace> intlists;
+  std::unordered_map<std::string, Value*> values;
+};
+
+// Retrieve or set the current tracing state. Returns a nullptr if tracing is
+// disabled.
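+// [illustrative sketch, not part of this header] Typical guarded use from an
+// operator implementation; `warn` below is a no-op unless a TracingState
+// exists and has warnings enabled:
+//   if (torch::jit::tracer::isTracing()) {
+//     torch::jit::tracer::warn("resize_", torch::jit::tracer::WARN_RESIZE);
+//   }
+// and NoWarn (below) suppresses tracer warnings for a scope via RAII.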
+TORCH_API const std::shared_ptr& getTracingState(); +TORCH_API void setTracingState(std::shared_ptr state); + +inline bool isTracing() { + return static_cast(getTracingState()); +} + +using warn_fn_type = void (*)(const std::string& msg); +TORCH_API extern const char* WARN_PYTHON_DATAFLOW; +TORCH_API extern const char* WARN_CONSTRUCTOR; +TORCH_API extern const char* WARN_RESIZE; +TORCH_API extern const char* STRICT_TRACER_MSG; +TORCH_API void _do_warn(const char* _reason, const char* _kind); +inline void warn(const char* _reason, const char* _kind = nullptr) { + if (const auto& state = getTracingState()) { + if (!state->warn) + return; + _do_warn(_reason, _kind); + } +} +TORCH_API void setWarn(warn_fn_type fn); + +struct TORCH_API NoWarn { + NoWarn() : state(getTracingState()) { + if (state) { + prev = state->warn; + state->warn = false; + } + } + ~NoWarn() { + if (state) { + state->warn = prev; + } + } + std::shared_ptr state; + bool prev{false}; +}; + +struct WithNestedTracingFrame { + WithNestedTracingFrame() { + getTracingState()->enterFrame(); + } + + ~WithNestedTracingFrame() { + getTracingState()->leaveFrame(); + } +}; +TORCH_API void recordSourceLocation(Node* n); +TORCH_API void setRecordSourceLocation(void (*v)(Node*)); + +TORCH_API std::vector pythonCallstack(); +TORCH_API void setPythonCallstack(std::vector (*v)()); + +// Having finished adding a new 'node' to the graph IR 'setValueTrace' +// associates this node with an output variable, so that further operations +// involving this variable know which node in the IR to reference. +TORCH_API void setValueTrace(const IValue& v, Value* value); + +TORCH_API void delValueTrace(const IValue& var); + +TORCH_API std::function pauseTracing(); + +TORCH_API Value* getValueTrace(const IValue& var); + +TORCH_API std::pair, Stack> trace( + Stack inputs, + const std::function& traced_fn, + std::function var_name_lookup_fn, + bool strict = true, + bool force_outplace = false, + Module* self = nullptr, + const std::vector& argument_names = {}); + +TORCH_API void abandon(); + +// NB: those serve both as an intermediate steps in addInputs below, +// as well as the overloads that terminate template recursion +TORCH_API void addInputs(Node* n, const char* name, int64_t value); +TORCH_API void addInputs(Node* n, const char* name, c10::SymInt value); +TORCH_API void addInputs( + Node* n, + const char* name, + c10::optional value); +TORCH_API void addInputs(Node* n, const char* name, bool value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, double value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, const at::Scalar& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, const at::Tensor& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, ArrayRef value); +TORCH_API void addInputs(Node* n, const char* name, c10::SymIntArrayRef value); +TORCH_API void addInputs( + Node* n, + const char* name, + c10::optional value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional>& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const at::OptionalIntArrayRef& opt_value); +TORCH_API void addInputs( + Node* n, + const char* name, + const 
at::OptionalSymIntArrayRef& opt_value); +TORCH_API void addInputs( + Node* n, + const char* name, + ArrayRef value, + bool allow_undefined = false); +TORCH_API void addInputs( + Node* n, + const char* name, + std::vector value, + bool allow_undefined = false); +TORCH_API void addInputs( + Node* n, + const char* name, + at::ITensorListRef value, + bool allow_undefined = false); +TORCH_API void addInputs( + Node* n, + const char* name, + const List>& value); +TORCH_API void addInputs( + Node* n, + const char* name, + ArrayRef> value, + const c10::ClassTypePtr& class_type); +TORCH_API void addInputs(Node* n, const char* name, ArrayRef value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional>& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::string_view value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, at::Device value); +TORCH_API void addInputs(Node* n, const char* name, c10::Stream stream); +TORCH_API void addInputs(Node* n, const char* name, at::Layout value); +TORCH_API void addInputs(Node* n, const char* name, at::ScalarType value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs(Node* n, const char* name, at::MemoryFormat value); +TORCH_API void addInputs( + Node* n, + const char* name, + c10::optional value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::optional& value); + +inline void addInputs( + Node* n, + const char* name, + const std::vector& value) { + AT_ERROR("Tracing a list of bool type is currently not supported!"); +} + +template +void addInputs(Node* n, const char* name, ArrayRef value) { + AT_ERROR("Tracing a list of arbitrary type is currently not supported!"); +} +template +void addInputs( + Node* n, + const char* name, + const std::unordered_map& value) { + AT_ERROR("Tracing a dict of arbitrary types is currently not supported!"); +} + +template +void addInputs(Node* n, const char* name, std::array value) { + throw std::runtime_error( + "Found an unsupported argument type in the JIT tracer. File a bug report."); +} + +TORCH_API void addInputs( + Node* n, + const char* name, + const c10::intrusive_ptr& obj); + +TORCH_API void ensureUniqueIfOutOfPlaced( + const char* name, + const at::Tensor& tensor); +TORCH_API void ensureUniqueIfOutOfPlaced( + const char* name, + const c10::optional& tensor); + +template < + typename T, + typename = torch::enable_if_t<( + !std::is_convertible, at::TensorList>::value && + !std::is_convertible, c10::List>::value && + !std::is_convertible, at::Tensor>::value && + !std::is_convertible< + torch::decay_t, + c10::intrusive_ptr>::value)>> +void addOutput(Node* node, T&&) { + AT_ERROR( + "Found an unsupported argument type ", + c10::demangle_type(), + " in the JIT tracer. 
File a bug report."); +} +TORCH_API void addOutput(Node* node, const at::Tensor& tensor); +TORCH_API void setOutput(Value* value, const at::Tensor& output); +TORCH_API void addOutput(Node* node, const std::vector& list); +TORCH_API void addOutput(Node* node, const c10::List& list); +TORCH_API void addOutput( + Node* node, + const c10::intrusive_ptr& output); + +TORCH_API autograd::Variable getSizeOf( + const autograd::Variable& var, + int64_t dim); + +TORCH_API autograd::Variable getNumelOf(const autograd::Variable& var); + +} // namespace tracer +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h new file mode 100644 index 0000000000000000000000000000000000000000..33a1223581866dcb10df6fcddd677b975217e1d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h @@ -0,0 +1,220 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace torch { +namespace jit { + +// Trees are used to represent all forms of TC IR, pre- and post-typechecking. +// Rather than have a full class hierarchy for all TC statements, trees are a +// slight variation of Lisp s-expressions. For instance, the expression a*b+1 +// is represented as: +// (+ (* (ident a) (ident b)) (const 1)) +// Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which +// define stringValue(). Everything else is a Compound object, which has a +// 'kind' that is a token from lexer.h's TokenKind enum. Single-character +// operators like '+' are represented using the character itself (so, add.kind() +// would be '+'). Each Compound object also contains a list of subtrees and is +// associated with a SourceRange for error reporting. +// Memory management of trees is done using intrusive_ptr. + +struct Tree; +using TreeRef = c10::intrusive_ptr; +using TreeList = at::SmallVector; + +struct Tree : c10::intrusive_ptr_target { + Tree(int kind_) : kind_(kind_) {} + int kind() const { + return kind_; + } + virtual bool isAtom() const { + return true; + } + virtual const SourceRange& range() const { + throw std::runtime_error("is an Atom"); + } + virtual const std::string& stringValue() const { + throw std::runtime_error("stringValue can only be called on TK_STRING"); + } + virtual const TreeList& trees() const { + static const TreeList empty_trees = {}; + return empty_trees; + } + const TreeRef& tree(size_t i) const { + return trees().at(i); + } + virtual TreeRef map(const std::function& fn) { + (void)fn; + c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer + // from a raw `this` pointer + // so we need to bump the refcount + // to account for this ownership + return TreeRef::reclaim(this); + } + template + void match(int k, Args&... args) const { + matchD(k, "unknown", 0, args...); + } + template + void matchD(int k, const char* filename, int lineno, Args&... 
args) const { + std::initializer_list vars = {args...}; + matchNumSubtreesD(k, filename, lineno, vars.size(), true); + size_t i = 0; + for (TreeRef* v : vars) { + *v = trees()[i++]; + } + } + void matchNumSubtrees(int k, size_t expected_subtrees) { + return matchNumSubtreesD(k, "unknown", 0, expected_subtrees, false); + } + void matchNumSubtreesD( + int k, + const char* filename, + int lineno, + size_t expected_subtrees, + bool allow_more) const { + if (kind() != k) { + std::stringstream ss; + ss << filename << ":" << lineno << ": expecting kind '" << kindToString(k) + << "' but found '" << kindToString(kind()) << "'\n"; + range().highlight(ss); + throw std::runtime_error(ss.str()); + } + if (trees().size() < expected_subtrees || + (!allow_more && trees().size() != expected_subtrees)) { + std::stringstream ss; + ss << filename << ":" << lineno << ": expected at least " + << expected_subtrees << " subtrees, but found only " << trees().size() + << "\n"; + range().highlight(ss); + throw std::runtime_error(ss.str()); + } + } + ~Tree() override = default; + + private: + int kind_; +}; + +struct String : public Tree { + String(std::string value) : Tree(TK_STRING), value_(std::move(value)) {} + const std::string& stringValue() const override { + return value_; + } + template + static TreeRef create(Args&&... args) { + return c10::make_intrusive(std::forward(args)...); + } + + private: + std::string value_; +}; + +static SourceRange mergeRanges(SourceRange c, const TreeList& others) { + for (const auto& t : others) { + if (t->isAtom()) + continue; + size_t s = std::min(c.start(), t->range().start()); + size_t e = std::max(c.end(), t->range().end()); + c = SourceRange(c.source(), s, e); + } + return c; +} + +struct Compound : public Tree { + Compound(int kind, SourceRange range) + : Tree(kind), range_(std::move(range)) {} + Compound(int kind, const SourceRange& range_, TreeList&& trees_) + : Tree(kind), + range_(mergeRanges(range_, trees_)), + trees_(std::move(trees_)) {} + const TreeList& trees() const override { + return trees_; + } + static TreeRef create( + int kind, + const SourceRange& range_, + TreeList&& trees_) { + return c10::make_intrusive(kind, range_, std::move(trees_)); + } + bool isAtom() const override { + return false; + } + TreeRef map(const std::function& fn) override { + TreeList ret; + for (auto& t : trees()) { + ret.push_back(fn(t)); + } + return Compound::create(kind(), range(), std::move(ret)); + } + + const SourceRange& range() const override { + return range_; + } + + private: + SourceRange range_; + TreeList trees_; +}; + +// tree pretty printer +struct pretty_tree { + pretty_tree(const TreeRef& tree, size_t col = 40) : tree(tree), col(col) {} + const TreeRef& tree; + size_t col; + std::unordered_map flat_strings; + const std::string& get_flat(const TreeRef& t) { + auto it = flat_strings.find(t); + if (it != flat_strings.end()) + return it->second; + + std::stringstream out; + switch (t->kind()) { + case TK_STRING: + out << t->stringValue(); + break; + default: + out << "(" << kindToString(t->kind()); + for (const auto& e : t->trees()) { + out << " " << get_flat(e); + } + out << ")"; + break; + } + auto it_ = flat_strings.emplace(t, out.str()); + return it_.first->second; + } + void print(std::ostream& out, const TreeRef& t, int indent) { + const std::string& s = get_flat(t); + if (indent + s.size() < col || t->isAtom()) { + out << s; + return; + } + std::string k = kindToString(t->kind()); + out << "(" << k; + for (const auto& e : t->trees()) { + out << "\n" << 
std::string(indent + 2, ' '); + print(out, e, indent + 2); + } + out << ")"; + } +}; + +static inline std::ostream& operator<<(std::ostream& out, pretty_tree t_) { + t_.print(out, t_.tree, 0); + return out << std::endl; +} + +static inline std::ostream& operator<<(std::ostream& out, const TreeRef& t) { + return out << pretty_tree(t); +} + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h new file mode 100644 index 0000000000000000000000000000000000000000..a6488c92f40694332c010defcc0f46bea37e0cf5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h @@ -0,0 +1,1275 @@ +#pragma once +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// clang-format off +// TreeView provides a statically-typed way to traverse the tree, which should +// be formed according to the grammar below. +// +// A few notes on types and their aliases: +// - List is really a Tree with kind TK_LIST and elements as subtrees +// - Maybe is really a Tree with kind TK_OPTION that has 0 or 1 subtree of type T +// - Builtin types are: Ident (TK_IDENT), String (TK_STRING) +// +// Param = Param(Maybe type, Ident name) TK_PARAM +// +// Decl = Decl(List params, Maybe return_type) TK_DECL +// Def = Def(Ident name, Decl decl, List body) TK_DEF +// ClassDef = ClassDef(Ident name, TK_CLASS_DEF +// Maybe superclass, +// List body) +// +// Stmt = If(Expr cond, List true_body, List false_body) TK_IF +// | For(List targets, List iters, List body) TK_FOR +// | While(Expr cond, List body) TK_WHILE +// | Global(List idents) TK_GLOBAL +// -- NB: the only type of Expr's allowed on lhs are Var +// Or a tuple containing Var with an optional terminating Starred +// | Assign(Expr lhs, Maybe rhs, Maybe type) TK_ASSIGN +// | AugAssign(Expr lhs, AugAssignKind aug_op, Expr rhs) TK_AUG_ASSIGN +// | Return(List values) TK_RETURN +// | ExprStmt(List expr) TK_EXPR_STMT +// | Raise(Expr expr) TK_RAISE +// | Def TK_DEF +// | With(List targets, List body) TK_WITH +// +// Expr = TernaryIf(Expr cond, Expr true_expr, Expr false_expr) TK_IF_EXPR +// | BinOp(Expr lhs, Expr rhs) +// | And TK_AND +// | Or TK_OR +// | Lt '<' +// | Gt '>' +// | Eq TK_EQ +// | Le TK_LE +// | Ge TK_GE +// | Ne TK_NE +// | Is TK_IS +// | IsNot TK_ISNOT +// | Add '+' +// | Sub '-' +// | Mul '*' +// | Div '/' +// | Mod '%' +// | MatMult '@' +// | Pow TK_POW +// | UnaryOp(Expr expr) +// | Not TK_NOT +// | USub '-' +// | Const(String value) TK_CONST +// -- NB: x.name(y) is desugared into name(x, y) +// | Apply(Ident name, List args, List kwargs) TK_APPLY +// | Select(Expr value, Ident selector) '.' 
+// | Subscript(Expr value, List subscript_exprs) TK_SUBSCRIPT +// | SliceExpr(Maybe start, Maybe end) TK_SLICE_EXPR +// | Var(Ident name) TK_VAR +// | ListLiteral(List inputs) TK_LIST_LITERAL +// | TupleLiteral(List inputs) TK_TUPLE_LITERAL +// | Starred(Expr expr) TK_STARRED +// | WithItem(Expr target, Maybe var) TK_WITH_ITEM +// -- NB: only allowed expressions are Const or List(Const) +// (List as a value, not type constructor) +// Attribute = Attribute(Ident name, Expr value) TK_ATTRIBUTE +// +// AugAssignKind = +// | Add() TK_PLUS_EQ +// | Sub() TK_MINUS_EQ +// | Mul() TK_TIMES_EQ +// | Div() TK_DIV_EQ +// | Mod() TK_MOD_EQ +// + +// Each subclass of TreeView should provide: +// 1. Constructor that takes a TreeRef, and checks that it's of the right type. +// 2. Accessors that get underlying information out of the object. If they +// return subtrees, they should wrap them in appropriate views too. +// 3. Static method 'create' that creates the underlying TreeRef object +// for every TreeRef kind that has a TreeView, the parser always uses +// (e.g.) Ident::create rather than Compound::Create, this means that +// changes to the structure of Ident are always made right here rather +// than both in the parser and in this code. +// XXX: these structs should have no fields to prevent slicing when passing by value +// clang-format on +struct TreeView { + explicit TreeView(TreeRef tree) : tree_(std::move(tree)) {} + TreeRef tree() const { + return tree_; + } + const SourceRange& range() const { + return tree_->range(); + } + operator TreeRef() const { + return tree_; + } + const TreeRef& get() const { + return tree_; + } + int kind() const { + return tree_->kind(); + } + void dump() const { + std::cout << tree_; + } + + protected: + const TreeRef& subtree(size_t i) const { + return tree_->trees().at(i); + } + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + TreeRef tree_; +}; + +template +struct ListIterator { + ListIterator(TreeList::const_iterator it) : it(it) {} + bool operator!=(const ListIterator& rhs) const { + return it != rhs.it; + } + bool operator==(const ListIterator& rhs) const { + return it == rhs.it; + } + T operator*() const { + return T(*it); + } + ListIterator& operator+=(std::ptrdiff_t n) { + it += n; + return *this; + } + ListIterator& operator++() { + ++it; + return *this; + } + ListIterator& operator--() { + --it; + return *this; + } + + private: + TreeList::const_iterator it; +}; + +template +struct List : public TreeView { + using iterator = ListIterator; + using const_iterator = ListIterator; + + List(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_LIST); + // Iterate over list to temporarily instantiate Ts that will check the type + for (const T& elem : *this) { + (void)elem; // silence unused warning + } + } + iterator begin() const { + return iterator(tree_->trees().begin()); + } + iterator end() const { + return iterator(tree_->trees().end()); + } + bool empty() const { + return tree_->trees().begin() == tree_->trees().end(); + } + T operator[](size_t i) const { + return T(subtree(i)); + } + TreeRef map(const std::function& fn) { + return tree_->map([&](TreeRef v) { return fn(T(v)); }); + } + static List create(const SourceRange& range, const std::vector& subtrees) { + TreeList type_erased_sub{subtrees.begin(), subtrees.end()}; + return List(Compound::create(TK_LIST, range, std::move(type_erased_sub))); + } + static List unsafeCreate(const SourceRange& range, TreeList&& subtrees) { + return List(Compound::create(TK_LIST, 
range, std::move(subtrees))); + } + size_t size() const { + return tree_->trees().size(); + } +}; + +template +struct Maybe : public TreeView { + explicit Maybe(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_OPTION); + if (tree_->trees().size() > 1) + throw ErrorReport(tree) << "Maybe trees can have at most one subtree"; + } + /* implicit */ Maybe(const T& tree) : TreeView(tree) {} + bool present() const { + return tree_->trees().size() > 0; + } + T get() const { + return T(tree_->trees().at(0)); + } + TreeRef map(const std::function& fn) { + return tree_->map([&](TreeRef v) { return fn(T(v)); }); + } + static Maybe create(const SourceRange& range) { + return Maybe(Compound::create(TK_OPTION, range, {})); + } + static Maybe create(const SourceRange& range, const T& value) { + return Maybe(Compound::create(TK_OPTION, range, {value})); + } +}; + +struct Ident : public TreeView { + explicit Ident(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_IDENT); + } + const std::string& name() const { + return subtree(0)->stringValue(); + } + static Ident create(const SourceRange& range, std::string name) { + return Ident( + Compound::create(TK_IDENT, range, {String::create(std::move(name))})); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Base types (production LHS) +//////////////////////////////////////////////////////////////////////////////// + +struct Stmt : public TreeView { + explicit Stmt(const TreeRef& tree) : TreeView(tree) { + switch (tree->kind()) { + case TK_IF: + case TK_FOR: + case TK_WHILE: + case TK_GLOBAL: + case TK_ASSIGN: + case TK_AUG_ASSIGN: + case TK_RETURN: + case TK_EXPR_STMT: + case TK_RAISE: + case TK_ASSERT: + case TK_PASS: + case TK_BREAK: + case TK_DELETE: + case TK_CONTINUE: + case TK_DEF: + case TK_WITH: + return; + default: + throw ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid Stmt"; + } + } +}; + +struct Expr : public TreeView { + explicit Expr(const TreeRef& tree) : TreeView(tree) { + switch (tree->kind()) { + case TK_IF_EXPR: + case TK_AND: + case TK_OR: + case '<': + case '>': + case TK_IS: + case TK_ISNOT: + case TK_EQ: + case TK_LE: + case TK_GE: + case TK_NE: + case '+': + case '-': + case TK_UNARY_MINUS: + case '~': + case '*': + case TK_STARRED: + case '/': + case '%': + case TK_NOT: + case TK_CONST: + case TK_STRINGLITERAL: + case TK_TRUE: + case TK_FALSE: + case TK_NONE: + case TK_NONE_TYPE: + case TK_CAST: + case TK_APPLY: + case '.': + case TK_SUBSCRIPT: + case TK_SLICE_EXPR: + case TK_VAR: + case TK_LIST_LITERAL: + case TK_TUPLE_LITERAL: + case TK_DICT_LITERAL: + case '@': + case TK_POW: + case TK_LSHIFT: + case TK_RSHIFT: + case TK_FLOOR_DIV: + case '&': + case '^': + case '|': + case TK_LIST_COMP: + case TK_DICT_COMP: + case TK_DOTS: + case TK_IN: + case TK_WITH_ITEM: + return; + default: + throw ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid Expr"; + } + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Helper nodes (mostly for function arguments) +//////////////////////////////////////////////////////////////////////////////// + +struct Attribute : public TreeView { + explicit Attribute(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_ATTRIBUTE); + } + Ident name() const { + return Ident(subtree(0)); + } + Expr value() const { + return Expr(subtree(1)); + } + static Attribute create( + const SourceRange& range, + const Ident& name, + const TreeRef& value) { + return 
Attribute(Compound::create(TK_ATTRIBUTE, range, {name, value})); + } +}; + +struct Param : public TreeView { + explicit Param(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_PARAM); + } + static Param create( + const SourceRange& range, + const Ident& ident, + const Maybe& type, + const Maybe& def, + bool kwarg_only) { + TreeRef kwarg_only_tree = + Compound::create(kwarg_only ? TK_TRUE : TK_FALSE, range, {}); + return Param(Compound::create( + TK_PARAM, range, {ident, type, def, std::move(kwarg_only_tree)})); + } + Ident ident() const { + return Ident(subtree(0)); + } + Maybe type() const { + return Maybe(subtree(1)); + } + Maybe defaultValue() const { + return Maybe(subtree(2)); + } + bool kwarg_only() const { + return TK_TRUE == subtree(3)->kind(); + } + Param withType(const Maybe& typ) const { + return Param::create(range(), ident(), typ, defaultValue(), kwarg_only()); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Top level definitions +//////////////////////////////////////////////////////////////////////////////// + +struct Decl : public TreeView { + explicit Decl(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_DECL); + } + List params() const { + return List(subtree(0)); + } + Maybe return_type() const { + return Maybe(subtree(1)); + } + static Decl create( + const SourceRange& range, + const List& params, + const Maybe& return_type) { + return Decl(Compound::create(TK_DECL, range, {params, return_type})); + } +}; + +struct Def : public TreeView { + explicit Def(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_DEF); + } + Def withName(std::string new_name) const { + auto new_ident = Ident::create(name().range(), std::move(new_name)); + return create(range(), new_ident, decl(), statements()); + } + Def withDecl(const Decl& decl) const { + return create(range(), name(), decl, statements()); + } + Ident name() const { + return Ident(subtree(0)); + } + Decl decl() const { + return Decl(subtree(1)); + } + List statements() const { + return List(subtree(2)); + } + static Def create( + const SourceRange& range, + const Ident& name, + const Decl& decl, + const List& stmts) { + return Def(Compound::create(TK_DEF, range, {name, decl, stmts})); + } +}; + +// Property represents a named attribute combined with a getter and setter +// method to access and mutate that attribute. 
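+//
+// A construction sketch (illustrative only, not part of this header; assumes
+// a SourceRange `r` and a getter `Def` named `getter_def` are already in
+// scope):
+//
+//   auto prop = Property::create(
+//       r, Ident::create(r, "scale"), getter_def, Maybe<Def>::create(r));
+//   prop.name().name();      // "scale"
+//   prop.setter().present(); // false: no setter was supplied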
+struct Property : public TreeView { + explicit Property(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_PROP); + } + Ident name() const { + return Ident(subtree(0)); + } + Def getter() const { + return Def(subtree(1)); + } + Maybe setter() const { + return Maybe(subtree(2)); + } + static Property create( + const SourceRange& range, + const Ident& name, + const Def& getter, + const Maybe& setter) { + return Property(Compound::create(TK_PROP, range, {name, getter, setter})); + } +}; + +struct Assign; + +struct ClassDef : public TreeView { + explicit ClassDef(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_CLASS_DEF); + } + explicit ClassDef(TreeRef&& tree) : TreeView(std::move(tree)) { + tree_->match(TK_CLASS_DEF); + } + ClassDef withName(std::string new_name) const { + auto new_ident = Ident::create(name().range(), std::move(new_name)); + return create(range(), new_ident, superclass(), body()); + } + Ident name() const { + return Ident(subtree(0)); + } + Maybe superclass() const { + return Maybe(subtree(1)); + } + List body() const { + return List(subtree(2)); + } + Maybe> properties() const { + return Maybe>(subtree(3)); + } + Maybe> assigns() const { + return Maybe>(subtree(4)); + } + static ClassDef create( + const SourceRange& range, + const Ident& name, + const Maybe& superclass, + const List& body) { + return ClassDef(Compound::create( + TK_CLASS_DEF, + range, + {name, + superclass, + body, + Maybe>::create(range), + Maybe>::create(range)})); + } + static ClassDef create( + const SourceRange& range, + const Ident& name, + const Maybe& superclass, + const List& body, + const List& properties, + const List& assigns); +}; + +TORCH_API std::vector getUnresolvedClassAttributes( + const ClassDef& def); + +//////////////////////////////////////////////////////////////////////////////// +// Statements +//////////////////////////////////////////////////////////////////////////////// + +struct If : public Stmt { + explicit If(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_IF); + } + Expr cond() const { + return Expr(subtree(0)); + } + List trueBranch() const { + return List(subtree(1)); + } + List falseBranch() const { + return List(subtree(2)); + } + If withNewBranches( + const List& true_branch, + const List& false_branch) const { + return create(range(), cond(), true_branch, false_branch); + } + static If create( + const SourceRange& range, + const Expr& cond, + const List& true_branch, + const List& false_branch) { + return If( + Compound::create(TK_IF, range, {cond, true_branch, false_branch})); + } +}; + +struct While : public Stmt { + explicit While(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_WHILE); + } + Expr cond() const { + return Expr(subtree(0)); + } + List body() const { + return List(subtree(1)); + } + static While create( + const SourceRange& range, + const Expr& cond, + const List& body) { + return While(Compound::create(TK_WHILE, range, {cond, body})); + } +}; + +struct For : public Stmt { + explicit For(const TreeRef& tree) : Stmt(tree) { + tree->match(TK_FOR); + } + List targets() const { + return List(subtree(0)); + } + List itrs() const { + return List(subtree(1)); + } + List body() const { + return List(subtree(2)); + } + static For create( + const SourceRange& range, + const List& targets, + const List& itrs, + const List& body) { + return For(Compound::create(TK_FOR, range, {targets, itrs, body})); + } +}; + +// TODO: supports only single comprehension for now +struct ListComp : public Expr { + explicit ListComp(const TreeRef& tree) : 
Expr(tree) { + tree->match(TK_LIST_COMP); + } + Expr elt() const { + return Expr(subtree(0)); + } + Expr target() const { + return Expr(subtree(1)); + } + Expr iter() const { + return Expr(subtree(2)); + } + // TODO: no ifs for now + static ListComp create( + const SourceRange& range, + const Expr& elt, + const Expr& target, + const Expr& iter) { + return ListComp(Compound::create(TK_LIST_COMP, range, {elt, target, iter})); + } +}; + +// TODO: supports only single comprehension for now +struct DictComp : public Expr { + explicit DictComp(const TreeRef& tree) : Expr(tree) { + tree->match(TK_DICT_COMP); + } + Expr key() const { + return Expr(subtree(0)); + } + Expr value() const { + return Expr(subtree(1)); + } + Expr target() const { + return Expr(subtree(2)); + } + Expr iter() const { + return Expr(subtree(3)); + } + // TODO: no ifs for now + static DictComp create( + const SourceRange& range, + const Expr& key, + const Expr& value, + const Expr& target, + const Expr& iter) { + return DictComp( + Compound::create(TK_DICT_COMP, range, {key, value, target, iter})); + } +}; + +struct Global : public Stmt { + explicit Global(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_GLOBAL); + } + List names() { + return List(subtree(0)); + } + static Global create(const SourceRange& range, const List& names) { + return Global(Compound::create(TK_GLOBAL, range, {names})); + } +}; + +struct AugAssignKind : public TreeView { + explicit AugAssignKind(const TreeRef& tree) : TreeView(tree) { + switch (tree->kind()) { + case '+': + case '-': + case '*': + case '/': + case '%': + case '|': + case '&': + case '^': + case TK_POW: + case TK_LSHIFT: + case TK_RSHIFT: + return; + default: + throw ErrorReport(tree) << "is not a valid AugAssignKind"; + } + } +}; + +// Augmented assignment, like "foo += bar" +struct AugAssign : public Stmt { + explicit AugAssign(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_AUG_ASSIGN); + } + static AugAssign create( + const SourceRange& range, + const Expr& lhs, + const AugAssignKind& aug_op, + const Expr& rhs) { + return AugAssign( + Compound::create(TK_AUG_ASSIGN, range, {lhs, aug_op, rhs})); + } + Expr lhs() const { + return Expr(subtree(0)); + } + int aug_op() const { + return subtree(1)->kind(); + } + Expr rhs() const { + return Expr(subtree(2)); + } +}; + +struct Assign : public Stmt { + explicit Assign(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_ASSIGN); + } + static Assign create( + const SourceRange& range, + const List& lhs, + const Maybe& rhs, + const Maybe& type) { + return Assign(Compound::create(TK_ASSIGN, range, {lhs, rhs, type})); + } + + List lhs_list() const { + return List(subtree(0)); + } + + Expr lhs() const { + const auto& li = lhs_list(); + TORCH_INTERNAL_ASSERT(li.size() == 1); + return *li.begin(); + } + + Maybe rhs() const { + return Maybe(subtree(1)); + } + + Maybe type() const { + return Maybe(subtree(2)); + } +}; + +struct Return : public Stmt { + explicit Return(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_RETURN); + } + Expr expr() const { + return Expr(subtree(0)); + } + static Return create(const SourceRange& range, const Expr& value) { + return Return(Compound::create(TK_RETURN, range, {value})); + } +}; + +struct Raise : public Stmt { + explicit Raise(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_RAISE); + } + Expr expr() const { + return Expr(subtree(0)); + } + static Raise create(const SourceRange& range, const Expr& expr) { + return Raise(Compound::create(TK_RAISE, range, {expr})); + } +}; + +struct Assert 
: public Stmt { + explicit Assert(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_ASSERT); + } + Expr test() const { + return Expr(subtree(0)); + } + Maybe msg() const { + return Maybe(subtree(1)); + } + static Assert create( + const SourceRange& range, + const Expr& test, + const Maybe& msg) { + return Assert(Compound::create(TK_ASSERT, range, {test, msg})); + } +}; + +struct Pass : public Stmt { + explicit Pass(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_PASS); + } + static Pass create(const SourceRange& range) { + return Pass(Compound::create(TK_PASS, range, {})); + } +}; + +struct Dots : public Expr { + explicit Dots(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_DOTS); + } + static Dots create(const SourceRange& range) { + return Dots(Compound::create(TK_DOTS, range, {})); + } +}; + +struct Break : public Stmt { + explicit Break(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_BREAK); + } + static Break create(const SourceRange& range) { + return Break(Compound::create(TK_BREAK, range, {})); + } +}; + +struct Continue : public Stmt { + explicit Continue(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_CONTINUE); + } + static Continue create(const SourceRange& range) { + return Continue(Compound::create(TK_CONTINUE, range, {})); + } +}; + +struct ExprStmt : public Stmt { + explicit ExprStmt(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_EXPR_STMT); + } + Expr expr() { + return Expr(subtree(0)); + } + static ExprStmt create(const SourceRange& range, const Expr& list) { + return ExprStmt(Compound::create(TK_EXPR_STMT, range, {list})); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Expressions +//////////////////////////////////////////////////////////////////////////////// + +struct BinOp : public Expr { + explicit BinOp(const TreeRef& tree) : Expr(tree) { + switch (tree->kind()) { + case TK_AND: + case TK_OR: + case '<': + case '>': + case TK_IS: + case TK_ISNOT: + case TK_EQ: + case TK_LE: + case TK_GE: + case TK_NE: + case '+': + case '*': + case '/': + case '-': + case '@': + case TK_POW: + case TK_LSHIFT: + case TK_RSHIFT: + case '%': + case '&': + case '^': + case '|': + case TK_FLOOR_DIV: + case TK_IN: + if (tree->trees().size() != 2) + throw ErrorReport(tree) + << "BinOp expected 2 subtrees, found " << tree->trees().size(); + return; + default: + throw ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid BinOp"; + } + } + Expr lhs() const { + return Expr(subtree(0)); + } + Expr rhs() const { + return Expr(subtree(1)); + } + static BinOp create( + const SourceRange& range, + int kind, + const Expr& lhs, + const Expr& rhs) { + return BinOp(Compound::create(kind, range, {lhs, rhs})); + } +}; + +struct UnaryOp : public Expr { + explicit UnaryOp(const TreeRef& tree) : Expr(tree) { + switch (tree->kind()) { + case TK_UNARY_MINUS: + case '~': + case TK_NOT: + if (tree->trees().size() != 1) + throw ErrorReport(tree) + << "UnaryOp expected 1 subtree, found " << tree->trees().size(); + return; + default: + throw ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid UnaryOp"; + } + } + static UnaryOp create(const SourceRange& range, int kind, const Expr& expr) { + return UnaryOp(Compound::create(kind, range, {expr})); + } +}; + +struct Const : public Expr { + explicit Const(const TreeRef& tree) : Expr(tree) { + tree_->matchNumSubtrees(TK_CONST, 1); + } + bool isFloatingPoint() const { + if (isComplex()) + return false; + + bool is_inf = subtree(0)->stringValue() == 
"inf"; + return is_inf || + subtree(0)->stringValue().find_first_of(".eE") != std::string::npos; + } + bool isIntegral() const { + return !isFloatingPoint() && !isComplex(); + } + bool isComplex() const { + return subtree(0)->stringValue().find_first_of('j') != std::string::npos; + } + int64_t asIntegral() const { + try { + // NOLINTNEXTLINE(modernize-use-nullptr) + return std::stoll(subtree(0)->stringValue(), /*__idx=*/0, /*base=*/0); + } catch (const std::out_of_range&) { + throw ErrorReport(range()) << "Integral constant out of range " + "(must fit in a signed 64 bit integer)"; + } + } + double asFloatingPoint() const { + // We can't pass in nullptr as the dummy pointer gets dereferenced for + // Android version of strtod_c(). + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + char* dummy; + return torch::jit::strtod_c(subtree(0)->stringValue().c_str(), &dummy); + } + c10::complex asComplex() const { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + char* dummy; + auto str = subtree(0)->stringValue(); + // Complex numbers (a+bj, where a is non-zero) are parsed as an addition + // between float/int a and a complex number "bj". When a is 0, a complex + // number bj is created as above. So, while parsing the string, we don't + // have to worry about the real component of the complex number. + auto imag = + torch::jit::strtod_c(str.substr(0, str.size() - 1).c_str(), &dummy); + return c10::complex(0, imag); + } + const std::string& text() const { + return subtree(0)->stringValue(); + } + static Const create(const SourceRange& range, const std::string& value) { + return Const(Compound::create(TK_CONST, range, {String::create(value)})); + } +}; + +struct StringLiteral : public Expr { + explicit StringLiteral(const TreeRef& tree) : Expr(tree) { + tree_->matchNumSubtrees(TK_STRINGLITERAL, 1); + } + const std::string& text() const { + return subtree(0)->stringValue(); + } + static StringLiteral create( + const SourceRange& range, + const std::string& value) { + return StringLiteral( + Compound::create(TK_STRINGLITERAL, range, {String::create(value)})); + } +}; + +struct Apply : public Expr { + explicit Apply(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_APPLY); + } + Expr callee() const { + return Expr(subtree(0)); + } + List inputs() const { + return List(subtree(1)); + } + List attributes() const { + return List(subtree(2)); + } + static Apply create( + const SourceRange& range, + const Expr& callee, + const List& inputs, + const List& attributes) { + return Apply( + Compound::create(TK_APPLY, range, {callee, inputs, attributes})); + } +}; + +struct Select : public Expr { + explicit Select(const TreeRef& tree) : Expr(tree) { + tree_->match('.'); + } + Expr value() const { + return Expr(subtree(0)); + } + Ident selector() const { + return Ident(subtree(1)); + } + static Select create( + const SourceRange& range, + const Expr& value, + const Ident& selector) { + return Select(Compound::create('.', range, {value, selector})); + } +}; + +struct SliceExpr : public Expr { + explicit SliceExpr(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_SLICE_EXPR); + } + Maybe start() const { + return Maybe(subtree(0)); + } + Maybe end() const { + return Maybe(subtree(1)); + } + Maybe step() const { + return Maybe(subtree(2)); + } + Expr startOr(int64_t alternative) const { + const auto startOption = start(); + return startOption.present() ? 
startOption.get() : createInt(alternative); + } + Expr endOr(int64_t alternative) const { + const auto endOption = end(); + return endOption.present() ? endOption.get() : createInt(alternative); + } + Expr stepOr(int64_t alternative) const { + const auto stepOption = step(); + return stepOption.present() ? stepOption.get() : createInt(alternative); + } + static SliceExpr create( + const SourceRange& range, + const Maybe& start, + const Maybe& end, + const Maybe& step) { + return SliceExpr( + Compound::create(TK_SLICE_EXPR, range, {start, end, step})); + } + + private: + Expr createInt(int64_t value) const { + return Expr(Const::create(range(), c10::to_string(value))); + } +}; + +struct Subscript : public Expr { + explicit Subscript(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_SUBSCRIPT); + } + Expr value() const { + return Expr(subtree(0)); + } + List subscript_exprs() const { + return List(subtree(1)); + } + static Subscript create( + const SourceRange& range, + const Expr& value, + const List& subscript_exprs) { + auto whole_range = SourceRange( + range.source(), range.start(), subscript_exprs.range().end() + 1); + return Subscript( + Compound::create(TK_SUBSCRIPT, whole_range, {value, subscript_exprs})); + } +}; + +struct Var : public Expr { + explicit Var(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_VAR); + }; + Ident name() const { + return Ident(subtree(0)); + } + static Var create(const SourceRange& range, const Ident& name) { + return Var(Compound::create(TK_VAR, range, {name})); + } +}; + +// WithItem represents an item using with a WithStmt. +struct WithItem : public Expr { + explicit WithItem(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_WITH_ITEM); + } + + Expr target() const { + return Expr(subtree(0)); + } + + Maybe var() const { + return Maybe(subtree(1)); + } + + static WithItem create( + const SourceRange& range, + const Expr& target, + const Maybe& var) { + return WithItem(Compound::create(TK_WITH_ITEM, range, {target, var})); + } +}; + +// With represents a with statement consisting of a list of with items and a +// body of statements. 
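+//
+// A construction sketch (illustrative only; assumes a SourceRange `r` and
+// Exprs `ctx_mgr` and `body_expr` are already in scope):
+//
+//   auto item = WithItem::create(r, ctx_mgr, Maybe<Var>::create(r));
+//   auto with = With::create(
+//       r,
+//       List<WithItem>::create(r, {item}),
+//       List<Stmt>::create(r, {ExprStmt::create(r, body_expr)}));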
+struct With : public Stmt { + explicit With(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_WITH); + } + + List targets() const { + return List(subtree(0)); + } + + List body() const { + return List(subtree(1)); + } + + static With create( + const SourceRange& range, + const List& targets, + const List& body) { + return With(Compound::create(TK_WITH, range, {targets, body})); + } +}; + +struct TernaryIf : public Expr { + explicit TernaryIf(const TreeRef& tree) : Expr(tree) { + tree_->matchNumSubtrees(TK_IF_EXPR, 3); + }; + Expr cond() const { + return Expr(subtree(0)); + } + Expr true_expr() const { + return Expr(subtree(1)); + } + Expr false_expr() const { + return Expr(subtree(2)); + } + static TernaryIf create( + const SourceRange& range, + const Expr& cond, + const Expr& true_expr, + const Expr& false_expr) { + return TernaryIf( + Compound::create(TK_IF_EXPR, range, {cond, true_expr, false_expr})); + }; +}; + +struct ListLiteral : public Expr { + explicit ListLiteral(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_LIST_LITERAL); + } + List inputs() const { + return subtree(0); + } + static ListLiteral create( + const SourceRange& range, + const List& inputs) { + return ListLiteral(Compound::create(TK_LIST_LITERAL, range, {inputs})); + } +}; + +struct TupleLiteral : public Expr { + explicit TupleLiteral(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_TUPLE_LITERAL); + } + List inputs() const { + return subtree(0); + } + static TupleLiteral create( + const SourceRange& range, + const List& inputs) { + return TupleLiteral(Compound::create(TK_TUPLE_LITERAL, range, {inputs})); + } +}; + +struct DictLiteral : public Expr { + explicit DictLiteral(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_DICT_LITERAL); + } + List key_inputs() const { + return subtree(0); + } + List value_inputs() const { + return subtree(1); + } + static DictLiteral create( + const SourceRange& range, + const List& keys, + const List& values) { + return DictLiteral( + Compound::create(TK_DICT_LITERAL, range, {keys, values})); + } +}; + +struct Starred : public Expr { + explicit Starred(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_STARRED); + } + Expr expr() const { + return Expr(subtree(0)); + } + static Starred create(const SourceRange& range, const Expr& expr) { + return Starred(Compound::create(TK_STARRED, range, {expr})); + } +}; + +struct Delete : public Stmt { + explicit Delete(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_DELETE); + } + List targets() const { + return subtree(0); + } + static Delete create(const SourceRange& range, const List& targets) { + return Delete(Compound::create(TK_DELETE, range, {targets})); + } +}; + +/* + * NOTE: transforming PEP 604 union into equivalent union type + * + * NOTE: Union[int, float] parses into: + * expr:(subscript + * (variable (ident Union)) + * (list + * (variable (ident int)) + * (variable (ident float)))) + * subscript + * + * NOTE: (int | float) parses into: + * expr:(| + * (variable (ident int)) + * (variable (ident float))) + * | + */ + +inline void _flatten_pep604_union( + const torch::jit::Expr& node, + std::vector* result) { + // flatten possibly nested union expressions like (int | (float | str)) + // into a flat list of expressions like [int, float, str] + if (node.kind() == '|') { + auto as_binop = torch::jit::BinOp(node); + _flatten_pep604_union(as_binop.lhs(), result); + _flatten_pep604_union(as_binop.rhs(), result); + } else { + result->push_back(node); + } +} + +inline std::vector get_pep604_union_members(const 
Expr& node) { + std::vector result; + _flatten_pep604_union(node, &result); + return result; +} + +// Flattens a PEP 604 union into a classical union. +// For example, ((x | y) | z) is transformed into Union[x, y, z]. +inline Expr pep604union_to_union(const Expr& expr) { + // noop if not a pep604 union + if (expr.kind() != '|') + return expr; + + // In order to support unions with more than 2 operands ((x|y)|z), we need to + // recursively flatten the tree of | expressions. + auto members = get_pep604_union_members(expr); + auto synthesised_union = Subscript::create( + expr.range(), + Var::create(expr.range(), Ident::create(expr.range(), "Union")), + List::create(expr.range(), members)); + return std::move(synthesised_union); +} + +} // namespace jit +} // namespace torch + +namespace std { + +template +struct iterator_traits> + : std::iterator_traits {}; + +} // namespace std diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h new file mode 100644 index 0000000000000000000000000000000000000000..e3caf26ba250828b39a0b2658811280aa1e94a79 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h @@ -0,0 +1,21 @@ +#pragma once + +#include +#include +#include + +#include + +namespace torch { +namespace jit { +// Maps the given symbol into an implementation of its behavior at the +// given version. +// See note [Versioned Symbols] +TORCH_API Symbol +get_symbol_for_version(const Symbol name, const uint64_t version); + +// Maps the given kind to the minimum version that supports it. +// See note [Dynamic Versions and torch.jit.save vs. torch.save] +TORCH_API uint64_t get_min_version_for_kind(const NodeKind& kind); +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h new file mode 100644 index 0000000000000000000000000000000000000000..128b193b63aa53d70272754c81226929871f1006 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +#include +#include +#include + +namespace torch { +namespace jit { +namespace mobile { + +using Stack = std::vector; +using DebugHandle = int64_t; + +class Function; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct Code { + std::vector instructions_; + std::vector debug_handles_; + std::vector op_names_; + std::vector operator_input_sizes_; + std::vector> operators_; + std::vector constants_; + std::vector types_; + // TODO After we actually export CALL instructions we can remove this. + // We may need a two-stage importing scheme, where we firstly construct all + // function objects, and then append referenced function pointers. This could + // be done in parseMethods(). + std::vector functions_; + size_t register_size_ = 0; // Aggregated output size. 
+  // initialized means the operators_ array has been filled with operators
+  bool initialized = false;
+};
+
+} // namespace mobile
+} // namespace jit
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3eb202b7f00aa262573d72d7718d3f199eb1776
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h
@@ -0,0 +1,38 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+namespace torch {
+namespace jit {
+
+/**
+ * Loads named parameters from the serialized data in @p in.
+ *
+ * Calls #TORCH_CHECK() if the data format is not recognized.
+ */
+TORCH_API std::map<std::string, at::Tensor> _load_parameters(
+    std::istream& in,
+    c10::optional<at::Device> device = c10::nullopt);
+
+/**
+ * Loads named parameters from the serialized data in @p filename.
+ *
+ * Calls #TORCH_CHECK() if the data format is not recognized.
+ */
+TORCH_API std::map<std::string, at::Tensor> _load_parameters(
+    const std::string& filename,
+    c10::optional<at::Device> device = c10::nullopt);
+
+// NOTE: Please prefer using _load_parameters over using the function below.
+TORCH_API std::map<std::string, at::Tensor> mobile_module_to_parameter_map(
+    const mobile::Module& module);
+
+} // namespace jit
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h
new file mode 100644
index 0000000000000000000000000000000000000000..c4a957d00113360b1da71ef06aff455123df5bf8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h
@@ -0,0 +1,45 @@
+#pragma once
+
+#include
+#include
+
+namespace torch {
+namespace jit {
+namespace mobile {
+
+class Module;
+
+struct TORCH_API Method {
+  Method(const Module* owner, Function* function);
+
+  void run(Stack& stack) const;
+  void run(Stack&& stack) const {
+    run(stack);
+  }
+
+  c10::IValue operator()(std::vector<c10::IValue> stack) const;
+
+  const std::string& name() const {
+    return function_->name();
+  }
+
+  int64_t get_debug_handle(size_t pc) const {
+    return function_->get_debug_handle(pc);
+  }
+
+  Function& function() const {
+    return *function_;
+  }
+
+ private:
+  // Methods are uniquely owned by a single module.
+  // This raw pointer allows referencing the owning module.
+  const Module* owner_;
+
+  // Underlying unbound function
+  Function* function_;
+};
+
+} // namespace mobile
+} // namespace jit
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h
new file mode 100644
index 0000000000000000000000000000000000000000..5e5d87f946355b3771bee469d769bf480a0b9936
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h
@@ -0,0 +1,197 @@
+#pragma once
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+namespace torch {
+namespace jit {
+namespace mobile {
+using Stack = std::vector<c10::IValue>;
+
+// A CompilationUnit object is the one that gets executed by the lite
+// interpreter.
+//
+// A CompilationUnit object contains a list of Method objects. These are methods
+// that appear in the original PyTorch Model. These methods correspond to Python
+// member functions of the Model class.
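+//
+// For example (an illustrative sketch, not part of this header; the file name
+// is hypothetical, and _load_for_mobile() is declared in mobile/import.h):
+//
+//   mobile::Module m = _load_for_mobile("model.ptl");
+//   std::vector<c10::IValue> inputs{torch::ones({1, 3, 224, 224})};
+//   c10::IValue out = m.forward(std::move(inputs));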
+//
+// Methods in turn contain a Function, and a back-pointer to the Module that
+// owns this Method instance.
+//
+// A Function contains a Code Object (code_) which is defined in interpreter.h.
+//
+// A Code object contains the following:
+//
+//   std::vector<Instruction> instructions_;
+//   std::vector<c10::OperatorName> op_names_;
+//   std::vector<std::function<void(Stack&)>> operators_;
+//   std::vector<c10::IValue> constants_;
+//   std::vector<c10::TypePtr> types_;
+//   size_t register_size_; // Aggregated output size.
+//
+class CompilationUnit {
+ public:
+  void register_function(std::unique_ptr<Function> fn);
+  std::vector<std::unique_ptr<Function>>& methods() {
+    return methods_;
+  }
+  const std::vector<std::unique_ptr<Function>>& methods() const {
+    return methods_;
+  }
+  Function* find_function(const c10::QualifiedName& qn);
+  const Function* find_function(const c10::QualifiedName& qn) const;
+
+  void unsafeRemoveFunction(const int64_t index) {
+    methods_.erase(methods_.begin() + index);
+  }
+
+ private:
+  std::vector<std::unique_ptr<Function>> methods_;
+};
+
+// A Torch Mobile Module is a representation of the model (i.e., the trained
+// model, in the case of inference). A Mobile Module contains
+//
+//   1. data (object_)
+//   2. metadata (optional) about the model (metadata_ from the metadata.pkl
+//      file added after training)
+//   3. Compilation Unit (cu_)
+//
+class TORCH_API Module {
+ public:
+  Module(
+      c10::intrusive_ptr<c10::ivalue::Object> object,
+      std::shared_ptr<CompilationUnit> cu)
+      : object_(std::move(object)), cu_(std::move(cu)) {}
+  Module() = default;
+  Method get_method(const std::string& method_name) const;
+  template <typename... Types>
+  c10::IValue run_method(const std::string& method_name, Types&&... args) {
+    return get_method(method_name)({IValue(std::forward<Types>(args))...});
+  }
+  c10::IValue forward(std::vector<c10::IValue> inputs) {
+    return get_method("forward")(std::move(inputs));
+  }
+  c10::optional<Method> find_method(const std::string& basename) const;
+
+  const std::string name() const {
+    return object_->name();
+  }
+  const std::vector<at::IValue>& slots() const {
+    return object_->slots();
+  }
+  const c10::intrusive_ptr<c10::ivalue::Object> _ivalue() const {
+    return object_;
+  }
+  const std::vector<at::Tensor> parameters() const;
+  const std::map<std::string, at::Tensor> named_parameters() const;
+  std::string get_forward_method_debug_info(int64_t debug_handle) const;
+  std::string getModuleHierarchy(const int64_t debug_handle) const;
+  std::string getCallStack(const int64_t debug_handle) const;
+  /// Enables "training" mode.
+  void train(bool on = true);
+  /// Calls train(false) to enable "eval" mode.
+  void eval() {
+    train(/*on=*/false);
+  }
+  /// True if the module is in training mode.
+  bool is_training() const;
+  const std::unordered_map<std::string, std::string> getMetadata() const {
+    return metadata_;
+  }
+  void setMetadata(
+      const std::unordered_map<std::string, std::string>& metadata) {
+    metadata_ = metadata;
+  }
+  const std::vector<Method> get_methods() const;
+
+  c10::IValue attr(const std::string& name, c10::IValue or_else) const {
+    if (auto r = object_->type()->findAttributeSlot(name)) {
+      return object_->getSlot(*r);
+    }
+    if (auto r = object_->type()->findConstantSlot(name)) {
+      return object_->type()->getConstant(*r);
+    }
+    return or_else;
+  }
+
+  void setDebugTable(MobileDebugTable&& debug_table) {
+    debug_table_ = std::move(debug_table);
+  }
+  const MobileDebugTable& getDebugTable() const {
+    return debug_table_;
+  }
+
+  void setHasDebugHandles(bool has_debug_handles) {
+    has_debug_handles_ = has_debug_handles;
+  }
+
+  bool hasDebugHandles() const {
+    return has_debug_handles_;
+  }
+
+  const CompilationUnit& compilation_unit() const {
+    return *cu_.get();
+  }
+
+  void set_delete_memory(std::shared_ptr<char> delete_mem) {
+    mem_to_delete_ = std::move(delete_mem);
+  }
+
+  void set_min_operator_version(int64_t version) {
+    min_operator_version_ = version;
+  }
+
+  int64_t min_operator_version() const {
+    return min_operator_version_;
+  }
+
+  void set_bytecode_version(int64_t version) {
+    bytecode_version_ = version;
+  }
+
+  int64_t bytecode_version() const {
+    return bytecode_version_;
+  }
+
+ private:
+  friend class quantization::PTQQuanizationHelper;
+
+  bool compareMethodSchemas(
+      const std::string& name_1,
+      const std::string& name_2);
+
+  void unsafeRemoveMethod(const std::string& basename);
+
+  void unsafeCopyMethod(
+      const std::string& new_method_name,
+      const Function& to_be_copied);
+
+  c10::intrusive_ptr<c10::ivalue::Object> object_;
+  std::unordered_map<std::string, std::string> metadata_;
+  std::shared_ptr<CompilationUnit> cu_;
+  MobileDebugTable debug_table_;
+  bool has_debug_handles_ = false;
+  int64_t min_operator_version_ = 4;
+  int64_t bytecode_version_ = 4;
+
+  // Extra handle for the module to delete when it itself is deleted
+  std::shared_ptr<char> mem_to_delete_;
+};
+
+struct TORCH_API ModuleInfo {
+  uint64_t bytecode_version;
+  uint64_t operator_version;
+  std::unordered_map<std::string, int> opname_to_num_args;
+  std::unordered_set<std::string> function_names;
+  std::unordered_set<std::string> type_names;
+};
+TORCH_API ModuleInfo get_module_info(const mobile::Module& module);
+
+} // namespace mobile
+} // namespace jit
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h
new file mode 100644
index 0000000000000000000000000000000000000000..694fe1df82c10a4227fd585282f2dd78af6c8ce8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h
@@ -0,0 +1,110 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+namespace torch {
+
+class MobileDebugInfo : public c10::DebugInfoBase {
+ public:
+  const std::string& getModelName() {
+    return model_name_;
+  }
+
+  void setModelName(const std::string& model_name) {
+    model_name_ = model_name;
+  }
+
+  const std::string& getMethodName() {
+    return method_name_;
+  }
+
+  void setMethodName(const std::string& method_name) {
+    method_name_ = method_name;
+  }
+
+  size_t getOpIdx() {
+    return op_idx_;
+  }
+
+  void setOpIdx(size_t op_idx) {
+    op_idx_ = op_idx;
+  }
+
+ private:
+  std::string model_name_;
+  std::string method_name_;
+  // TODO: Kimish
+  // If we launch a thread, such as for at::launch or an interpreter
+  // continuation, and the caching allocator is enabled
in the base thread,
+  // then, in order to propagate this information (that the caching allocator
+  // is enabled) across thread boundaries, we can use the mechanism provided
+  // by ThreadLocalDebugInfo.
+  // Once the thread local MobileDebugInfo is accessible in the launched
+  // thread, it can be accessed in that thread and that thread can set
+  // its own thread local CachingAllocatorInfo.
+  // However, we cannot expect every launched thread to extract and set
+  // its own thread local copy of CachingAllocatorInfo.
+  // But this can be done in the lite interpreter, where the run method can do:
+  //   info = c10::ThreadLocalDebugInfo::get(
+  //              c10::DebugInfoKind::MOBILE_RUNTIME_INFO)
+  //              .get_caching_allocator_info();
+  //   GetThreadLocalCachingAllocatorInfo() = info;
+  // Another option is to have MobileDebugInfo itself be the place where the
+  // thread local copy of CachingAllocatorInfo is stored. Then
+  // DefaultMobileCPUAllocator inspects this to decide whether to use the
+  // CachingAllocator. However, the current lite interpreter does not support
+  // FORK, so from the run method of the lite interpreter we are not really
+  // going to launch another instance of the lite interpreter in a different
+  // thread. So for now we do not bother passing CachingAllocatorInfo across
+  // thread boundaries. c10::CachingAllocatorInfo caching_allocator_info;
+  size_t op_idx_ = 0;
+};
+
+class MobileModuleObserver {
+ public:
+  virtual ~MobileModuleObserver() = default;
+
+  virtual void onEnterRunMethod(const int32_t) {}
+  virtual void onExitRunMethod(
+      const std::unordered_map<std::string, std::string>&,
+      const std::string&,
+      const int32_t) {}
+  virtual void onFailRunMethod(
+      const std::unordered_map<std::string, std::string>&,
+      const std::string&,
+      const int32_t,
+      const char*) {}
+  virtual void onEnterLoadModel(const int32_t) {}
+  virtual void onExitLoadModel(
+      const int32_t,
+      const std::unordered_map<std::string, std::string>&) {
+  } // key: filename, value: file content
+  virtual void onFailLoadModel(const int32_t, const char*) {}
+  virtual void onFailLoadModel(
+      const int32_t,
+      const char*,
+      const std::unordered_map<std::string, std::string>&) {}
+  virtual std::vector<std::string> getDefaultExtraFiles() = 0;
+  virtual std::unordered_map<std::string, std::string> processMetadataFromExtra(
+      const std::unordered_map<std::string, std::string>&) = 0;
+};
+
+class MobileObserverConfig {
+ public:
+  void setModuleObserver(std::unique_ptr<MobileModuleObserver> reporter) {
+    module_observer_ = std::move(reporter);
+  }
+  MobileModuleObserver* getModuleObserver() {
+    return module_observer_.get();
+  }
+
+ private:
+  std::unique_ptr<MobileModuleObserver> module_observer_;
+};
+
+MobileObserverConfig& observerConfig();
+
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_operators.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_operators.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7969696f703ee2c6f02398ec482fcc8bc66fa76
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_operators.h
@@ -0,0 +1,27 @@
+#pragma once
+#include
+
+namespace torch {
+namespace jit {
+using c10::IValue;
+
+enum MobileModuleLoadOptions {
+  OPERATOR_CHECK = 1,
+  // PARSE_ALL_EXTRA_FILE_MAPS gates whether ExtraFileMaps pulls in all files
+  // automatically, without explicit entry mappings. Refer to this PR for
+  // details: https://github.com/pytorch/pytorch/pull/99747
+  PARSE_ALL_EXTRA_FILE_MAPS = 2,
+};
+
+const uint64_t kDefaultMobileLoadOptions =
+    MobileModuleLoadOptions::OPERATOR_CHECK;
+
+namespace mobile {
+
+TORCH_API void parseOperators(
+    c10::ivalue::TupleElements&& ops_list,
+    const uint64_t& module_load_options,
+    mobile::Function* function);
+} // namespace mobile
+} // namespace jit
+} // namespace torch
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ac74b053c36393f2bcd318bf43593af70658536
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h
@@ -0,0 +1,119 @@
+#pragma once
+#include
+#include
+
+namespace torch {
+namespace jit {
+namespace mobile {
+
+// If we don't have Kineto available then the edge profiler does not
+// work, since it relies on Kineto.
+#ifdef USE_KINETO
+class TORCH_API KinetoEdgeCPUProfiler {
+ public:
+  // This profiler only profiles KINETO events
+  // No GPU_FALLBACK or NVTX
+  /*
+   * @param m is the instance of mobile Module which is being profiled.
+   *        Note that this implies that KinetoEdgeCPUProfiler can be used
+   *        to profile a specific Module (see usage below), unlike
+   *        ProfilerKineto, which can profile the pytorch runtime in an
+   *        arbitrary scope.
+   * @param fname is the name of the file to which the chrome trace is written.
+   * @param report_input_shapes: whether to record shapes of op's inputs.
+   * @param with_stack: whether to record the model's python stacktrace for
+   *        the op.
+   * @param with_flops: whether to report flops corresponding to the op.
+   * @param with_modules: whether to report the original python module
+   *        hierarchy to which the op belongs.
+   * @param events
+   * @param adjust_vulkan_timestamps: whether to adjust vulkan timestamps from
+   *        query pool to align with cpu event times
+   *
+   * Usage pattern for this profiler must be as follows:
+   *
+   *   {
+   *     KinetoEdgeCPUProfiler profiler(m, filename, args);
+   *     m.forward(...);
+   *   }
+   *
+   * The reason is that KinetoEdgeCPUProfiler has a dependency on the Module
+   * and thus it must not outlive it.
+   *
+   * Thus, KinetoEdgeCPUProfiler is used as an RAII guard to profile within a
+   * certain scope; in that scope, the captured reference to the Module will
+   * outlive the KinetoEdgeCPUProfiler. This is guaranteed because the
+   * KinetoEdgeCPUProfiler must be constructed later than the Module, on the
+   * stack.
+   *
+   * An example of the anti-pattern and wrong usage is:
+   *
+   *   std::shared_ptr<KinetoEdgeCPUProfiler> profiler(m, filename, args);
+   *   m.forward(...);
+   *
+   * since the KinetoEdgeCPUProfiler object would then be constructed on the
+   * heap, with its lifetime managed manually or via smart pointers.
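+   *
+   * Only `m` and `fname` are required; the remaining constructor parameters
+   * default to disabled. A minimal sketch of the intended RAII usage (the
+   * output path here is hypothetical):
+   *
+   *   {
+   *     KinetoEdgeCPUProfiler profiler(m, "/tmp/trace.json");
+   *     m.forward(inputs);
+   *   } // profiler is destroyed here, finalizing the chrome trace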
+ */ + KinetoEdgeCPUProfiler( + const torch::jit::mobile::Module& m, + const std::string& fname, + const bool report_input_shapes = false, + const bool profile_memory = false, + const bool with_stack = false, + const bool with_flops = false, + const bool with_modules = false, + std::vector events = {}, + const bool adjust_vulkan_timestamps = false); + + const std::unique_ptr& + disableProfiler(); + const std::unique_ptr& + getProfilerResult(); + void recordBackendEvent( + const int64_t start_time_us, + const int64_t end_time_us, + const int64_t debug_handle, + const std::string& event_name, + const std::string& backend_name); + void recordBackendMemoryEvent( + void* ptr, + int64_t alloc_size, + size_t total_allocated, + size_t total_reserved, + c10::Device device); + + ~KinetoEdgeCPUProfiler(); + + private: + /* + * We store a reference to Module to make such dependency explicit, since + * a Module reference is already stored in a functor. + */ + const mobile::Module& m_; + std::string trace_file_name_; + std::unique_ptr profiler_result_; +}; + +TORCH_API KinetoEdgeCPUProfiler* getCurrentEdgeProfiler(); + +#define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \ + start_time_us, end_time_us, debug_handle, event_name, backend_name) \ + if (mobile::getCurrentEdgeProfiler()) { \ + mobile::getCurrentEdgeProfiler()->recordBackendEvent( \ + start_time_us, end_time_us, debug_handle, event_name, backend_name); \ + } + +#define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \ + ptr, alloc_size, total_allocated, total_reserved, device) \ + if (mobile::getCurrentEdgeProfiler()) { \ + mobile::getCurrentEdgeProfiler()->recordBackendMemoryEvent( \ + ptr, alloc_size, total_allocated, total_reserved, device); \ + } +#else + +#define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \ + start_time_us, end_time_us, debug_handle, event_name, backend_name) + +#define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \ + ptr, alloc_size, total_allocated, total_reserved, device) +#endif +} // namespace mobile +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h new file mode 100644 index 0000000000000000000000000000000000000000..2a66cc3228470c56fa09a8ac086bfe5c3c676d09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch::jit { + +void initJITBindings(PyObject* module); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h new file mode 100644 index 0000000000000000000000000000000000000000..434881e419cb35367de1dcec81adbe0fc7d2c345 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h @@ -0,0 +1,213 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace py = pybind11; + +namespace torch::jit { + +// This is a variant of shared_ptr that "sees through" a wrapper. +// We use it to convert Value, Node, Block and node to "wrapped" Python +// values. When we destruct the C++ object, the wrapper's pointer will +// be set to 0 and any future dereferencing will throw. 
We need this +// because the Python objects may hang around after the C++ object +// has already been destroyed. +// This also needs the magic type_caster below, which is from the +// workaround offered in https://github.com/pybind/pybind11/issues/2751 +template +class unwrapping_shared_ptr { + static_assert( + std::is_same::value || + std::is_same::value || + std::is_same::value, + "unwrapping type only defined for Graph object types"); + + private: + std::shared_ptr> impl; + + public: + unwrapping_shared_ptr() : impl({}) {} + explicit unwrapping_shared_ptr(T* p) : impl(p->wrap()) { + impl->clear_cb = &clear_registered_instances; + } + T* get() const { + if (!impl->elem) { + throw std::logic_error("has been invalidated"); + } + return impl->elem; + } + // we need to disable the overloaded & for PyBind11 < 2.3 due. + // see https://github.com/pybind/pybind11/pull/1435 +#if (PYBIND11_VERSION_MAJOR > 2) || \ + ((PYBIND11_VERSION_MAJOR == 2) && (PYBIND11_VERSION_MINOR >= 3)) + T** operator&() { + if (!impl->elem) { + throw std::logic_error("has been invalidated"); + } + return &(impl->elem); + } +#endif +}; + +} // namespace torch::jit + +PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr, true); + +namespace pybind11::detail { + +#define CREATE_UNWRAPPING_CASTER(Class) \ + template <> \ + struct type_caster : public type_caster_base { \ + public: \ + using type = Class; \ + using holder_type = torch::jit::unwrapping_shared_ptr; \ + \ + bool load(handle src, bool convert) { \ + return load_impl>(src, convert); \ + } \ + \ + explicit operator type*() { \ + return static_cast(value); \ + } \ + explicit operator type&() { \ + return *static_cast(value); \ + } \ + \ + protected: \ + friend class type_caster_generic; \ + \ + bool load_value(value_and_holder&& v_h) { \ + if (v_h.holder_constructed()) { \ + value = v_h.template holder().get(); \ + return true; \ + } else { \ + throw cast_error( \ + "Unable to cast from non-held to held instance (#Class& to Holder<#Class>)"); \ + } \ + } \ + } + +CREATE_UNWRAPPING_CASTER(torch::jit::Node); +CREATE_UNWRAPPING_CASTER(torch::jit::Value); +CREATE_UNWRAPPING_CASTER(torch::jit::Block); + +#undef CREATE_UNWRAPPING_CASTER + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(torch::jit::IValue, _("IValue")); + + bool load(handle src, bool) { + try { + value = torch::jit::toTypeInferredIValue(src); + return true; + } catch (std::exception& e) { + return false; + } + } + + static handle cast( + torch::jit::IValue src, + return_value_policy /* policy */, + handle /* parent */) { + return torch::jit::toPyObject(std::move(src)).release(); + } +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(torch::jit::Symbol, _("Symbol")); + + bool load(handle src, bool) { + // TODO: Is there a way to py::cast that doesn't raise an exception on + // failure? Can we catch pybind11::cast_error here instead? 
+ std::string src_str; + try { + src_str = py::cast(src); + } catch (std::exception& e) { + return false; + } + value = torch::jit::Symbol::fromQualString(src_str); + return true; + } + + static handle cast( + torch::jit::Symbol src, + return_value_policy /* policy */, + handle /* parent */) { + return py::cast(std::string(src.toQualString()), return_value_policy::copy) + .release(); + } +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(torch::jit::AttributeKind, _("AttributeKind")); + + bool load(handle src, bool) { + return false; + } + + static handle cast( + torch::jit::AttributeKind src, + return_value_policy /* policy */, + handle /* parent */) { + return py::cast( + std::string(torch::jit::toString(src)), + return_value_policy::copy) + .release(); + } +}; + +// See https://github.com/pybind/pybind11/issues/637 +using ListCasterBase = pybind11::detail:: + list_caster, torch::jit::Node*>; +template <> +struct type_caster> : ListCasterBase { + static handle cast( + const std::vector& src, + return_value_policy, + handle parent) { + return ListCasterBase::cast(src, return_value_policy::reference, parent); + } + static handle cast( + const std::vector* src, + return_value_policy pol, + handle parent) { + return cast(*src, pol, parent); + } +}; + +} // namespace pybind11::detail + +namespace torch::jit { + +static inline py::tuple tuple_tail(const py::tuple& tup) { + py::tuple r(tup.size() - 1); + for (const auto i : c10::irange(1, tup.size())) { + r[i - 1] = tup[i]; + } + return r; +} + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h new file mode 100644 index 0000000000000000000000000000000000000000..d7cff488f27311915d6c31b36a366c6a34735b49 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +void initPythonCustomClassBindings(PyObject* module); + +struct ScriptClass { + ScriptClass(c10::StrongTypePtr class_type) + : class_type_(std::move(class_type)) {} + + py::object __call__(py::args args, py::kwargs kwargs); + + c10::StrongTypePtr class_type_; +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h new file mode 100644 index 0000000000000000000000000000000000000000..81927f95fdd4e550e45936215324e11668b5df12 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h @@ -0,0 +1,126 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch::jit { + +void initScriptDictBindings(PyObject* module); + +/// An iterator over the keys of ScriptDict. This is used to support +/// .keys() and iteration. +class ScriptDictKeyIterator final { + public: + ScriptDictKeyIterator( + c10::impl::GenericDict::iterator iter, + c10::impl::GenericDict::iterator end) + : iter_(std::move(iter)), end_(std::move(end)) {} + IValue next(); + + private: + c10::impl::GenericDict::iterator iter_; + c10::impl::GenericDict::iterator end_; +}; + +/// An iterator over the key-value pairs of ScriptDict. 
This is used to support +/// .items(). +class ScriptDictIterator final { + public: + ScriptDictIterator( + c10::impl::GenericDict::iterator iter, + c10::impl::GenericDict::iterator end) + : iter_(std::move(iter)), end_(std::move(end)) {} + IValue next(); + + private: + c10::impl::GenericDict::iterator iter_; + c10::impl::GenericDict::iterator end_; +}; + +/// A wrapper around c10::Dict that can be exposed in Python via pybind +/// with an API identical to the Python dictionary class. This allows +/// dictionaries to have reference semantics across the Python/TorchScript +/// boundary. +class ScriptDict final { + public: + // Constructor. + ScriptDict(IValue data) : dict_(AnyType::get(), AnyType::get()) { + TORCH_INTERNAL_ASSERT(data.isGenericDict()); + dict_ = data.toGenericDict(); + } + + // Get the type of the dictionary. + DictTypePtr type() const { + return DictType::create(dict_.keyType(), dict_.valueType()); + } + + // Return a string representation that can be used + // to reconstruct the instance. + std::string repr() const { + std::ostringstream s; + s << '{'; + bool f = false; + for (auto const& kv : dict_) { + if (f) { + s << ", "; + } + s << kv.key() << ": " << kv.value(); + f = true; + } + s << '}'; + return s.str(); + } + + // Return an iterator over the keys of the dictionary. + ScriptDictKeyIterator iter() const { + auto begin = dict_.begin(); + auto end = dict_.end(); + return ScriptDictKeyIterator(begin, end); + } + + // Return an iterator over the key-value pairs of the dictionary. + ScriptDictIterator items() const { + auto begin = dict_.begin(); + auto end = dict_.end(); + return ScriptDictIterator(begin, end); + } + + // Interpret the dictionary as a boolean; empty means false, non-empty means + // true. + bool toBool() const { + return !(dict_.empty()); + } + + // Get the value for the given key. Throws std::out_of_range if the key does + // not exist. + IValue getItem(const IValue& key) { + return dict_.at(key); + }; + + // Set the value for the given key. + void setItem(const IValue& key, const IValue& value) { + dict_.insert_or_assign(key, value); + }; + + // Check whether the dictionary contains the given key. + bool contains(const IValue& key) { + return dict_.contains(key); + } + + // Delete the given key from the dictionary. + bool delItem(const IValue& key) { + return dict_.erase(key); + } + + // Get the size of the dictionary. + int64_t len() const { + return dict_.size(); + } + + // A c10::Dict instance that holds the actual data. + c10::impl::GenericDict dict_; +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_list.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_list.h new file mode 100644 index 0000000000000000000000000000000000000000..d70e653043c933c80b8e15df21fc9e4afbf8b57e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_list.h @@ -0,0 +1,228 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +void initScriptListBindings(PyObject* module); + +/// An iterator over the elements of ScriptList. This is used to support +/// __iter__(), . 
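+///
+/// A consumption sketch (illustrative only; assumes a ScriptList `lst` is in
+/// scope):
+///
+///   ScriptListIterator it = lst.iter();
+///   while (!it.done()) {
+///     IValue elem = it.next();
+///     // ... use elem ...
+///   }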
+class ScriptListIterator final { + public: + ScriptListIterator( + c10::impl::GenericList::iterator iter, + c10::impl::GenericList::iterator end) + : iter_(iter), end_(end) {} + IValue next(); + bool done() const; + + private: + c10::impl::GenericList::iterator iter_; + c10::impl::GenericList::iterator end_; +}; + +/// A wrapper around c10::List that can be exposed in Python via pybind +/// with an API identical to the Python list class. This allows +/// lists to have reference semantics across the Python/TorchScript +/// boundary. +class ScriptList final { + public: + // TODO: Do these make sense? + using size_type = size_t; + using diff_type = ptrdiff_t; + using ssize_t = Py_ssize_t; + + // Constructor for empty lists created during slicing, extending, etc. + ScriptList(const TypePtr& type) : list_(AnyType::get()) { + auto list_type = type->expect(); + list_ = c10::impl::GenericList(list_type); + } + + // Constructor for instances based on existing lists (e.g. a + // Python instance or a list nested inside another). + ScriptList(IValue data) : list_(AnyType::get()) { + TORCH_INTERNAL_ASSERT(data.isList()); + list_ = data.toList(); + } + + ListTypePtr type() const { + return ListType::create(list_.elementType()); + } + + // Return a string representation that can be used + // to reconstruct the instance. + std::string repr() const { + std::ostringstream s; + s << '['; + bool f = false; + for (auto const& elem : list_) { + if (f) { + s << ", "; + } + s << IValue(elem); + f = true; + } + s << ']'; + return s.str(); + } + + // Return an iterator over the elements of the list. + ScriptListIterator iter() const { + auto begin = list_.begin(); + auto end = list_.end(); + return ScriptListIterator(begin, end); + } + + // Interpret the list as a boolean; empty means false, non-empty means + // true. + bool toBool() const { + return !(list_.empty()); + } + + // Get the value for the given index. + IValue getItem(diff_type idx) { + idx = wrap_index(idx); + return list_.get(idx); + }; + + // Set the value corresponding to the given index. + void setItem(diff_type idx, const IValue& value) { + idx = wrap_index(idx); + return list_.set(idx, value); + } + + // Check whether the list contains the given value. + bool contains(const IValue& value) { + for (const auto& elem : list_) { + if (elem == value) { + return true; + } + } + + return false; + } + + // Delete the item at the given index from the list. + void delItem(diff_type idx) { + idx = wrap_index(idx); + auto iter = list_.begin() + idx; + list_.erase(iter); + } + + // Get the size of the list. + ssize_t len() const { + return list_.size(); + } + + // Count the number of times a value appears in the list. + ssize_t count(const IValue& value) const { + ssize_t total = 0; + + for (const auto& elem : list_) { + if (elem == value) { + ++total; + } + } + + return total; + } + + // Remove the first occurrence of a value from the list. + void remove(const IValue& value) { + auto list = list_; + + int64_t idx = -1, i = 0; + + for (const auto& elem : list) { + if (elem == value) { + idx = i; + break; + } + + ++i; + } + + if (idx == -1) { + throw py::value_error(); + } + + list.erase(list.begin() + idx); + } + + // Append a value to the end of the list. + void append(const IValue& value) { + list_.emplace_back(value); + } + + // Clear the contents of the list. + void clear() { + list_.clear(); + } + + // Append the contents of an iterable to the list. 
+ void extend(const IValue& iterable) { + list_.append(iterable.toList()); + } + + // Remove and return the element at the specified index from the list. If no + // index is passed, the last element is removed and returned. + IValue pop(c10::optional idx = c10::nullopt) { + IValue ret; + + if (idx) { + idx = wrap_index(*idx); + ret = list_.get(*idx); + list_.erase(list_.begin() + *idx); + } else { + ret = list_.get(list_.size() - 1); + list_.pop_back(); + } + + return ret; + } + + // Insert a value before the given index. + void insert(const IValue& value, diff_type idx) { + // wrap_index cannot be used; idx == len() is allowed + if (idx < 0) { + idx += len(); + } + + if (idx < 0 || idx > len()) { + throw std::out_of_range("list index out of range"); + } + + list_.insert(list_.begin() + idx, value); + } + + // A c10::List instance that holds the actual data. + c10::impl::GenericList list_; + + private: + // Wrap an index so that it can safely be used to access + // the list. For list of size sz, this function can successfully + // wrap indices in the range [-sz, sz-1] + diff_type wrap_index(diff_type idx) { + auto sz = len(); + if (idx < 0) { + idx += sz; + } + + if (idx < 0 || idx >= sz) { + throw std::out_of_range("list index out of range"); + } + + return idx; + } +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h new file mode 100644 index 0000000000000000000000000000000000000000..c8d51349a06e65b928102e2f0636f959cf2ca808 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h @@ -0,0 +1,45 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace torch::jit { + +struct Module; + +namespace tracer { +void initPythonTracerBindings(PyObject* module); + +SourceRange getPythonInterpreterSourceRange(); + +Node* preRecordPythonTrace( + THPObjectPtr pyobj, + const std::string& arg_types, + at::ArrayRef inputs, + std::vector scalar_args); + +std::pair, Stack> createGraphByTracingWithDict( + const py::function& func, + const py::dict& inputs_dict, + Stack inputs, + const py::function& var_name_lookup_fn, + bool strict, + bool force_outplace, + Module* self = nullptr, + const std::vector& argument_names = {}); + +std::pair, Stack> createGraphByTracing( + const py::function& func, + Stack inputs, + const py::function& var_name_lookup_fn, + bool strict, + bool force_outplace, + Module* self = nullptr, + const std::vector& argument_names = {}); +} // namespace tracer +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h new file mode 100644 index 0000000000000000000000000000000000000000..65c8ad3be6850e6629c52238b9d64a20062c5c0a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h @@ -0,0 +1,7 @@ +#pragma once + +#include + +namespace torch::jit { +void initJitScriptBindings(PyObject* module); +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h new file mode 100644 index 
0000000000000000000000000000000000000000..81cfd658f6ede87517a2edad395984293885ab76 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h @@ -0,0 +1,6 @@ +#pragma once +#include +namespace torch::jit { +TORCH_API void setGraphExecutorOptimize(bool o); +TORCH_API bool getGraphExecutorOptimize(); +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h new file mode 100644 index 0000000000000000000000000000000000000000..3b50bce86ff5b9cb5984f16b3e2c248cded67569 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h @@ -0,0 +1,6 @@ +#pragma once +#include +namespace torch::jit { +TORCH_API void setUTF8DecodingIgnore(bool o); +TORCH_API bool getUTF8DecodingIgnore(); +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..3aae2eb85279664b9c5a1a10e0c66669ad95b9f0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h @@ -0,0 +1,113 @@ +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +void packGradient(const Gradient& gradient, Node* dnode); +bool needsGradient(const std::shared_ptr& graph); +void runOptimization( + std::shared_ptr& graph, + bool unroll_non_constant_loops = true, + bool const_prop_user_classes = true); +void runNondiffOptimization( + std::shared_ptr& graph, + bool strict_fuser_check = false); +void debugSetAutodiffSubgraphInlining(bool state); +bool TORCH_API getAutodiffSubgraphInlining(); + +void debugSetFusionGroupInlining(bool state); +bool getFusionGroupInlining(); + +// Tunable parameters for deciding when to create/keep subgraphs of +// differentiable code +const size_t autodiffSubgraphNodeThreshold = 2; +const size_t autodiffSubgraphInlineThreshold = 5; + +// a Graph can be created via tracing, or via a language-based frontend +// GraphExecutor runs it. It can run the same graph on many different sizes +// and different requires_grad states, and handles specializations for each +// situation. GraphExecutor is completely unaware of tracing or module +// parameters to keep the tracing concerns separated. 
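+//
+// For example, optimization can be toggled globally when debugging (a minimal
+// sketch using setGraphExecutorOptimize()/getGraphExecutorOptimize() declared
+// in update_graph_executor_opt.h above):
+//
+//   bool prev = getGraphExecutorOptimize();
+//   setGraphExecutorOptimize(false); // run graphs exactly as given
+//   // ... execute the module and inspect the unoptimized run ...
+//   setGraphExecutorOptimize(prev);  // restore the previous setting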
+struct GraphExecutorImplBase {
+  static std::shared_ptr prepareGraph(
+      const std::shared_ptr& graph) {
+    auto copy = graph->copy();
+    EraseShapeInformation(copy);
+    return copy;
+  }
+
+  GraphExecutorImplBase(
+      const std::shared_ptr& graph,
+      std::string function_name)
+      : graph(prepareGraph(graph)),
+        function_name_(std::move(function_name)),
+        num_inputs(this->graph->inputs().size()),
+        num_outputs(this->graph->outputs().size()) {}
+
+  // entry point where execution begins
+  void run(Stack& stack);
+  c10::intrusive_ptr runAsync(
+      Stack& stack,
+      TaskLauncher taskLauncher = at::launch);
+
+  virtual const ExecutionPlan& getPlanFor(
+      Stack& stack,
+      c10::optional remaining_bailout_depth = c10::nullopt) = 0;
+  virtual GraphExecutorState getDebugState() = 0;
+  virtual ~GraphExecutorImplBase() = default;
+
+  virtual bool isOptimized() const {
+    return false;
+  }
+
+ protected:
+  friend struct GraphExecutor;
+
+  // The unoptimized starting graph. This field is effectively const, but we
+  // can't make it so because Graph::copy() is not const (and making it const
+  // is not that easy at this point).
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  std::shared_ptr graph;
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  std::string function_name_;
+
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  const size_t num_inputs;
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  const size_t num_outputs;
+
+  // GraphExecutors can be accessed from multiple threads, so this mutex needs
+  // to be held every time we access the fallback or plan_cache.
+  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+  std::mutex compile_mutex;
+};
+
+} // namespace torch::jit
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h
new file mode 100644
index 0000000000000000000000000000000000000000..c45dcde7b0bf0ea2314eb676ea87e958499ff7a2
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h
@@ -0,0 +1,205 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+// We would like to assign each position/axis of a tensor an abstract size
+// * For each `tensor` we have a profiled `Value` of a `TensorType` describing
+//   the properties of the `tensor`.
+// * `TensorType` has a property called `symbolic_sizes_` to describe observed
+//   `tensor.sizes()`
+// * `symbolic_sizes_` is a vector of abstract sizes (or
+//   `std::vector`) where
+//   * the `ShapeSymbol` at `symbolic_sizes_[i]` describes the size value
+//     (`Dimension`) at `tensor.sizes()[i]`
+//   * We may see the same `Dimension` at different positions `i` in
+//     `tensor.sizes()` or even in different `tensor`s
+// * First, we would like to associate the same `ShapeSymbol` with the same
+//   `Dimension` across **one** profiling execution or run of a TorchScript
+//   function.
+// * The same `ShapeSymbol`s in different positions of `symbolic_shapes_` in
+//   possibly different `TensorType`s (i.e. `TensorType`s for different
+//   profiled values) form an implicit set. The elements of such a set are
+//   called *dimension locations*.
+// * These sets allow us to track how the shapes of input arguments of some
+//   operation relate to the operation's output shapes, as the input and
+//   output shapes might share the same `ShapeSymbol`s.
+// * For **every** profiling run, we would like to maintain the invariant that
+//   *the same `ShapeSymbol` is always associated with the same `Dimension`*.
+// * To maintain this invariant we merge the profiling information from all
+//   profiling runs:
+//   * For every two runs, we iterate over all `symbolic_shapes_` and compare
+//     their `ShapeSymbol`s in the same position.
+//     * If we observe that for every dimension location that has
+//       `ShapeSymbol S1` in run #1 there is **only one** `ShapeSymbol S2` in
+//       the same dimension location in run #2, we conclude that the invariant
+//       holds.
+//     * However, if we observe that some dimension locations in run #2 have
+//       `ShapeSymbol S2` and the other ones have `ShapeSymbol S3`, we would
+//       like to partition the virtual set of dimension locations associated
+//       with `ShapeSymbol S1` into two new subsets, so the invariant holds.
+//     * The partitioning works by assigning a new symbol to the dimension
+//       locations (associated with `ShapeSymbol S1`) that have `ShapeSymbol
+//       S2`, and another new symbol to the dimension locations that have
+//       `ShapeSymbol S3`. In other words,
+//       * Subset #1 will consist of the dimension locations that in run #2
+//         have `ShapeSymbol S2` and will have `ShapeSymbol S4` in those
+//         dimension locations.
+//       * Subset #2 will consist of the dimension locations that in run #2
+//         have `ShapeSymbol S3` and will have `ShapeSymbol S5` in those
+//         dimension locations.
+// * The effective result of merging the profiling information from two runs
+//   is new `TensorType`s whose `symbolic_sizes_`/dimension locations have
+//   either `ShapeSymbol S4` or `ShapeSymbol S5`.
+// * Partitioning can be done even before we have seen all the dimension
+//   locations associated with `ShapeSymbol S1`.
+//   * We use `getSymbolInSet` of `ShapeSymbolTable` to remember all
+//     `ShapeSymbol`s from run #2 we observed in the dimension locations
+//     associated with `ShapeSymbol S1`.
+//   * For every `ShapeSymbol` from run #2 in the dimension location
+//     associated with `ShapeSymbol S1`, `getSymbolInSet` returns a symbol
+//     that we assign to the dimension location in a new TensorType.
+//   * It's important to point out that the same `ShapeSymbol S2` from run
+//     #2 in two dimension locations that have different `ShapeSymbol`s in
+//     run #1 are different! These dimension locations will belong to
+//     different subsets and have different `ShapeSymbol`s after the merge.
+//   * On the other hand, for the same `ShapeSymbol S2` in two dimension
+//     locations that have `ShapeSymbol S1` in run #1, `getSymbolInSet` will
+//     return the same symbol.
+
+namespace torch::jit {
+
+using ::c10::TensorTypePtr;
+using Dimension = int64_t;
+
+TORCH_API void RegisterProfilingNode(const std::function&);
+
+struct ProfilingRecord;
+
+// `SetPartitioningHelper` is used to maintain the following invariant while
+// merging the profiling information from multiple runs: for **every**
+// profiling run, *the same `ShapeSymbol` is always associated with the same
+// `Dimension`*.
+struct SetPartitioningHelper {
+  std::map>
+      sets2subsets_;
+
+  // `partitionSetByDimension` partitions a virtual set
+  // of dimension locations associated with ShapeSymbol `symbol` into subsets.
+  // Partitioning is equivalent to giving (or renaming) a particular
+  // dimension location a new `ShapeSymbol`.
+ // The same `Dimension` value in different dimension locations + // that used to have `symbol` will receive the same + // new `ShapeSymbol`, effectively forming a new set. + c10::ShapeSymbol partitionSetByDimension( + Dimension new_size, + c10::ShapeSymbol symbol) { + auto& dims2symbols = getSetForSymbol(symbol); + + if (dims2symbols.count(new_size) == 0) { + auto new_sym = c10::ShapeSymbol::newSymbol(); + dims2symbols[new_size] = new_sym; + return new_sym; + } + + return dims2symbols[new_size]; + } + + private: + std::map& getSetForSymbol(c10::ShapeSymbol s) { + auto& set = sets2subsets_[s]; + // N.B. adding a mapping { s.static_size(), s } + // makes sure we preserve the fact that + // some dimension values remain the same + // across all profiled runs + if (s.is_static()) { + set.insert({s.static_size(), s}); + } + return set; + } +}; + +// ShapeSymbolTable is used by Interpreter +// to assign dimension values to ShapeSymbols +// and fail a guard if the same symbol +// is assigned more than one dimension value. +struct ShapeSymbolTable { + // N.B. we treat static symbols as always assigned + // to themselves + bool isBound(c10::ShapeSymbol s) { + if (s.is_static()) { + return true; + } + return data_.count(s) != 0; + } + + // N.B. we treat static symbols as always assigned + // to themselves + Dimension getValue(c10::ShapeSymbol s) { + if (s.is_static()) { + return s.static_size(); + } + return data_[s]; + } + void assign(c10::ShapeSymbol s, Dimension v) { + TORCH_INTERNAL_ASSERT(!s.is_static()); + data_[s] = v; + } + std::map data_; + // Tries to assign dimension values from `new_sizes` to + // `ShapeSymbol`s `sym_shapes`. + // Returns `true` if every dimension value from `new_sizes` + // can be assigned to the corresponding `ShapeSymbol` from + // `sym_shapes` + // A dimension value can be assigned to a `ShapeSymbol` + // * if the symbol isn't assigned yet any dimension value + // * if the symbol is assigned and its value is equal to + // the dimension value from `new_sizes` + bool bindSymbolicShapes( + at::IntArrayRef new_sizes, + const c10::SymbolicShape& sym_shapes); +}; + +struct ProfilingRecord { + // N.B. 
ProfilingRecord's copy and move c-tor are disabled, so we won't + // end up accidentally copying or moving ProfilingRecords whose addresses + // are captured in callbacks_ + ProfilingRecord(const ProfilingRecord&) = delete; + ProfilingRecord(ProfilingRecord&&) noexcept = delete; + TORCH_API static std::unique_ptr instrumentGraph( + const std::shared_ptr& graph); + TORCH_API static void removeProfilingNodes(Block* b); + TORCH_API static void removeProfileCounter(Block* b); + + std::shared_ptr profiled_graph_; + mutable std::mutex mutex_; + size_t profiling_count_; + + bool ready() const; + + std::shared_ptr graph() const { + return profiled_graph_; + } + + TORCH_API ProfileIValueOp* createProfileIValueNode(Value* in_val); + TORCH_API ProfileIValueOp* createProfileIValueNode(ArrayRef inputs); + + private: + ProfileOp* createProfileNode( + const std::function& fp, + at::ArrayRef inputs); + void instrumentBlock(Block* block); + void insertShapeProfile(Node* n, size_t offset, const TypePtr& input_type); + ProfilingRecord(std::shared_ptr g); +}; + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/slice_indices_adjust.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/slice_indices_adjust.h new file mode 100644 index 0000000000000000000000000000000000000000..720c8b69e5ecd55cbe9a00d13342fa9f5cbc98db --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/slice_indices_adjust.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +// Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +// 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software +// Foundation; All Rights Reserved +// +// Stolen (with appropriate modifications) by @agolynski +// (https://github.com/pytorch/pytorch/pull/33019) from cpython repo +// Objects/sliceobject.c with comment: this is harder to get right than you +// might think +// +// This adjusts indexes according to python list semantics and returns number +// of elements in the resulting list. +TORCH_API int64_t slice_indices_adjust( + int64_t length, + int64_t* start, + int64_t* stop, + int64_t step); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..2d09eb27876b7674e6bb29651c7a5082d0f6b599 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h @@ -0,0 +1,69 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. Ideally this should all go into native_functions.yaml + +#include +#include + +namespace torch::jit { + +/* +ADDING A NEW SHAPE GRAPH: +- For one node schema, there is one corresponding registered shape compute +graph. The schema of the graph should be the same except for Tensor arguments. +For every Tensor input in operator schema, there should be a List[int] +corresponding to that Tensor's shape. For example: "aten::linear(Tensor input, +Tensor weight, Tensor? bias=None) -> Tensor" ==> def linear(input: List[int], +weight: List[int], bias: Optional[List[int]]) + +Additionally, arguments which are unused at the end of the schema may be left +off. 
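+
+For instance (an illustrative pairing, not necessarily what is registered in
+practice), "aten::gelu(Tensor self, *, str approximate='none') -> Tensor" could
+use a shape function def gelu(self: List[int]) that omits the trailing
+`approximate` argument, since it does not affect the output shape.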
This allows sharing a single graph for multiple function schemas, such as +unary operators with different trailing arguments that do not affect the output +shape. + +The shape graph should return a new, unaliased List[int] (or tuple of lists for +multiple returns) and should not modify any input lists. This allows the shape +graphs to be composed and executed. + +The shape analysis (particularly for non-complete, or symbolic shapes) works by +partially evaluating the JIT IR. It may be possible for a Graph to be registered +that we cannot currently partially evaluate. If this happens, please file an +issue. There are lints registered to avoid particular known patterns (continue +or break or early return in a loop). Those may be improved in the future, please +file an issue if necessary. + +To debug (and write initially) the recommended flow is to define these functions +in python and iterate there. Functions should be added to +torch/jit/_shape_functions. + +To test operators, the preferred flow is through OpInfos, with +`assert_jit_shape_analysis=True`. If this is not feasible, you can look at tests +in `test_symbolic_shape_analysis.py` such as `test_adaptive_avg_pool2d`. + +Operators which take in a list of tensors, such as concat, are not yet +supported. Concat has been special cased and could be generalized as needed. +Please file an issue. +*/ + +struct BoundedShapeGraphs { + std::shared_ptr lower_bound; + std::shared_ptr upper_bound; +}; + +TORCH_API void RegisterShapeComputeGraphForSchema( + const FunctionSchema& schema, + std::shared_ptr g); + +TORCH_API c10::optional> shapeComputeGraphForSchema( + const FunctionSchema& schema); + +TORCH_API c10::optional boundedGraphsForSchema( + const FunctionSchema& schema); + +TORCH_API std::vector RegisteredShapeComputeSchemas(); + +TORCH_API void LintShapeComputeGraph( + const FunctionSchema* schema, + const std::shared_ptr& graph); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e1280504e5c914f51809e300d0d46bc182ae9789 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h @@ -0,0 +1,12 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. 
Ideally this should all go into native_functions.yaml + +#include +#include + +namespace torch::jit { + +TORCH_API const OperatorMap& get_tensorexpr_elementwise_set(); + +} // namespace torch::jit diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/callstack_debug_info_serialization.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/callstack_debug_info_serialization.h new file mode 100644 index 0000000000000000000000000000000000000000..ac1bdf8d3b1d846c6cd02724fa1fd07f0208c40f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/callstack_debug_info_serialization.h @@ -0,0 +1,91 @@ +#pragma once + +#include +#include +#include + +#include + +#include + +#include + +namespace c10 { +struct IValue; +} + +namespace torch { +namespace jit { + +class Pickler; +class InlinedCallStackSerializer { + public: + // Serialize InlinedCallStack as + // SerializedInlinedCallStack = + // [module_info, source range tag, SerializedInlinedCallStack] + // module_info = [ClassType.qualifiedName, instance_name] + // source_range_tag = unique source range id + c10::IValue serialize( + const InlinedCallStackPtr& cs_ptr, + const SourceRangeTagMap& source_range_tags); + + private: + // module_info = [ClassType.qualifiedName, instance_name] + c10::IValue serialize_module_instance_info( + const c10::optional& m); + + // This caches serialized inlined callstack ptr, since many + // InlinedCallStackPtr can refer to the same one. + ska::flat_hash_map + serialized_inlined_callstack_; + // This caches serialized module instance info. + // There might be many nodes that are part of the same + // parent, grandparent etc. module. + ska::flat_hash_map serialized_module_instance_info_; +}; + +class TORCH_API CallStackDebugInfoPickler { + public: + CallStackDebugInfoPickler() = default; + + std::vector pickle( + const std::unordered_map& callstack_ptrs, + const SourceRangeTagMap& source_range_tags); + + private: + InlinedCallStackSerializer css_; +}; + +class InlinedCallStackDeserializer { + public: + InlinedCallStackPtr deserialize( + const c10::IValue& iv, + const ska::flat_hash_map& source_range_map, + const std::shared_ptr& cu); + + private: + c10::optional deserialize_module_instance_info( + const c10::IValue& iv, + const std::shared_ptr& cu); + + ska:: + flat_hash_map, InlinedCallStackPtr> + cached_inlined_callstacks_; + ska::flat_hash_map, ModuleInstanceInfo> + cached_module_instance_info_; +}; + +class TORCH_API CallStackDebugInfoUnpickler { + public: + ska::flat_hash_map unpickle( + at::DataPtr&& data, + size_t size, + const ska::flat_hash_map& source_range_map, + const std::shared_ptr& cu); + + private: + InlinedCallStackDeserializer csds_; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export_bytecode.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export_bytecode.h new file mode 100644 index 0000000000000000000000000000000000000000..96397a56eac8617b94d6562f38406d5b8a9136a4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/export_bytecode.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct TORCH_API CompilationOptions { + bool incl_interface_call = false; + 
bool enable_default_value_for_unspecified_arg = false; + bool enable_default_args_before_out_args = true; + bool enable_emit_promoted_ops = true; + int model_version = caffe2::serialize::kProducedBytecodeVersion; +}; + +TORCH_API mobile::Module jitModuleToMobile( + const Module& module, + const CompilationOptions& options); + +mobile::Code compileGraphToMobileCode( + const std::string& name, + const std::shared_ptr& graph, + const CompilationOptions& compilation_options, + BackendDebugInfoRecorder& debug_info_recorder); + +TORCH_API std::unique_ptr convertJitFunctionToMobileFunction( + const GraphFunction& function, + const CompilationOptions& options); + +TORCH_API IValue convertMobileFunctionToCodeTable( + const mobile::Function& func, + const CompilationOptions& compilation_options); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer.h new file mode 100644 index 0000000000000000000000000000000000000000..43e8062ef2dce517ae252fe5f8569a0084121e17 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/flatbuffer_serializer.h @@ -0,0 +1,94 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include + +/** + * Defines the public API for serializing mobile modules to flatbuffer. + * Note that this header must not include or depend on flatbuffer-defined + * types, to avoid leaking those details to PyTorch clients. + */ + +namespace torch { +namespace jit { + +/// Maps file names to file contents. +using ExtraFilesMap = std::unordered_map; + +/** + * Represents a span of data. Typically owned by a UniqueDetachedBuffer. + */ +class TORCH_API DetachedBuffer final { + public: + /// Creates a new DetachedBuffer with an optional data owner. This interface + /// is provided to let users create objects of this type for testing. + DetachedBuffer(void* data, size_t size, void* internal_data_owner = nullptr) + : data_(data), size_(size), data_owner_(internal_data_owner) {} + + /// Returns a pointer to the data. + C10_NODISCARD void* data() { + return data_; + } + /// Returns a pointer to the data. + C10_NODISCARD const void* data() const { + return data_; + } + /// Returns the size of the data, in bytes. + C10_NODISCARD size_t size() const { + return size_; + } + + /// Wrapper type that typically owns data_owner_. + using UniqueDetachedBuffer = + std::unique_ptr>; + + private: + /// Deletes the owner, if present, and the buf itself. + /// Note: we could have provided a movable type with a destructor that did + /// this work, but the unique wrapper was easier in practice. + static void destroy(DetachedBuffer* buf); + + /// Provides access to destroy() for implementation and testing. + friend struct DetachedBufferFriend; + friend struct DetachedBufferTestingFriend; + + /// Pointer to the data. Not owned by this class. + void* data_; + /// The size of `data_`, in bytes. + size_t size_; + /// Opaque pointer to the underlying owner of `data_`. This class + /// (DetachedBuffer) does not own the owner or the data. It will typically be + /// owned by a UniqueDetachedBuffer that knows how to delete the owner along + /// with this class. 
+ void* data_owner_; +}; + +TORCH_API void save_mobile_module( + const mobile::Module& module, + const std::string& filename, + const ExtraFilesMap& extra_files = ExtraFilesMap(), + const ExtraFilesMap& jit_sources = ExtraFilesMap(), + const std::vector& jit_constants = {}); + +TORCH_API DetachedBuffer::UniqueDetachedBuffer save_mobile_module_to_bytes( + const mobile::Module& module, + const ExtraFilesMap& extra_files = ExtraFilesMap(), + const ExtraFilesMap& jit_sources = ExtraFilesMap(), + const std::vector& jit_constants = {}); + +TORCH_API void save_mobile_module_to_func( + const mobile::Module& module, + const std::function& writer_func); + +// TODO(qihan): delete +TORCH_API bool register_flatbuffer_serializer(); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_constants.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..704d7239f2b46cceda6eb07c1cb3c1864bf8bc95 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_constants.h @@ -0,0 +1,21 @@ +#pragma once +#include + +namespace torch { +namespace jit { +constexpr size_t BYTECODE_INDEX_INSTRUCTION = 0; +constexpr size_t BYTECODE_INDEX_OPERATOR = 1; +constexpr size_t BYTECODE_INDEX_CONSTANT = 2; +constexpr size_t BYTECODE_INDEX_TYPE = 3; +constexpr size_t BYTECODE_INDEX_REGISTER_SIZE = 4; + +constexpr size_t BYTECODE_INDEX_SCHEMA_ARGUMENTS = 0; +constexpr size_t BYTECODE_INDEX_SCHEMA_RETURNS = 1; + +constexpr size_t BYTECODE_INDEX_ARGUMENT_NAME = 0; +constexpr size_t BYTECODE_INDEX_ARGUMENT_TYPE = 1; +constexpr size_t BYTECODE_INDEX_ARGUMENT_DEFAULT_VALUE = 2; + +constexpr size_t BYTECODE_INDEX_MODULE_DEBUG_HANDLES = 0; +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_helpers.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..c94993207e6a3cd0c9277496ee4dbe2ee547b345 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_export_helpers.h @@ -0,0 +1,32 @@ +#pragma once + +#include +#include + +namespace caffe2 { +namespace serialize { +class PyTorchStreamReader; +} +} // namespace caffe2 + +namespace torch { +namespace jit { + +struct Source; + +// Convert a class type's qualifier name to the corresponding path the source +// file it should be written to. 
+// +// Qualifier is like: foo.bar.baz +// Returns: libs/foo/bar/baz.py +std::string qualifierToArchivePath( + const std::string& qualifier, + const std::string& export_prefix); + +std::shared_ptr findSourceInArchiveFromQualifier( + caffe2::serialize::PyTorchStreamReader& reader, + const std::string& export_prefix, + const std::string& qualifier); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_legacy.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_legacy.h new file mode 100644 index 0000000000000000000000000000000000000000..a2618281095969011b9ee4483794c7e6ce1422b2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_legacy.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace caffe2 { +namespace serialize { +class PyTorchStreamReader; +} // namespace serialize +} // namespace caffe2 + +namespace torch { +namespace jit { + +struct CompilationUnit; + +// Deserializes a model in legacy format. +Module LEGACY_deserialize( + std::shared_ptr cu, + std::shared_ptr reader, + const c10::optional& device); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_read.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_read.h new file mode 100644 index 0000000000000000000000000000000000000000..ab89f93880c34f4dfda499e46852c824af953b37 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/import_read.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include + +namespace caffe2 { +namespace serialize { +class PyTorchStreamReader; +} // namespace serialize +} // namespace caffe2 + +namespace torch { +namespace jit { + +TORCH_API IValue readArchiveAndTensors( + const std::string& archive_name, + const std::string& pickle_prefix, + const std::string& tensor_prefix, + c10::optional type_resolver, + c10::optional obj_loader, + c10::optional device, + caffe2::serialize::PyTorchStreamReader& stream_reader, + c10::TypePtr (*type_parser)(const std::string&) = + Unpickler::defaultTypeParser, + std::shared_ptr storage_context = nullptr); + +bool check_zip_file( + std::shared_ptr rai); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickler.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickler.h new file mode 100644 index 0000000000000000000000000000000000000000..4f553b6f7ca8a912eeb8a9d1153d5a3c0bad220d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/pickler.h @@ -0,0 +1,429 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +// See Python's pickletools.py for a detailed description of each of these codes +enum class PickleOpCode : char { + MARK = '(', + STOP = '.', + POP = '0', + POP_MARK = '1', + DUP = '2', + FLOAT = 'F', + INT = 'I', + BININT = 'J', + BININT1 = 'K', + LONG = 'L', + BININT2 = 'M', + NONE = 'N', + PERSID = 'P', + BINPERSID = 'Q', + REDUCE = 'R', + STRING = 'S', + BINSTRING = 'T', + SHORT_BINSTRING = 'U', + // NB: Avoid using UNICODE as it is a macro in the Windows API + UNICODE_ = 'V', + BINUNICODE 
= 'X', + APPEND = 'a', + BUILD = 'b', + GLOBAL = 'c', + DICT = 'd', + EMPTY_DICT = '}', + APPENDS = 'e', + GET = 'g', + BINGET = 'h', + INST = 'i', + LONG_BINGET = 'j', + LIST = 'l', + EMPTY_LIST = ']', + OBJ = 'o', + PUT = 'p', + BINPUT = 'q', + LONG_BINPUT = 'r', + SETITEM = 's', + TUPLE = 't', + EMPTY_TUPLE = ')', + SETITEMS = 'u', + BINFLOAT = 'G', + + // Protocol 2 + PROTO = char('\x80'), + NEWOBJ = '\x81', + EXT1 = '\x82', + EXT2 = '\x83', + EXT4 = '\x84', + TUPLE1 = '\x85', + TUPLE2 = '\x86', + TUPLE3 = '\x87', + NEWTRUE = '\x88', + NEWFALSE = '\x89', + LONG1 = '\x8a', + LONG4 = '\x8b', + + // Protocol 3 (Python 3.x) + BINBYTES = 'B', + SHORT_BINBYTES = 'C', + + // Protocol 4 + SHORT_BINUNICODE = char('\x8c'), + BINUNICODE8 = '\x8d', + BINBYTES8 = '\x8e', + EMPTY_SET = '\x8f', + ADDITEMS = '\x90', + FROZENSET = '\x91', + NEWOBJ_EX = '\x92', + STACK_GLOBAL = '\x93', + MEMOIZE = '\x94', + FRAME = '\x95' +}; + +using ::c10::IValue; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct WriteableTensorData { + const char* data() const { + return static_cast(tensor_.storage().data()); + } + size_t sizeInBytes() const { + return size_; + } + size_t nbytes() const { + return tensor_.storage().nbytes(); + } + bool storageHasDeleter() const { + return tensor_.storage().data_ptr().get_context() != nullptr; + } + + private: + friend TORCH_API WriteableTensorData + getWriteableTensorData(const at::Tensor& tensor, bool to_cpu); + at::Tensor tensor_; + uint64_t size_; +}; + +void setTypeTags(bool state); +bool getTypeTags(); + +class TORCH_API Pickler { + AT_DISALLOW_COPY_AND_ASSIGN(Pickler); + + public: + Pickler(std::function writer) + : Pickler(std::move(writer), nullptr, nullptr, nullptr) {} + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Pickler( + std::function writer, + std::vector* tensor_table, + std::function type_renamer, + std::vector* memoized_class_types, + std::function get_tensor_id = nullptr, + bool tag_aggregates = true) + : writer_(std::move(writer)), + tensor_table_(tensor_table), + type_renamer_(std::move(type_renamer)), + memoized_class_types_(memoized_class_types), + get_tensor_id_(std::move(get_tensor_id)), + tag_aggregates_(tag_aggregates) {} + // NOLINTNEXTLINE(bugprone-exception-escape) + ~Pickler(); + + // Push protocol onto the stack + void protocol(); + + // Push STOP PickleOpCode onto the stack + void stop(); + + void pushIValue(const IValue& ivalue); + + void startTuple(); + void endTuple(); + + const std::vector& tensorData() { + return tensor_data_; + } + + void pushEmptyDict(); + void pushDict(const IValue& ivalue); + void pushInt(int64_t value); + void pushLong(const std::string& data); + + private: + void pushIValueImpl(const IValue& ivalue); + void startTypeTag(); + void endTypeTag(const IValue& value); + void pushBool(bool value); + void pushDouble(double value); + void pushComplexDouble(const IValue& value); + void pushGenericList(const IValue& ivalue); + void pushIntList(const IValue& ivalue); + void pushList(const IValue& ivalue); + void pushTensor(const IValue& ivalue); + void pushTensorReference(const IValue& ivalue); + void pushLiteralTensor(const IValue& ivalue); + void pushLiteralSparseTensor(const at::Tensor& tensor); + void pushTuple(const IValue& ivalue); + void pushString(const std::string& string); + void pushDevice(const IValue& ivalue); +#ifdef USE_DISTRIBUTED + void pushRRef(const IValue& ivalue); +#endif + // unmemoized version + void pushStringImpl(const std::string& string); + void pushStorageOfTensor(const 
at::Tensor& tensor); + + void pushBinGet(uint32_t memo_id); + void pushSpecializedList( + const IValue& ivalue, + const char* list_name, + const std::function& item_pusher); + void pushGlobal(c10::string_view module_name, c10::string_view class_name); + // raw string data is appended directly to the byte stream + void pushBytes(const std::string& string); + void pushTensorData(const at::Tensor& tensor); + + // Add a BINPUT op and return the memoization id used + size_t pushNextBinPut(); + + const void* getPointer(const IValue& ivalue); + + // Caller checks that bufferPos_ > 0 + void flushNonEmpty() { + writer_(buffer_.data(), bufferPos_); + bufferPos_ = 0; + } + + void flush() { + if (bufferPos_ != 0) { + flushNonEmpty(); + } + } + + // These convert values to bytes and add them to the stack (NB: since T is to + // the left of a '::', its type cannot be deduced by the compiler so one must + // explicitly instantiate the template, i.e. push(int) works, push(int) + // does not) + static CONSTEXPR_EXCEPT_WIN_CUDA size_t kBufferSize = 256; + template + void push(typename std::common_type::type value) { + const char* begin = reinterpret_cast(&value); + if (bufferPos_ + sizeof(T) > buffer_.size()) { + flushNonEmpty(); + } + static_assert(sizeof(T) <= kBufferSize, "Buffer size assumption"); + memcpy(buffer_.data() + bufferPos_, begin, sizeof(T)); + bufferPos_ += sizeof(T); + } + + // Stream to write binary data to + // Code shouldn't call writer_ directly without first flush()ing. + std::function writer_; + + // Buffer to avoid calling a writer_ on a per-byte basis. + std::array buffer_; + size_t bufferPos_{0}; + + // Stack of opcodes/data + std::vector stack_; + + // External table of tensors to serialize. If this is missing, then tensors + // are serialized directly into the pickle + std::vector* tensor_table_; + + // TODO: only use this if necessary (add a pass to find all shared ivalues, + // and only memoize those) + uint32_t memo_id_ = 0; + + // Memoization of IValues that have been written (index in table is used for + // BINPUT opcodes) to enable shared references + c10::FastMap memoized_ivalue_map_; + + // because we de-dup ivalues based on their raw pointer address in the above + // map we need to keep all the memoized values alive during the pickle. + // Otherwise, it is possible that a raw address gets reused for another + // object, and we will alias it to the old object at that address. + std::vector memoized_ivalues_; + + std::function type_renamer_; + + // List of all the types that it wrote, inspect from the IValues it wrote. + std::vector* memoized_class_types_; + + // Function to grab next id_name for tensor storage, function is responsible + // for returning unique ids + std::function get_tensor_id_; + + // List of tensor storages to serialize in the same binary as the pickle data + // similar to ivalues, they are memoized using BINPUT + std::vector tensor_data_; + c10::FastMap memoized_storage_map_; + + c10::FastMap memoized_globals_map_; + c10::FastMap memoized_strings_map_; + c10::FastMap memoized_devices_map_; + // when true, List and Dict objects will be wrapped in a + // torch.jit._pickle.restore_type_tag call to correctly set the dynamic + // TorchScript type for the object. When true the thing unpickling must have + // torch installed. + bool tag_aggregates_; +}; + +// returns a (tensor, record_size) for a tensor, converting it to a CPU tensor +// if it was CUDA and to_cpu is True. 
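+//
+// For example (a sketch; `t` and `write_record` are hypothetical):
+//
+//   WriteableTensorData wtd = getWriteableTensorData(t);
+//   write_record(wtd.data(), wtd.sizeInBytes()); // raw bytes for the record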
+TORCH_API WriteableTensorData +getWriteableTensorData(const at::Tensor& tensor, bool to_cpu = true); + +// return the value of the tensor's storage pointer +uint64_t getStorageKey(const at::Tensor& tensor); + +// if the cls has __getstate__/__setstate__ +// assert they have the right schema and return true, +// otherwise return false +bool checkHasValidSetGetState(const std::shared_ptr& cls); + +// Declare BackendMeta serialization and deserialization function pointer types. +using BackendMetaPtr = std::function< + void(const at::Tensor&, std::unordered_map&)>; + +// A allowlist of device type, currently available is PrivateUse1 +inline std::unordered_set& GetBackendMetaAllowlist() { + static std::unordered_set DeviceTypeAllowlist{ + c10::DeviceType::PrivateUse1}; + return DeviceTypeAllowlist; +} + +// Dynamically obtain serialization function pairs +// that require the corresponding backend. +inline std::array< + c10::optional>, + at::COMPILE_TIME_MAX_DEVICE_TYPES>& +GetBackendMetaSerialization() { + // The array to save function pointer for BackendMeta serialization. + // key is the DeviceType, value is std::pair obj. + // value.first represent get function and value.seconde represent set function + static std::array< + c10::optional>, + at::COMPILE_TIME_MAX_DEVICE_TYPES> + BackendMetaSerialization; + return BackendMetaSerialization; +} + +// Register function pointer of Tensor BackendMetadata for serialization. +TORCH_API inline void TensorBackendMetaRegistry( + c10::DeviceType t, + const BackendMetaPtr& get_fptr, + const BackendMetaPtr& set_fptr) { + // allowlist verification + // Only if the devicetype is in the allowlist, + // we allow the serialization extension to be registered for backendmeta data. + const auto& DeviceTypeAllowlist = GetBackendMetaAllowlist(); + TORCH_CHECK( + DeviceTypeAllowlist.find(t) != DeviceTypeAllowlist.end(), + "It is not allowed to register the serialization method ", + "of backendMeta data for PrivateUse1. ", + "If you have related serialization requirements, ", + "please expand the allowlist"); + // Register function pointer + int device_type = static_cast(t); + auto& BackendMetaSerialization = GetBackendMetaSerialization(); + TORCH_CHECK( + !BackendMetaSerialization[device_type].has_value(), + "The tensor BackendMeta serialization function pointer for ", + t, + " has been registered."); + BackendMetaSerialization[device_type] = + c10::optional>( + std::make_pair(get_fptr, set_fptr)); +} + +// Return a map of Tensor Metadata which including BackendMetaData for +// serialization. For now, it only takes care of `conj` and `neg` bit. +inline std::unordered_map getTensorMetadata( + const at::Tensor& t) { + // We don't support serializing `ZeroTensor` as it is not public + // facing yet. + TORCH_CHECK( + !t._is_zerotensor(), + "ZeroTensor is not serializable,", + " please file an issue if required."); + std::unordered_map metadata{}; + + // Only add meta-data if the value is not default. + if (t.is_conj()) { + metadata["conj"] = true; + } + if (t.is_neg()) { + metadata["neg"] = true; + } + // Only add BackendMetaData for custom backend if the function pointer is + // registered. + int device_type = static_cast(t.device().type()); + const auto& BackendMetaSerialization = GetBackendMetaSerialization(); + if (BackendMetaSerialization[device_type].has_value()) { + // Pass the tensor and metadata map references as parameters to the custom + // serialization function. 
+ BackendMetaPtr fptr = BackendMetaSerialization[device_type].value().first; + fptr(t, metadata); + } + return metadata; +} + +// set Tensor Metadata based on the map. +// Refer: getTensorMetadata +inline void setTensorMetadata( + const at::Tensor& t, + std::unordered_map metadata) { + auto iter_end = metadata.end(); + auto iter_temp = metadata.find("conj"); + if (iter_temp != iter_end) { + t._set_conj(true); + metadata.erase(iter_temp); + } + iter_temp = metadata.find("neg"); + if (iter_temp != iter_end) { + t._set_neg(true); + metadata.erase(iter_temp); + } + // Only set BackendMetaData for custom backend if the function pointer is + // registered. + int device_type = static_cast(t.device().type()); + const auto& BackendMetaSerialization = GetBackendMetaSerialization(); + if (BackendMetaSerialization[device_type].has_value()) { + // Pass the tensor and metadata map references as parameters to the custom + // deserialization function. + BackendMetaPtr fptr = BackendMetaSerialization[device_type].value().second; + fptr(t, metadata); + } +} + +// set Tensor metadata based on the map. +// NOTE: This overload is required by unpickler.cpp +inline void setTensorMetadata( + const at::Tensor& t, + const c10::Dict& metadata_idict) { + std::unordered_map metadata; + for (auto& pair : metadata_idict) { + auto key = *pair.key().toString(); + metadata[key] = pair.value().toBool(); + } + setTensorMetadata(t, std::move(metadata)); +} + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/python_print.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/python_print.h new file mode 100644 index 0000000000000000000000000000000000000000..ede364ec02f81e4e4652fb659c801b62bbc8a99e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/python_print.h @@ -0,0 +1,58 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { + +struct Method; +struct Module; +struct PythonPrintImpl; + +struct PrintDepsTable { + void add(const c10::NamedTypePtr& type); + + size_t size() const { + return table_.size(); + } + + const c10::NamedTypePtr& operator[](size_t index) const { + return table_[index]; + } + + private: + std::vector table_; + std::unordered_set non_unique_; +}; + +struct TORCH_API PythonPrint { + PythonPrint( + std::vector& constant_table, + PrintDepsTable& deps_table, + c10::TypePrinter type_printer = nullptr, + bool enforce_importable = false); + + void printNamedType(const c10::NamedTypePtr& classType); + void printFunction(const Function& callee); + void printMethod(const Function& callee); + + std::string str() const; + const SourceRangeRecords& ranges() const; + uint64_t minVersion() const; + + private: + std::shared_ptr pImpl; +}; + +TORCH_API bool printerHasSpecialCaseFor(c10::Symbol sym); + +TORCH_API void jitModuleToPythonCodeAndConstants( + const Module& module, + ExtraFilesMap* jit_sources, // output + std::vector* constants // output +); + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/source_range_serialization_impl.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/source_range_serialization_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..2b7cd5a14ba9282090b025f23cdf28eb9bf1c5de --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/source_range_serialization_impl.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Do this clownyness with virtual functions because of the split +// between ATen core and torch + +class ConcreteSourceRangeUnpickler : public SourceRangeUnpickler { + public: + ConcreteSourceRangeUnpickler(at::DataPtr&& data, size_t size); + + c10::optional findSourceRangeThatGenerated( + const SourceRange& range) override; + + private: + at::DataPtr data; + size_t size; + + void unpickle(); + + std::mutex mutex; + std::shared_ptr deserializer; + std::shared_ptr unpickled_records; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/storage_context.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/storage_context.h new file mode 100644 index 0000000000000000000000000000000000000000..d065fc003cf4f4f5e7314e829f06b4c350b2143b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/storage_context.h @@ -0,0 +1,85 @@ +#pragma once + +#include + +namespace torch { +namespace jit { + +// Used in torch.package and TorchScript serialization to coordinate +// sharing of storages between models. Also used to create deterministic +// naming for storages. +class TORCH_API SerializationStorageContext { + public: + explicit SerializationStorageContext() = default; + SerializationStorageContext operator=(const SerializationStorageContext&) = + delete; + SerializationStorageContext(const SerializationStorageContext&) = delete; + + uint64_t getOrAddStorage(const c10::Storage& storage) { + if (!hasStorage(storage)) { + uint64_t size = storage_id_map_.size(); + storage_id_map_[storage] = size; + } + return storage_id_map_[storage]; + } + + bool hasStorage(const c10::Storage& storage) { + return storage_id_map_.find(storage) != storage_id_map_.end(); + } + + ~SerializationStorageContext() = default; + + private: + class StorageSerializationHash { + public: + size_t operator()(const c10::Storage& storage) const { + return std::hash()( + reinterpret_cast(storage.unsafeGetStorageImpl())); + } + }; + + class StorageSerializationEqual { + public: + bool operator()(const c10::Storage& lhs, const c10::Storage& rhs) const { + return lhs.unsafeGetStorageImpl() == rhs.unsafeGetStorageImpl(); + } + }; + + std::unordered_map< + c10::Storage, + uint64_t, + StorageSerializationHash, + StorageSerializationEqual> + storage_id_map_; +}; + +// Used in torch.package and TorchScript deserialization to coordinate +// sharing of storages between models. 
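+//
+// For example (a sketch; the storage name "0" and `storage` are illustrative):
+//
+//   DeserializationStorageContext ctx;
+//   if (!ctx.hasStorage("0")) {
+//     ctx.addStorage("0", storage); // first load materializes the storage
+//   }
+//   c10::Storage shared = ctx.getStorage("0"); // later loads share it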
+class TORCH_API DeserializationStorageContext { + public: + explicit DeserializationStorageContext() = default; + DeserializationStorageContext operator=( + const DeserializationStorageContext&) = delete; + DeserializationStorageContext(const DeserializationStorageContext&) = delete; + + void addStorage(std::string name, c10::Storage storage) { + TORCH_INTERNAL_ASSERT(!hasStorage(name)); + name_storage_map_.emplace(std::move(name), std::move(storage)); + } + + bool hasStorage(const std::string& name) { + return name_storage_map_.find(name) != name_storage_map_.end(); + } + + c10::Storage getStorage(const std::string& name) { + TORCH_INTERNAL_ASSERT(hasStorage(name)); + return name_storage_map_.find(name)->second; + } + ~DeserializationStorageContext() = default; + + private: + std::unordered_map name_storage_map_; +}; + +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/type_name_uniquer.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/type_name_uniquer.h new file mode 100644 index 0000000000000000000000000000000000000000..addedc5492cff19673d777887185cc894bf77321 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/type_name_uniquer.h @@ -0,0 +1,33 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { + +/** + * class TypeNameUniquer + * + * Generates a unique name for every type `t` passed in. Types that compare + * equal with EqualType will receive the same unique name. + * + * This is used during Module::save(), to resolve type name collisions during + * serialization. + */ +class TORCH_API TypeNameUniquer { + public: + c10::QualifiedName getUniqueName(c10::ConstNamedTypePtr t); + + private: + NameMangler mangler_; + std::unordered_set used_names_; + std::unordered_map< + c10::ConstNamedTypePtr, + c10::QualifiedName, + HashType, + EqualType> + name_map_; +}; +} // namespace jit +} // namespace torch diff --git a/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/unpickler.h b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/unpickler.h new file mode 100644 index 0000000000000000000000000000000000000000..bc980bf90522b5c2abec5d99cc3a06a2f66082e1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/jit/serialization/unpickler.h @@ -0,0 +1,203 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { + +using TypeResolver = + std::function; + +using ObjLoader = std::function< + c10::intrusive_ptr(const at::StrongTypePtr&, IValue)>; + +class DeserializationStorageContext; + +// [unpickler refactor] there is some cruft around PickleOpCode::BUILD, +// PickleOpCode::NEWOBJ, and the last_opcode_ member below that should be +// deleted at some point, the Pickler doesn't produce it and it's only around to +// support models saved before 1.1 +class TORCH_API Unpickler { + AT_DISALLOW_COPY_AND_ASSIGN(Unpickler); + + using TypeParserT = c10::TypePtr (*)(const std::string&); + + public: + // tensors inside the pickle are references to the tensor_table. + // class_resolver is to resolve strong class type, type_resolver_ is + // to resolve any JIT type. class_resolver and type_resolver are not merged + // here because some use cases need to get strong class type that + // type_resolver_ can not return. 
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Unpickler( + std::function reader, + TypeResolver type_resolver, + c10::ArrayRef tensor_table, + TypeParserT type_parser = defaultTypeParser) + : reader_(std::move(reader)), + tensor_table_(tensor_table), + type_resolver_(std::move(type_resolver)), + use_storage_device_(false), + type_parser_(type_parser), + version_(caffe2::serialize::kProducedFileFormatVersion) {} + + Unpickler( + std::function reader, + TypeResolver type_resolver, + c10::ArrayRef tensor_table, + ObjLoader obj_loader, + TypeParserT type_parser = defaultTypeParser) + : reader_(std::move(reader)), + tensor_table_(tensor_table), + type_resolver_(std::move(type_resolver)), + obj_loader_(std::move(obj_loader)), + use_storage_device_(false), + type_parser_(type_parser), + version_(caffe2::serialize::kProducedFileFormatVersion) {} + + // tensors inside the pickle contain meta-data, the raw tensor + // dead is retrieved by calling `read_record`. + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Unpickler( + std::function reader, + TypeResolver type_resolver, + ObjLoader obj_loader, + std::function read_record, + c10::optional device, + bool use_storage_device = false, + TypeParserT type_parser = defaultTypeParser, + std::shared_ptr storage_context = nullptr) + : reader_(std::move(reader)), + tensor_table_(), + type_resolver_(std::move(type_resolver)), + obj_loader_(std::move(obj_loader)), + read_record_(std::move(read_record)), + // NOLINTNEXTLINE(performance-move-const-arg) + device_(std::move(device)), + use_storage_device_(use_storage_device), + type_parser_(type_parser), + storage_context_(std::move(storage_context)), + version_(caffe2::serialize::kProducedFileFormatVersion) {} + + // consume the pickle stream, producing an IValue from the contents. + // Type Tags: the pickler will restore the type tags on + // List and Dict objects when possible IValue is an Object. + // Otherwise, Dict and List objects will end up with Any as their tag. + // If you know the type of the ivalue, tags can be restored with + // restoreAccurateTypeTags + IValue parse_ivalue(); + + // [type tag serialization] + // This is used to determine whether to restore type tags be recursively + // descending into the returned stack object (if version_number <= 2), or + // if version_number >= 3, to use the type strings included in the pickle + // archive for container types. By default this is set to + // `kProducedFileFormatVersion` so unless you're loading a pickle file + // from alongside a corresponding `version` file, you don't need to set + // the version manually. + void set_version(uint64_t version_number) { + version_ = version_number; + } + + static c10::TypePtr defaultTypeParser(const std::string& str) { + ScriptTypeParser parser; + return parser.parseType(str); + } + + private: + // No arguments ensures that a template argument must be specified + // so that the number of bytes read / type read is explicit + template + T read() { + T item; + if (sizeof(T) <= buffer_remaining_) { + // Fast path: entirely from buffer. + memcpy(&item, buffer_.data() + buffer_pos_, sizeof(T)); + buffer_remaining_ -= sizeof(T); + buffer_pos_ += sizeof(T); + } else { + // Don't over-template the slow path, to avoid code size bloat. 
+ readSlowWithBuffer(reinterpret_cast(&item), sizeof(T)); + } + return item; + } + void readSlowWithBuffer(char* dest, size_t sz); + std::string readBytes(size_t num_bytes); + + double readFloat(); + void readGlobal( + const std::string& module_name, + const std::string& class_name); + void rebuildTensor(bool quantized); + void rebuildTensorFromTypeV2(); + void rebuildSparseTensor(); +#ifdef USE_DISTRIBUTED + void rebuildRRef(); +#endif + PickleOpCode readInstruction(); + PickleOpCode readOpCode() { + return static_cast(read()); + } + std::string readString(); + void readList(IValue list_ivalue); + void readListElements(IValue list_ivalue, size_t start); + void setInput(size_t memo_id); + void run(); + + // Returns the number of bytes read. This should statefully + // remember the position. Don't call reader_ directly. + std::function reader_; + // Small buffer to avoid calling reader_ on a per-byte basis. + std::array buffer_; + size_t buffer_pos_{0}; + size_t buffer_remaining_{0}; + + std::vector stack_; + + // globals are represented on the stack as IValue integer indices + // into this list + std::vector> globals_; + std::vector memo_table_; + std::vector marks_; + c10::ArrayRef tensor_table_; + + // When deserializing types on lists and dicts, cache the type here + // so we don't have to parse the same type multiple times. Strings + // are already de-duplicated and replaced with BINGETs in the + // pickler, so we can just use the actual data pointer of each string. + std::unordered_map type_cache_; + + // optionally nullptr, needs to be present for creating classes + TypeResolver type_resolver_; + ObjLoader obj_loader_; + IValue empty_tuple_; + + std::function read_record_; + c10::optional device_; + // When set to true, Unpickler will ignore the pickled device and use the + // device of the DataPtr returned by the read_record_ function. The default + // value of this flag is false. + const bool use_storage_device_; + + TypeParserT type_parser_{defaultTypeParser}; + + // Used for torch.package to enable sharing of storages across + // ScriptModules and eager modules + std::shared_ptr storage_context_; + + // See [type tag serialization] + uint64_t version_; + + // See [NOTE] skip_next_read_global + uint8_t skip_next_read_global = 0; +}; + +void restoreAccurateTypeTags(const IValue& root, const c10::TypePtr& type_tag); + +} // namespace jit +} // namespace torch
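+
+// For example, an in-memory round trip through the Pickler (pickler.h above)
+// and this Unpickler could look like the following sketch (illustrative only;
+// error handling omitted):
+//
+//   std::vector<char> bytes;
+//   Pickler p([&](const char* buf, size_t n) {
+//     bytes.insert(bytes.end(), buf, buf + n);
+//   });
+//   p.protocol();
+//   p.pushIValue(IValue(42));
+//   p.stop();
+//
+//   size_t pos = 0;
+//   Unpickler u(
+//       [&](char* buf, size_t n) -> size_t {
+//         size_t m = std::min(n, bytes.size() - pos);
+//         std::memcpy(buf, bytes.data() + pos, m);
+//         pos += m;
+//         return m; // bytes actually produced
+//       },
+//       /*type_resolver=*/nullptr,
+//       /*tensor_table=*/{});
+//   IValue out = u.parse_ivalue(); // an IValue holding 42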