diff --git a/ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..2e2b57b6fa3b715fc8a1e09c053e3efca5e818c8 --- /dev/null +++ b/ckpts/universal/global_step20/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b02933cf8d72b33232bdea1819375ba11fd1c881b6d08f552ccde9d82b5a954 +size 33555612 diff --git a/ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..eeb68fc7dee1479a92d69fa3ab24ca5ef0dcfc05 --- /dev/null +++ b/ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:300476c759a9584056de01692721be4fb0c87c24610b227124cb5a950e8e36a9 +size 33555627 diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h new file mode 100644 index 0000000000000000000000000000000000000000..2a66cc3228470c56fa09a8ac086bfe5c3c676d09 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch::jit { + +void initJITBindings(PyObject* module); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/module_python.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/module_python.h new file mode 100644 index 0000000000000000000000000000000000000000..3ab34f5cd8e779af16d867b37faa4f32e5cd45df --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/module_python.h @@ -0,0 +1,35 @@ +#pragma once +#include +#include +#include +#include + +namespace py = pybind11; + +namespace torch::jit { + +inline c10::optional as_module(py::handle obj) { + static py::handle ScriptModule = + py::module::import("torch.jit").attr("ScriptModule"); + if (py::isinstance(obj, ScriptModule)) { + return py::cast(obj.attr("_c")); + } + return c10::nullopt; +} + +inline c10::optional as_object(py::handle obj) { + static py::handle ScriptObject = + py::module::import("torch").attr("ScriptObject"); + if (py::isinstance(obj, ScriptObject)) { + return py::cast(obj); + } + + static py::handle RecursiveScriptClass = + py::module::import("torch.jit").attr("RecursiveScriptClass"); + if (py::isinstance(obj, RecursiveScriptClass)) { + return py::cast(obj.attr("_c")); + } + return c10::nullopt; +} + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h new file mode 100644 index 0000000000000000000000000000000000000000..434881e419cb35367de1dcec81adbe0fc7d2c345 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h @@ -0,0 +1,213 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace py = pybind11; + +namespace torch::jit { + +// This is a variant of shared_ptr that "sees through" a wrapper. 
+// We use it to convert Value, Node, Block and node to "wrapped" Python +// values. When we destruct the C++ object, the wrapper's pointer will +// be set to 0 and any future dereferencing will throw. We need this +// because the Python objects may hang around after the C++ object +// has already been destroyed. +// This also needs the magic type_caster below, which is from the +// workaround offered in https://github.com/pybind/pybind11/issues/2751 +template +class unwrapping_shared_ptr { + static_assert( + std::is_same::value || + std::is_same::value || + std::is_same::value, + "unwrapping type only defined for Graph object types"); + + private: + std::shared_ptr> impl; + + public: + unwrapping_shared_ptr() : impl({}) {} + explicit unwrapping_shared_ptr(T* p) : impl(p->wrap()) { + impl->clear_cb = &clear_registered_instances; + } + T* get() const { + if (!impl->elem) { + throw std::logic_error("has been invalidated"); + } + return impl->elem; + } + // we need to disable the overloaded & for PyBind11 < 2.3 due. + // see https://github.com/pybind/pybind11/pull/1435 +#if (PYBIND11_VERSION_MAJOR > 2) || \ + ((PYBIND11_VERSION_MAJOR == 2) && (PYBIND11_VERSION_MINOR >= 3)) + T** operator&() { + if (!impl->elem) { + throw std::logic_error("has been invalidated"); + } + return &(impl->elem); + } +#endif +}; + +} // namespace torch::jit + +PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr, true); + +namespace pybind11::detail { + +#define CREATE_UNWRAPPING_CASTER(Class) \ + template <> \ + struct type_caster : public type_caster_base { \ + public: \ + using type = Class; \ + using holder_type = torch::jit::unwrapping_shared_ptr; \ + \ + bool load(handle src, bool convert) { \ + return load_impl>(src, convert); \ + } \ + \ + explicit operator type*() { \ + return static_cast(value); \ + } \ + explicit operator type&() { \ + return *static_cast(value); \ + } \ + \ + protected: \ + friend class type_caster_generic; \ + \ + bool load_value(value_and_holder&& v_h) { \ + if (v_h.holder_constructed()) { \ + value = v_h.template holder().get(); \ + return true; \ + } else { \ + throw cast_error( \ + "Unable to cast from non-held to held instance (#Class& to Holder<#Class>)"); \ + } \ + } \ + } + +CREATE_UNWRAPPING_CASTER(torch::jit::Node); +CREATE_UNWRAPPING_CASTER(torch::jit::Value); +CREATE_UNWRAPPING_CASTER(torch::jit::Block); + +#undef CREATE_UNWRAPPING_CASTER + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(torch::jit::IValue, _("IValue")); + + bool load(handle src, bool) { + try { + value = torch::jit::toTypeInferredIValue(src); + return true; + } catch (std::exception& e) { + return false; + } + } + + static handle cast( + torch::jit::IValue src, + return_value_policy /* policy */, + handle /* parent */) { + return torch::jit::toPyObject(std::move(src)).release(); + } +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(torch::jit::Symbol, _("Symbol")); + + bool load(handle src, bool) { + // TODO: Is there a way to py::cast that doesn't raise an exception on + // failure? Can we catch pybind11::cast_error here instead? 
+ std::string src_str; + try { + src_str = py::cast(src); + } catch (std::exception& e) { + return false; + } + value = torch::jit::Symbol::fromQualString(src_str); + return true; + } + + static handle cast( + torch::jit::Symbol src, + return_value_policy /* policy */, + handle /* parent */) { + return py::cast(std::string(src.toQualString()), return_value_policy::copy) + .release(); + } +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(torch::jit::AttributeKind, _("AttributeKind")); + + bool load(handle src, bool) { + return false; + } + + static handle cast( + torch::jit::AttributeKind src, + return_value_policy /* policy */, + handle /* parent */) { + return py::cast( + std::string(torch::jit::toString(src)), + return_value_policy::copy) + .release(); + } +}; + +// See https://github.com/pybind/pybind11/issues/637 +using ListCasterBase = pybind11::detail:: + list_caster, torch::jit::Node*>; +template <> +struct type_caster> : ListCasterBase { + static handle cast( + const std::vector& src, + return_value_policy, + handle parent) { + return ListCasterBase::cast(src, return_value_policy::reference, parent); + } + static handle cast( + const std::vector* src, + return_value_policy pol, + handle parent) { + return cast(*src, pol, parent); + } +}; + +} // namespace pybind11::detail + +namespace torch::jit { + +static inline py::tuple tuple_tail(const py::tuple& tup) { + py::tuple r(tup.size() - 1); + for (const auto i : c10::irange(1, tup.size())) { + r[i - 1] = tup[i]; + } + return r; +} + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..cbb7791652a86eed3036d9af2376ba4049b1de18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h @@ -0,0 +1,1158 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef USE_DISTRIBUTED +#include +#include +#endif + +#include +#include +#ifdef USE_C10D_NCCL +#include +#include +#endif +#include +#include +#include + +#include +#include +#include +#include +#include + +// The visibility attribute is to avoid a warning about storing a field in the +// struct that has a different visibility (from pybind) than the struct. 
+#ifdef _WIN32 +#define VISIBILITY_HIDDEN +#else +#define VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) +#endif + +namespace torch::jit { + +using ResolutionCallback = std::function; + +void clear_registered_instances(void* ptr); + +TORCH_PYTHON_API IValue toIValue( + py::handle obj, + const TypePtr& type, + c10::optional N = c10::nullopt); + +TORCH_PYTHON_API py::object toPyObject(IValue ivalue); + +// Hack to overload the behavior of toIValue to accept Python +// numbers in places where a Tensor is expected +// See also torch::should_allow_numbers_as_tensors +class ToIValueAllowNumbersAsTensors { + bool old_; + + public: + ToIValueAllowNumbersAsTensors(bool enable); + ~ToIValueAllowNumbersAsTensors(); +}; + +// Wrap Python function to guard deref +// NB: Need VISIBILITY_HIDDEN for silencing compiler error, +// 'torch::jit::PythonFunctionGuard' declared with greater visibility than the +// type of its field 'torch::jit::PythonFunctionGuard::func_' +struct VISIBILITY_HIDDEN PythonFunctionGuard { + explicit PythonFunctionGuard(py::function func) : func_(std::move(func)) {} + + ~PythonFunctionGuard() { + pybind11::gil_scoped_acquire ag; + func_.dec_ref(); + // explicitly setting PyObject* to nullptr to prevent py::object's dtor to + // decref on the PyObject again. + // See Note [Destructing py::object] in python_ivalue.h + func_.ptr() = nullptr; + } + + py::function func_; +}; + +// The PythonFutureWrapper for ivalue::Future +// +// NB: VISIBILITY_HIDDEN is for silencing compiling error, +// "error: 'torch::jit::PythonFutureWrapper' declared with greater visibility +// than the type of its field 'torch::jit::PythonFutureWrapper::unwrap_func' +// [-Werror=attributes]" +// +// NB: inherit from enable_shared_from_this because then(py::function) needs to +// get a shared_ptr from this pointer. +struct VISIBILITY_HIDDEN PythonFutureWrapper + : std::enable_shared_from_this { + using UnwrapFunc = std::function; + + explicit PythonFutureWrapper( + c10::intrusive_ptr fut, + c10::optional unwrap_func = c10::nullopt) + : fut(std::move(fut)), unwrap_func(std::move(unwrap_func)) {} + + explicit PythonFutureWrapper(const PythonFutureWrapper&) = delete; + PythonFutureWrapper& operator=(const PythonFutureWrapper&) = delete; + + bool done() { + return fut->completed(); + } + + py::object value() { + // acquiring GIL as toPyObject creates new py::object + // without grabbing the GIL. + py::gil_scoped_acquire acquire; + py::object py_obj = toPyObject(fut->value()); + // unwrap_func is a general compositional function that takes in a + // py::object and executes some python function. It is currently mostly used + // to throw python exceptions. + if (unwrap_func) { + (*unwrap_func)(py_obj); + } + return py_obj; + } + + py::object wait() { + fut->wait(); + if (jit::tracer::isTracing()) { + auto graph = jit::tracer::getTracingState()->graph; + + Value* fut_val = jit::tracer::getValueTrace(fut); + auto output = graph->insert(aten::wait, {fut_val}); + jit::tracer::setValueTrace(fut->value(), output); + } + return value(); + } + + // The py::function cb arg must take a std::shared_ptr + // (i.e., torch._C.Future) as the only argument. If the type mismatches, an + // error will be thrown when waiting for the value of this returned Future. + std::shared_ptr then(py::function cb) { + // We need this an additional layer of wrapper here to guard the + // destruction of the py::function object. 
Because, the + // Future owns a reference to the py::function in its callback + // vector, but Future does not acquire GIL on destruction. + auto pf = std::make_shared(std::move(cb)); + + return std::make_shared(fut->then( + // Capture a copy of the ivalue::Future instead of the `this` pointer + // because the PythonFutureWrapper object could have been deleted + // when the callbacks are fired. For example, RPC only captures the + // ivalue::Future instead of PythonFutureWrapper in JitFuture's + // callback functions. Hence, if user code does not hold a reference to + // this PythonFutureWrapper object, there is no guarantee that the + // PythonFutureWrapper is still valid when running the callback. + [pyFut(this->getPtr()), + pf(std::move(pf))](c10::ivalue::Future& /* unused */) -> IValue { + try { + pybind11::gil_scoped_acquire ag; + return toIValue(pf->func_(pyFut), PyObjectType::get()); + } catch (py::error_already_set& e) { + auto err = std::runtime_error(c10::str( + "Got the following error when running the callback: ", + e.what())); + { + pybind11::gil_scoped_acquire ag; + // Release ownership on py::objects and also restore Python + // Error Indicator. + e.restore(); + // Clear the Python Error Indicator as we has recorded the + // exception in the response message. + PyErr_Clear(); + } + + throw err; + } + }, + PyObjectType::get())); + } + + void add_done_callback(py::function cb) { + auto pf = std::make_shared(std::move(cb)); + // NOLINTNEXTLINE(modernize-avoid-bind) + fut->addCallback(std::bind( + [pyFut(this->getPtr())](std::shared_ptr pf) { + try { + pybind11::gil_scoped_acquire ag; + pf->func_(pyFut); + } catch (py::error_already_set& e) { + { + pybind11::gil_scoped_acquire ag; + // Release ownership on py::objects and also restore Python + // Error Indicator. + e.restore(); + // Clear the Python Error Indicator as we has recorded the + // exception in the response message. + PyErr_Clear(); + } + // Log and ignore exceptions raised through the callback + LOG(ERROR) << "Got the following error when running the callback: " + << e.what(); + + } catch (const std::exception& e) { + // Log and ignore exceptions raised through the callback + LOG(ERROR) << "Got the following error when running the callback: " + << e.what(); + } + }, + std::move(pf))); + } + + void markCompleted(const py::object& pyValue) { + DCHECK(PyGILState_Check()); + IValue value = toIValue(pyValue, PyObjectType::get()); + + py::gil_scoped_release release; + fut->markCompleted(std::move(value)); + } + + c10::intrusive_ptr fut; + // unwrap_func works like a callback for the value returned by + // PythonFutureWrapper::wait(). + c10::optional unwrap_func; + + private: + std::shared_ptr getPtr() { + return shared_from_this(); + } +}; + +// The PythonAwaitWrapper for ivalue::Await +// +// Expresses delayed function execution with Lazy semantic. +// i.e. Await[W] in eager mode can be used as W. +// When the attribute of W type is requested, Await[W] will return the +// attribute of W, transparently calling wait() beforehand. +// No Lazy semantic for script, explicit wait(Await[W]) -> W must be called to +// convert to type W. +// +// The Await object takes shared ownership of specified function and the +// arguments. After first call for wait() it owns the result. Deliberately no +// type inference for eager mode. 
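
// Illustrative sketch (not a line of the file added in this diff): a minimal
// example of the two construction modes described in the comment above, using
// the PythonAwaitWrapper defined just below. The helper name await_sketch and
// its arguments are invented for illustration; it assumes the caller already
// holds the GIL and that this code sits after the struct definition.
inline py::object await_sketch(
    py::function fn,
    py::tuple args,
    py::handle ready_value) {
  // Delayed form: fn(*args) is executed lazily, on the first wait().
  auto delayed =
      std::make_shared<PythonAwaitWrapper>(std::move(fn), std::move(args));

  // "Nowait" form: wraps an already computed value; is_nowait() reports it.
  auto nowait = std::make_shared<PythonAwaitWrapper>(ready_value);
  TORCH_CHECK(nowait->is_nowait());

  // wait() acquires the GIL internally and returns the result as py::object.
  return delayed->wait();
}
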
+struct VISIBILITY_HIDDEN PythonAwaitWrapper + : std::enable_shared_from_this { + explicit PythonAwaitWrapper(c10::intrusive_ptr aw) + : aw_(std::move(aw)) {} + explicit PythonAwaitWrapper(py::handle input) { + args_ = py::tuple(1u); + args_[0] = input; + auto type = PyObjectType::get(); + aw_ = c10::make_intrusive(type); + aw_->markCompleted(toIValue(input, type)); + } + + explicit PythonAwaitWrapper(py::function pf, py::tuple args) { + pyfg_ = std::make_shared(std::move(pf)); + args_ = std::move(args); + std::function f = [fg(pyfg_), &args(args_)]() { + pybind11::gil_scoped_acquire ag; + return toIValue(fg->func_(*args), PyObjectType::get()); + }; + aw_ = c10::make_intrusive( + PyObjectType::get(), std::move(f)); + } + + explicit PythonAwaitWrapper(const PythonAwaitWrapper&) = delete; + PythonAwaitWrapper& operator=(const PythonAwaitWrapper&) = delete; + + py::object wait() { + py::gil_scoped_acquire acquire; + return toPyObject(aw_->wait()); + } + + // Nowait semantic means trivial case when Await is constructed from the + // result + bool is_nowait() { + return pyfg_ == nullptr; + } + + const py::function fn() { + TORCH_CHECK( + pyfg_, "Await constructed as awaitable_nowait does not have fn"); + return pyfg_->func_; + } + + const py::tuple args() { + return args_; + } + + TypePtr type() { + return aw_->type(); + } + + c10::intrusive_ptr aw_; + std::shared_ptr pyfg_; + py::tuple args_; + + private: + std::shared_ptr getPtr() { + return shared_from_this(); + } +}; + +// error reporting: when reporting user-caused errors, these functions should +// not use AT_ERROR macros, since these macros add stack trace information +// that is confusing to display to the end user since it always reports +// locations in libtorch code rather than user code. + +inline std::shared_ptr get_python_cu() { + return py::module::import("torch.jit._state") + .attr("_python_cu") + .cast>(); +} + +struct TypedIValue : public std::pair { + using pair::pair; + + IValue& ivalue() { + return this->first; + } + TypePtr& type() { + return this->second; + } +}; + +inline TypedIValue toDictKeyIValue(py::handle key) { + if (py::isinstance(key)) { + return TypedIValue( + ConstantString::create(py::cast(key)), StringType::get()); + } else if (py::isinstance(key)) { + return TypedIValue(py::cast(key), IntType::get()); + } else if (py::isinstance(key)) { + return TypedIValue(py::cast(key), FloatType::get()); + } else { + AT_ERROR("Dictionary inputs may only have string, int, or float keys"); + } +} + +inline c10::optional unifyOrInitializeType( + const TypePtr& accum, + const TypePtr& unify) { + if (!accum) { + return unify; + } + return unifyTypes(accum, unify); +} + +using InferredType = c10::InferredType; + +InferredType tryToInferContainerType(py::handle input, bool primitiveTypeOnly); + +// Try to infer the type of a Python object +// The type cannot be inferred if: +// input is an empty container (list, dict) +// input is an list with element types that cannot be unified +// input is an dict with key or value types that cannot be unified +inline InferredType tryToInferType(py::handle input) { + // Try tensor types + if (THPVariable_Check(input.ptr())) { + return InferredType(TensorType::get()); + } + + if (input.is_none()) { + return InferredType(NoneType::get()); + } + + if (py::isinstance(input)) { + auto fn = py::cast(input).function_; + return InferredType(FunctionType::create(fn)); + } + + // Try basic types first + if (py::isinstance(input)) { + return InferredType(BoolType::get()); + // 
NOLINTNEXTLINE(bugprone-branch-clone) + } else if (py::isinstance(input)) { + return InferredType(IntType::get()); + } else if (py::isinstance(input)) { + return InferredType(FloatType::get()); + } else if (PyComplex_CheckExact(input.ptr())) { + return InferredType(ComplexType::get()); + } else if (py::isinstance(input)) { + return InferredType(StringType::get()); + } else if (THPLayout_Check(input.ptr())) { + return InferredType(IntType::get()); + } else if (THPDevice_Check(input.ptr())) { + return InferredType(DeviceObjType::get()); + } else if (THPGenerator_Check(input.ptr())) { + return InferredType(GeneratorType::get()); + } else if (THPStream_Check(input.ptr())) { + return InferredType(StreamObjType::get()); + } else if (THPDtype_Check(input.ptr())) { + return InferredType(IntType::get()); + } else if (THPQScheme_Check(input.ptr())) { + return InferredType(IntType::get()); + } else if (THPLayout_Check(input.ptr())) { + return InferredType(IntType::get()); + } + + auto enum_type = py::module::import("enum").attr("Enum"); + py::bool_ isEnumValue = py::isinstance(input, enum_type); + if (py::cast(isEnumValue)) { + auto enum_class = input.attr("__class__"); + auto enum_type = py::cast( + py::module::import("torch.jit.annotations") + .attr("try_ann_to_type")(enum_class, SourceRange())); + return InferredType(std::move(enum_type)); + } + + py::bool_ isClass = + py::module::import("inspect").attr("isclass")(input.get_type()); + if (py::cast(isClass)) { + // Assume that the class is compiled already or will compile. Invalidate + // this later if needed. + bool class_compiled = true; + + // Check if the type is already compiled. + py::object existing_ty = py::module::import("torch.jit._state") + .attr("_get_script_class")(input.get_type()); + + if (existing_ty.is_none()) { + // If not, try to compile it. + py::bool_ can_compile = py::module::import("torch._jit_internal") + .attr("can_compile_class")(input.get_type()); + + if (py::cast(can_compile)) { + // Try to compile the class. This is wrapped in a try-catch because + // compilation of class types can raise an Exception and in that case, + // we want to defer to other attempts at type inference below rather + // than fail compilation altogether. + try { + py::module::import("torch.jit._script") + .attr("_recursive_compile_class")( + input.get_type(), SourceRange()); + } catch (...) { + // Invalidate the assumption that the class compiled so that we don't + // look up and return its JIT type as the type for the input. + class_compiled = false; + } + } + } + + // If the class compiled successfully, look up the existing JIT type by + // qualified name and return it. 
+ if (class_compiled) { + auto script_class = py::module::import("torch.jit._state") + .attr("_get_script_class")(input.get_type()); + + if (!script_class.is_none()) { + auto class_type = py::cast(script_class); + + if (class_type && !class_type->is_module()) { + return InferredType(std::move(class_type)); + } + } + } + } + + if (py::isinstance(input)) { + auto object = py::cast(input); + return InferredType(object.type()); +#ifdef USE_RPC + } else if (py::isinstance(input)) { + auto rref_ivalue = input.cast().toIValue(); + return InferredType(rref_ivalue.type()); +#endif + } + + auto await_type = py::module::import("torch._awaits").attr("_Await"); + py::bool_ is_await = py::isinstance(input, await_type); + if (py::cast(is_await)) { + auto awptr = input.cast>(); + return InferredType(AwaitType::create(awptr->aw_->elementType())); + } + + if (as_module(py::cast(input))) { + return InferredType("Cannot infer type of ScriptModule"); + } + + auto module_type = py::module::import("torch.nn").attr("Module"); + py::bool_ is_module = py::isinstance(input, module_type); + if (py::cast(is_module)) { + return InferredType("Cannot infer concrete type of torch.nn.Module"); + } + + // Try container types + return tryToInferContainerType(input, false); +} + +// This function is similar to tryToInferType, but it only tries to infer +// primitive types (int, float, bool, complex) or nested container of primitive +// types. +inline InferredType tryToInferPrimitiveType(py::handle input) { + if (input.is_none()) { + return InferredType(NoneType::get()); + } + + // Only primitive data type + if (py::isinstance(input)) { + return InferredType(BoolType::get()); + // NOLINTNEXTLINE(bugprone-branch-clone) + } else if (py::isinstance(input)) { + return InferredType(IntType::get()); + } else if (py::isinstance(input)) { + return InferredType(FloatType::get()); + } else if (PyComplex_CheckExact(input.ptr())) { + return InferredType(ComplexType::get()); + } + + // Try container types + return tryToInferContainerType(input, true); +} + +inline InferredType tryToInferContainerType( + py::handle input, + bool primitiveTypeOnly = false) { + if (six::isTuple(input)) { + py::tuple tuple = py::cast(input); + std::vector element_types; + element_types.reserve(tuple.size()); + + for (py::handle elem : tuple) { + auto type_match = primitiveTypeOnly ? tryToInferPrimitiveType(elem) + : tryToInferType(elem); + if (type_match.success()) { + element_types.push_back(type_match.type()); + } else { + // Forward error message along + return type_match.reason(); + } + } + return InferredType(TupleType::create(std::move(element_types))); + } else if (PyDict_Check(input.ptr())) { + // Check to make sure we can generate useful input/output types + auto dict = py::cast(input); + size_t len = py::len(dict); + if (!len) { + return InferredType("Dictionary inputs must have entries"); + } + + TypePtr key_type = nullptr; + TypePtr value_type = nullptr; + + for (auto entry : dict) { + // Try to infer the key type and unify it with the existing one + auto entry_key_type_match = primitiveTypeOnly + ? tryToInferPrimitiveType(entry.first) + : tryToInferType(entry.first); + if (!entry_key_type_match.success()) { + return entry_key_type_match.reason(); + } + auto unified_key = + unifyOrInitializeType(key_type, entry_key_type_match.type()); + if (!unified_key) { + return InferredType(c10::str( + "Dictionary inputs to traced functions must have consistent type. 
Found ", + key_type->repr_str(), + " and ", + (entry_key_type_match.type())->repr_str())); + } + + // Try to infer the value type and unify it with the existing one + auto entry_value_type_match = primitiveTypeOnly + ? tryToInferPrimitiveType(entry.second) + : tryToInferType(entry.second); + if (!entry_value_type_match.success()) { + return entry_value_type_match.reason(); + } + auto unified_value = + unifyOrInitializeType(value_type, entry_value_type_match.type()); + if (!unified_value) { + return InferredType(c10::str( + "Dictionary inputs to traced functions must have consistent type. Found ", + value_type->repr_str(), + " and ", + (entry_value_type_match.type())->repr_str())); + } + + key_type = *unified_key; + value_type = *unified_value; + } + return InferredType( + DictType::create(std::move(key_type), std::move(value_type))); + } else if (PyList_Check(input.ptr())) { + auto list = py::cast(input); + size_t len = py::len(list); + if (!len) { + return InferredType("List trace inputs must have elements"); + } + + TypePtr element_type = nullptr; + for (auto elem : list) { + auto element_type_match = primitiveTypeOnly + ? tryToInferPrimitiveType(elem) + : tryToInferType(elem); + if (!element_type_match.success()) { + return InferredType(c10::str( + "Could not infer type of list element: ", + element_type_match.reason())); + } + auto unified_type = + unifyOrInitializeType(element_type, element_type_match.type()); + if (!unified_type) { + return InferredType(c10::str( + "List inputs to traced functions must have consistent element type. Found ", + element_type->repr_str(), + " and ", + (element_type_match.type())->repr_str())); + } + element_type = *unified_type; + } + return InferredType(ListType::create(element_type)); + } else { + if (primitiveTypeOnly) { + return InferredType(c10::str( + "Only tuple, list, or dict (possibly nested) of primitive types (bool, float, int, complex)", + "are supported ", + "as inputs or outputs of traced functions", + ", but instead got value of type ", + py::str(input.get_type().attr("__name__")), + ".")); + } else { + // TODO: this message is not correct anymore, since this InferredType is + // used from a bunch of circumstances unrelated to tracing. We can re-use + // this instead of the attribute_failure stuff in concreteType + return InferredType(c10::str( + "Only tensors and (possibly nested) tuples of tensors, lists, or dicts", + "are supported ", + "as inputs or outputs of traced functions", + ", but instead got value of type ", + py::str(input.get_type().attr("__name__")), + ".")); + } + } +} + +inline bool isTraceableType(const TypePtr& type) { + if (type->isSubtypeOf(*TensorType::get())) { + return true; + } + + if (auto list_type = type->cast()) { + return isTraceableType(list_type->getElementType()); + } + + if (auto tuple_type = type->cast()) { + return std::all_of( + tuple_type->elements().begin(), + tuple_type->elements().end(), + [](const TypePtr& element_type) { + return isTraceableType(element_type); + }); + } + + if (auto dict_type = type->cast()) { + return isTraceableType(dict_type->getValueType()); + } + + return false; +} + +inline IValue toTypeInferredIValue(py::handle input) { + auto match = tryToInferType(input); + if (!match.success()) { + auto object = py::cast(input); + if (auto mod = as_module(object)) { + // if obj is already a ScriptModule, just return its ivalue + auto ptr = mod.value()._ivalue(); + // explict copy semantics for strong ownership of the resource. 
+ return c10::intrusive_ptr::reclaim_copy( + ptr.release()); + } + + // Check if the obj is a ScriptObject. + if (auto script_obj = as_object(object)) { + auto ptr = script_obj.value()._ivalue(); + return c10::intrusive_ptr::reclaim_copy( + ptr.release()); + } + AT_ERROR( + "Tracer cannot infer type of ", py::str(input), "\n:", match.reason()); + } + return toIValue(input, match.type()); +} + +inline Stack toTraceableStack(const py::tuple& inputs) { + auto info = toTypeInferredIValue(inputs); + TORCH_CHECK( + isTraceableType(info.type()), + "Type '", + info.type()->repr_str(), + "' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and" + " Tuples of Tensors can be traced"); + return info.toTupleRef().elements().vec(); +} + +// Serialize the python dictionary into a traceable stack. +inline Stack toTraceableStack(const py::dict& inputs) { + Stack res; + for (auto it = inputs.begin(); it != inputs.end(); it++) { + if (THPVariable_Check(it->second.ptr())) { + res.push_back(toIValue(it->second, tryToInferType(it->second).type())); + } + } + return res; +} + +inline IValue createGenericList(py::handle obj, const TypePtr& elem_type) { + auto elems = c10::impl::GenericList(elem_type); + for (auto elem : obj) { + elems.push_back(toIValue(elem, elem_type)); + } + return IValue(elems); +} + +inline IValue createGenericDict( + const py::dict& obj, + const TypePtr& key_type, + const TypePtr& value_type) { + c10::impl::GenericDict elems(key_type, value_type); + elems.reserve(py::len(obj)); + for (auto& entry : obj) { + elems.insert( + toIValue(entry.first, key_type), toIValue(entry.second, value_type)); + } + return IValue(elems); +} + +template +inline void guardAgainstNamedTensor(const T& var) { + TORCH_CHECK( + !var.has_names(), + "NYI: Named tensors are currently unsupported in TorchScript. As a " + "workaround please drop names via `tensor = tensor.rename(None)`."); +} + +// Extract custom class registered with torchbind +template +c10::intrusive_ptr toCustomClass(py::handle obj) { + static_assert( + std::is_base_of::value, "T is not a CustomClass"); + const auto& type = c10::getCustomClassType>(); + c10::IValue ivalue = toIValue(obj, type); + return std::move(ivalue).toCustomClass(); +} + +// Small wrapper around getting the type name string from Python to make +// types easier to interpret, e.g. give the structural type for a NamedTuple +inline std::string friendlyTypeName(py::handle obj) { + if (py::isinstance(obj) && py::hasattr(obj, "_fields")) { + auto field_names = + py::cast>(py::getattr(obj, "_fields")); + std::stringstream ss; + ss << py::str(obj.get_type().attr("__name__")); + ss << " (aka NamedTuple("; + bool first = true; + for (auto& field_name : field_names) { + if (!first) { + ss << ", "; + } + ss << field_name; + first = false; + } + ss << "))"; + return ss.str(); + } else { + return py::str(obj.get_type().attr("__name__")); + } +} + +// Thrown when trying to create a schema for a list of python +// arguments that cannot be converted. +// Can be caught by the caller to attempt to use other schema +// when there is an overloaded operator. 
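
// Illustrative sketch (not a line of the file added in this diff): the
// overload-fallback pattern that the comment above describes, i.e. catching
// schema_match_error and moving on to the next candidate schema. It relies on
// tuple_slice, createStackForSchema, and the Operator class declared elsewhere
// in this header; the helper name tryMatchAnyOverload is invented here.
inline c10::optional<Stack> tryMatchAnyOverload(
    const std::vector<std::shared_ptr<Operator>>& operations,
    const tuple_slice& args,
    const py::kwargs& kwargs) {
  for (const auto& op : operations) {
    try {
      // Throws schema_match_error if args/kwargs cannot be converted to the
      // argument types of this overload's schema.
      return createStackForSchema(op->schema(), args, kwargs, c10::nullopt);
    } catch (const schema_match_error&) {
      continue; // try the next overload
    }
  }
  return c10::nullopt; // no overload accepted these arguments
}
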
+struct schema_match_error : public std::runtime_error { + using std::runtime_error::runtime_error; +}; + +inline IValue argumentToIValue( + const FunctionSchema& schema, + size_t argumentPosition, + py::handle object) { + const auto& argument = schema.arguments().at(argumentPosition); + try { + return toIValue(object, argument.real_type(), argument.N()); + } catch (const py::cast_error& error) { + throw schema_match_error(c10::str( + schema.formatTypeMismatchMsg( + argument, + friendlyTypeName(object), + argumentPosition, + py::repr(object)), + "\nCast error details: ", + error.what())); + } catch (const py::error_already_set& error) { + throw schema_match_error(c10::str( + schema.formatTypeMismatchMsg( + argument, + friendlyTypeName(object), + argumentPosition, + py::repr(object)), + "\n Python error details: ", + error.what())); + } +} + +inline IValue returnToIValue(const TypePtr& type, py::handle object) { + try { + return toIValue(object, type); + } catch (const py::cast_error& error) { + throw std::runtime_error(c10::str( + " expected value of type ", + type->str(), + " for return value but instead got value of type ", + py::str(object.get_type().attr("__name__")), + ".", + "\nValue: ", + py::repr(object), + "\nCast error details: ", + error.what())); + } +} + +inline py::object getScriptedClassOrError(const c10::NamedTypePtr& classType) { + auto py_class = + py::module::import("torch.jit._state") + .attr("_get_python_class")(classType->name()->qualifiedName()); + if (py_class.is_none()) { + std::stringstream err; + err << "Unknown reference to ScriptClass "; + err << classType->name()->qualifiedName(); + err << ". (Did you forget to import it?)"; + throw std::runtime_error(err.str()); + } + return py_class; +} + +struct VISIBILITY_HIDDEN tuple_slice { + /*implicit*/ tuple_slice(py::tuple tup_) + : tup(std::move(tup_)), b(0), e(tup.size()) {} + tuple_slice(py::tuple tup_, int64_t b_) + : tup(std::move(tup_)), b(b_), e(tup.size()) {} + tuple_slice(py::tuple tup_, int64_t b_, int64_t e_) + : tup(std::move(tup_)), b(b_), e(e_) {} + py::detail::tuple_iterator begin() const { + return {tup, static_cast(b)}; + } + py::detail::tuple_iterator end() const { + return {tup, static_cast(e)}; + } + size_t size() const { + return e - b; + } + py::detail::tuple_accessor operator[](size_t index) const { + return {tup, static_cast(b + index)}; + } + + private: + py::tuple tup; + int64_t b; + int64_t e; +}; + +inline Stack createStackForSchema( + const FunctionSchema& schema, + const tuple_slice& args, + const py::kwargs& kwargs, + c10::optional self) { + size_t all_arguments = (self ? 1 : 0) + args.size() + kwargs.size(); + if (all_arguments > schema.arguments().size()) { + throw schema_match_error(c10::str( + schema.name(), + "() expected at most ", + schema.arguments().size(), + " argument(s) but received ", + all_arguments, + " argument(s). Declaration: ", + schema)); + } + Stack stack; + stack.reserve(schema.arguments().size()); + + int64_t arg_idx = 0; + if (self) { + push(stack, std::move(*self)); + arg_idx++; + } + // First push all positional args. + for (const auto& arg : args) { + // ...but refuse to do it if the schema says that this was supposed + // to be keyword only + if (schema.arguments()[arg_idx].kwarg_only()) { + throw schema_match_error(c10::str( + schema.name(), + "() takes ", + arg_idx, + " positional argument(s) but ", + self ? 1 + args.size() : args.size(), + " was/were given. 
Declaration: ", + schema)); + } + // Use the type information from the schema to convert the PyObject. + push(stack, argumentToIValue(schema, stack.size(), arg)); + arg_idx++; + } + + // Now for every remaining non-positional argument in the schema, look for it + // in the kwargs dict and push it if found, or use its default value if it + // has one. + size_t consumed_kwargs = 0; + for (size_t i = stack.size(); i < schema.arguments().size(); ++i) { + const auto& arg = schema.arguments()[i]; + if (kwargs.contains(arg.name().c_str())) { + push(stack, argumentToIValue(schema, i, kwargs[arg.name().c_str()])); + consumed_kwargs += 1; + } else if (arg.default_value()) { + push(stack, *arg.default_value()); + } else { + throw schema_match_error(c10::str( + schema.name(), + "() is missing value for argument '", + arg.name(), + "'. Declaration: ", + schema)); + } + } + + if (consumed_kwargs != kwargs.size()) { + std::vector names; + for (const auto& kwarg : kwargs) { + names.emplace_back(py::cast(kwarg.first)); + } + throw schema_match_error(schema.findErrorInKwargs(names)); + } + + return stack; +} + +inline py::object createPyObjectForStack(Stack&& stack) { + if (stack.empty()) { + return py::none(); + } + + // Return a simple value and not a single-element tuple if there is only one + // return value. + if (stack.size() == 1) { + return toPyObject(std::move(stack[0])); + } + + // If there is more than one return value, pop them into a py::tuple. + py::tuple return_values(stack.size()); + for (const auto ret : c10::irange(return_values.size())) { + return_values[ret] = toPyObject(std::move(stack[ret])); + } + + return std::move(return_values); +} + +// TODO: Remove once we clean up the GraphExecutor usage. +inline Stack evilDeprecatedBadCreateStackDoNotUse( + const py::tuple& tuple, + at::ArrayRef inputs, + size_t reserve_extra_space = 0) { + if (tuple.size() != inputs.size()) { + AT_ERROR( + "expected " + std::to_string(inputs.size()) + " inputs, but got " + + std::to_string(tuple.size())); + } + Stack result; + result.reserve(tuple.size() + reserve_extra_space); + for (const auto i : c10::irange(inputs.size())) { + result.push_back(toIValue(std::move(tuple[i]), inputs[i]->type())); + } + return result; +} + +// Run `callee`, potentially inserting a CallFunction/CallMethod node into the +// tracing graph. +inline py::object runAndInsertCall( + Function& callee, + const tuple_slice& args, + const py::kwargs& kwargs, + c10::optional self, + // Lambda that tells this function how to insert `callee` into the graph if + // we're tracing. + const std::function& + callInserter) { + auto stack = + createStackForSchema(callee.getSchema(), args, kwargs, std::move(self)); + const auto& tracing_state = tracer::getTracingState(); + if (!tracing_state) { + pybind11::gil_scoped_release no_gil_guard; + // If we're not tracing, just run the callee as normal. + callee.run(stack); + } else { + // If we are tracing, insert the appropriate CallFunction or CallMethod node + // and then run the callee with tracing disabled. 
+ + // Get the graph `Value`s that represent the input IValues + auto inputs = last(stack, callee.num_inputs()); + auto input_values = + fmap(inputs, [](const IValue& v) { return tracer::getValueTrace(v); }); + TORCH_INTERNAL_ASSERT(callee.getSchema().returns().size() == 1) + auto return_type = callee.getSchema().returns().at(0).type(); + auto graph = tracing_state->graph; + std::vector named_values; + named_values.reserve(input_values.size()); + for (Value* v : input_values) { + named_values.emplace_back(v); + } + + // Add a call node. + MatchedSchema match = matchSchema( + callee.getSchema(), + tracer::getPythonInterpreterSourceRange(), + *graph, + named_values, + {}); + auto output_value = callInserter(*graph, match); + + // Actually run the callee. Pause the tracer so that we don't double-add the + // callee nodes. + { + pybind11::gil_scoped_release no_gil_guard; + ResourceGuard guard(tracer::pauseTracing()); + callee.run(stack); + } + + // Associate the output IValues with the output `Value`s in the graph + tracer::setValueTrace(stack.back(), output_value); + } + + TORCH_CHECK( + !stack.empty(), + "Expected values in the stack after execution but found none"); + return toPyObject(std::move(stack.back())); +} + +inline c10::optional maybeTorchFunctionDispatch( + const py::object& callee, + const tuple_slice& args_no_self, + const py::kwargs& kwargs, + const c10::QualifiedName qualname) { + std::vector args_vec; + for (const auto& arg : args_no_self) { + args_vec.push_back(arg); + } + py::tuple args = py::cast(args_vec); + + // Handle __torch_function__ dispatch + std::vector overloaded_args; + size_t total_arg_num = args.size() + kwargs.size(); + for (const auto& arg : args) { + is_tensor_and_append_overloaded(arg.ptr(), &overloaded_args); + is_tensor_list_and_append_overloaded( + arg.ptr(), + &overloaded_args, + static_cast(total_arg_num), + false /* throw_error */); + } + // NB: for kwargs, we cannot guarantee the order of appending + // is the same as the argument order in operator's schema. + // This is suboptimal, but should be fine. Later when we have + // better schema matching and argument parsing, we could + // match the operator in `operations` first, then the order will + // be guaranteed. 
+ for (auto item : kwargs) { + is_tensor_and_append_overloaded(item.second.ptr(), &overloaded_args); + is_tensor_list_and_append_overloaded( + item.second.ptr(), + &overloaded_args, + total_arg_num, + false /* throw_error */); + } + if (!overloaded_args.empty()) { + return pybind11::reinterpret_steal( + handle_torch_function_no_python_arg_parser( + /*overloaded_args=*/overloaded_args, + /*args=*/args.ptr(), + /*kwargs=*/kwargs.ptr(), + /*func_name=*/qualname.name().c_str(), + /*torch_api_function=*/callee.ptr(), + /*module_name=*/qualname.prefix().c_str())); + } + + return c10::nullopt; +} + +inline py::object invokeScriptFunctionFromPython( + Function& callee, + const tuple_slice& args, + const py::kwargs& kwargs) { + // TODO: we could add __torch_function__ dispatch here but I don't know + // the implications of doing so + + return runAndInsertCall( + callee, + args, + kwargs, + /*self=*/c10::nullopt, + [&](Graph& graph, const MatchedSchema& match) { + return graph.insertFunctionCall(&callee, match); + }); +} + +inline py::object invokeScriptMethodFromPython( + Method& callee, + const tuple_slice& args, + const py::kwargs& kwargs) { + auto self = callee.owner()._ivalue(); + + if (auto torch_fn_result = maybeTorchFunctionDispatch( + py::cast(callee), args, kwargs, callee.name())) { + return *torch_fn_result; + } + + return runAndInsertCall( + callee.function(), + args, + kwargs, + self, + [&](Graph& graph, const MatchedSchema& match) { + return graph.insertMethodCall(callee.name(), match); + }); +} + +TORCH_PYTHON_API std::pair, Stack> getOpWithStack( + const std::vector>& operations, + py::args args, + const py::kwargs& kwargs); + +TORCH_PYTHON_API py::object invokeOperatorFromPython( + const std::vector>& operations, + py::args args, + const py::kwargs& kwargs, + c10::optional dk = c10::nullopt); + +TORCH_PYTHON_API py::object _get_operation_for_overload_or_packet( + const std::vector>& operations, + Symbol symbol, + py::args args, + const py::kwargs& kwargs, + bool is_overload, + c10::optional dk = c10::nullopt); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h new file mode 100644 index 0000000000000000000000000000000000000000..232f5b6ea08129b9ec29c4940a85d682c301d2c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h @@ -0,0 +1,119 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace torch::jit::python { + +struct IODescriptor { + struct VariableMetadata { + VariableMetadata(const autograd::Variable& var) + : sizes(var.sizes().vec()), + type(var.scalar_type()), + device(var.device()), + requires_grad(var.requires_grad()) {} + + bool operator==(const VariableMetadata& o) const { + return std::tie(device, requires_grad, type, sizes) == + std::tie(o.device, o.requires_grad, o.type, o.sizes); + } + + static size_t hash(const VariableMetadata& m) { + return c10::get_hash(m.sizes, m.device, m.requires_grad, m.type); + } + + std::vector sizes; + at::ScalarType type; + at::Device device; + bool requires_grad; + }; + + bool operator==(const IODescriptor& o) const { + return std::tie(structure, metadata, grad_enabled) == + std::tie(o.structure, o.metadata, o.grad_enabled); + } + + static size_t hash(const IODescriptor& o) { + return c10::get_hash(o.structure, o.metadata, o.grad_enabled); + } + + void 
extend(const autograd::variable_list& list) { + metadata.reserve(metadata.size() + list.size()); + for (auto& var : list) + metadata.emplace_back(var); + } + + // Description of argument structure. Variables are replaced with + // different characters, depending on their flags, beginnings and + // ends of tuples and lists are denoted by a pair of parenthesis + // of their corresponding kind. They should always be paired. + // Example desc: (vv[v(v)v]) + // NOTE: if extend() was ever called then metadata.size() can be + // different than the number of 'v's in structure. + std::string structure; + std::vector strings; + std::vector metadata; + bool grad_enabled = false; +}; + +static inline std::ostream& operator<<( + std::ostream& out, + const IODescriptor::VariableMetadata& meta) { + at::Device meta_device = meta.device; + auto& t = at::getDeprecatedTypeProperties( + meta_device.is_cpu() ? at::Backend::CPU : at::Backend::CUDA, meta.type); + out << t << "(requires_grad=" << meta.requires_grad; + if (meta_device.is_cuda()) { + out << ", device=" << meta_device.index(); + } + out << ") {"; + for (const auto i : c10::irange(meta.sizes.size())) { + if (i > 0) + out << ", "; + out << meta.sizes[i]; + } + out << "}"; + return out; +} + +static inline std::ostream& operator<<( + std::ostream& out, + const IODescriptor& desc) { + out << desc.structure << "\n"; + out << " with grad_enabled=" << desc.grad_enabled << "\n"; + for (const auto i : c10::irange(desc.metadata.size())) { + out << " with v" << i << " having type " << desc.metadata[i] << "\n"; + } + return out; +} + +struct ParsedArgs { + // Flat vector of Variables found in arguments + autograd::variable_list vars; + // Metadata describing nesting of objects received from Python and + // metadata of vars and whether grad is enabled. 
+ IODescriptor desc; + + void extend(const autograd::variable_list& list) { + if (list.empty()) + return; + vars.reserve(vars.size() + list.size()); + for (auto& var : list) + vars.emplace_back(var); + desc.extend(list); + } +}; + +ParsedArgs flatten(py::handle obj); +PyObject* unflatten( + at::ArrayRef vars, + const IODescriptor& structure); + +} // namespace torch::jit::python diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h new file mode 100644 index 0000000000000000000000000000000000000000..d7cff488f27311915d6c31b36a366c6a34735b49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +void initPythonCustomClassBindings(PyObject* module); + +struct ScriptClass { + ScriptClass(c10::StrongTypePtr class_type) + : class_type_(std::move(class_type)) {} + + py::object __call__(py::args args, py::kwargs kwargs); + + c10::StrongTypePtr class_type_; +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h new file mode 100644 index 0000000000000000000000000000000000000000..81927f95fdd4e550e45936215324e11668b5df12 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h @@ -0,0 +1,126 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch::jit { + +void initScriptDictBindings(PyObject* module); + +/// An iterator over the keys of ScriptDict. This is used to support +/// .keys() and iteration. +class ScriptDictKeyIterator final { + public: + ScriptDictKeyIterator( + c10::impl::GenericDict::iterator iter, + c10::impl::GenericDict::iterator end) + : iter_(std::move(iter)), end_(std::move(end)) {} + IValue next(); + + private: + c10::impl::GenericDict::iterator iter_; + c10::impl::GenericDict::iterator end_; +}; + +/// An iterator over the key-value pairs of ScriptDict. This is used to support +/// .items(). +class ScriptDictIterator final { + public: + ScriptDictIterator( + c10::impl::GenericDict::iterator iter, + c10::impl::GenericDict::iterator end) + : iter_(std::move(iter)), end_(std::move(end)) {} + IValue next(); + + private: + c10::impl::GenericDict::iterator iter_; + c10::impl::GenericDict::iterator end_; +}; + +/// A wrapper around c10::Dict that can be exposed in Python via pybind +/// with an API identical to the Python dictionary class. This allows +/// dictionaries to have reference semantics across the Python/TorchScript +/// boundary. +class ScriptDict final { + public: + // Constructor. + ScriptDict(IValue data) : dict_(AnyType::get(), AnyType::get()) { + TORCH_INTERNAL_ASSERT(data.isGenericDict()); + dict_ = data.toGenericDict(); + } + + // Get the type of the dictionary. + DictTypePtr type() const { + return DictType::create(dict_.keyType(), dict_.valueType()); + } + + // Return a string representation that can be used + // to reconstruct the instance. + std::string repr() const { + std::ostringstream s; + s << '{'; + bool f = false; + for (auto const& kv : dict_) { + if (f) { + s << ", "; + } + s << kv.key() << ": " << kv.value(); + f = true; + } + s << '}'; + return s.str(); + } + + // Return an iterator over the keys of the dictionary. 
+ ScriptDictKeyIterator iter() const { + auto begin = dict_.begin(); + auto end = dict_.end(); + return ScriptDictKeyIterator(begin, end); + } + + // Return an iterator over the key-value pairs of the dictionary. + ScriptDictIterator items() const { + auto begin = dict_.begin(); + auto end = dict_.end(); + return ScriptDictIterator(begin, end); + } + + // Interpret the dictionary as a boolean; empty means false, non-empty means + // true. + bool toBool() const { + return !(dict_.empty()); + } + + // Get the value for the given key. Throws std::out_of_range if the key does + // not exist. + IValue getItem(const IValue& key) { + return dict_.at(key); + }; + + // Set the value for the given key. + void setItem(const IValue& key, const IValue& value) { + dict_.insert_or_assign(key, value); + }; + + // Check whether the dictionary contains the given key. + bool contains(const IValue& key) { + return dict_.contains(key); + } + + // Delete the given key from the dictionary. + bool delItem(const IValue& key) { + return dict_.erase(key); + } + + // Get the size of the dictionary. + int64_t len() const { + return dict_.size(); + } + + // A c10::Dict instance that holds the actual data. + c10::impl::GenericDict dict_; +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h new file mode 100644 index 0000000000000000000000000000000000000000..296fc3f0b1f2ec349e8aa5ba517a32d256d5c12d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +void initPythonIRBindings(PyObject* module); + +// execute a Python function, used for Ops we can't optimize but that we want to +// optimize around +struct ConcretePythonOp : public PythonOp { + static Symbol Kind; + + ConcretePythonOp(Graph* graph) : PythonOp(graph, ::c10::prim::PythonOp) {} + ConcretePythonOp* init( + THPObjectPtr&& pyobj, + const std::string& cconv, + pyobj_list&& scalar_args) { + this->pyobj = std::move(pyobj); + this->scalar_args = std::move(scalar_args); + this->cconv = cconv; + return this; + } + // The Python object which contains the implementation of this function. + // This is either a class (non-legacy) or an object (legacy). See + // TraceInterpreterState for execution semantics. + THPObjectPtr pyobj; + // The calling convention for the Python function. + // 'c' -- constant argument + // 'd' -- dynamic argument + std::string cconv; + // Scalar arguments to the Python function. Not necessarily passed to + // the function in this order; see cconv for the correct order. 
+ std::vector scalar_args; + + std::string name() const override; + void cloneFrom(Node* other_) override; + Node* allocNewInstance(Graph* g) override { + return new ConcretePythonOp(g); + } + // recover the autograd.Function instance, if this PythonOp's function + // was originally SomeFunction.apply + // used in ONNX for discovering symbolics + c10::optional autogradFunction() const override; + void writeScalars(std::ostream& out) const override; + void lint_python() const override; +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h new file mode 100644 index 0000000000000000000000000000000000000000..f33ceca30f2d00a32c290eb03727502bfb4b77dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h @@ -0,0 +1,97 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace py = pybind11; + +namespace c10::ivalue { + +// concrete ivalue Holder that hold a py::object +struct C10_EXPORT ConcretePyObjectHolder final : PyObjectHolder { + public: + static c10::intrusive_ptr create(py::object py_obj) { + return c10::make_intrusive(std::move(py_obj)); + } + + static c10::intrusive_ptr create(const py::handle& handle) { + py::gil_scoped_acquire ag; + return c10::make_intrusive( + handle.cast()); + } + + PyObject* getPyObject() override { + return py_obj_.ptr(); + } + + InferredType tryToInferType() override { + pybind11::gil_scoped_acquire ag; + return torch::jit::tryToInferType(py_obj_); + } + + IValue toIValue(const TypePtr& type, c10::optional N = c10::nullopt) + override { + pybind11::gil_scoped_acquire ag; + return torch::jit::toIValue(py_obj_, type, N); + } + + std::string toStr() override { + pybind11::gil_scoped_acquire ag; + return py::str(py_obj_); + } + + std::vector extractTensors() override { + // We could implement this entirely in C++ via pybind11 but it turns out to + // be substantially slower. Namely, the total time taken by markCompleted on + // a CUDAFuture is 21.5us with this implementation, but goes up to 58.7us + // when using C++. The reason is unclear. + try { + pybind11::gil_scoped_acquire ag; + static py::object& extractorFn = *new py::object( + py::module::import("torch._jit_internal").attr("_extract_tensors")); + return extractorFn(py_obj_).cast>(); + } catch (py::error_already_set& e) { + auto err = std::runtime_error( + c10::str("Cannot extract tensors from value: ", e.what())); + { + pybind11::gil_scoped_acquire ag; + e.restore(); + PyErr_Clear(); + } + throw err; + } + } + + // Note [Destructing py::object] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ + // + // (1) Why py_obj_ = py::none(); does not work. Because we also need to + // acquire GIL when destructing py::object of None that de-references None. + // https://docs.python.org/3/c-api/none.html#c.Py_RETURN_NONE + // + // https://stackoverflow.com/questions/15287590/why-should-py-increfpy-none-be-required-before-returning-py-none-in-c + // + // (2) Why we need to call dec_ref() explicitly. Because py::object of + // nullptr, on destruction, effectively does nothing because of it calls + // Py_XDECREF(NULL) underlying. + // https://docs.python.org/3/c-api/refcounting.html#c.Py_XDECREF + ~ConcretePyObjectHolder() override { + pybind11::gil_scoped_acquire ag; + py_obj_.dec_ref(); + // explicitly setting PyObject* to nullptr to prevent py::object's dtor to + // decref on the PyObject again. 
+ py_obj_.ptr() = nullptr; + } + + // explicit construction to avoid errornous implicit conversion and + // copy-initialization + explicit ConcretePyObjectHolder(py::object py_obj) + : py_obj_(std::move(py_obj)) {} + + private: + py::object py_obj_; +}; + +} // namespace c10::ivalue diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_list.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_list.h new file mode 100644 index 0000000000000000000000000000000000000000..d70e653043c933c80b8e15df21fc9e4afbf8b57e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_list.h @@ -0,0 +1,228 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +void initScriptListBindings(PyObject* module); + +/// An iterator over the elements of ScriptList. This is used to support +/// __iter__(), . +class ScriptListIterator final { + public: + ScriptListIterator( + c10::impl::GenericList::iterator iter, + c10::impl::GenericList::iterator end) + : iter_(iter), end_(end) {} + IValue next(); + bool done() const; + + private: + c10::impl::GenericList::iterator iter_; + c10::impl::GenericList::iterator end_; +}; + +/// A wrapper around c10::List that can be exposed in Python via pybind +/// with an API identical to the Python list class. This allows +/// lists to have reference semantics across the Python/TorchScript +/// boundary. +class ScriptList final { + public: + // TODO: Do these make sense? + using size_type = size_t; + using diff_type = ptrdiff_t; + using ssize_t = Py_ssize_t; + + // Constructor for empty lists created during slicing, extending, etc. + ScriptList(const TypePtr& type) : list_(AnyType::get()) { + auto list_type = type->expect(); + list_ = c10::impl::GenericList(list_type); + } + + // Constructor for instances based on existing lists (e.g. a + // Python instance or a list nested inside another). + ScriptList(IValue data) : list_(AnyType::get()) { + TORCH_INTERNAL_ASSERT(data.isList()); + list_ = data.toList(); + } + + ListTypePtr type() const { + return ListType::create(list_.elementType()); + } + + // Return a string representation that can be used + // to reconstruct the instance. + std::string repr() const { + std::ostringstream s; + s << '['; + bool f = false; + for (auto const& elem : list_) { + if (f) { + s << ", "; + } + s << IValue(elem); + f = true; + } + s << ']'; + return s.str(); + } + + // Return an iterator over the elements of the list. + ScriptListIterator iter() const { + auto begin = list_.begin(); + auto end = list_.end(); + return ScriptListIterator(begin, end); + } + + // Interpret the list as a boolean; empty means false, non-empty means + // true. + bool toBool() const { + return !(list_.empty()); + } + + // Get the value for the given index. + IValue getItem(diff_type idx) { + idx = wrap_index(idx); + return list_.get(idx); + }; + + // Set the value corresponding to the given index. + void setItem(diff_type idx, const IValue& value) { + idx = wrap_index(idx); + return list_.set(idx, value); + } + + // Check whether the list contains the given value. + bool contains(const IValue& value) { + for (const auto& elem : list_) { + if (elem == value) { + return true; + } + } + + return false; + } + + // Delete the item at the given index from the list. 
+ void delItem(diff_type idx) { + idx = wrap_index(idx); + auto iter = list_.begin() + idx; + list_.erase(iter); + } + + // Get the size of the list. + ssize_t len() const { + return list_.size(); + } + + // Count the number of times a value appears in the list. + ssize_t count(const IValue& value) const { + ssize_t total = 0; + + for (const auto& elem : list_) { + if (elem == value) { + ++total; + } + } + + return total; + } + + // Remove the first occurrence of a value from the list. + void remove(const IValue& value) { + auto list = list_; + + int64_t idx = -1, i = 0; + + for (const auto& elem : list) { + if (elem == value) { + idx = i; + break; + } + + ++i; + } + + if (idx == -1) { + throw py::value_error(); + } + + list.erase(list.begin() + idx); + } + + // Append a value to the end of the list. + void append(const IValue& value) { + list_.emplace_back(value); + } + + // Clear the contents of the list. + void clear() { + list_.clear(); + } + + // Append the contents of an iterable to the list. + void extend(const IValue& iterable) { + list_.append(iterable.toList()); + } + + // Remove and return the element at the specified index from the list. If no + // index is passed, the last element is removed and returned. + IValue pop(c10::optional idx = c10::nullopt) { + IValue ret; + + if (idx) { + idx = wrap_index(*idx); + ret = list_.get(*idx); + list_.erase(list_.begin() + *idx); + } else { + ret = list_.get(list_.size() - 1); + list_.pop_back(); + } + + return ret; + } + + // Insert a value before the given index. + void insert(const IValue& value, diff_type idx) { + // wrap_index cannot be used; idx == len() is allowed + if (idx < 0) { + idx += len(); + } + + if (idx < 0 || idx > len()) { + throw std::out_of_range("list index out of range"); + } + + list_.insert(list_.begin() + idx, value); + } + + // A c10::List instance that holds the actual data. + c10::impl::GenericList list_; + + private: + // Wrap an index so that it can safely be used to access + // the list. 
For list of size sz, this function can successfully + // wrap indices in the range [-sz, sz-1] + diff_type wrap_index(diff_type idx) { + auto sz = len(); + if (idx < 0) { + idx += sz; + } + + if (idx < 0 || idx >= sz) { + throw std::out_of_range("list index out of range"); + } + + return idx; + } +}; + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h new file mode 100644 index 0000000000000000000000000000000000000000..c8d51349a06e65b928102e2f0636f959cf2ca808 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h @@ -0,0 +1,45 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace torch::jit { + +struct Module; + +namespace tracer { +void initPythonTracerBindings(PyObject* module); + +SourceRange getPythonInterpreterSourceRange(); + +Node* preRecordPythonTrace( + THPObjectPtr pyobj, + const std::string& arg_types, + at::ArrayRef inputs, + std::vector scalar_args); + +std::pair, Stack> createGraphByTracingWithDict( + const py::function& func, + const py::dict& inputs_dict, + Stack inputs, + const py::function& var_name_lookup_fn, + bool strict, + bool force_outplace, + Module* self = nullptr, + const std::vector& argument_names = {}); + +std::pair, Stack> createGraphByTracing( + const py::function& func, + Stack inputs, + const py::function& var_name_lookup_fn, + bool strict, + bool force_outplace, + Module* self = nullptr, + const std::vector& argument_names = {}); +} // namespace tracer +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h new file mode 100644 index 0000000000000000000000000000000000000000..796bf125defd824520dda38496aa09d71480252a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch::jit { + +void initTreeViewBindings(PyObject* module); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h new file mode 100644 index 0000000000000000000000000000000000000000..65c8ad3be6850e6629c52238b9d64a20062c5c0a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h @@ -0,0 +1,7 @@ +#pragma once + +#include + +namespace torch::jit { +void initJitScriptBindings(PyObject* module); +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..81cfd658f6ede87517a2edad395984293885ab76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h @@ -0,0 +1,6 @@ +#pragma once +#include +namespace torch::jit { +TORCH_API void setGraphExecutorOptimize(bool o); +TORCH_API bool getGraphExecutorOptimize(); +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h new file mode 100644 index 0000000000000000000000000000000000000000..3b50bce86ff5b9cb5984f16b3e2c248cded67569 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h @@ -0,0 +1,6 @@ +#pragma once +#include +namespace torch::jit { +TORCH_API void setUTF8DecodingIgnore(bool o); +TORCH_API bool getUTF8DecodingIgnore(); +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..d45863399f5c3fbf34a518f4530d0f15af4ac1c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h @@ -0,0 +1,406 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { +class HasRand : public IRVisitor { + public: + HasRand(StmtPtr stmt) : stmt_(std::move(stmt)) { + stmt_->accept(this); + } + + bool has_rand() const { + return has_rand_; + } + + private: + void visit(IntrinsicsPtr v) override { + if (v->op_type() == IntrinsicsOp::kRand) { + has_rand_ = true; + } else { + IRVisitor::visit(std::move(v)); + } + } + StmtPtr stmt_; + bool has_rand_ = false; +}; + +template +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +class NodeFinder : public IRVisitor { + public: + void visit(NodePtr v) override { + nodes.push_back((NodePtr)v); + IRVisitor::visit(v); + } + + static std::vector> find(StmtPtr s) { + NodeFinder nf; + s->accept(&nf); + return nf.nodes; + } + + static std::vector> find(ExprPtr e) { + NodeFinder nf; + e->accept(&nf); + return nf.nodes; + } + + std::vector> nodes; +}; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +class VarFinder : public IRVisitor { + public: + void visit(VarPtr v) override { + vars_.insert(v); + IRVisitor::visit(std::move(v)); + } + + static std::unordered_set find(StmtPtr s) { + VarFinder nf; + s->accept(&nf); + return nf.vars(); + } + + static std::unordered_set find(ExprPtr e) { + VarFinder nf; + e->accept(&nf); + return nf.vars(); + } + + const std::unordered_set& vars() { + return vars_; + } + + private: + std::unordered_set vars_; +}; + +class BufFinder : public IRVisitor { + public: + void visit(BufPtr v) override { + bufs_.insert(v); + IRVisitor::visit(std::move(v)); + } + + static std::unordered_set find(StmtPtr s) { + BufFinder nf; + s->accept(&nf); + return nf.bufs(); + } + + static std::unordered_set find(ExprPtr e) { + BufFinder nf; + e->accept(&nf); + return nf.bufs(); + } + + const std::unordered_set& bufs() { + return bufs_; + } + + private: + std::unordered_set bufs_; +}; + +// Finds all kinds of write operations to the provided Buf. 
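+// Illustrative usage (a sketch; `stmt` and `buf` are hypothetical placeholders
+// for the StmtPtr being analyzed and the BufPtr of interest):
+//   auto writes = WritesToBuf::find(stmt, buf);  // Store/AtomicAdd nodes into buf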
+class WritesToBuf : public IRVisitor { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + WritesToBuf(BufPtr target) : target_(std::move(target)) {} + + std::vector writes() { + return writes_; + } + + static std::vector find(StmtPtr s, BufPtr b) { + WritesToBuf finder(std::move(b)); + s->accept(&finder); + return finder.writes(); + } + + private: + void visit(StorePtr v) override { + if (v->buf() == target_) { + writes_.push_back(v); + } + } + + void visit(AtomicAddPtr v) override { + if (v->buf() == target_) { + writes_.push_back(v); + } + } + + BufPtr target_; + std::vector writes_; +}; + +class StmtsReadingBuf : public IRVisitor { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + StmtsReadingBuf(BufPtr target) : target_(std::move(target)) {} + + std::vector reads() { + return reads_; + } + + static std::vector find(StmtPtr s, BufPtr b) { + StmtsReadingBuf finder(std::move(b)); + s->accept(&finder); + return finder.reads(); + } + + private: + bool readsBuffer(StmtPtr s) { + auto loads = NodeFinder::find(std::move(s)); + for (const auto& l : loads) { + if (l->buf() == target_) { + return true; + } + } + return false; + } + + void visit(StorePtr v) override { + if (readsBuffer(v)) { + reads_.push_back(v); + } + } + + void visit(LetPtr v) override { + if (readsBuffer(v)) { + reads_.push_back(v); + } + } + + void visit(CondPtr v) override { + if (readsBuffer(v)) { + reads_.push_back(v); + } + } + + void visit(AtomicAddPtr v) override { + if (readsBuffer(v)) { + reads_.push_back(v); + } + } + + BufPtr target_; + std::vector reads_; +}; + +class ExternalAllocBufFinder : public IRVisitor { + public: + void visit(ExternalCallWithAllocPtr v) override { + const auto& bufs_out = v->buf_out_args(); + bufs_.insert(bufs_out.begin(), bufs_out.end()); + IRVisitor::visit(std::move(v)); + } + + static std::unordered_set find(StmtPtr s) { + ExternalAllocBufFinder f; + s->accept(&f); + return f.bufs(); + } + + static std::unordered_set find(ExprPtr e) { + ExternalAllocBufFinder f; + e->accept(&f); + return f.bufs(); + } + + const std::unordered_set& bufs() { + return bufs_; + } + + private: + std::unordered_set bufs_; +}; + +// Traverses the IR to determine if a particular Var is modified within it. +class ModifiesVarChecker : public IRVisitor { + public: + ModifiesVarChecker(VarPtr v) : var_(std::move(v)) {} + + static bool check(StmtPtr s, VarPtr v) { + ModifiesVarChecker checker(std::move(v)); + s->accept(&checker); + return checker.found(); + } + + bool found() { + return found_; + } + + private: + void visit(StorePtr v) override { + if (v->buf()->base_handle() == var_) { + found_ = true; + return; + } + IRVisitor::visit(std::move(v)); + } + + void visit(AtomicAddPtr v) override { + if (v->buf()->base_handle() == var_) { + found_ = true; + return; + } + IRVisitor::visit(std::move(v)); + } + + void visit(LetPtr v) override { + if (v->var() == var_) { + found_ = true; + return; + } + IRVisitor::visit(std::move(v)); + } + + void visit(ForPtr v) override { + if (v->var() == var_) { + found_ = true; + return; + } + IRVisitor::visit(std::move(v)); + } + + VarPtr var_; + bool found_{false}; +}; + +// Traverse the Block stmt to identify the live range of the specified buf. The +// live range, indicated by a pair of integers, specifies the first and last +// stmt in block stmts that access to the buf. 
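+// Illustrative usage (a sketch; `block` and `buf` are hypothetical
+// placeholders): liveRange() returns the positions of the first and last
+// top-level statements in `block` that touch `buf`, or (0, 0) when the
+// given statement is not a Block:
+//   auto range = BufLiveRange::liveRange(block, buf);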
+class BufLiveRange : public IRVisitor { + public: + BufLiveRange(BufPtr b) : buf_(std::move(b)) {} + + static std::tuple liveRange(StmtPtr s, BufPtr b) { + BlockPtr block = to(std::move(s)); + // We Only analyze buffer live ranges for block stmts. + if (!block) { + return std::make_tuple(0, 0); + } + + BufLiveRange analyzer(std::move(b)); + block->accept(&analyzer); + return analyzer.getLiveRange(); + } + + private: + std::tuple getLiveRange() { + return std::make_tuple(begin_, end_); + } + + bool hasBufReads(StmtPtr s) { + auto loads1 = NodeFinder::find(s); + for (const auto& l : loads1) { + if (l->buf() == buf_) { + return true; + } + } + auto loads2 = NodeFinder::find(s); + for (const auto& l : loads2) { + for (const auto& lb : l->buf_args()) { + if (lb == buf_) { + return true; + } + } + } + auto loads3 = NodeFinder::find(std::move(s)); + for (const auto& l : loads3) { + for (const auto& lb : l->buf_args()) { + if (lb == buf_) { + return true; + } + } + } + return false; + } + + bool hasBufWrites(StmtPtr s) { + auto writes1 = NodeFinder::find(s); + for (const auto& w : writes1) { + if (w->buf() == buf_) { + return true; + } + } + auto writes2 = NodeFinder::find(s); + for (const auto& w : writes2) { + if (w->buf() == buf_) { + return true; + } + } + auto writes3 = NodeFinder::find(std::move(s)); + for (const auto& w : writes3) { + for (const auto& wb : w->buf_out_args()) { + if (wb == buf_) { + return true; + } + } + } + return false; + } + + void findAccAndUpdateLiveRange(StmtPtr s) { + bool has_reads = hasBufReads(s), has_writes = hasBufWrites(std::move(s)); + if (has_reads || has_writes) { + if (begin_ == -1) { + begin_ = curr_index_; + }; + end_ = curr_index_; + } + } + + void visit(BlockPtr v) override { + for (const StmtPtr& s : *v) { + curr_index_ += 1; + findAccAndUpdateLiveRange(s); + } + } + + BufPtr buf_; + int32_t begin_ = -1; + int32_t end_ = -1; + int32_t curr_index_ = -1; +}; + +// A class that analyzes the given program relevant for Block backend +// It creates a map of multi dim buffers and their flat versions +class CreateBufferMap : public IRVisitor { + public: + const std::unordered_map& getBufferMap() const { + return map_input_to_tensor_bufs_; + } + + private: + void visit(StorePtr v) override { + auto load_node = to(v->value()); + if (load_node) { + auto t_buf = load_node->buf(); + map_input_to_tensor_bufs_.emplace(t_buf->name_hint(), v->buf()); + } else { + auto add_node = to(v->value()); + auto mul_node = to(v->value()); + // This means for now, v->value() can be Add or Mul + TORCH_INTERNAL_ASSERT(add_node || mul_node, buildErrorMessage()); + map_input_to_tensor_bufs_.emplace(v->buf()->name_hint(), v->buf()); + } + v->value()->accept(this); + } + std::unordered_map map_input_to_tensor_bufs_; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h new file mode 100644 index 0000000000000000000000000000000000000000..fca5fdc9fd0e2eeff5613bbfeba1b96f270b29a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h @@ -0,0 +1,150 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +// A class that analyzes the given program relevant for Block backend. 
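+// It is a plain IRVisitor, so a typical use (sketch; `stmt` and
+// `block_analysis` are hypothetical placeholders) is to accept it on the
+// statement being compiled and then query the recorded sets:
+//   stmt->accept(&block_analysis);
+//   const auto& loaded = block_analysis.loads();
+//   const auto& stored = block_analysis.stores();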
+class BlockAnalysis : public IRVisitor { + public: + bool is_buf_store_target(BufPtr buf) const { + return store_targets_.count(buf) > 0; + } + + const std::unordered_set& loads() const { + return loads_; + } + + const std::unordered_set& stores() const { + return store_targets_; + } + + int block_size() const { + return block_size_; + } + + bool areBufsInMap(const std::unordered_set& bufs) const; + + BufPtr getMultiDimBuf(BufPtr buf) const; + + std::string getInputName(BufPtr buf) const; + + std::string getFlatInputName(BufPtr buf) const { + return getInputName(std::move(buf)) + "_flat"; + } + + std::unordered_map getBufferMap() const { + return map_input_to_tensor_bufs_; + } + + private: + void visit(StorePtr v) override; + void visit(LoadPtr v) override; + void visit(ForPtr v) override; + + std::unordered_map map_input_to_tensor_bufs_; + std::unordered_set store_targets_; + std::unordered_set loads_; + int block_size_ = 32; +}; + +// A class that overrides the underlying IRPrinter to produce Block. +class BlockPrinter : public IRPrinter { + public: + BlockPrinter(std::ostream* os, BlockAnalysis* block_analysis) + : IRPrinter(*os), block_analysis_(block_analysis) {} + + using IRPrinter::name_manager; + using IRPrinter::visit; + + private: + BlockAnalysis* block_analysis_; + std::unordered_map dim_values_map; + std::vector dim_names = {"N", "H", "W", "C"}; + std::vector flat_dim_names = {"N", "NH", "NHW", "NHWC"}; + void PrintTensorInfo(const std::unordered_set& bufs); + void PrintArguments(const std::unordered_set& bufs); + void PrintBufferInfo(const std::unordered_set& bufs); + void PrintDistribution(const std::unordered_set& bufs); + void PrintLoop(const std::unordered_set& bufs, bool block_idx = true); + void PrintReshapeInfo( + const std::unordered_set& bufs, + bool reverse = false); + void PrintDMAs(const std::unordered_set& bufs); + void PrintAdjustBuffers(const std::unordered_set& bufs); + + void visit(ForPtr v) override; + void visit(LoadPtr v) override; + void visit(StorePtr v) override; + void visit(BlockPtr v) override; + void visit(AddPtr v) override; + void visit(MulPtr v) override; +}; + +class TORCH_API BlockCodeGen : public CodeGen { + public: + template + /* implicit */ + BlockCodeGen(StmtPtr stmt, Ts... 
ts) + : CodeGen( + stmt, + std::vector({BufferArg(ts)...}), + at::Device(at::kCPU)) { + Initialize(); + } + + BlockCodeGen( + StmtPtr stmt, + const std::vector& buffer_args, + at::Device device = at::Device(at::kCPU), + const std::string& kernel_func_name = "func") + : CodeGen(stmt, buffer_args, device, kernel_func_name) { + Initialize(); + } + + ~BlockCodeGen() override; + + void call(const std::vector& args) override; + void call_raw(const std::vector& args) override; + + void Initialize(); + + std::string getCodeText(const std::string& attr = "") override { + return oss_.str(); + } + + private: + UniqueNameManager* name_manager() { + if (!printer_) { + throw std::runtime_error("Null IRPrinter is not expected"); + } + return printer_->name_manager(); + } + + std::ostream& os() { + return printer_->os(); + } + + std::ostringstream oss_; + std::unique_ptr printer_; + std::unique_ptr block_analysis_; + + std::string GetUniqueFuncName(const std::string& func_prefix); +}; +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h new file mode 100644 index 0000000000000000000000000000000000000000..fadf064bce4556f4c3772a996e642063afaebdc2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h @@ -0,0 +1,80 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class Expr; +class Buf; +class Stmt; + +enum C10_API_ENUM TensorAccessKind { kLoad, kStore, kMutate }; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct TORCH_API TensorAccessBoundsInfo { + TensorAccessKind kind; + std::vector start; + std::vector stop; +}; + +using BoundsInfo = + std::unordered_map>; + +TORCH_API BoundsInfo inferBounds(StmtPtr s, bool distinctAccessKinds = true); + +// Bounds inference caching the analysis. The MemDependencyChecker must already +// have been run. +TORCH_API BoundsInfo getInferredBounds( + analysis::MemDependencyChecker& analyzer, + StmtPtr s, + bool distinctAccessKinds = true); +TORCH_API BoundsInfo getInferredBounds( + analysis::MemDependencyChecker& analyzer, + ExprPtr e, + bool distinctAccessKinds = true); + +TORCH_API void printBoundsInfo(const BoundsInfo& v); + +TORCH_API std::vector getBoundExtents( + const std::vector& infos); + +// The kind of dependency found, in increasing order of exclusivity. +enum class HazardKind { + ReadAfterWrite, + WriteAfterRead, + WriteAfterWrite, + NoDependency, +}; +TORCH_API HazardKind getPotentialHazards( + analysis::MemDependencyChecker& analyzer, + StmtPtr A, + StmtPtr B); + +// Returns true if there is a conflicting overlap between accesses in +// statements A and B. A conflicting overlap is an overlap in buffer accesses +// where at least one of the accesses is a Store. +TORCH_API bool hasConflictingOverlap( + analysis::MemDependencyChecker& analyzer, + StmtPtr A, + StmtPtr B); +// Same as above, between accesses in stores S1 and S2. +TORCH_API bool isOverlapping( + analysis::MemDependencyChecker& analyzer, + StorePtr S1, + StorePtr S2); +// Same as above, between accesses in store S and load L. 
+TORCH_API bool isOverlapping( + analysis::MemDependencyChecker& analyzer, + StorePtr S, + LoadPtr L); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_overlap.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_overlap.h new file mode 100644 index 0000000000000000000000000000000000000000..a602e2938f293bdb6931b7278e72fc65b9142401 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_overlap.h @@ -0,0 +1,128 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { +namespace analysis { + +// A simple class containing the start and end of a range in a single dimension. +struct TORCH_API Bound { + ExprPtr start{nullptr}; + ExprPtr end{nullptr}; + + // This stores whether or not the start and end of this Bound have previously + // been swapped. This occurs when the bound is in a loop with a negative + // stride. + bool swapped{false}; + + Bound() = default; + Bound(ExprPtr s, ExprPtr e) : start(std::move(s)), end(std::move(e)) {} + + void print() const; + bool equals(const Bound& other) const; + + // The comparison operators are conservative. If the compare operator returns + // true, it means that all the elements satisfy the logical expression. But + // the false does not mean the opposite comparison is satisfied. It could be + // but not always. + bool operator==(const Bound& other) const; + bool operator!=(const Bound& other) const; + bool operator<(const Bound& other) const; + bool operator<=(const Bound& other) const; + bool operator>(const Bound& other) const; + bool operator>=(const Bound& other) const; + + void swap() { + std::swap(start, end); + swapped = !swapped; + } +}; + +struct BoundHash { + size_t operator()(const Bound& b) const { + return std::hash()(b.start) ^ std::hash()(b.end); + } +}; + +// The type of overlap found. Each condition is true only if none of the +// previous conditions hold. +// ContainedOrEqual: All elements in the Bound A are in the Bound B (this +// includes the case where the bounds are equal). +// Contains: All elements in the Bound B are in the Bound B. +// PartialOverlap: Any elements in the Bound B are in the Bound A. +// NoOverlap: No elements in the Bound A are in the bound B. +enum class OverlapKind { + ContainedOrEqual, + Contains, + PartialOverlap, + NoOverlap +}; + +// The Bound comparison result. +// True: Every Bound element always satisfies the given comparison operator +// False: Every Bound element always does NOT satisfy the given comparison +// operator +// NotDetermined: Some elements satisfy the given comparison operator and +// some elements not +enum class CmpEvalResult { True, False, NotDetermined }; + +// Returns the kind of overlap between Bound A and Bound A in a single +// dimension. +OverlapKind TORCH_API boundOverlap(Bound A, Bound B); + +// The comparison is conservative and the compare result is deterministic. +// It means that every element of the Bound to be compared needs to satisfy +// the given comparison operator. +CmpEvalResult TORCH_API compareBound( + const Bound& a, + const Bound& b, + const CompareSelectOperation& cmp_op); + +// A multi dimensional bound representing the bound of a set of indices. +using IndexBounds = std::vector; + +// Returns true if two IndexBounds are equivalent. 
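+// (i.e. the two bounds are expected to have the same number of dimensions and,
+// per dimension, start/end expressions that compare equal.)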
+bool TORCH_API indexBoundsEquals(const IndexBounds& A, const IndexBounds& B); + +// Flattens a multi dimensional bound to a single dimension. The IndexBounds "a" +// *must* encapsulate the entire range of the buffer. +Bound TORCH_API flattenBounds(const IndexBounds& a); + +// Determines the kind of overlap in X dimensions. +OverlapKind TORCH_API overlaps(const IndexBounds& a, const IndexBounds& b); + +// Returns the Bound slices created by subtracing bound B from bound A. +// Multiple Bounds can be returned in the case where B slices A into two +// distinct regions with no overlap. +// +// For example: +// subtractBound((0, 10), (2, 4)) => [(0, 1), (5, 10)] +// bound A: (0, 10) +// bound B: (2, 4) +// If we remove slice (2, 4) from the slice (0, 10), we will be left +// with 2 slices, one at the start (0, 1), and one at the end (5, 10). +// So, the result of this subtraction is [(0, 1), (5, 10)]. +// +// Note: this doesn't use IndexBounds because the Bounds returned do not +// represent multiple different dimensions. +std::vector TORCH_API subtractBound(Bound a, Bound b); + +// Returns the bound slices created by subtracting the IndexBounds B from A. +std::vector TORCH_API subtractIndicesBounds( + const IndexBounds& A, + const IndexBounds& B, + OverlapKind overlap); +std::vector TORCH_API +subtractIndicesBounds(const IndexBounds& A, const IndexBounds& B); + +} // namespace analysis +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/codegen.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/codegen.h new file mode 100644 index 0000000000000000000000000000000000000000..fdcf3425e3abcba05d88d5637f53e14d7346a0d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/codegen.h @@ -0,0 +1,283 @@ +#pragma once + +#include +#include +#include + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +template +class PaddedBuffer; + +class TORCH_API CodeGen { + public: + class BufferArg; + class CallArg; + + template + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + CodeGen(StmtPtr stmt, Ts... ts) + : stmt_(std::move(stmt)), buffer_args_({BufferArg(ts)...}) {} + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + CodeGen( + StmtPtr stmt, + std::vector buffer_args, + at::Device device = at::kCPU, + std::string kernel_func_name = "func"); + + virtual ~CodeGen() = default; + + StmtPtr stmt() const { + return stmt_; + } + + void set_stmt(StmtPtr s) { + stmt_ = s; + } + + void apply_mutator(IRMutator* mutator) { + stmt_ = stmt_->accept_mutator(mutator); + } + + void apply_visitor(IRVisitor* visitor) { + stmt_->accept(visitor); + } + + std::vector& buffer_args() { + return buffer_args_; + } + + const std::vector& buffer_args() const { + return buffer_args_; + } + + at::Device device() { + return device_; + } + + // This function returns the generated code as + // a string. + virtual std::string getCodeText(const std::string& attr = "") { + return (""); + } + + // TODO: Figure out how to unify these call interfaces. + + /// Call a function with a vector of CallArgs, which are tagged + /// unions that properly type the arguments. + virtual void call(const std::vector& args) = 0; + + /// Call a function faster than a regular `call` by assuming that + /// the generated kernel already knows the type of the arguments, so + /// they can be type-punned with `void*`s. 
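+  /// A sketch of the difference (names here are hypothetical: `cg` is a
+  /// CodeGen*, `A_ptr` a buffer pointer, `n` an int scalar):
+  ///   cg->call({CallArg(A_ptr), CallArg(n)});   // tagged, typed arguments
+  ///   cg->call_raw({A_ptr, &n});                // pre-punned void* arguments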
+ virtual void call_raw(const std::vector& args) = 0; + + /// Call a function even faster than a regular call, by assuming + /// that the number of thread blocks can be derived from `numel` via + /// a simple division, rather than evaluating an expression. + virtual void call_with_numel(void** args, int64_t numel); + + virtual at::Tensor empty_strided( + c10::IntArrayRef size, + c10::IntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt) { + return at::empty_strided( + size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt); + } + + const std::string& kernel_func_name() const { + return kernel_func_name_; + } + + void allocIntermediateBufs(); + + protected: + static void* argToPtr(const BufferArg& bufferArg, const CallArg& callArg); + + private: + StmtPtr stmt_; + std::vector buffer_args_; + at::Device device_ = at::kCPU; + std::string kernel_func_name_ = "func"; +}; + +class TORCH_API ExtCallMemoryReuse : public IRMutator { + static std::unordered_map makeExtCallFuncNameMap(); + static const std::unordered_map extCallFuncNameMap_; + + public: + explicit ExtCallMemoryReuse( + const std::vector& bufferArgs); + ~ExtCallMemoryReuse() override = default; + StmtPtr mutate(ExternalCallPtr v) override; + + private: + std::unordered_set bufferArgs_; +}; + +class CodeGen::BufferArg { + public: + BufferArg(const Tensor& tensor) : buf_(tensor.buf()) {} + BufferArg(const VarHandle& var) : var_(var.node()), isVar_(true) {} + BufferArg(const BufHandle& buf) : buf_(buf.node()) {} + BufferArg(BufPtr buf) : buf_(std::move(buf)) {} + + VarPtr var() const { + return isVar_ ? var_ : buf_->base_handle(); + } + + BufPtr buf() const { + return buf_; + } + + bool isVar() const { + return isVar_; + } + + Dtype dtype() const { + return isVar_ ? 
var_->dtype() : buf_->dtype(); + } + + private: + VarPtr var_ = nullptr; + BufPtr buf_ = nullptr; + bool isVar_ = false; +}; + +class CodeGen::CallArg { + public: + template + CallArg(const PaddedBuffer& buffer); + + template + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,cppcoreguidelines-pro-type-const-cast) + CallArg(const std::vector& buffer) + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + : data_(const_cast(buffer.data())) {} + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + CallArg(void* ptr) : data_(ptr) {} + +#define ARG_TYPE_CTOR(Type, Name) \ + CallArg(Type v) { \ + memcpy(buffer_, &v, sizeof(Type)); \ + data_ = (void*)buffer_; \ + } + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_TYPE_CTOR); +#undef ARG_TYPE_CTOR + + void* data() const { + return data_; + } + + CallArg(const CallArg& rhs) { + if (rhs.data_ == rhs.buffer_) { + memcpy(this->buffer_, rhs.buffer_, sizeof(rhs.buffer_)); + this->data_ = (void*)(this->buffer_); + } else { + this->data_ = rhs.data_; + } + } + + CallArg& operator=(const CallArg& rhs) { + if (rhs.data_ == rhs.buffer_) { + memcpy(this->buffer_, rhs.buffer_, sizeof(rhs.buffer_)); + this->data_ = (void*)(this->buffer_); + } else { + this->data_ = rhs.data_; + } + return *this; + } + +#define ARG_PTR_DEFINE(Type, Name) \ + Type* Name##Ptr() const { \ + TORCH_INTERNAL_ASSERT(data_ == (void*)buffer_); \ + return (Type*)data_; \ + } + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_PTR_DEFINE); +#undef ARG_PTR_DEFINE + + private: + void* data_; + // Regarding a scalar value, CallArg uses void**=&data_ to store it. But the + // bit width of a pointer is 32bit on a 32bit platform. It cannot store the + // scalar if the bit width of the scalar is larger than 32bit, such as double + // and long. Hence, we add 8 bytes buffer dedicated to storing the scalar + // value regardless its bit width is less or greater than 32bits. 
+ char buffer_[8] = {0}; // 64bits +}; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +class RegisterCodeGenList { + public: + TORCH_API static RegisterCodeGenList& GetInstance() { + static RegisterCodeGenList codegen_list; + return codegen_list; + } + + using StmtFactoryMethod = std::function( + StmtPtr stmt, + const std::vector&, + at::Device device, + const std::string& kernel_func_name)>; + + TORCH_API StmtFactoryMethod FindStmtFactoryMethod(const std::string& name); + RegisterCodeGenList(const RegisterCodeGenList&) = delete; + RegisterCodeGenList& operator=(const RegisterCodeGenList&) = delete; + + private: + template + friend class RegisterCodeGen; + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + RegisterCodeGenList() = default; + TORCH_API void AddStmtFactoryMethod( + const std::string& name, + const StmtFactoryMethod& stmt_factory_method); + + std::unordered_map stmt_factory_methods_; +}; + +template +class RegisterCodeGen { + public: + explicit RegisterCodeGen(const std::string& name) { + RegisterCodeGenList& codegen_list = RegisterCodeGenList::GetInstance(); + codegen_list.AddStmtFactoryMethod( + name, + [](StmtPtr stmt, + const std::vector& params, + at::Device device, + const std::string& kernel_func_name) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::unique_ptr method( + new CodeGenType(stmt, params, device, kernel_func_name)); + return method; + }); + } +}; + +TORCH_API std::unique_ptr CreateCodeGen( + const std::string& name, + StmtPtr stmt, + const std::vector& params, + at::Device device = at::kCPU, + const std::string& kernel_func_name = "func"); + +class TORCH_API GenericIntrinsicsExpander : public IRMutator { + protected: + ExprPtr mutate(IntrinsicsPtr v) override; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h new file mode 100644 index 0000000000000000000000000000000000000000..a6d583ed4efb7c5d7cf74114ce3847cca8c06580 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h @@ -0,0 +1,102 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class CppVarNameRewriter; + +// Generates C++ code from the IR. +// +// Vector operations are unrolled. +// For example: +// C[Ramp(0, 1, 3)] = A[Ramp(0, 2, 3)] + B[Ramp(0, 3, 3)]; +// is unrolled into: +// C[0] = A[0] + B[0]; +// C[1] = A[2] + B[3]; +// C[2] = A[4] + B[6]; +class TORCH_API CppPrinter : public IRPrinter { + public: + explicit CppPrinter(std::ostream* os); + ~CppPrinter() override; + + void printPrologue(); + + using IRPrinter::visit; + + // Binary expressions. + void visit(ModPtr) override; + void visit(MaxPtr) override; + void visit(MinPtr) override; + + // Conditional expressions. + void visit(CompareSelectPtr) override; + void visit(IfThenElsePtr) override; + + // Tensor operations. + void visit(AllocatePtr) override; + void visit(FreePtr) override; + void visit(LoadPtr) override; + void visit(StorePtr) override; + + // Casts. + void visit(CastPtr) override; + void visit(BitCastPtr) override; + + // Calls. + void visit(IntrinsicsPtr) override; + void visit(ExternalCallPtr) override; + + // Vars. + void visit(LetPtr) override; + void visit(VarPtr) override; + + // Vector data types. 
+ void visit(RampPtr) override; + void visit(BroadcastPtr) override; + + private: + int lane_; + std::unordered_map vector_vars_; +}; + +class TORCH_API CppCodeGen : public CodeGen { + public: + CppCodeGen( + StmtPtr stmt, + const std::vector& buffer_args, + at::Device device = at::kCPU, + const std::string& kernel_func_name = "func"); + + ~CppCodeGen() override; + + void call(const std::vector& args) override; + void call_raw(const std::vector& args) override; + + template + void operator()(const Ts&... ts) { + call(std::vector({CallArg(ts)...})); + } + + std::string getCodeText(const std::string& attr = "") override { + return oss_.str(); + } + + private: + void init(); + + std::ostream& os() { + return printer_->os(); + } + + std::ostringstream oss_; + std::unique_ptr printer_; + std::unique_ptr var_name_rewriter_; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..caeeed693ff38f2129d8916204a5e843c88e062f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h @@ -0,0 +1,36 @@ +#pragma once + +namespace torch { +namespace jit { +namespace tensorexpr { + +constexpr auto cpp_intrinsics_definition = R"( +namespace std { + +template ::value, int>::type = 0> +T rsqrt(T v) { + return 1.0f / std::sqrt(v); +} + +template ::value, int>::type = 0> +T frac(T v) { + T intpart; + return std::modf(v, &intpart); +} + +template +To bitcast(const From& v) { + assert(sizeof(To) == sizeof(From)); + To res; + std::memcpy(&res, &v, sizeof(From)); + return res; +} + +} // namespace std +)"; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h new file mode 100644 index 0000000000000000000000000000000000000000..22de1ce32d00fb896a532e23b9ff6bd014cd646d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h @@ -0,0 +1,295 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +// A class that analyzes the given program relevant for Cuda backends. 
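+// It records which bufs are store targets, which ones need thread-local versus
+// cross-block storage, and the inferred gpu block/thread extents that are later
+// used as the kernel launch parameters (see GPUMetaVarRewriter below).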
+class CudaAnalysis : public IRVisitor { + public: + CudaAnalysis() { + gpu_block_extents_ = {alloc(1), alloc(1), alloc(1)}; + gpu_thread_extents_ = { + alloc(1), alloc(1), alloc(1)}; + } + bool is_buf_store_target(BufPtr buf) const { + return store_targets_.count(buf) > 0; + } + + const std::unordered_set& thread_local_bufs() const { + return thread_local_bufs_; + } + + const std::unordered_set& cross_block_bufs() const { + return cross_block_bufs_; + } + + const std::vector& gpu_block_extents() const { + return gpu_block_extents_; + } + + const std::vector& gpu_thread_extents() const { + return gpu_thread_extents_; + } + + private: + void visit(StorePtr v) override { + store_targets_.insert(v->buf()); + } + + void visit(AllocatePtr v) override; + void visit(FreePtr v) override; + void visit(PlacementAllocatePtr v) override; + void visit(ForPtr v) override; + + std::unordered_set store_targets_; + std::unordered_set thread_local_bufs_; + std::unordered_set cross_block_bufs_; + + std::vector gpu_block_extents_; + std::vector gpu_thread_extents_; +}; + +// An IRMutator that replaces binding loop options with Cuda metavars, and masks +// statements blocks which should execute with less reach than the launch +// parameter extent. +// +// We do this by segmenting each block into chunks which should have the same +// execution parameters, then if those params differ from the max mask each dim. +class GPUMetaVarRewriter : public IRMutator { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + explicit GPUMetaVarRewriter(const CudaAnalysis* cuda_analysis) + : cuda_analysis_(cuda_analysis) { + gpu_block_vars_ = { + alloc("blockIdx.x", kInt), + alloc("blockIdx.y", kInt), + alloc("blockIdx.z", kInt)}; + gpu_thread_vars_ = { + alloc("threadIdx.x", kInt), + alloc("threadIdx.y", kInt), + alloc("threadIdx.z", kInt)}; + + current_block_reach_ = { + alloc(1), alloc(1), alloc(1)}; + current_thread_reach_ = { + alloc(1), alloc(1), alloc(1)}; + } + + StmtPtr mutate(ForPtr v) override; + StmtPtr mutate(BlockPtr v) override; + + const std::vector& gpu_block_vars() const { + return gpu_block_vars_; + } + + const std::vector& gpu_thread_vars() const { + return gpu_thread_vars_; + } + + const std::vector& gpu_block_extents() const { + return cuda_analysis_->gpu_block_extents(); + } + + const std::vector& gpu_thread_extents() const { + return cuda_analysis_->gpu_thread_extents(); + } + + private: + // When processing a block, stores the contents of each sub-segment. + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + class Segment { + public: + void reset(bool mask) { + stmts_.clear(); + mask_ = mask; + } + + bool empty() const { + return stmts_.empty(); + } + + std::vector& stmts() { + return stmts_; + } + bool mask() { + return mask_; + } + + private: + std::vector stmts_; + bool mask_{true}; + }; + + // Returns true if the current execution scope is equivalent to the launch + // parameters. + bool isFullExtent(); + + std::vector gpu_block_vars_; + std::vector gpu_thread_vars_; + + std::vector current_block_reach_; + std::vector current_thread_reach_; + + const CudaAnalysis* cuda_analysis_; +}; + +// A class that overrides the underlying IRPrinter to produce Cuda C. 
+class CudaPrinter : public IRPrinter { + public: + explicit CudaPrinter( + std::ostream* os, + const CudaAnalysis* cuda_analysis, + bool has_random) + : IRPrinter(*os), cuda_analysis_(cuda_analysis) { + if (has_random) { + rand_func_ = alloc("rand", kHandle); + } + } + + void visit(CastPtr v) override; + void visit(IntrinsicsPtr v) override; + void visit(ForPtr v) override; + + void visit(LoadPtr v) override; + void visit(StorePtr v) override; + void visit(AtomicAddPtr v) override; + void visit(MaxPtr v) override; + void visit(MinPtr v) override; + void visit(IfThenElsePtr v) override; + void visit(BlockPtr v) override; + void visit(AllocatePtr v) override; + void visit(FreePtr v) override; + void visit(LetPtr v) override; + + void visit(ExternalCallPtr v) override; + + VarPtr rand_func() const { + return rand_func_; + } + + std::string dtypeToCppString(const Dtype& dtype) override; + + using IRPrinter::name_manager; + using IRPrinter::visit; + + private: + VarPtr rand_func_; + const CudaAnalysis* cuda_analysis_; + + void print_flat_alloc(AllocatePtr alloc); +}; + +// Construct Cuda C from the buffer and tensor input, and invoke the kernel +// when real arguments are provided. +class TORCH_CUDA_CU_API CudaCodeGen : public CodeGen { + public: + template + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + CudaCodeGen(StmtPtr stmt, Ts... ts) + : CodeGen( + stmt, + std::vector({BufferArg(ts)...}), + at::Device(at::kCUDA, at::cuda::current_device())) { + Initialize(); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + CudaCodeGen( + StmtPtr stmt, + const std::vector& buffer_args, + at::Device device = at::Device(at::kCUDA, at::cuda::current_device()), + const std::string& kernel_func_name = "func") + : CodeGen(stmt, buffer_args, device, kernel_func_name) { + Initialize(); + } + + ~CudaCodeGen() override; + + void call(const std::vector& args) override; + void call_raw(const std::vector& args) override; + void call_with_numel(void** args, int64_t numel) override; + + template + void operator()(const Ts&... 
ts) { + call(std::vector({CallArg(ts)...})); + } + + at::Tensor empty_strided( + c10::IntArrayRef size, + c10::IntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt) override; + + const std::vector& gpu_block_extents() const { + return cuda_analysis_->gpu_block_extents(); + } + + const std::vector& gpu_thread_extents() const { + return cuda_analysis_->gpu_thread_extents(); + } + + std::string getCodeText(const std::string& attr = "") override { + return oss_.str(); + } + + private: + void Initialize(); + + void CompileToNVRTC(const std::string& code, const std::string& func_name); + + UniqueNameManager* name_manager() { + if (!printer_) { + throw std::runtime_error("Null IRPrinter is not expected"); + } + return printer_->name_manager(); + } + + std::ostream& os() { + return printer_->os(); + } + + std::ostringstream oss_; + std::unique_ptr printer_; + std::unique_ptr cuda_analysis_; + std::unique_ptr metavar_rewriter_; + std::unordered_set taken_func_names; + std::mutex eval_lock_; + CUfunction function_; + bool has_random_ = false; + int thread_block_size_ = -1; + + std::vector arg_pos_in_extents_; +#ifdef TORCH_ENABLE_LLVM + std::vector> block_extents_eval_; + std::vector> thread_extents_eval_; +#else + std::vector> block_extents_eval_; + std::vector> thread_extents_eval_; +#endif + + std::string GetUniqueFuncName(const std::string& func_prefix); +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h new file mode 100644 index 0000000000000000000000000000000000000000..987ac5211d92960e335bfe2c8688a2c06fc6677d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h @@ -0,0 +1,104 @@ +#pragma once + +namespace torch { +namespace jit { +namespace tensorexpr { + +constexpr auto philox_random_string = R"( + +class Philox { +public: + __device__ inline Philox(unsigned long long seed, + unsigned long long subsequence, + unsigned long long offset) { + key.x = (unsigned int)seed; + key.y = (unsigned int)(seed >> 32); + counter = make_uint4(0, 0, 0, 0); + counter.z = (unsigned int)(subsequence); + counter.w = (unsigned int)(subsequence >> 32); + STATE = 0; + incr_n(offset / 4); + } + + __device__ inline unsigned long operator()() { + if(STATE == 0) { + uint4 counter_ = counter; + uint2 key_ = key; + for(int i = 0; i < 9; i++) { + counter_ = single_round(counter_, key_); + key_.x += (kPhilox10A); key_.y += (kPhilox10B); + } + output = single_round(counter_, key_); + incr(); + } + unsigned long ret; + switch(STATE) { + case 0: ret = output.x; break; + case 1: ret = output.y; break; + case 2: ret = output.z; break; + case 3: ret = output.w; break; + } + STATE = (STATE + 1) % 4; + return ret; + } + +private: + uint4 counter; + uint4 output; + uint2 key; + unsigned int STATE; + __device__ inline void incr_n(unsigned long long n) { + unsigned int nlo = (unsigned int)(n); + unsigned int nhi = (unsigned int)(n >> 32); + counter.x += nlo; + if (counter.x < nlo) + nhi++; + counter.y += nhi; + if (nhi <= counter.y) + return; + if (++counter.z) + return; + ++counter.w; + } + __device__ inline void incr() { + if (++counter.x) + return; + if (++counter.y) + return; + if (++counter.z) + return; + ++counter.w; + } + __device__ unsigned int mulhilo32(unsigned int a, unsigned 
int b, + unsigned int *result_high) { + *result_high = __umulhi(a, b); + return a*b; + } + + __device__ inline uint4 single_round(uint4 ctr, uint2 key) { + unsigned int hi0; + unsigned int hi1; + unsigned int lo0 = mulhilo32(kPhiloxSA, ctr.x, &hi0); + unsigned int lo1 = mulhilo32(kPhiloxSB, ctr.z, &hi1); + + uint4 ret = {hi1 ^ ctr.y ^ key.x, lo1, hi0 ^ ctr.w ^ key.y, lo0}; + return ret; + } + + static const unsigned long kPhilox10A = 0x9E3779B9; + static const unsigned long kPhilox10B = 0xBB67AE85; + static const unsigned long kPhiloxSA = 0xD2511F53; + static const unsigned long kPhiloxSB = 0xCD9E8D57; +}; + +// Inverse of 2^32. +#define M_RAN_INVM32 2.3283064e-10f +__device__ __inline__ float Uint32ToFloat(unsigned int x) { + return x * M_RAN_INVM32; +} + +)"; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h new file mode 100644 index 0000000000000000000000000000000000000000..64ac1edf8f188d57044a74c24852c0a38493ceca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h @@ -0,0 +1,346 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class InterpValue { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + InterpValue() : dtype_(kInt) { + Intvalues.push_back(0); + } + + template + InterpValue(Dtype dtype, T v) : dtype_(dtype) { +#define TYPE_CASE(Type, Name) \ + if (dtype == k##Name) { \ + Name##values.push_back(v); \ + return; \ + } + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE); +#undef TYPE_CASE + throw unsupported_dtype(); + } + +#define VALUE_CTOR(Type, Name) \ + InterpValue(Type v) : dtype_(k##Name) { \ + Name##values.push_back(v); \ + } + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_CTOR); +#undef VALUE_CTOR + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + explicit InterpValue(c10::quint8 v) : dtype_(kQUInt8) { + QUInt8values.emplace_back(v.val_); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + explicit InterpValue(c10::qint8 v) : dtype_(kQInt8) { + QInt8values.emplace_back(v.val_); + } + +#define VALUE_VEC_CTOR(Type, Name) \ + InterpValue(const std::vector& v) \ + : dtype_(Dtype(k##Name, v.size())), Name##values(v) {} + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_VEC_CTOR); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + VALUE_VEC_CTOR(c10::quint8, QUInt8) + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + VALUE_VEC_CTOR(c10::qint8, QInt8) +#undef VALUE_VEC_CTOR + + template + T as() const; + + template + const std::vector& as_vec() const; + + int64_t intValue() const; + + Dtype dtype() const { + return dtype_; + } + + private: + Dtype dtype_; + +#define VALUE_STORAGE(Type, Name) std::vector Name##values; + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_STORAGE); + VALUE_STORAGE(c10::qint8, QInt8); + VALUE_STORAGE(c10::quint8, QUInt8); +#undef VALUE_STORAGE + void* ptr; +}; + +#define VALUE_AS_DISPATCH(Type, Name) \ + template <> \ + inline Type InterpValue::as() const { \ + if (dtype_ != k##Name) { \ + 
throw unsupported_dtype(); \ + } \ + return Name##values[0]; \ + } +AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_DISPATCH); +VALUE_AS_DISPATCH(c10::quint8, QUInt8); +VALUE_AS_DISPATCH(c10::qint8, QInt8); +#undef VALUE_AS_DISPATCH + +#define VALUE_AS_VEC_DISPATCH(Type, Name) \ + template <> \ + inline const std::vector& InterpValue::as_vec() const { \ + if (dtype_.scalar_type() != ScalarType::Name) { \ + throw unsupported_dtype(); \ + } \ + return Name##values; \ + } +AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_VEC_DISPATCH); +VALUE_AS_VEC_DISPATCH(c10::quint8, QUInt8); +VALUE_AS_VEC_DISPATCH(c10::qint8, QInt8); +#undef VALUE_AS_VEC_DISPATCH + +template +auto underlyingValue(Type x) { + return x; +} + +template <> +inline auto underlyingValue(c10::quint8 x) { + return x.val_; +} + +template <> +inline auto underlyingValue(c10::qint8 x) { + return x.val_; +} + +template +To raw_bitcast(const From& src) { + TORCH_CHECK(sizeof(To) == sizeof(From), "Invalid bitcast invocation"); + To storage; + std::memcpy(&storage, &src, sizeof(To)); + return reinterpret_cast(storage); +} + +class SimpleIREvaluatorImpl; +class TORCH_API SimpleIREvaluator : public CodeGen { + public: + SimpleIREvaluator( + StmtPtr stmt, + const std::vector& buffer_args, + at::Device device = at::kCPU, + const std::string& kernel_func_name = "func"); + + ~SimpleIREvaluator() override; + + void call(const std::vector& args) override; + void call_raw(const std::vector& args) override; + + template + void operator()(const Ts&... ts) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector args({CallArg(ts)...}); + call(args); + } + + void bindVar(VarPtr v, ExprPtr e); + InterpValue value() const; + + private: + void bindArg(const BufferArg& buf, void* data); + void expand_intrinsics() { + GenericIntrinsicsExpander intrinsics_expander; + apply_mutator(&intrinsics_expander); + } + + std::unique_ptr impl_; +}; + +template +class ExprEval { + public: + using BufferArg = CodeGen::BufferArg; + using CallArg = CodeGen::CallArg; + + template + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + ExprEval(const ExprHandle& expr, Ts... ts) + : ExprEval(expr, {BufferArg(ts)...}) {} + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + ExprEval(const ExprHandle& expr, const std::vector& buffer_args) + : dtype_(expr.dtype()) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector buffer_args_extended = buffer_args; + BufHandle ret_buf("ret_val", {1}, dtype_); + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector indices; + ExprHandle zero = IntImm::make(0); + for (size_t i = 0; i < ret_buf.ndim(); i++) { + indices.push_back(zero); + } + StmtPtr store_stmt = Store::make(ret_buf, indices, expr); + buffer_args_extended.emplace_back(ret_buf); + codegen_.reset(new CodeGenType(store_stmt, buffer_args_extended)); + } + + template + void operator()(Ts... ts) { + call(ts...); + } + + void operator()(const std::vector& call_args) { + call(call_args); + } + + void bindVar(VarPtr v, ExprPtr e) { + codegen_->bindVar(v, e); + } + + void bindVar(const VarHandle& v, const ExprHandle& e) { + codegen_->bindVar(v.node(), e.node()); + } + + template + void call(Ts... 
ts) { + call({CallArg(ts)...}); + } + + void call(const std::vector& call_args) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector call_args_extended = call_args; + switch (dtype_.scalar_type()) { +#define TYPE_CASE(Type, Name) \ + case ScalarType::Name: { \ + std::vector ret_val_arg(1); \ + call_args_extended.push_back(CallArg(ret_val_arg)); \ + codegen_->call(call_args_extended); \ + ret_value_ = InterpValue(ret_val_arg[0]); \ + } break; + // NOLINTNEXTLINE(modernize-use-emplace) + AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE); + // NOLINTNEXTLINE(modernize-use-emplace) + TYPE_CASE(c10::quint8, QUInt8); + // NOLINTNEXTLINE(modernize-use-emplace) + TYPE_CASE(c10::qint8, QInt8); +#undef TYPE_CASE + case ScalarType::Bool: { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector ret_val_arg(1); + call_args_extended.emplace_back(ret_val_arg.data()); + codegen_->call(call_args_extended); + ret_value_ = InterpValue((bool)ret_val_arg[0]); + } break; + default: + throw unsupported_dtype(); + } + } + + void call_raw(const std::vector& args) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector args_extended = args; + switch (dtype_.scalar_type()) { +#define TYPE_CASE(Type, Name) \ + case ScalarType::Name: { \ + std::vector ret_val_arg(1); \ + args_extended.push_back(ret_val_arg.data()); \ + codegen_->call_raw(args_extended); \ + ret_value_ = InterpValue(ret_val_arg[0]); \ + } break; + AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE); + TYPE_CASE(c10::quint8, QUInt8); + TYPE_CASE(c10::qint8, QInt8); +#undef TYPE_CASE + case ScalarType::Bool: { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector ret_val_arg(1); + args_extended.push_back(ret_val_arg.data()); + codegen_->call_raw(args_extended); + ret_value_ = InterpValue((bool)ret_val_arg[0]); + } break; + default: + throw unsupported_dtype(); + } + } + + template + T value(const std::vector& args) { + call_raw(args); + return ret_value_.as(); + } + + template + T value(Ts... ts) { + call(std::forward(ts)...); + return ret_value_.as(); + } + + Dtype dtype() { + return dtype_; + } + + private: + Dtype dtype_; + std::unique_ptr codegen_; + InterpValue ret_value_; +}; + +// Evaluates the given expression and returns an int64_t value if the result of +// the given expression is int64_t. +c10::optional evalInt(ExprPtr e); + +// Substitutes the given vars with their corresponding expressions in the input +// expression. +inline ExprPtr Substitute(ExprPtr expr, const VarMapping& var_mapping) { + VarSubMutator var_sub(var_mapping); + return expr->accept_mutator(&var_sub); +} + +// Substitutes the given vars with their corresponding expressions in the input +// statement. +inline StmtPtr Substitute(StmtPtr stmt, const VarMapping& var_mapping) { + VarSubMutator var_sub(var_mapping); + return stmt->accept_mutator(&var_sub); +} + +// Creates a clone of the input expression and substitutes the given vars with +// their corresponding expressions in the clone. +// NOTE: This works because cloning reuses variables and does not create new +// ones, and `VarMapping` input has variables as the key. +inline ExprPtr SubstituteInClone(ExprPtr expr, const VarMapping& var_mapping) { + VarSubMutator var_sub(var_mapping); + return Expr::clone(std::move(expr))->accept_mutator(&var_sub); +} + +// Creates a clone of the input statement and substitutes the given vars with +// their corresponding expressions in the clone. 
+// NOTE: This works because cloning reuses variables and does not create new +// ones, and `VarMapping` input has variables as the key. +inline StmtPtr SubstituteInClone(StmtPtr stmt, const VarMapping& var_mapping) { + VarSubMutator var_sub(var_mapping); + return Stmt::clone(std::move(stmt))->accept_mutator(&var_sub); +} + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h new file mode 100644 index 0000000000000000000000000000000000000000..b5e656f4080c57996dc5433ea7829a7dbd531a29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h @@ -0,0 +1,91 @@ +#pragma once + +#include +#include + +#include +#include + +// Forward declarations of types +namespace torch { +namespace jit { +namespace tensorexpr { +class Expr; +class Stmt; +} // namespace tensorexpr +} // namespace jit +} // namespace torch + +// Forward declarations of functions +namespace std { +TORCH_API std::string to_string(const torch::jit::tensorexpr::ExprPtr); +TORCH_API std::string to_string(const torch::jit::tensorexpr::StmtPtr); +} // namespace std + +namespace torch { +namespace jit { +namespace tensorexpr { + +class unsupported_dtype : public std::runtime_error { + public: + explicit unsupported_dtype() : std::runtime_error("UNSUPPORTED DTYPE") {} + explicit unsupported_dtype(const std::string& err) + : std::runtime_error("UNSUPPORTED DTYPE: " + err) {} +}; + +class out_of_range_index : public std::runtime_error { + public: + explicit out_of_range_index() : std::runtime_error("OUT OF RANGE INDEX") {} + explicit out_of_range_index(const std::string& err) + : std::runtime_error("OUT OF RANGE INDEX: " + err) {} +}; + +class unimplemented_lowering : public std::runtime_error { + public: + explicit unimplemented_lowering() + : std::runtime_error("UNIMPLEMENTED LOWERING") {} + explicit unimplemented_lowering(ExprPtr expr) + : std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(expr)) {} + explicit unimplemented_lowering(StmtPtr stmt) + : std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(stmt)) {} +}; + +class malformed_input : public std::runtime_error { + public: + explicit malformed_input() : std::runtime_error("MALFORMED INPUT") {} + explicit malformed_input(const std::string& err) + : std::runtime_error("MALFORMED INPUT: " + err) {} + explicit malformed_input(ExprPtr expr) + : std::runtime_error("MALFORMED INPUT: " + std::to_string(expr)) {} + explicit malformed_input(const std::string& err, ExprPtr expr) + : std::runtime_error( + "MALFORMED INPUT: " + err + " - " + std::to_string(expr)) {} + explicit malformed_input(StmtPtr stmt) + : std::runtime_error("MALFORMED INPUT: " + std::to_string(stmt)) {} + explicit malformed_input(const std::string& err, StmtPtr stmt) + : std::runtime_error( + "MALFORMED INPUT: " + err + " - " + std::to_string(stmt)) {} +}; + +class malformed_ir : public std::runtime_error { + public: + explicit malformed_ir() : std::runtime_error("MALFORMED IR") {} + explicit malformed_ir(const std::string& err) + : std::runtime_error("MALFORMED IR: " + err) {} + explicit malformed_ir(ExprPtr expr) + : std::runtime_error("MALFORMED IR: " + std::to_string(expr)) {} + explicit malformed_ir(const std::string& err, ExprPtr expr) + : std::runtime_error( + "MALFORMED IR: " + err + " - " + std::to_string(expr)) {} + explicit 
malformed_ir(StmtPtr stmt) + : std::runtime_error("MALFORMED IR: " + std::to_string(stmt)) {} + explicit malformed_ir(const std::string& err, StmtPtr stmt) + : std::runtime_error( + "MALFORMED IR: " + err + " - " + std::to_string(stmt)) {} +}; + +TORCH_API std::string buildErrorMessage(const std::string& s = ""); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h new file mode 100644 index 0000000000000000000000000000000000000000..1a0cc57875d19978b0fe249dab83b8038b9dce9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h @@ -0,0 +1,499 @@ +/** + * This file implements the core classes for Tensor Expressions. + * + * The structure of the expressions is inspired by Halide/TVM IR. + */ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +enum IRNodeType { + kPrimitive, + kAdd, + kSub, + kMul, + kDiv, + kMod, + kMax, + kMin, + kAnd, + kOr, + kLshift, + kRshift, + kXor, + kCompareSelect, + kCast, + kBitCast, + kOther, +}; + +// The common base between all expression node. +class TORCH_API Expr : public std::enable_shared_from_this { + public: + explicit Expr(Dtype dtype, IRNodeType expr_type = kOther) + : dtype_(dtype), expr_type_(expr_type) {} + virtual ~Expr() = default; + Dtype dtype() const { + return dtype_; + } + virtual void accept(IRVisitor* visitor) = 0; + virtual ExprPtr accept_mutator(IRMutator* mutator) = 0; + + IRNodeType expr_type() const { + return expr_type_; + } + // Is this a fixed (constant) immediate value. + virtual bool isConstant() const { + return false; + } + + void set_dtype(Dtype dtype) { + dtype_ = dtype; + } + + /* + * Make a deep copy of the given expression. + * + * All sub-expressions inside the given expressions are also cloned. Note + * that the variables are not deep-copied since they are immutable. + */ + static ExprPtr clone(ExprPtr s); + + protected: + std::shared_ptr getptr() { + return shared_from_this(); + } + + private: + Dtype dtype_; + IRNodeType expr_type_; +}; + +// A CRTP pattern to accept visitors for children class, +// and dispatch back to the children. +template +class ExprNode : public Base { + public: + using ExprNodeBase = ExprNode; + void accept(IRVisitor* visitor) override { + visitor->visit(static_to(Base::getptr())); + } + ExprPtr accept_mutator(IRMutator* mutator) override; + // pass the constructor to the base class + using Base::Base; +}; + +// A wrapper object to the underlying ExprNode. +// Also serves the primary way to build and operate on other expressions. 
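// A minimal illustrative sketch of how handles are typically composed (the
// variable names and the choice of kFloat are assumptions for the example,
// not part of this header):
//
//   VarHandle x("x", kFloat);
//   VarHandle y("y", kFloat);
//   ExprHandle e = x * y + ExprHandle(1.0f); // overloaded operators build the
//                                            // corresponding IR nodes
//   ExprPtr raw = e.node();                  // unwrap the underlying ExprPtr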
+class TORCH_API ExprHandle { + public: + ExprHandle() = default; + explicit ExprHandle(ExprPtr node) : base_expr_node_(std::move(node)) {} + + ExprPtr node() { + return base_expr_node_; + } + + ExprPtr node() const { + return base_expr_node_; + } + + bool empty() const { + return base_expr_node_ == nullptr; + } + +#define IMM_EXPR_DECLARE(Type, Name) ExprHandle(Type v); + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_EXPR_DECLARE); +#undef IMM_EXPR_DECLARE + + template + NodePtr AsNode() { + return to(this->node()); + } + + template + NodePtr AsNode() const { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + return const_cast(this)->AsNode(); + } + + Dtype dtype() const { + return node()->dtype(); + } + + // Handling the math operators. + ExprHandle operator+(const ExprHandle& other) const; + ExprHandle operator-(const ExprHandle& other) const; + ExprHandle operator*(const ExprHandle& other) const; + ExprHandle operator/(const ExprHandle& other) const; + ExprHandle operator%(const ExprHandle& other) const; + ExprHandle operator==(const ExprHandle& other) const; + ExprHandle operator!=(const ExprHandle& other) const; + ExprHandle operator>(const ExprHandle& other) const; + ExprHandle operator>=(const ExprHandle& other) const; + ExprHandle operator<(const ExprHandle& other) const; + ExprHandle operator<=(const ExprHandle& other) const; + ExprHandle operator&(const ExprHandle& other) const; + ExprHandle operator|(const ExprHandle& other) const; + ExprHandle operator&&(const ExprHandle& other) const; + ExprHandle operator||(const ExprHandle& other) const; + ExprHandle operator^(const ExprHandle& other) const; + ExprHandle operator<<(const ExprHandle& other) const; + ExprHandle operator>>(const ExprHandle& other) const; + + private: + ExprPtr base_expr_node_ = nullptr; +}; + +// The underlying representation node to a Var. +// Currently, each Var object represents a unique variable, even though the +// names might be the same. We should consider add a unique_name as well. 
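// For instance (an illustrative sketch; the name hint "i" is arbitrary): two
// Vars made with the same name hint are still distinct variables, and only
// node identity matters for substitution, hashing and simplification:
//
//   ExprHandle a = Var::make("i", kInt);
//   ExprHandle b = Var::make("i", kInt);
//   // a.node() != b.node(), even though they share the name hint.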
+class TORCH_API Var : public ExprNode { + public: + static ExprHandle make(const std::string& name_hint, Dtype dtype) { + return ExprHandle(alloc(name_hint, dtype)); + } + static ExprHandle make(Dtype dtype) { + return ExprHandle(alloc("", dtype)); + } + + // TODO: unique_name + const std::string& name_hint() const { + return name_hint_; + } + + void set_name_hint(const std::string& name) { + name_hint_ = name; + } + + void set_name_hint(std::string&& name) { + name_hint_ = std::move(name); + } + + Var(std::string name_hint, Dtype dtype) + : ExprNodeBase(dtype, kPrimitive), name_hint_(std::move(name_hint)) {} + + private: + std::string name_hint_; +}; + +TORCH_API std::vector make_contiguous_strides( + const std::vector& dims); +TORCH_API std::vector make_channels_last_strides( + const std::vector& dims); + +class TORCH_API Buf : public ExprNode { + public: + static BufHandle make(const std::vector& dims, Dtype dtype); + + static BufHandle make( + const std::string& name_hint, + const std::vector& dims, + const std::vector& strides, + Dtype dtype); + + static BufHandle make( + const std::string& name_hint, + const std::vector& dims, + Dtype dtype, + c10::optional initializer = c10::nullopt, + c10::optional> strides = c10::nullopt, + c10::optional qscale = c10::nullopt, + c10::optional qzero = c10::nullopt); + + // TODO: unique_name + VarPtr base_handle() const { + return base_handle_; + } + void set_base_handle(VarPtr base_handle) { + base_handle_ = std::move(base_handle); + } + + const std::string& name_hint() const { + return base_handle_->name_hint(); + } + void set_name_hint(const std::string& name_hint) { + base_handle_->set_name_hint(name_hint); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Buf(const std::string& name_hint, + const std::vector& dims, + Dtype dtype, + ExprPtr initializer = nullptr, + c10::optional> strides = c10::nullopt, + ExprPtr qscale = nullptr, + ExprPtr qzero = nullptr) + : Buf(alloc(name_hint, kHandle), + dims, + dtype, + std::move(initializer), + std::move(strides), + std::move(qscale), + std::move(qzero)) {} + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Buf(VarPtr var, + std::vector dims, + Dtype dtype, + ExprPtr initializer = nullptr, + c10::optional> strides = c10::nullopt, + ExprPtr qscale = nullptr, + ExprPtr qzero = nullptr); + + size_t ndim() const { + return dims_.size(); + } + ExprPtr dim(size_t index) const { + if (index >= ndim()) { + throw out_of_range_index(); + } + return dims_[index]; + } + std::vector dims() const { + return dims_; + } + void set_dims(std::vector dims) { + dims_ = std::move(dims); + } + + std::vector strides() const { + return strides_; + } + + void set_strides(std::vector strides) { + strides_ = std::move(strides); + } + + ExprPtr initializer() const { + return initializer_; + }; + + ExprPtr qzero() const { + return qzero_; + } + + ExprPtr qscale() const { + return qscale_; + } + + void set_qzero(ExprPtr qzero) { + qzero_ = std::move(qzero); + } + + void set_qscale(ExprPtr qscale) { + qscale_ = std::move(qscale); + } + + bool hasConstantDims() const { + for (const auto& d : dims_) { + if (!d->isConstant()) { + return false; + } + } + return true; + } + + bool is_contiguous( + at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const; + + // The channels-last 1d can benefit the performance of some operators like + // conv1d. But the MemoryFormat enum has not covered this layout yet. Hence, + // we abstract a dedicated function to check channels-last 1d contiguous. 
+ // + // Channels-last 1d: + // dims: n c l + // strides(nlc): c*l 1 c + bool is_channels_last_1d_contiguous() const { + if (dims_.size() != 3) { + return false; + } + return is_stride_one(1) && is_cont_with(2, 1) && is_cont_with(0, 2); + } + + private: + bool is_cont_with(int cur_dim, int adjacent_dim) const; + bool is_stride_one(int cur_dim) const; + + VarPtr base_handle_; + std::vector dims_; + std::vector strides_; + ExprPtr initializer_; + // qscale_ and qzero_ are used only for quantized dtypes Bufs: kQUInt8, kQInt8 + ExprPtr qscale_; + ExprPtr qzero_; +}; + +class TORCH_API BufHandle : public ExprHandle { + public: + BufHandle( + const std::string& name_hint, + const std::vector& dims, + Dtype dtype) + : ExprHandle(Buf::make(name_hint, dims, dtype)) {} + + BufHandle( + const std::string& name_hint, + const std::vector& dims, + const std::vector& strides, + Dtype dtype) + : ExprHandle(Buf::make(name_hint, dims, strides, dtype)) {} + + BufHandle(const std::vector& dims, Dtype dtype) + : ExprHandle(Buf::make("_", dims, dtype)) {} + + explicit BufHandle(Dtype dtype) : ExprHandle(Buf::make("_", {}, dtype)) {} + + explicit BufHandle(BufPtr node) : ExprHandle(std::move(node)) {} + BufPtr node() const { + return static_to(ExprHandle::node()); + } + BufPtr node() { + return static_to(ExprHandle::node()); + } + + template + inline ExprHandle load(const Ts&... ts) const; + + template + inline ExprHandle load(const std::vector& args) const; + + inline ExprHandle load(const std::vector& args) const; + + StorePtr store(const std::vector& args, const ExprHandle& val) + const; + + bool operator==(const BufHandle& other) const { + return this->node() == other.node(); + } + bool operator!=(const BufHandle& other) const { + return !(*this == other); + } + + const std::string& name_hint() const { + return this->node()->name_hint(); + } + + bool empty() const { + return (this->node() == nullptr); + } + + size_t ndim() const { + return node()->ndim(); + } + + std::vector dims() const; + + ExprHandle dim(size_t index) const { + return ExprHandle(node()->dim(index)); + } + + bool is_contiguous( + at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const { + return node()->is_contiguous(memory_format); + } + + bool is_channels_last_1d_contiguous() const { + return node()->is_channels_last_1d_contiguous(); + } +}; + +// An expression to construct the underlying variable node. +// Note: do not store any info here, since it is often possible to slice this +// object. For example: VarHandle x('x'); ExprHandle x2 = x; +class TORCH_API VarHandle : public ExprHandle { + public: + // Creates an empty VarHandle whose base Var is set to nullptr. 
+ VarHandle() : ExprHandle() {} + + explicit VarHandle(Dtype dtype) : ExprHandle(Var::make(dtype)) {} + + VarHandle(const std::string& name_hint, Dtype dtype) + : ExprHandle(Var::make(name_hint, dtype)) {} + + explicit VarHandle(VarPtr node) : ExprHandle(std::move(node)) {} + + VarPtr node() const { + return static_to(ExprHandle::node()); + } + bool operator==(const VarHandle& other) const { + return this->node() == other.node(); + } + bool operator!=(const VarHandle& other) const { + return !(*this == other); + } + + const std::string& name_hint() const { + return this->node()->name_hint(); + } + bool empty() const { + return (this->node() == nullptr); + } +}; + +template +ExprPtr ExprNode::accept_mutator(IRMutator* mutator) { + return mutator->mutate(static_to(Base::getptr())); +} + +inline bool same_node(const ExprHandle& expr1, const ExprHandle& expr2) { + return expr1.AsNode() == expr2.AsNode(); +} + +TORCH_API ExprHandle sin(const ExprHandle& v); +TORCH_API ExprHandle cos(const ExprHandle& v); +TORCH_API ExprHandle tan(const ExprHandle& v); +TORCH_API ExprHandle asin(const ExprHandle& v); +TORCH_API ExprHandle acos(const ExprHandle& v); +TORCH_API ExprHandle atan(const ExprHandle& v); +TORCH_API ExprHandle sinh(const ExprHandle& v); +TORCH_API ExprHandle cosh(const ExprHandle& v); +TORCH_API ExprHandle tanh(const ExprHandle& v); +TORCH_API ExprHandle sigmoid(const ExprHandle& v); +TORCH_API ExprHandle exp(const ExprHandle& v); +TORCH_API ExprHandle expm1(const ExprHandle& v); +TORCH_API ExprHandle abs(const ExprHandle& v); +TORCH_API ExprHandle log(const ExprHandle& v); +TORCH_API ExprHandle fast_tanh(const ExprHandle& v); +TORCH_API ExprHandle fast_sigmoid(const ExprHandle& v); +TORCH_API ExprHandle fast_log(const ExprHandle& v); +TORCH_API ExprHandle log_vml(const ExprHandle& v); +TORCH_API ExprHandle log2(const ExprHandle& v); +TORCH_API ExprHandle log10(const ExprHandle& v); +TORCH_API ExprHandle log1p(const ExprHandle& v); +TORCH_API ExprHandle erf(const ExprHandle& v); +TORCH_API ExprHandle erfc(const ExprHandle& v); +TORCH_API ExprHandle sqrt(const ExprHandle& v); +TORCH_API ExprHandle rsqrt(const ExprHandle& v); +TORCH_API ExprHandle ceil(const ExprHandle& v); +TORCH_API ExprHandle floor(const ExprHandle& v); +TORCH_API ExprHandle round(const ExprHandle& v); +TORCH_API ExprHandle trunc(const ExprHandle& v); +TORCH_API ExprHandle frac(const ExprHandle& v); +TORCH_API ExprHandle lgamma(const ExprHandle& v); +TORCH_API ExprHandle atan2(const ExprHandle& v1, const ExprHandle& v2); +TORCH_API ExprHandle pow(const ExprHandle& v1, const ExprHandle& v2); +TORCH_API ExprHandle fmod(const ExprHandle& v1, const ExprHandle& v2); +TORCH_API ExprHandle remainder(const ExprHandle& v1, const ExprHandle& v2); +TORCH_API ExprHandle isnan(const ExprHandle& v1); +TORCH_API ExprHandle Relu(const ExprHandle& v1); + +TORCH_API ExprHandle +ifThenElse(const ExprHandle& c, const ExprHandle& t, const ExprHandle& f); + +TORCH_API ExprHandle expr_to_vec(ExprHandle v, int lanes); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..627d67c934d593c8741d0cead8b78477e02d2585 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h @@ -0,0 +1,115 @@ +#pragma once + 
+#include +#include +#include +#include +#include +#include + +#define FOR_ALL_EXTERNAL_FUNCTIONS(_) \ + _(nnc_aten_adaptive_avg_pool2d) \ + _(nnc_aten_addmm) \ + _(nnc_aten_conv2d) \ + _(nnc_aten_conv1d) \ + _(nnc_aten_conv1d_out) \ + _(nnc_aten_dequantize) \ + _(nnc_aten_dequantize_out) \ + _(nnc_aten_embedding) \ + _(nnc_aten_matmul) \ + _(nnc_aten_mv) \ + _(nnc_aten_mm) \ + _(nnc_aten_mean) \ + _(nnc_aten_max_red) \ + _(nnc_aten_max_red_out) \ + _(nnc_aten_quantized_conv1d) \ + _(nnc_aten_quantized_conv1d_out) \ + _(nnc_aten_quantized_conv2d) \ + _(nnc_aten_quantized_conv2d_out) \ + _(nnc_aten_quantized_conv2d_relu) \ + _(nnc_aten_quantized_conv2d_relu_out) \ + _(nnc_aten_quantized_linear) \ + _(nnc_aten_quantized_linear_out) \ + _(nnc_aten_quantized_linear_relu) \ + _(nnc_aten_quantized_add) \ + _(nnc_aten_quantized_cat) \ + _(nnc_aten_quantized_mul) \ + _(nnc_aten_quantized_mul_out) \ + _(nnc_aten_quantized_mul_scalar) \ + _(nnc_aten_quantized_mul_scalar_out) \ + _(nnc_aten_quantized_relu) \ + _(nnc_aten_quantized_sigmoid) \ + _(nnc_aten_quantized_sigmoid_out) \ + _(nnc_aten_quantize_per_tensor) \ + _(nnc_aten_quantize_per_tensor_out) \ + _(nnc_aten_triangular_solve) \ + _(nnc_aten_upsample_nearest2d) \ + _(nnc_aten_upsample_nearest2d_out) \ + _(nnc_prepacked_conv2d_clamp_run) \ + _(nnc_prepacked_linear_clamp_run) + +#define DECLARE_EXTERNAL_FUNCTION(NAME) \ + TORCH_API void NAME( \ + int64_t bufs_num, \ + void** buf_data, \ + int64_t* buf_ranks, \ + int64_t* buf_dims, \ + int64_t* buf_strides, \ + int8_t* buf_dtypes, \ + int64_t args_num, \ + int64_t* extra_args); + +namespace torch { +namespace jit { +namespace tensorexpr { +struct QIData final { + double scale; + int64_t zero; + c10::ScalarType scalarType; +}; +std::vector constructTensors( + int64_t bufs_num, + void** buf_data, + int64_t* buf_ranks, + int64_t* buf_dims, + int64_t* buf_strides, + int8_t* buf_dtypes, + c10::optional>> qdataArg = + c10::nullopt); + +std::vector constructTensors2( + int64_t bufs_in_num, + void** buf_data, + int64_t* buf_ranks, + int64_t* buf_dims, + int64_t* buf_strides, + int8_t* buf_dtypes, + c10::optional>> qdataArg = + c10::nullopt, + size_t bufs_out_num = 0); + +#ifdef C10_MOBILE +extern "C" { +#endif +void DispatchParallel( + int8_t* func, + int64_t start, + int64_t stop, + int8_t* packed_data) noexcept; + +FOR_ALL_EXTERNAL_FUNCTIONS(DECLARE_EXTERNAL_FUNCTION) +#if AT_MKLDNN_ENABLED() +DECLARE_EXTERNAL_FUNCTION(nnc_mkldnn_prepacked_conv_run); +#endif + +TORCH_API void nnc_aten_free(int64_t bufs_num, void** ptrs) noexcept; + +#ifdef C10_MOBILE +} // extern "C" +#endif + +} // namespace tensorexpr +} // namespace jit +} // namespace torch + +#undef DECLARE_EXTERNAL_FUNCTION diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h new file mode 100644 index 0000000000000000000000000000000000000000..7d85d545af8c8eb68cc7c3e4d5ce49a2388125d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +#ifdef C10_MOBILE +extern "C" { +#endif +void DispatchParallel( + int8_t* func, + int64_t start, + int64_t stop, + int8_t* packed_data) noexcept; + +TORCH_API void nnc_aten_free(int64_t bufs_num, void** ptrs) noexcept; + +#ifdef C10_MOBILE +} // extern 
"C" +#endif + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..913dbb9c72423b95a3531c90a57cfe87690c2f60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h @@ -0,0 +1,61 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +// The external functions that could be called from NNC must have the same +// signature defined by `NNCExternalFunction`. +// +// Why this signature? +// It was picked for two reasons: 1) it should be generic enough to represent +// most of the ops we might want to call, 2) it should be possible to generate a +// code for this call in LLVM codegen. +// The first 5 parameters allow to pass any number of contiguous CPU tensors in +// case we need to run aten ops (TODO: support different devices). The first +// buffer in the array is assumed to be the output buffer. We couldn't use +// `at::Tensor` (or `c10::IValue`) type there directly as it would mean that +// we'd need to declare it in LLVM codegen in LLVM IR form, which would be very +// cumbersome and hard to maintain. Note that the dimensions of all tensors are +// concatenated into a single array buf_dims. We do not need to pass its length, +// since it can be deduced from total number of buffers and their ranks. +// +// The last 2 arguments allow to pass any non-tensor arguments encoded as an +// array of int64_t values. The way they are encoded is not specified and could +// be arbitrary - whatever the most convenient for the specific bridge function +// is. +// +// The bridge functions must not throw exceptions - properly propagating them +// from the generated code is too cumbersome, and thus all calls to functions +// that could throw must be wrapped with try-catch blocks. 
+using NNCExternalFunction = void (*)( + int64_t bufs_num, + void** buf_data, + int64_t* buf_ranks, + int64_t* buf_dims, + int64_t* buf_strides, + int8_t* buf_dtypes, + int64_t args_num, + int64_t* extra_args); + +// Return a global map "function-name" -> "function-pointer" for all registered +// in NNC external functions +TORCH_API std::unordered_map& +getNNCFunctionRegistry(); + +// To register a new external function in NNC one needs to create an instance of +// this struct +struct RegisterNNCExternalFunction { + RegisterNNCExternalFunction(const std::string& name, NNCExternalFunction fn) { + getNNCFunctionRegistry()[name] = fn; + } +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h new file mode 100644 index 0000000000000000000000000000000000000000..f7caf135350a782685564904a06056f3e851c06c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h @@ -0,0 +1,129 @@ +#pragma once +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +template +using NodePtr = std::shared_ptr; + +template +NodePtr to(NodePtr x) { + return std::dynamic_pointer_cast(x); +} + +template +NodePtr static_to(NodePtr x) { + return std::static_pointer_cast(x); +} + +template +NodePtr alloc(Args&&... args) { + return std::make_shared(std::forward(args)...); +} + +class Buf; +class Expr; +class Stmt; +class Var; + +using BufPtr = NodePtr; +using ExprPtr = NodePtr; +using StmtPtr = NodePtr; +using VarPtr = NodePtr; + +class ExprHandle; +class VarHandle; +class BufHandle; + +class Add; +class And; +class BitCast; +class Broadcast; +class Cast; +class CompareSelect; +class Div; +class IfThenElse; +class Intrinsics; +class Let; +class Load; +class Lshift; +class Max; +class MaxTerm; +class Min; +class MinTerm; +class Mod; +class Mul; +class Or; +class Polynomial; +class Ramp; +class ReduceOp; +class RoundOff; +class Rshift; +class Store; +class Sub; +class Term; +class Xor; +using AddPtr = NodePtr; +using AndPtr = NodePtr; +using BitCastPtr = NodePtr; +using BroadcastPtr = NodePtr; +using CastPtr = NodePtr; +using CompareSelectPtr = NodePtr; +using DivPtr = NodePtr
; +using IfThenElsePtr = NodePtr; +using IntrinsicsPtr = NodePtr; +using LetPtr = NodePtr; +using LoadPtr = NodePtr; +using LshiftPtr = NodePtr; +using MaxPtr = NodePtr; +using MaxTermPtr = NodePtr; +using MinPtr = NodePtr; +using MinTermPtr = NodePtr; +using ModPtr = NodePtr; +using MulPtr = NodePtr; +using OrPtr = NodePtr; +using PolynomialPtr = NodePtr; +using RampPtr = NodePtr; +using ReduceOpPtr = NodePtr; +using RoundOffPtr = NodePtr; +using RshiftPtr = NodePtr; +using StorePtr = NodePtr; +using SubPtr = NodePtr; +using TermPtr = NodePtr; +using XorPtr = NodePtr; + +class Allocate; +class AtomicAdd; +class Block; +class Cond; +class ExternalCall; +class ExternalCallWithAlloc; +class For; +class Free; +class FreeExt; +class PlacementAllocate; +class SyncThreads; +using AllocatePtr = NodePtr; +using AtomicAddPtr = NodePtr; +using BlockPtr = NodePtr; +using CondPtr = NodePtr; +using ExternalCallPtr = NodePtr; +using ExternalCallWithAllocPtr = NodePtr; +using ForPtr = NodePtr; +using FreePtr = NodePtr; +using FreeExtPtr = NodePtr; +using PlacementAllocatePtr = NodePtr; +using SyncThreadsPtr = NodePtr; + +#define IMM_DECLARE(Type, Name) \ + class Name##Imm; \ + using Name##ImmPtr = NodePtr; +AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE); +#undef IMM_DECLARE + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..1180d0ac438b9771321a81c036f254d5ca4d45ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h @@ -0,0 +1,115 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +// Optimize aten::cat ops in the given subgraph. +// +// Moving users of cat to its inputs. +// Cat ops get lowered into multiple loops, one per input. When the result +// of cat is used by some other op, it results in a situation where inlining +// of cat does not happen. This in turn results in intermediate buffers +// being created for the result of cat, since it is not inlined. +// +// For example, consider the following graph: +// graph(%x : Float(10, strides=[1], device=cpu), +// %y : Float(20, strides=[1], device=cpu)): +// %dim : int = prim::Constant[value=0]() +// %xy_list : Tensor[] = prim::ListConstruct(%x, %y) +// %cat : Float(60, strides=[1], device=cpu) = aten::cat(%xy_list, %dim) +// %5 : Float(60, strides=[1], device=cpu) = aten::log(%cat) +// return (%5))IR"; +// +// This will get lowered into: +// Allocate(aten_cat); +// for (...) +// aten_cat[...] = x[...] +// for (...) +// aten_cat[...] = y[...] +// for (...) +// aten_log[...] = log(aten_cat[...]) +// Free(aten_cat); +// Note that aten_cat is not inlined into aten_log and it results in +// an intermediate buffer allocation as well. +// +// Optimization: +// We move the ops that use the result of `cat` into its inputs whenever +// possible. 
+// +// The graph above will be transformed to: +// graph(%x : Float(10, strides=[1], device=cpu), +// %y : Float(20, strides=[1], device=cpu)): +// %3 : int = prim::Constant[value=0]() +// %7 : Float(10, strides=[1], device=cpu) = aten::log(%x) +// %8 : Float(20, strides=[1], device=cpu) = aten::log(%y) +// %9 : Tensor[] = prim::ListConstruct(%7, %8) +// %10 : Float(60, strides=[1], device=cpu) = aten::cat(%9, %3) +// return (%10) +// +// This will get lowered into: +// for (...) +// aten_cat[...] = log(x[...]) +// for (...) +// aten_cat[...] = log(y[...]) +// aten_cat is the output buffer here. + +bool OptimizeCat(const std::shared_ptr& graph); + +TORCH_API void annotateInputShapes( + const std::shared_ptr& graph, + const std::vector>& example_inputs); +TORCH_API std::shared_ptr removeUnusedSelfArgument( + const std::shared_ptr& graph); +TORCH_API std::shared_ptr removeGraphOutput( + const std::shared_ptr& graph, + size_t idx); +TORCH_API std::shared_ptr replaceListOutputWithTuple( + const std::shared_ptr& graph); + +// Perform \p ITERS rounds of "trimming" for the given \p GRAPH. +// +// Trimming means that we try to remove a small portion of the graph while +// keeping it valid. This is useful for debugging when we try to find a minimal +// example reproducing the issue at hand. When ITERS is 0, the graph remains +// unchanged, when ITERS is a big number, the graph usually becomes empty. +TORCH_API std::shared_ptr trimGraph( + const std::shared_ptr& graph, + int64_t iters); + +// Scan all values in the given graph and replace each dimension with a size Xi +// present in \p SIZES with a symbolic shape Yi. Return a vector of symbol +// values [Y0, Y1, .., Yn]. +// +// For example: +// Input: +// graph(%x : Float(10, 20, 30, 40)): +// %y : Float(10, 20, 30, 40) = aten::relu(%x) +// return %y +// +// If we run makeShapesSymbolic(graph, {20, 40}), then we'll get: +// +// graph(%x : Float(10, SS(-3), 30, SS(-5))): +// %y : Float(10, SS(-3), 30, SS(-5)) = aten::relu(%x) +// return %y +// +// and get {-3, -5} as the return value. +TORCH_API std::vector makeShapesSymbolic( + std::shared_ptr& graph, + const std::vector& sizes); + +// Inspect the graph and report whether it can be converted to TE IR. +// TODO: add error reporting for graphs that can't be converted. +TORCH_API bool isGraphCompilable(const std::shared_ptr& graph); + +// Examine the graph and (hackily) fill in missing tensor type info, such as +// scalar type, device, and strides. Ideally, this should be done by a proper +// dtype/device/shape propagation passes, but until they are ready we can use +// this, not always correct, workaround pass. +TORCH_API void fixupMissingShapeInfo(const std::shared_ptr& graph); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h new file mode 100644 index 0000000000000000000000000000000000000000..8ec41fe2f4a83e14f5de47df196db02079da117c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h @@ -0,0 +1,217 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +// Walk the Statement looking for Half size loads/stores. 
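// A rough usage sketch (assuming a lowered statement `stmt` and the codegen's
// BufferArg list `args`; exactly how a backend wires this up is
// codegen-specific):
//
//   HalfChecker checker(args);
//   stmt->accept(&checker);
//   if (checker.hasHalf() || checker.hasBFloat16()) {
//     HalfRewriter rewriter;
//     stmt = stmt->accept_mutator(&rewriter); // compute in float, cast back
//                                             // at loads/stores
//   }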
+class HalfChecker : public IRVisitor { + public: + HalfChecker(const std::vector& args) { + for (const auto& BA : args) { + hasHalf_ |= BA.dtype().scalar_type() == ScalarType::Half; + } + } + + bool hasHalf() const { + return hasHalf_; + } + + bool hasBFloat16() const { + return hasBFloat16_; + } + + void visit(LoadPtr v) override { + hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half; + hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16; + IRVisitor::visit(v); + } + + void visit(StorePtr v) override { + hasHalf_ |= v->buf()->dtype().scalar_type() == ScalarType::Half; + hasBFloat16_ |= v->buf()->dtype().scalar_type() == ScalarType::BFloat16; + IRVisitor::visit(v); + } + + void visit(HalfImmPtr v) override { + hasHalf_ = true; + } + + void visit(BFloat16ImmPtr v) override { + hasBFloat16_ = true; + } + + void visit(CastPtr v) override { + hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half; + hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16; + IRVisitor::visit(v); + } + + private: + bool hasHalf_{false}; + bool hasBFloat16_{false}; +}; + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +class HalfRewriter : public IRMutator { + ExprPtr mutate(LoadPtr v) override { + ExprPtr child = IRMutator::mutate(v); + if (!isHalf(child)) { + return child; + } + + ExprPtr ret = alloc( + child->dtype().cloneWithScalarType(ScalarType::Float), child); + + inserted_half_casts_.insert(ret); + return ret; + } + + StmtPtr mutate(StorePtr v) override { + // Since mutation changes the `value()` expression in-place, we need to + // get the dtype of the `value()` before that is mutated. + auto newType = v->value()->dtype(); + ExprPtr new_val = v->value()->accept_mutator(this); + auto bufType = v->buf()->dtype(); + + if (isHalf(newType.scalar_type())) { + new_val = alloc(newType, new_val); + inserted_half_casts_.insert(new_val); + } + + // The scalar_type of value is not Half while the buf is Half + if (!isHalf(newType.scalar_type()) && isHalf(bufType.scalar_type())) { + new_val = alloc( + newType.cloneWithScalarType(bufType.scalar_type()), new_val); + inserted_half_casts_.insert(new_val); + } + + v->set_value(new_val); + return v; + } + + ExprPtr mutate(HalfImmPtr v) override { + return alloc(kFloat, v); + } + + ExprPtr mutate(BFloat16ImmPtr v) override { + return alloc(kFloat, v); + } + + ExprPtr mutate(CastPtr v) override { + ExprPtr child = v->src_value()->accept_mutator(this); + + // just don't allow half casts we didn't insert. + if (isHalf(v)) { + if (inserted_half_casts_.count(v) < 1) { + v->set_src_value(child); + v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat)); + return v; + } + } + + // Remove Half(Float()) and friends. + CastPtr cast_child = to(child); + if (cast_child) { + auto cast_to_double = v->dtype().scalar_type() == ScalarType::Double; + auto from_half = isHalf(cast_child->src_value()); + // Cannot simplify the double(float(half)) to double(half) as NNC does + // not support cast BF16 to double directly. 
+ auto not_cast_half_to_doulbe = !(cast_to_double && from_half); + if (v->dtype().is_floating_point() && + cast_child->dtype().is_floating_point() && not_cast_half_to_doulbe) { + return alloc(v->dtype(), cast_child->src_value()); + } + } + + if (child == v->src_value()) { + return v; + } + + return alloc(v->dtype(), child); + } + + StmtPtr mutate(LetPtr v) override { + if (isHalf(v->var()->dtype().scalar_type())) { + VarPtr load_new_var = alloc(v->var()->name_hint(), kFloat); + ExprPtr new_value = alloc( + v->var()->dtype().cloneWithScalarType(ScalarType::Float), + v->value()->accept_mutator(this)); + var_map[v->var()] = load_new_var; + + return alloc(load_new_var, new_value); + } + + return IRMutator::mutate(v); + } + + ExprPtr mutate(VarPtr v) override { + auto it = var_map.find(v); + if (it != var_map.end()) { + return it->second; + } + + return v; + } + + template + ExprPtr mutateArithmetic(T v) { + IRMutator::mutate(v); + if (isHalf(v)) { + v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat)); + } + return v; + } + + ExprPtr mutate(AddPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(SubPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(MulPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(DivPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(MaxPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(MinPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(CompareSelectPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(BroadcastPtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(IfThenElsePtr v) override { + return mutateArithmetic(v); + } + ExprPtr mutate(IntrinsicsPtr v) override { + return mutateArithmetic(v); + } + + private: + static bool isHalf(ScalarType st) { + return st == ScalarType::Half || st == ScalarType::BFloat16; + } + + static bool isHalf(ExprPtr v) { + return isHalf(v->dtype().scalar_type()); + } + + std::unordered_set inserted_half_casts_; + std::unordered_map var_map; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h new file mode 100644 index 0000000000000000000000000000000000000000..c16066197ca9e4faa209ae698478431d9e894dd2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h @@ -0,0 +1,304 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +struct TORCH_API SimplifierHashType { + SimplifierHashType() = default; + explicit SimplifierHashType(size_t s) : _h(s) {} + + bool operator==(const SimplifierHashType& other) const; + bool operator!=(const SimplifierHashType& other) const; + bool operator<(const SimplifierHashType& other) const; + bool operator==(const size_t other) const; + bool operator!=(const size_t other) const; + + size_t _h{0}; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch + +namespace std { +template <> +struct hash { + size_t operator()(const torch::jit::tensorexpr::SimplifierHashType& k) const { + return k._h; + } +}; + +} // namespace std + +namespace torch { +namespace jit { +namespace tensorexpr { + +#define CACHE_GUARD() \ + if (cachedHash(v)) { \ + return; \ + } + +class Term; +class Polynomial; + +/* Expression hasher 
providing comparable values representing sub-exprs. + * Uses memoization to avoid excessive recursion. */ +class TORCH_API HashProvider : public IRVisitor { + public: + template + SimplifierHashType hash(T e) { + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) + e->accept(this); + return hashOf(e); + } + + bool cachedHash(ExprPtr e) { + return exprToHash_.find(e) != exprToHash_.end(); + } + bool cachedHash(StmtPtr s) { + return stmtToHash_.find(s) != stmtToHash_.end(); + } + + void clearCache() { + exprToHash_.clear(); + stmtToHash_.clear(); + } + + void visit(AddPtr v) override; + void visit(SubPtr v) override; + void visit(MulPtr v) override; + void visit(DivPtr v) override; + void visit(ModPtr v) override; + void visit(RoundOffPtr v) override; + void visit(MaxPtr v) override; + void visit(MinPtr v) override; + void visit(AndPtr v) override; + void visit(OrPtr v) override; + void visit(XorPtr v) override; + void visit(LshiftPtr v) override; + void visit(RshiftPtr v) override; + void visit(CompareSelectPtr v) override; + +// NOLINTNEXTLINE +#define IMM_VISIT(Type, Name) \ + void visit(Name##ImmPtr v) override { \ + CACHE_GUARD(); \ + putHash(v, hash_combine(#Name, v->value())); \ + } + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_VISIT); +#undef IMM_VISIT + + void visit(CastPtr v) override; + void visit(VarPtr v) override; + void visit(RampPtr v) override; + void visit(LoadPtr v) override; + void visit(StorePtr v) override; + void visit(BlockPtr v) override; + void visit(ForPtr v) override; + void visit(BroadcastPtr v) override; + void visit(IfThenElsePtr v) override; + void visit(IntrinsicsPtr v) override; + void visit(AllocatePtr v) override; + void visit(FreePtr v) override; + void visit(CondPtr v) override; + void visit(TermPtr v) override; + void visit(PolynomialPtr v) override; + void visit(MaxTermPtr v) override; + void visit(MinTermPtr v) override; + + template + SimplifierHashType hash_combine(const Types&... args) { + SimplifierHashType seed; + _hash_combine(seed, args...); + return seed; + } + + private: + SimplifierHashType hashOf(ExprPtr e) { + auto it = exprToHash_.find(e); + if (it != exprToHash_.end()) { + return it->second; + } + + // As a failsafe fall back to IRPrinter. + std::stringstream ss; + IRPrinter printer(ss); + e->accept(&printer); + SimplifierHashType hash = SimplifierHashType(te_hash(ss.str())); + putHash(std::move(e), hash); + + return hash; + } + + SimplifierHashType hashOf(StmtPtr s) { + auto it = stmtToHash_.find(s); + if (it != stmtToHash_.end()) { + return it->second; + } + + // As a failsafe fall back to IRPrinter. + std::stringstream ss; + IRPrinter printer(ss); + s->accept(&printer); + SimplifierHashType hash = SimplifierHashType(te_hash(ss.str())); + putHash(std::move(s), hash); + + return hash; + } + + // Hash funcs for various types, numbers are random. + template + void _hash_combine(SimplifierHashType& seed, const T& val) { + seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4); + } + + void _hash_combine(SimplifierHashType& seed, const char* val) { + seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4); + } + + // at:::Half doesn't have a prime_number_hash, so cast to short. 
+ void _hash_combine(SimplifierHashType& seed, const at::Half& val) { + seed._h ^= + te_hash((uint16_t)val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4); + } + + void _hash_combine(SimplifierHashType& seed, const Dtype& val) { + seed._h ^= te_hash(val.ToCppString()) + 0x1f752c19 + (seed._h << 7) + + (seed._h >> 4); + } + + void _hash_combine(SimplifierHashType& seed, ExprPtr e) { + _hash_combine(seed, hash(std::move(e))); + } + + template + void _hash_combine( + SimplifierHashType& seed, + const T& val, + const Types&... args) { + _hash_combine(seed, val); + _hash_combine(seed, args...); + } + + void putHash(ExprPtr e, SimplifierHashType h) { + auto res = exprToHash_.emplace(e, h); + if (res.second == false) { + // This is always a logic bug since we should check the cache first. + throw std::runtime_error("hash collision"); + } + } + void putHash(StmtPtr s, SimplifierHashType h) { + auto res = stmtToHash_.emplace(s, h); + if (res.second == false) { + // This is always a logic bug since we should check the cache first. + throw std::runtime_error("hash collision"); + } + } + + std::unordered_map exprToHash_; + std::unordered_map stmtToHash_; + UniqueNameManager name_manager_; + + size_t te_hash(SimplifierHashType val) { + return val._h; + } + + size_t te_hash(int64_t val) { + // put the thing down. + size_t h = val ^ 0x647AA4D20C0B; + // bit flip it. + size_t h2 = ~h; + // and reverse byte order. + size_t h3 = 0; + for (unsigned int i = 0; i < 64; i += 8) { + h3 |= ((h2 >> i) & 0xFF) << (64 - i - 8); + } + return h3; + } + + size_t te_hash(int32_t val) { + int64_t v2 = val; + return te_hash(v2); + } + + size_t te_hash(uint32_t val) { + int64_t v2 = val; + return te_hash(v2); + } + + size_t te_hash(uint64_t val) { + int64_t v2 = val; + return te_hash(v2); + } + + size_t te_hash(int16_t val) { + int64_t v2 = val; + return te_hash(v2); + } + + size_t te_hash(std::string val) { + size_t hash{0}; + int64_t intval{0}; + int64_t s = val.size() - 1; + while (s >= 0) { + for (unsigned int i = 0; i < 8; ++i) { + if (s < 0) + break; + // NOLINTNEXTLINE(bugprone-signed-char-misuse) + int64_t c = val.data()[s]; + intval |= (c << (i * 8)); + + s--; + } + hash ^= te_hash(intval); + intval = 0; + } + + return hash; + } + + size_t te_hash(double d) { + // memcpy as type punning. Should be optimized out. + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int64_t n; + std::memcpy(&n, &d, sizeof d); + return te_hash(n); + } + + size_t te_hash(float d) { + // memcpy as type punning. Should be optimized out. + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int32_t n; + std::memcpy(&n, &d, sizeof d); + return te_hash(n); + } + + size_t te_hash(at::Half d) { + // memcpy as type punning. Should be optimized out. + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int16_t n; + std::memcpy(&n, &d, sizeof d); + return te_hash(n); + } + + size_t te_hash(at::BFloat16 d) { + // memcpy as type punning. Should be optimized out. 
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + int16_t n; + std::memcpy(&n, &d, sizeof d); + return te_hash(n); + } +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h new file mode 100644 index 0000000000000000000000000000000000000000..7508090f93060865cbd9cb5b48c0cfa34bc1b72a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h @@ -0,0 +1,22 @@ +#pragma once + +#ifdef TORCH_ENABLE_LLVM +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +struct SymbolAddress { + const char* symbol; + void* address; + + SymbolAddress(const char* sym, void* addr) : symbol(sym), address(addr) {} +}; + +c10::ArrayRef getIntrinsicSymbols(); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch +#endif // TORCH_ENABLE_LLVM diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir.h new file mode 100644 index 0000000000000000000000000000000000000000..1ab21c83ef18342a6663b760a66fdbdc79c7d20b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir.h @@ -0,0 +1,934 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +enum CompareSelectOperation { + kEQ = 0, + kGT, + kGE, + kLT, + kLE, + kNE, +}; + +enum CompareSelectBias { + kUnbiased, + kLikely, + kUnlikely, +}; + +inline int getPrecedence(IRNodeType ty) { + // Match C++ operator precedence rules, since some pretty-print expressions to + // C++. 
SEE: https://en.cppreference.com/w/cpp/language/operator_precedence + switch (ty) { + case kPrimitive: + return 0; + case kCast: + case kBitCast: + return 2; + case kAdd: + case kSub: + return 6; + case kMul: + case kDiv: + case kMod: + return 5; + case kMax: + case kMin: + return 99; + case kAnd: + return 11; + case kOr: + return 13; + case kLshift: + case kRshift: + return 7; + case kXor: + return 12; + case kCompareSelect: + return 16; + default: + return 99; + } +} + +class TORCH_API Cast : public ExprNode { + public: + ExprPtr src_value() const { + return src_value_; + } + + void set_src_value(ExprPtr src_value) { + src_value_ = std::move(src_value); + } + + static ExprHandle make(Dtype dtype, const ExprHandle& src_value) { + return ExprHandle(alloc(dtype, src_value.node())); + } + Cast(Dtype dtype, ExprPtr src_value) + : ExprNodeBase(dtype, kCast), src_value_(std::move(src_value)) {} + + bool isConstant() const override { + return src_value_->isConstant(); + } + + private: + ExprPtr src_value_; +}; + +template +ExprHandle cast(const ExprHandle& src_value) { + return Cast::make(Dtype(ToDtype(), src_value.dtype().lanes()), src_value); +} + +// This is a bitwise cast, akin to bitcast in LLVM +class TORCH_API BitCast : public ExprNode { + public: + ExprPtr src_value() const { + return src_value_; + } + + void set_src_value(ExprPtr src_value) { + src_value_ = std::move(src_value); + } + + static ExprHandle make(Dtype dtype, const ExprHandle& src_value) { + return ExprHandle(alloc(dtype, src_value.node())); + } + BitCast(Dtype dtype, ExprPtr src_value) + : ExprNodeBase(dtype, kBitCast), src_value_(std::move(src_value)) { + TORCH_CHECK(src_value_->dtype().byte_size() == dtype.byte_size()); + } + + bool isConstant() const override { + return src_value_->isConstant(); + } + + private: + ExprPtr src_value_; +}; + +template +ExprHandle bitcast(const ExprHandle& src_value) { + return BitCast::make( + Dtype(ToDtype(), src_value.dtype().lanes()), src_value); +} + +// Represent the expression node for binary operators. +// A CRTP pattern to share common code among the operators. 
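// A small illustrative example (the literal values are arbitrary): nodes are
// normally built through the handle layer, and mismatched operand dtypes are
// reconciled in the constructor via BinaryOpDtype/CastIfNeeded:
//
//   ExprHandle i = IntImm::make(2);
//   ExprHandle f = FloatImm::make(3.0f);
//   ExprHandle sum = Add::make(i, f); // the int operand is cast to float, so
//                                     // sum has a Float dtype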
+template +class BinaryOpNode : public ExprNode { + public: + ExprPtr lhs() const { + return this->lhs_; + } + ExprPtr rhs() const { + return this->rhs_; + } + + void set_lhs(ExprPtr lhs) { + lhs_ = std::move(lhs); + } + + void set_rhs(ExprPtr rhs) { + rhs_ = std::move(rhs); + } + + static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) { + return ExprHandle(alloc(lhs.node(), rhs.node())); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + BinaryOpNode( + ExprPtr lhs_v, + ExprPtr rhs_v, + IRNodeType expr_type, + ScalarType ret_type = ScalarType::Undefined) + : ExprNode( + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) + BinaryOpDtype(lhs_v->dtype(), rhs_v->dtype(), ret_type), + expr_type), + lhs_(CastIfNeeded(std::move(lhs_v), ExprNode::dtype())), + rhs_(CastIfNeeded(std::move(rhs_v), ExprNode::dtype())) {} + + private: + static ExprPtr CastIfNeeded(ExprPtr expr, Dtype dst_dtype) { + if (expr->dtype() == dst_dtype) { + return expr; + } + return Cast::make(dst_dtype, ExprHandle(std::move(expr))).node(); + } + + ExprPtr lhs_; + ExprPtr rhs_; +}; + +namespace detail { +template +void bin_op_deducer(BinaryOpNode); +bool bin_op_deducer(...); +} // namespace detail + +class TORCH_API Add : public BinaryOpNode { + public: + Add(ExprPtr lhs, ExprPtr rhs) + : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAdd) {} +}; + +class TORCH_API Sub : public BinaryOpNode { + public: + Sub(ExprPtr lhs, ExprPtr rhs) + : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kSub) {} +}; + +class TORCH_API Mul : public BinaryOpNode { + public: + Mul(ExprPtr lhs, ExprPtr rhs) + : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMul) {} +}; + +class TORCH_API Div : public BinaryOpNode
{ + public: + Div(ExprPtr lhs, ExprPtr rhs) + : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kDiv) {} +}; + +class TORCH_API Mod : public BinaryOpNode { + public: + Mod(ExprPtr lhs, ExprPtr rhs) + : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMod) {} +}; + +template +class BitwiseOpNode : public BinaryOpNode { + public: + BitwiseOpNode(ExprPtr lhs, ExprPtr rhs, IRNodeType type) + : BinaryOpNode(std::move(lhs), std::move(rhs), type) {} + + static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) { + if (!lhs.dtype().is_integral()) { + throw unsupported_dtype(); + } + if (lhs.dtype() != rhs.dtype()) { + throw malformed_input("lhs/rhs dtype mismatch"); + } + return BinaryOpNode::make(lhs, rhs); + } +}; + +class TORCH_API And : public BitwiseOpNode { + public: + And(ExprPtr lhs, ExprPtr rhs) + : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAnd) {} +}; + +class TORCH_API Or : public BitwiseOpNode { + public: + Or(ExprPtr lhs, ExprPtr rhs) + : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kOr) {} +}; + +class TORCH_API Xor : public BitwiseOpNode { + public: + Xor(ExprPtr lhs, ExprPtr rhs) + : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kXor) {} +}; + +class TORCH_API Lshift : public BitwiseOpNode { + public: + Lshift(ExprPtr lhs, ExprPtr rhs) + : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kLshift) {} +}; + +class TORCH_API Rshift : public BitwiseOpNode { + public: + Rshift(ExprPtr lhs, ExprPtr rhs) + : BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kRshift) {} +}; + +// TODO: add TORCH_API +// Currently adding it results in a compilation error on Windows +class Max : public BinaryOpNode { + private: + bool propagate_nans_; + + public: + Max(ExprPtr lhs, ExprPtr rhs, bool propagate_nans) + : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMax), + propagate_nans_(propagate_nans) {} + + bool propagate_nans() const { + return propagate_nans_; + } + + static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) = delete; + static ExprHandle make( + const ExprHandle& lhs, + const ExprHandle& rhs, + bool propagate_nans) { + return ExprHandle(alloc(lhs.node(), rhs.node(), propagate_nans)); + } +}; + +// TODO: add TORCH_API +// Currently adding it results in a compilation error on Windows +class Min : public BinaryOpNode { + private: + bool propagate_nans_; + + public: + Min(ExprPtr lhs, ExprPtr rhs, bool propagate_nans) + : BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMin), + propagate_nans_(propagate_nans) {} + + bool propagate_nans() const { + return propagate_nans_; + } + + static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) = delete; + static ExprHandle make( + const ExprHandle& lhs, + const ExprHandle& rhs, + bool propagate_nans) { + return ExprHandle(alloc(lhs.node(), rhs.node(), propagate_nans)); + } +}; + +// Encode typed immediate values e.g. IntImm, FloatImm. +#define IMM_DECLARE(Type, Name) \ + class TORCH_API Name##Imm : public ExprNode { \ + public: \ + Name##Imm(Type value) \ + : ExprNodeBase(k##Name, kPrimitive), value_(value) {} \ + bool isConstant() const override { \ + return true; \ + } \ + Type value() const { \ + return value_; \ + } \ + static ExprHandle make(Type value) { \ + return ExprHandle(alloc(value)); \ + } \ + \ + private: \ + Type value_; \ + }; +AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE); +#undef IMM_DECLARE + +// Get immediate by ScalarType. 
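// For example (an illustrative sketch; `e` stands for any existing ExprPtr):
//
//   ExprPtr one  = getImmediateByType(e->dtype(), 1); // IntImm, FloatImm, ...
//                                                     // depending on dtype
//   ExprPtr one2 = immLike(e, 1);                     // shorthand for the same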
+template +ExprPtr getImmediateByType(ScalarType immType, T initialVal) { + switch (immType) { +#define TYPE_CASE(Type, Name) \ + case ScalarType::Name: \ + return alloc(Type(initialVal)); + // NOLINTNEXTLINE(bugprone-branch-clone) + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE); +#undef TYPE_CASE + default: + throw unsupported_dtype(); + } + return nullptr; +} + +template +ExprPtr getImmediateByType(Dtype dtype, T initialVal) { + return getImmediateByType(dtype.scalar_type(), initialVal); +} + +template +ExprPtr immLike(const ExprPtr& e, T v) { + return getImmediateByType(e->dtype(), v); +} + +template +ExprPtr immLike(const ExprHandle& e, T v) { + return immLike(e.node(), v); +} + +inline c10::optional intValue(const ExprPtr& e) { +#define TYPE_CASE(Type, Name) \ + if (auto v = to(e)) { \ + return v->value(); \ + } + AT_FORALL_INT_TYPES(TYPE_CASE); +#undef TYPE_CASE + return c10::nullopt; +} + +inline c10::optional intValue(const ExprHandle& e) { + return intValue(e.node()); +} + +template +T immediateAs(const ExprPtr& e) { +#define TYPE_CASE(Type, Name) \ + if (Name##ImmPtr imm = to(e)) { \ + return imm->value(); \ + } + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE); +#undef TYPE_CASE + throw unsupported_dtype(); + return 0; +} + +template +T immediateAs(const ExprHandle& e) { + return immediateAs(e.node()); +} + +template +bool immediateEquals(const ExprPtr& e, T val) { +#define TYPE_CASE(Type, Name) \ + if (Name##ImmPtr imm = to(e)) { \ + return imm->value() == val; \ + } + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE); +#undef TYPE_CASE + throw unsupported_dtype(); + return false; +} + +TORCH_API bool immediateIsNegative(const ExprPtr& e); + +TORCH_API bool immediateIsPositive(const ExprPtr& e); + +TORCH_API bool immediateIsZero(const ExprPtr& e); + +// Represents a ramp vector node: +// [base, base + 1 * stride, ... 
, base + (lanes - 1) * stride] +class TORCH_API Ramp : public ExprNode { + public: + ExprPtr base() const { + return base_; + } + ExprPtr stride() const { + return stride_; + } + + void set_base(ExprPtr base) { + base_ = std::move(base); + } + + void set_stride(ExprPtr stride) { + stride_ = std::move(stride); + } + + static ExprHandle make( + const ExprHandle& base, + const ExprHandle& stride, + int lanes) { + if (stride.dtype() != base.dtype()) { + throw malformed_input("Bad stride in Ramp"); + } + return ExprHandle(alloc(base.node(), stride.node(), lanes)); + } + int lanes() const { + return lanes_; + } + + Ramp(ExprPtr base, ExprPtr stride, int lanes) + : ExprNodeBase(Dtype(base->dtype(), lanes)), + base_(std::move(base)), + stride_(std::move(stride)), + lanes_(lanes) {} + + private: + ExprPtr base_; + ExprPtr stride_; + int lanes_; +}; + +class TORCH_API Load : public ExprNode { + public: + VarPtr base_handle() const { + return buf_->base_handle(); + } + std::vector indices() const { + return indices_; + } + ExprPtr flat_index() const { + TORCH_CHECK(indices_.size() == 1, "Indices haven't been flattened."); + return indices_[0]; + } + BufPtr buf() const { + return buf_; + } + + void set_buf(BufPtr buf) { + buf_ = std::move(buf); + } + + void set_indices(std::vector indices) { + indices_ = std::move(indices); + } + + static ExprHandle make( + Dtype dtype, + const BufHandle& buf, + const std::vector& indices); + static ExprHandle make( + const BufHandle& buf, + const std::vector& indices); + + Load(Dtype dtype, BufPtr base_handle, std::vector indices); + Load(BufPtr base_handle, const std::vector& indices); + + private: + BufPtr buf_; + std::vector indices_; +}; + +class TORCH_API Broadcast : public ExprNode { + public: + ExprPtr value() const { + return value_; + } + + void set_value(ExprPtr value) { + value_ = std::move(value); + } + + int lanes() const { + return lanes_; + } + static ExprHandle make(const ExprHandle& value, int lanes) { + return ExprHandle(alloc(value.node(), lanes)); + } + Broadcast(ExprPtr value, int lanes) + : ExprNodeBase(Dtype(value->dtype(), lanes)), + value_(std::move(value)), + lanes_(lanes) {} + + private: + ExprPtr value_; + int lanes_; +}; + +class TORCH_API IfThenElse : public ExprNode { + public: + ExprPtr condition() const { + return condition_; + } + + // Lazily evaluated only if condition is true + ExprPtr true_value() const { + return true_; + } + + // Lazily evaluated only if condition is false + ExprPtr false_value() const { + return false_; + } + + void set_condition(ExprPtr condition) { + condition_ = std::move(condition); + } + + void set_true_value(ExprPtr true_value) { + true_ = std::move(true_value); + } + + void set_false_value(ExprPtr false_value) { + false_ = std::move(false_value); + } + + static ExprHandle make( + const ExprHandle& c, + const ExprHandle& t, + const ExprHandle& f) { + if (!c.dtype().is_integral()) { + throw unsupported_dtype(); + } + if (c.dtype().lanes() != 1) { + throw unsupported_dtype(); + } + if (t.dtype() != f.dtype()) { + throw malformed_input("Bad dtype in IfThenElse"); + } + return ExprHandle(alloc(c.node(), t.node(), f.node())); + } + + IfThenElse(ExprPtr c, ExprPtr t, ExprPtr f) + : ExprNodeBase(t->dtype()), + condition_(std::move(c)), + true_(std::move(t)), + false_(std::move(f)) {} + + private: + ExprPtr condition_; + ExprPtr true_; + ExprPtr false_; +}; + +class TORCH_API CompareSelect : public ExprNode { + public: + CompareSelectOperation compare_select_op() const { + return compare_op_; + } + ExprPtr lhs() 
const { + return this->lhs_; + } + ExprPtr rhs() const { + return this->rhs_; + } + ExprPtr ret_val1() const { + return this->ret_val1_; + } + ExprPtr ret_val2() const { + return this->ret_val2_; + } + + void set_lhs(ExprPtr lhs) { + lhs_ = std::move(lhs); + } + + void set_rhs(ExprPtr rhs) { + rhs_ = std::move(rhs); + } + + void set_ret_val1(ExprPtr ret_val1) { + ret_val1_ = std::move(ret_val1); + } + + void set_ret_val2(ExprPtr ret_val2) { + ret_val2_ = std::move(ret_val2); + } + + CompareSelectBias bias() const { + return bias_; + } + + static ExprHandle make( + const ExprHandle& lhs, + const ExprHandle& rhs, + CompareSelectOperation cmp_op, + CompareSelectBias bias = kUnbiased) { + if (lhs.dtype() != rhs.dtype()) { + throw malformed_input("bad dtype in CompareSelect"); + } + return ExprHandle(alloc( + lhs.node(), + rhs.node(), + IntImm::make(1).node(), + IntImm::make(0).node(), + cmp_op, + bias)); + } + + static ExprHandle make( + const ExprHandle& lhs, + const ExprHandle& rhs, + const ExprHandle& ret_val1, + const ExprHandle& ret_val2, + CompareSelectOperation cmp_op, + CompareSelectBias bias = kUnbiased) { + if (lhs.dtype() != rhs.dtype() || ret_val1.dtype() != ret_val2.dtype()) { + throw malformed_input("bad dtype in CompareSelect"); + } + return ExprHandle(alloc( + lhs.node(), + rhs.node(), + ret_val1.node(), + ret_val2.node(), + cmp_op, + bias)); + } + + CompareSelect( + ExprPtr lhs, + ExprPtr rhs, + ExprPtr ret_val1, + ExprPtr ret_val2, + CompareSelectOperation cmp_op, + CompareSelectBias bias = kUnbiased) + : ExprNodeBase(ret_val1->dtype()), + lhs_(std::move(lhs)), + rhs_(std::move(rhs)), + ret_val1_(std::move(ret_val1)), + ret_val2_(std::move(ret_val2)), + compare_op_(cmp_op), + bias_(bias) {} + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + CompareSelect( + ExprPtr lhs, + ExprPtr rhs, + CompareSelectOperation cmp_op, + CompareSelectBias bias = kUnbiased) + : ExprNodeBase(kInt), + lhs_(std::move(lhs)), + rhs_(std::move(rhs)), + ret_val1_(alloc(1)), + ret_val2_(alloc(0)), + compare_op_(cmp_op), + bias_(bias) {} + + private: + ExprPtr lhs_; + ExprPtr rhs_; + ExprPtr ret_val1_; + ExprPtr ret_val2_; + CompareSelectOperation compare_op_; + CompareSelectBias bias_; +}; + +enum IntrinsicsOp { + kSin, + kCos, + kTan, + kAsin, + kAcos, + kAtan, + kAtan2, + kSinh, + kCosh, + kTanh, + kSigmoid, + kExp, + kExpm1, + kAbs, + kLog, + kLog2, + kLog10, + kLog1p, + kErf, + kErfc, + kSqrt, + kRsqrt, + kPow, + kCeil, + kFloor, + kRound, + kTrunc, + kFmod, + kRemainder, + kLgamma, + kFrac, + kIsNan, + kRand, // We need more discussions on this. Should we consider stateful? 
+ kMaxIntrinsicsOp, +}; + +class TORCH_API Intrinsics : public ExprNode { + public: + static ExprHandle make(IntrinsicsOp op_type, const ExprHandle& v1) { + return ExprHandle(alloc(op_type, v1.node())); + } + + static ExprHandle make( + IntrinsicsOp op_type, + const ExprHandle& v1, + const ExprHandle& v2) { + return ExprHandle(alloc(op_type, v1.node(), v2.node())); + } + + static ExprHandle make( + IntrinsicsOp op_type, + const std::vector& params) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector params_nodes(params.size()); + for (size_t i = 0; i < params.size(); i++) { + params_nodes[i] = params[i].node(); + } + return ExprHandle(alloc(op_type, params_nodes)); + } + + static ExprHandle make(IntrinsicsOp op_type, Dtype dtype) { + return ExprHandle(alloc(op_type, dtype)); + } + + IntrinsicsOp op_type() const { + return op_type_; + } + + std::string func_name() const { + switch (op_type()) { + case kSin: + return "sin"; + case kCos: + return "cos"; + case kTan: + return "tan"; + case kAsin: + return "asin"; + case kAcos: + return "acos"; + case kAtan: + return "atan"; + case kAtan2: + return "atan2"; + case kSinh: + return "sinh"; + case kCosh: + return "cosh"; + case kTanh: + return "tanh"; + case kSigmoid: + return "sigmoid"; + case kExp: + return "exp"; + case kAbs: + return "abs"; + case kLog: + return "log"; + case kLog2: + return "log2"; + case kLog10: + return "log10"; + case kLog1p: + return "log1p"; + case kErf: + return "erf"; + case kSqrt: + return "sqrt"; + case kRsqrt: + return "rsqrt"; + case kPow: + return "pow"; + case kCeil: + return "ceil"; + case kFloor: + return "floor"; + case kRound: + return "round"; + case kTrunc: + return "trunc"; + case kRand: + return "rand"; + case kFmod: + return "fmod"; + case kRemainder: + return "remainder"; + case kLgamma: + return "lgamma"; + case kExpm1: + return "expm1"; + case kErfc: + return "erfc"; + case kFrac: + return "frac"; + case kIsNan: + return "isnan"; + default: + throw std::runtime_error( + "invalid op_type: " + c10::to_string(op_type())); + } + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Intrinsics(IntrinsicsOp op_type, Dtype dtype) + : ExprNodeBase(IntrinsicsDtype(op_type, dtype)), + params_({}), + op_type_(op_type) { + if (OpArgCount(op_type) != 0) { + throw malformed_input("bad arg count in Intrinsics"); + } + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Intrinsics(IntrinsicsOp op_type, ExprPtr v1) + : ExprNodeBase(IntrinsicsDtype(op_type, v1->dtype())), + params_({std::move(v1)}), + op_type_(op_type) { + if (OpArgCount(op_type) != 1) { + throw malformed_input("bad arg count in Intrinsics"); + } + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Intrinsics(IntrinsicsOp op_type, ExprPtr v1, ExprPtr v2) + : ExprNodeBase(IntrinsicsDtype(op_type, v1->dtype(), v2->dtype())), + params_({std::move(v1), std::move(v2)}), + op_type_(op_type) { + if (OpArgCount(op_type) != 2) { + throw malformed_input("bad arg count in Intrinsics"); + } + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Intrinsics(IntrinsicsOp op_type, const std::vector& params) + : ExprNodeBase(IntrinsicsDtype(op_type, params)), + params_(params), + op_type_(op_type) { + if (OpArgCount(op_type) != nparams()) { + throw malformed_input("bad arg count in Intrinsics"); + } + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Intrinsics( + IntrinsicsOp op_type, + Dtype dtype, + const std::vector& params) + : ExprNodeBase(IntrinsicsDtype(op_type, dtype)), + 
params_(params), + op_type_(op_type) { + if (OpArgCount(op_type) != nparams()) { + throw malformed_input("bad arg count in Intrinsics"); + } + } + + bool isPure() const { + return op_type_ != kRand; + } + + int nparams() const { + return params_.size(); + } + + ExprPtr param(int index) const { + return params_[index]; + } + const std::vector& params() const { + return params_; + } + + void set_params(std::vector params) { + params_ = std::move(params); + } + + static int OpArgCount(IntrinsicsOp op_type); + + private: + static Dtype IntrinsicsDtype(IntrinsicsOp op_type, Dtype dt1); + static Dtype IntrinsicsDtype(IntrinsicsOp op_type, Dtype dt1, Dtype dt2); + static Dtype IntrinsicsDtype( + IntrinsicsOp op_type, + const std::vector& params); + + std::vector params_; + IntrinsicsOp op_type_; +}; + +TORCH_API std::vector ExprHandleVectorToExprVector( + const std::vector&); +TORCH_API std::vector ExprVectorToExprHandleVector( + const std::vector&); +TORCH_API std::vector VarHandleVectorToVarVector( + const std::vector&); +TORCH_API std::vector VarVectorToVarHandleVector( + const std::vector&); +TORCH_API ExprPtr flatten_index( + const std::vector& dims, + const std::vector& indices, + const std::vector& strides); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_cloner.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_cloner.h new file mode 100644 index 0000000000000000000000000000000000000000..37216693b35b44efbaadf020d1ae07c237b4f74d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_cloner.h @@ -0,0 +1,65 @@ +#pragma once +#include +#include +#include + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class TORCH_API IRCloner : public IRMutator { + public: + ~IRCloner() override = default; + ExprPtr mutate(AddPtr v) override; + ExprPtr mutate(SubPtr v) override; + ExprPtr mutate(MulPtr v) override; + ExprPtr mutate(DivPtr v) override; + ExprPtr mutate(ModPtr v) override; + ExprPtr mutate(MaxPtr v) override; + ExprPtr mutate(MinPtr v) override; + ExprPtr mutate(AndPtr v) override; + ExprPtr mutate(OrPtr v) override; + ExprPtr mutate(XorPtr v) override; + ExprPtr mutate(LshiftPtr v) override; + ExprPtr mutate(RshiftPtr v) override; + ExprPtr mutate(CompareSelectPtr v) override; +#define IMM_MUTATE_DECLARE(Type, Name) ExprPtr mutate(Name##ImmPtr v) override; + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_MUTATE_DECLARE); +#undef IMM_MUTATE_DECLARE + ExprPtr mutate(CastPtr v) override; + ExprPtr mutate(BitCastPtr v) override; + ExprPtr mutate(VarPtr v) override; + ExprPtr mutate(BufPtr v) override; + ExprPtr mutate(RampPtr v) override; + ExprPtr mutate(LoadPtr v) override; + ExprPtr mutate(BroadcastPtr v) override; + ExprPtr mutate(IfThenElsePtr v) override; + ExprPtr mutate(IntrinsicsPtr v) override; + + ExprPtr mutate(TermPtr v) override; + ExprPtr mutate(PolynomialPtr v) override; + ExprPtr mutate(RoundOffPtr v) override; + ExprPtr mutate(MaxTermPtr v) override; + ExprPtr mutate(MinTermPtr v) override; + + ExprPtr mutate(ReduceOpPtr v) override; + + StmtPtr mutate(ForPtr v) override; + StmtPtr mutate(BlockPtr v) override; + StmtPtr mutate(StorePtr v) override; + StmtPtr mutate(AtomicAddPtr v) override; + StmtPtr mutate(SyncThreadsPtr v) override; + StmtPtr mutate(ExternalCallPtr v) override; + StmtPtr mutate(ExternalCallWithAllocPtr v) override; + + StmtPtr 
mutate(AllocatePtr v) override; + StmtPtr mutate(FreePtr v) override; + StmtPtr mutate(LetPtr v) override; + StmtPtr mutate(CondPtr v) override; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_mutator.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_mutator.h new file mode 100644 index 0000000000000000000000000000000000000000..aaecd34289bc342c55461103637c1751e3aea0b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_mutator.h @@ -0,0 +1,66 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class TORCH_API IRMutator { + public: + virtual ~IRMutator() = default; + virtual ExprPtr mutate(AddPtr v); + virtual ExprPtr mutate(SubPtr v); + virtual ExprPtr mutate(MulPtr v); + virtual ExprPtr mutate(DivPtr v); + virtual ExprPtr mutate(ModPtr v); + virtual ExprPtr mutate(MaxPtr v); + virtual ExprPtr mutate(MinPtr v); + virtual ExprPtr mutate(AndPtr v); + virtual ExprPtr mutate(OrPtr v); + virtual ExprPtr mutate(XorPtr v); + virtual ExprPtr mutate(LshiftPtr v); + virtual ExprPtr mutate(RshiftPtr v); + virtual ExprPtr mutate(CompareSelectPtr v); +#define IMM_MUTATE_DECLARE(Type, Name) virtual ExprPtr mutate(Name##ImmPtr v); + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_MUTATE_DECLARE); +#undef IMM_MUTATE_DECLARE + virtual ExprPtr mutate(CastPtr v); + virtual ExprPtr mutate(BitCastPtr v); + virtual ExprPtr mutate(VarPtr v); + virtual ExprPtr mutate(BufPtr v); + virtual ExprPtr mutate(RampPtr v); + virtual ExprPtr mutate(LoadPtr v); + virtual ExprPtr mutate(BroadcastPtr v); + virtual ExprPtr mutate(IfThenElsePtr v); + virtual ExprPtr mutate(IntrinsicsPtr v); + + virtual ExprPtr mutate(TermPtr v); + virtual ExprPtr mutate(PolynomialPtr v); + virtual ExprPtr mutate(RoundOffPtr v); + virtual ExprPtr mutate(MaxTermPtr v); + virtual ExprPtr mutate(MinTermPtr v); + + virtual ExprPtr mutate(ReduceOpPtr v); + + virtual StmtPtr mutate(ForPtr v); + virtual StmtPtr mutate(BlockPtr v); + virtual StmtPtr mutate(StorePtr v); + virtual StmtPtr mutate(AtomicAddPtr v); + virtual StmtPtr mutate(SyncThreadsPtr v); + virtual StmtPtr mutate(ExternalCallPtr v); + virtual StmtPtr mutate(ExternalCallWithAllocPtr v); + + virtual StmtPtr mutate(AllocatePtr v); + virtual StmtPtr mutate(FreePtr v); + virtual StmtPtr mutate(FreeExtPtr v); + virtual StmtPtr mutate(PlacementAllocatePtr v); + virtual StmtPtr mutate(LetPtr v); + virtual StmtPtr mutate(CondPtr v); +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_printer.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_printer.h new file mode 100644 index 0000000000000000000000000000000000000000..8eff1abd1b262992fb3445a05baa409277e212a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_printer.h @@ -0,0 +1,130 @@ +#pragma once + +#include + +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class Tensor; + +class TORCH_API IRPrinter : public IRVisitor { + public: + explicit IRPrinter(std::ostream& os) : printer_os_(this, os) {} + + void print(ExprHandle); + void print(Expr&); + void print(Stmt&); + void visit(AddPtr v) override; + void visit(SubPtr v) override; + void 
visit(MulPtr v) override; + void visit(DivPtr v) override; + void visit(ModPtr v) override; + void visit(MaxPtr v) override; + void visit(MinPtr v) override; + void visit(AndPtr v) override; + void visit(OrPtr v) override; + void visit(XorPtr v) override; + void visit(LshiftPtr v) override; + void visit(RshiftPtr v) override; + void visit(CompareSelectPtr v) override; +#define IMM_PRINT_VISIT(Type, Name) void visit(Name##ImmPtr v) override; + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_PRINT_VISIT); +#undef IMM_PRINT_VISIT + void visit(CastPtr v) override; + void visit(BitCastPtr v) override; + void visit(VarPtr v) override; + void visit(BufPtr v) override; + void visit(RampPtr v) override; + void visit(LoadPtr v) override; + void visit(BroadcastPtr v) override; + void visit(IfThenElsePtr v) override; + void visit(IntrinsicsPtr v) override; + void visit(TermPtr v) override; + void visit(PolynomialPtr v) override; + void visit(RoundOffPtr v) override; + void visit(MaxTermPtr v) override; + void visit(MinTermPtr v) override; + void visit(ReduceOpPtr v) override; + + void visit(AtomicAddPtr v) override; + void visit(SyncThreadsPtr v) override; + void visit(ExternalCallPtr v) override; + void visit(ExternalCallWithAllocPtr v) override; + void visit(StorePtr v) override; + void visit(ForPtr v) override; + void visit(CondPtr v) override; + void visit(BlockPtr v) override; + void visit(AllocatePtr v) override; + void visit(FreePtr v) override; + void visit(FreeExtPtr v) override; + void visit(PlacementAllocatePtr v) override; + void visit(LetPtr v) override; + + // A child class may have a difference rule for generating dtype + // string, e.g. CUDA needs int64_t to be generated as long long. + virtual std::string dtypeToCppString(const Dtype& dtype); + + std::ostream& os() { + return printer_os_; + } + + class PrinterStream : public std::ostream { + public: + PrinterStream(IRPrinter* printer, std::ostream& os) + : std::ostream(os.rdbuf()), printer_(printer) {} + + IRPrinter* printer() { + return printer_; + } + + private: + IRPrinter* printer_ = nullptr; + }; + + protected: + std::string to_string(CompareSelectOperation op); + + UniqueNameManager* name_manager() { + return &name_manager_; + } + void emitIndent(); + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + int indent_ = 0; + + private: + PrinterStream printer_os_; + UniqueNameManager name_manager_; +}; + +TORCH_API std::ostream& operator<<(std::ostream& stream, const Expr&); +TORCH_API std::ostream& operator<<(std::ostream& stream, const ExprHandle&); +TORCH_API std::ostream& operator<<(std::ostream& stream, const Stmt&); +TORCH_API std::ostream& operator<<(std::ostream& stream, const Tensor&); + +TORCH_API void print(ExprPtr expr); +TORCH_API void print(StmtPtr stmt); +TORCH_API void print(const Tensor& t); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch + +namespace std { + +using torch::jit::tensorexpr::Expr; +using torch::jit::tensorexpr::ExprPtr; +using torch::jit::tensorexpr::Stmt; +using torch::jit::tensorexpr::StmtPtr; +using torch::jit::tensorexpr::Tensor; + +TORCH_API std::string to_string(ExprPtr expr); +TORCH_API std::string to_string(StmtPtr stmt); +TORCH_API std::string to_string(const Tensor& t); +} // namespace std diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_simplifier.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_simplifier.h new file mode 100644 index 
0000000000000000000000000000000000000000..0635a7ccd26fba34e4bec24ca7ca37b3fe8b9759 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_simplifier.h @@ -0,0 +1,554 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* IR Simplification + * + * Simplifies expressions in two stages: + * 1. Recursively traverse the map combining similar operations into Terms + * (interacted via Multiplication) and Polynomials (interacted via Addition). We + * reorder the components of each Term or Polynomial into a consistent order to + * allow combination or cancelling of like terms. + * 2. Once the format of the tree is minimal, expand each Term into a sequence + * of Muls, and each Polynomial into a sequence of Ads. + */ + +namespace torch { +namespace jit { +namespace tensorexpr { + +// A bunch of helpers for determine the Dtype of the output of a multi argument +// Term or Polynomial. +template +Dtype promoteTypesVec(ExprPtr s, std::vector& v) { + Dtype t = s->dtype(); + bool first = true; + + for (const auto& e : v) { + if (first) { + t = Dtype(t.scalar_type(), e->dtype().lanes()); + first = false; + } + t = promoteTypes(t, e->dtype()); + } + return t; +} + +template +Dtype promoteTypesVec(std::vector& v) { + if (v.empty()) { + throw malformed_input("empty list of types"); + } + + Dtype t = v[0]->dtype(); + for (const auto& e : v) { + t = promoteTypes(t, e->dtype()); + } + return t; +} + +template +Dtype promoteTypesMap( + ExprPtr s, + std::unordered_map& m) { + Dtype t = s->dtype(); + bool first = true; + for (auto& e : m) { + if (first) { + t = Dtype(t.scalar_type(), e.second->dtype().lanes()); + first = false; + } + t = promoteTypes(t, e.second->dtype()); + } + return t; +} + +template +Dtype promoteTypesVar(ExprType e) { + return e->dtype(); +} + +template +Dtype promoteTypesVar(ExprType e, Args... es) { + Dtype lhs = e->dtype(); + Dtype rhs = promoteTypesVar(es...); + if (e->isConstant()) { + lhs = Dtype(lhs.scalar_type(), rhs.lanes()); + } + + return promoteTypes(lhs, rhs); +} + +// Uses the evaluator to fold an Expression with constant terms. +// E.g. evaluateOp(Add(3, 4)) => 7. +// Expr v must not have any unbound Vars. +inline ExprPtr evaluateOp(ExprPtr v) { + ExprHandle handle(v); + ExprEval eval(handle); + + switch (v->dtype().scalar_type()) { +#define TYPE_CASE(Type, Name) \ + case ScalarType::Name: { \ + Type val = eval.value(); \ + return getImmediateByType(v->dtype().scalar_type(), val); \ + } + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE); +#undef TYPE_CASE + default: + LOG(FATAL) << "Unsupported datatype: " << v->dtype(); + return nullptr; + } + return nullptr; +} + +// A Term represents a grouping of Exprs through multiplication. +// E.g. product(scalar, *variables). +class Term : public ExprNode { + public: + template + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Term(HashProvider& hasher, ExprPtr s, Args... ts) + : ExprNodeBase(promoteTypesVar(s, ts...)), scalar_(s), hasher_(hasher) { + CHECK(s->isConstant()); + addComponent(ts...); + sort(); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Term(HashProvider& hasher, ExprPtr s, std::vector v) + : ExprNodeBase(promoteTypesVec(s, v)), + variables_(std::move(v)), + scalar_(s), + hasher_(hasher) { + sort(); + } + + // Convenience constructor from a map of hash -> var, used when merging Terms. 
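// [Editorial illustration, not part of the upstream header; x and y are
// hypothetical variables] The product 2 * x * y is modelled as a Term whose
// scalar_ is the immediate 2 and whose variables_ are {x, y}. Two Terms whose
// hashVars() agree (i.e. they have the same variable set) can be merged during
// simplification by combining their scalar components.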
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Term( + HashProvider& hasher, + ExprPtr s, + std::unordered_map varmap) + : ExprNodeBase(promoteTypesMap(s, varmap)), scalar_(s), hasher_(hasher) { + for (auto& p : varmap) { + addComponent(p.second); + } + sort(); + } + + ExprPtr scalar() const { + return scalar_; + } + const std::vector& variables() const { + return variables_; + } + HashProvider& hasher() const { + return hasher_; + } + + // Produce a hash of just the variable components of this term, to determine + // if it can be combined with another term. + SimplifierHashType hashVars() const; + + private: + std::vector variables_; + ExprPtr scalar_; + HashProvider& hasher_; + + void addComponent() {} + void addComponent(ExprPtr e) { + variables_.push_back(std::move(e)); + } + template + void addComponent(ExprPtr e, Es&&... es) { + addComponent(std::move(e)); + addComponent(std::forward(es)...); + } + + // Sort by hash to normalize order of components. + void sort(); +}; + +// Polynomial represents a grouping of Exprs by addition. +// E.g. sum(*variables, scalar). +// This would better be called Expression, but, naming conflict... +class Polynomial : public ExprNode { + public: + template + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Polynomial(HashProvider& hasher, ExprPtr s, Args... ts) + : ExprNodeBase(promoteTypesVar(s, ts...)), scalar_(s), hasher_(hasher) { + CHECK(s->isConstant()); + addTerm(ts...); + sort(); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Polynomial(HashProvider& hasher, ExprPtr s, std::vector v) + : ExprNodeBase(promoteTypesVec(s, v)), + variables_(std::move(v)), + scalar_(s), + hasher_(hasher) { + sort(); + } + + // Helper constructor for list of terms with no scalar component. + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Polynomial(HashProvider& hasher, std::vector terms) + : ExprNodeBase(promoteTypesVec(terms)), + variables_(std::move(terms)), + scalar_(getImmediateByType(dtype(), 0)), + hasher_(hasher) { + sort(); + } + + // Convenience constructor for map of hash -> var, used when merging + // Polynomials. + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Polynomial( + HashProvider& hasher, + ExprPtr s, + std::unordered_map varmap) + : ExprNodeBase(promoteTypesMap(s, varmap)), scalar_(s), hasher_(hasher) { + for (auto& p : varmap) { + addTerm(p.second); + } + sort(); + } + + ExprPtr scalar() const { + return scalar_; + } + const std::vector& variables() const { + return variables_; + } + HashProvider& hasher() const { + return hasher_; + } + + SimplifierHashType hashVars() const; + + private: + std::vector variables_; + ExprPtr scalar_; + HashProvider& hasher_; + + void addTerm(TermPtr t) { + variables_.push_back(std::move(t)); + } + template + void addTerm(TermPtr t, Ts&&... ts) { + addTerm(std::move(t)); + addTerm(std::forward(ts)...); + } + + // Sort by hash to normalize order of terms. + void sort(); +}; + +class RoundOff : public BinaryOpNode { + public: + RoundOff(ExprPtr lhs, ExprPtr rhs) + : BinaryOpNode(lhs, rhs, IRNodeType::kOther) {} +}; + +class MaxTerm : public ExprNode { + public: + template + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + MaxTerm(HashProvider& hasher, ExprPtr s, bool p, Args... ts) + : ExprNodeBase(s ? promoteTypesVar(s, ts...) 
: promoteTypesVar(ts...)), + scalar_(s), + hasher_(hasher), + propagate_nans_(p) { + addComponent(ts...); + uniquefy(); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + MaxTerm(HashProvider& hasher, ExprPtr s, bool p, std::vector v) + : ExprNodeBase(s ? promoteTypesVec(s, v) : promoteTypesVec(v)), + variables_(std::move(v)), + scalar_(s), + hasher_(hasher), + propagate_nans_(p) { + uniquefy(); + } + + bool propagate_nans() const { + return propagate_nans_; + } + + ExprPtr scalar() const { + return scalar_; + } + const std::vector& variables() const { + return variables_; + } + HashProvider& hasher() const { + return hasher_; + } + + private: + std::vector variables_; + ExprPtr scalar_; + HashProvider& hasher_; + bool propagate_nans_; + + void addComponent() {} + void addComponent(ExprPtr e) { + variables_.push_back(std::move(e)); + } + template + void addComponent(ExprPtr e, Es&&... es) { + addComponent(std::move(e)); + addComponent(std::forward(es)...); + } + + // Uniquefy the terms using their hash. + void uniquefy(); +}; + +class MinTerm : public ExprNode { + public: + template + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + MinTerm(HashProvider& hasher, ExprPtr s, bool p, Args... ts) + : ExprNodeBase(s ? promoteTypesVar(s, ts...) : promoteTypesVar(ts...)), + scalar_(s), + hasher_(hasher), + propagate_nans_(p) { + addComponent(ts...); + uniquefy(); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + MinTerm(HashProvider& hasher, ExprPtr s, bool p, std::vector v) + : ExprNodeBase(s ? promoteTypesVec(s, v) : promoteTypesVec(v)), + variables_(std::move(v)), + scalar_(s), + hasher_(hasher), + propagate_nans_(p) { + uniquefy(); + } + + bool propagate_nans() const { + return propagate_nans_; + } + + ExprPtr scalar() const { + return scalar_; + } + const std::vector& variables() const { + return variables_; + } + HashProvider& hasher() const { + return hasher_; + } + + private: + std::vector variables_; + ExprPtr scalar_; + HashProvider& hasher_; + bool propagate_nans_; + + void addComponent() {} + void addComponent(ExprPtr e) { + variables_.push_back(std::move(e)); + } + template + void addComponent(ExprPtr e, Es&&... es) { + addComponent(std::move(e)); + addComponent(std::forward(es)...); + } + + // Uniquefy the terms using their hash. + void uniquefy(); +}; + +// Context-sensitive IR simplification +using VarBoundInfo = std::unordered_map; + +class TORCH_API SimplifierUnderContext : public IRMutator { + public: + ~SimplifierUnderContext() override = default; + // Add boundary info for index variables in for-loops + StmtPtr mutate(ForPtr v) override; + + ExprPtr mutate(DivPtr v) override; + ExprPtr mutate(ModPtr v) override; + ExprPtr mutate(CompareSelectPtr v) override; + ExprPtr mutate(IfThenElsePtr v) override; + + protected: + bool getLoopBoundInfo(const ExprPtr& expr, analysis::Bound* loop_bound_info); + + protected: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + HashProvider hasher_; + VarBoundInfo var_bound_info_; +}; + +// Stmt simplification should occur in both modes. +class TORCH_API PolynomialBase : public IRMutator { + public: + ~PolynomialBase() override = default; + + StmtPtr mutate(BlockPtr v) override; + + StmtPtr mutate(CondPtr v) override; + + StmtPtr mutate(ForPtr v) override; + + // Trivially factorize terms by GCD of scalar components. 
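// [Editorial example, not from the original source; x and y are hypothetical
// variables] For the polynomial 6*x + 4*y the scalar GCD is 2, so this
// factorization would produce the single term 2 * (3*x + 2*y).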
+ TermPtr factorizePolynomial(PolynomialPtr poly); + + HashProvider& hasher() { + return hasher_; + } + + protected: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + HashProvider hasher_; +}; + +// Simplify the IR by combining arithmetic expressions over common terms. +class TORCH_API PolynomialTransformer : public PolynomialBase { + public: + using PolynomialBase::mutate; + // Inserts term into the provided map, in the case of a hash collision + // combines the term with the existing and updates the map. + void addOrUpdateTerm( + std::unordered_map& varmap, + TermPtr term); + + // Add Polynomial expressions, combining Terms representing the same + // variables. + ExprPtr addPolynomials(PolynomialPtr lhs, PolynomialPtr rhs); + + // Insert a new Term into the provided polynomial. If the new term has + // common variables to an existing term it is combined. + ExprPtr insertTerm(PolynomialPtr poly, TermPtr term); + + // Merge and simplify addition. + ExprPtr mutate(AddPtr v) override; + + // Subtract one term from another, cancelling if necessary. + ExprPtr subTerms(TermPtr lhs, TermPtr rhs, bool negated); + + // Subtract the RHS Polynomial from the LHS Polynomial, cancelling out where + // possible. + ExprPtr subPolynomials(PolynomialPtr lhs, PolynomialPtr rhs); + + // Merge and simplify subtraction. + ExprPtr mutate(SubPtr v) override; + + // Multiply two terms together, usually creating a new term with the variable + // lists concatenated. + TermPtr mulTerms(TermPtr lhs, TermPtr rhs); + + // Multiply a Polynomial by a Term. + ExprPtr polyByTerm(PolynomialPtr poly, TermPtr term); + + // Match a rounding pattern and create a RoundOff if found. + ExprPtr isRoundOff(ExprPtr lhs, ExprPtr rhs); + + // Inserts a new component into a term, simplifying if possible. + ExprPtr insertIntoTerm(TermPtr term, ExprPtr expr); + + // Merge and simplify multiplication. + ExprPtr mutate(MulPtr v) override; + + ExprPtr mutate(DivPtr v) override; + + ExprPtr mutate(ModPtr v) override; + + ExprPtr mutate(AndPtr v) override; + + ExprPtr mutate(XorPtr v) override; + + ExprPtr mutate(LshiftPtr v) override; + + ExprPtr mutate(RshiftPtr v) override; + + ExprPtr mutate(MaxPtr v) override; + + ExprPtr mutate(MinPtr v) override; + + ExprPtr mutate(CompareSelectPtr v) override; + + ExprPtr mutate(IntrinsicsPtr v) override; + + ExprPtr mutate(CastPtr v) override; + + ExprPtr mutate(IfThenElsePtr v) override; + + static ExprPtr simplify(ExprPtr e); + static ExprHandle simplify(const ExprHandle& e); + static StmtPtr simplify(StmtPtr e); +}; + +// Expands Terms and Polynomial expressions into primitive operations. +// Does some simple factorization and reordering. +class TORCH_API TermExpander : public PolynomialBase { + PolynomialTransformer* simplifier_; + std::set eliminated_allocations_; + + public: + using PolynomialBase::mutate; + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + TermExpander(PolynomialTransformer* simplifier) : simplifier_(simplifier) {} + bool check_safe() { + return eliminated_allocations_.empty(); + } + + // Expand Terms out to a series of Muls. + ExprPtr mutate(TermPtr v) override; + + // Expand Polynomials out to a series of Adds. + ExprPtr mutate(PolynomialPtr v) override; + + // Expand MaxTerms to a series of Max ops. + ExprPtr mutate(MaxTermPtr v) override; + + // Expand MinTerms to a series of Min ops. + ExprPtr mutate(MinTermPtr v) override; + + // Expand RoundOff to it's component: Mul(Div(lhs, rhs), rhs). 
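// [Editorial example; x is a hypothetical non-negative integer index]
// RoundOff(x, 8) expands to (x / 8) * 8, i.e. x rounded down to the nearest
// multiple of 8 under integer division.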
+ ExprPtr mutate(RoundOffPtr v) override; + + // Eliminate zero length allocations. + StmtPtr mutate(AllocatePtr v) override; + StmtPtr mutate(FreePtr v) override; + + // Override to enable condition fusing. + BlockPtr fuseConditions(BlockPtr v); + StmtPtr fuseSyncThreads(BlockPtr block); + StmtPtr mutate(BlockPtr v) override; +}; + +class TORCH_API IRSimplifier { + public: + static StmtPtr simplify(StmtPtr s); + static ExprPtr simplify(ExprPtr e); + static ExprHandle simplify(const ExprHandle& e) { + return ExprHandle(simplify(e.node())); + } +}; + +// Flattens the buf and performs the simplifier on the flattened dims. +ExprPtr buf_flat_size(BufPtr v); +// Returns true if expressions A and B can be simplified to an equal expression. +TORCH_API bool exprEquals(ExprPtr A, ExprPtr B); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_verifier.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_verifier.h new file mode 100644 index 0000000000000000000000000000000000000000..03b6d9a1f0cce4fa8d6a551702d175162226ad7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_verifier.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class Expr; +class ExprHandle; +class Mod; +class And; +class Or; +class Xor; +class Lshift; +class Rshift; +class CompareSelect; +class Ramp; +class Load; +class IfThenElse; +class Intrinsics; + +class Stmt; +class ExternalCall; +class Store; +class For; +class Block; + +class TORCH_API IRVerifier : public IRVisitor { + public: + IRVerifier() = default; + + void visit(ModPtr v) override; + void visit(AndPtr v) override; + void visit(OrPtr v) override; + void visit(XorPtr v) override; + void visit(LshiftPtr v) override; + void visit(RshiftPtr v) override; + void visit(CompareSelectPtr v) override; + void visit(RampPtr v) override; + void visit(LoadPtr v) override; + void visit(IfThenElsePtr v) override; + void visit(IntrinsicsPtr v) override; + + void visit(ExternalCallPtr v) override; + void visit(StorePtr v) override; + void visit(ForPtr v) override; + void visit(BlockPtr v) override; +}; + +TORCH_API void verify(StmtPtr); +TORCH_API void verify(ExprPtr); +TORCH_API void verify(ExprHandle); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_visitor.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_visitor.h new file mode 100644 index 0000000000000000000000000000000000000000..09e6069dba1c288b62604ad4f9edff9af8210e1d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_visitor.h @@ -0,0 +1,64 @@ +#pragma once +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class TORCH_API IRVisitor { + public: + virtual ~IRVisitor() = default; + virtual void visit(AddPtr v); + virtual void visit(SubPtr v); + virtual void visit(MulPtr v); + virtual void visit(DivPtr v); + virtual void visit(ModPtr v); + virtual void visit(MaxPtr v); + virtual void visit(MinPtr v); + virtual void visit(AndPtr v); + virtual void visit(OrPtr v); + virtual void visit(XorPtr v); + virtual void visit(LshiftPtr v); + virtual void visit(RshiftPtr v); + virtual void visit(CompareSelectPtr v); + +#define IMM_PRINT_VISIT(Type, Name) virtual 
void visit(Name##ImmPtr v); + + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_PRINT_VISIT) +#undef IMM_PRINT_VISIT + + virtual void visit(CastPtr v); + virtual void visit(BitCastPtr v); + virtual void visit(VarPtr v); + virtual void visit(BufPtr v); + virtual void visit(RampPtr v); + virtual void visit(LoadPtr v); + virtual void visit(ForPtr v); + virtual void visit(BlockPtr v); + virtual void visit(StorePtr v); + virtual void visit(BroadcastPtr v); + virtual void visit(IfThenElsePtr v); + virtual void visit(IntrinsicsPtr v); + virtual void visit(AllocatePtr v); + virtual void visit(FreePtr v); + virtual void visit(FreeExtPtr v); + virtual void visit(PlacementAllocatePtr v); + virtual void visit(LetPtr v); + virtual void visit(CondPtr v); + virtual void visit(TermPtr v); + virtual void visit(PolynomialPtr v); + virtual void visit(RoundOffPtr v); + virtual void visit(MaxTermPtr v); + virtual void visit(MinTermPtr v); + virtual void visit(ReduceOpPtr v); + virtual void visit(AtomicAddPtr v); + virtual void visit(SyncThreadsPtr v); + virtual void visit(ExternalCallPtr v); + virtual void visit(ExternalCallWithAllocPtr v); +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/kernel.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..45658beb750e9f85fdd3bbdc8488223f8bbb04c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/kernel.h @@ -0,0 +1,382 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +struct SmallSizeTPairHash { + public: + std::size_t operator()(const std::pair& x) const { + // hashing input index and then dim index + return x.first * 128 + x.second; + } +}; + +// Returns true if the TE fuser supports this conv2d. +bool conv2dIsSupportedJit(const Node* node); +// Returns true if the TE fuser supports this conv2d with mkldnn prepacked conv. +bool mkldnnPrepackedConvIsSupportedJit(const Node* node); +// Returns true if the TE _convolution node is Conv2d. +bool isConv2d(const Node* node); +// Returns true if the TE fuser supports this matmul. +bool matmulIsSupported(const Node* node); +template +inline std::vector bufferSizes(const T& t) { + std::vector sizes; + for (size_t i = 0; i < t->ndim(); i++) { + sizes.push_back(*intValue(t->dim(i))); + } + return sizes; +} + +// Get the dimensions of a value. +std::vector valueShape(const ArgValue& v); + +// If v is a tensor, broadcast it to match the shape of axes, or return +// directly if v is a constant. 
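// [Editorial note, an illustration rather than a contract] A scalar ArgValue
// (double, int64_t, bool) becomes the corresponding constant expression, while
// a BufHandle becomes a Load whose indices are derived from `axes`, presumably
// via the broadcast()/computeIndicesToBroadcast() helpers declared below.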
+ExprHandle tensorOrConstant( + const ArgValue& v, + const std::vector& axes); + +int64_t normalizeAndCheckIndex(int64_t idx, int64_t list_size); + +ExprHandle broadcast(BufHandle b, const std::vector& axes); + +ExprHandle constant(const ArgValue& v); + +std::vector computeIndicesToBroadcast( + const std::vector& outputAxes, + const std::vector& inputSizes); + +inline std::string getArgValueName(const ArgValue& a) { + if (std::holds_alternative(a)) { + return "BufHandle"; + } else if (std::holds_alternative(a)) { + return "VarHandle"; + } else if (std::holds_alternative(a)) { + return "double"; + } else if (std::holds_alternative(a)) { + return "int64_t"; + } else if (std::holds_alternative(a)) { + return "bool"; + } else if (std::holds_alternative(a)) { + return "BufList"; + } else if (std::holds_alternative(a)) { + return "DoubleList"; + } else if (std::holds_alternative(a)) { + return "IntList"; + } else if (std::holds_alternative(a)) { + return "None"; + } else { + throw std::runtime_error("ArgValue type not handled in string conversion"); + } +} + +template +std::vector convertVecArgValue(const std::vector& v) { + std::vector res; + for (auto& x : v) { + auto val = std::get_if(&x); + if (val) { + res.push_back(*val); + } else { + throw std::runtime_error( + "vector type not homogeneous - found " + getArgValueName(x) + + ", expected " + getArgValueName(v[0])); + } + } + return res; +} + +class TORCH_API TensorExprKernel { + struct ConstantDescr { + BufPtr buf; + // Only one of ptr and node is used at a time + // 1) ptr for the constant tensors + // 2) node for the constant custom class objects + void* ptr = nullptr; + Node* node = nullptr; + }; + + public: + // Constructor Params: + // * subgraph + // - the graph that needs to be compiled. + // * kernel_func_name + // - the name that should be used for the generated kernel. + // * custom_lowerings + // - map that represents custom lowering definitions for a set of ops. + // * symbolic_shape_inputs + // - a list of symbolic graph inputs that represent the symbolic dims of + // the input tensors. + // * pre_alloc + // - a flag to control pre-allocation of buffers. + explicit TensorExprKernel( + const std::shared_ptr& subgraph, + const std::string& kernel_func_name, + std::unordered_map custom_lowerings = + {}, + std::vector symbolic_shape_inputs = {}, + bool pre_alloc = false, + std::unordered_map< + const torch::jit::Value*, + std::vector> symbolic_strides = {}); + + explicit TensorExprKernel( + const std::shared_ptr& subgraph, + std::unordered_map custom_lowerings = + {}, + std::vector symbolic_shape_inputs = {}, + bool pre_alloc = false, + std::unordered_map< + const torch::jit::Value*, + std::vector> symbolic_strides = {}) + : TensorExprKernel( + subgraph, + SubgraphUtils::generateNameForGraph(subgraph), + custom_lowerings, + symbolic_shape_inputs, + pre_alloc, + symbolic_strides) {} + + void run(Stack& stack) const; + void runFast( + const std::vector& inputs, + const std::vector& outputs) const; + // Expected format of stack: + // ... + // i.e., output IValues must be below the input IValues in the stack. 
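// [Editorial sketch; assumes a kernel with one output o and two inputs i0, i1]
// The stack would be populated bottom-to-top as [o, i0, i1], i.e. the output
// IValue is pushed first so that it sits below the inputs when
// runWithAllocatedOutputs(stack) is called.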
+ void runWithAllocatedOutputs(Stack& stack) const; + + void fallback(Stack& stack) const { + InterpreterState(code_).run(stack); + } + void recompile(); + + StmtPtr getCodeGenStmt(); + + std::string getCodeText(const std::string& attr = "") { + return codegen_->getCodeText(attr); + } + + const std::shared_ptr graph() { + return graph_; + } + + const std::vector& getConstantDescriptors() const { + return constants_; + } + + const std::vector& getBufferArgs() const { + return bufferArgs_; + } + + const std::string& getKernelName() const { + return codegen_->kernel_func_name(); + } + + const std::vector& getSymbolicShapeInputs() const { + return symbolic_shape_inputs_; + } + + private: + enum BackendType { + kUninitialized, + kSimpleIREval, + kLLVMCodeGen, + kCudaCodeGen, + kBlockCodeGen, + }; + + enum MemoryLayoutPolicy { + kContiguous, + kChannelsLastNdContiguous, + }; + + void compile(); + void genInputDebugNames(); + void runKernel(Stack& stack) const; + + std::vector sizesForValue(const torch::jit::Value* v); + + // These functions broadcast shape and also store a `hasBroadcast_` variable. + std::vector broadcastShapesMut( + const std::vector& a, + const std::vector& b); + std::vector broadcastShapesMut( + std::vector> shapes); + + ArgValue toArg(const torch::jit::Value* v) const; + ExprHandle constant(const torch::jit::Value* v); + + Tensor computeValue(const torch::jit::Value* v); + + void bindConstant(const torch::jit::Value* v); + + StmtPtr transformLoops(BackendType backendType, StmtPtr st); + + std::string getCodeGenName(BackendType backendType); + + void getStaticOutputSizesAndStrides( + const at::ArrayRef& inputs, + std::vector>* static_sizes, + std::vector>* static_strides) const; + + std::vector prepareRunArgs( + const at::ArrayRef& inputs, + std::vector& outputs) const; + BackendType inferBackendTypeFromDevice(at::Device device); + + Tensor bindInput(const torch::jit::Value* input); + BlockPtr bindAllInputs(); + + // Deduce the memory layout policy to be propagated within + // NNC fusion group. The memory layout policy could be `kContiguous` + // or `kChannelsLastNdContiguous`. + // `kContiguous`: Always convert the non-contiguous input tensors and + // internal buffers to contiguous. + // `kChannelsLastNdContiguous`: Always convert the input tensors and + // internal buffers to channels-last contiguous. + // Currently, the rule is simple. + // If all the input and out tensors of NNC fusion group are channels-last + // contiguous, the policy is `kChannelsLastNdContiguous`. Otherwise, it + // is always `kContiguous`. + void deduceMemoryLayoutPolicy(); + + Tensor convertSymbolicOutputToCorrectStrides(torch::jit::Value* v); + Tensor convertStaticShapeOutputToCorrectStrides(torch::jit::Value* v); + Tensor convertSymbolicOutputToCorrectStrides( + const std::vector& sizes, + const std::vector& sorted_stride_indices_descending, + const std::vector& strides, + BufPtr& buf); + + NNCLoweringFunction getCustomLoweringFor(c10::Symbol op) const; + std::unordered_map getCustomLowerings() + const { + return custom_lowerings_; + } + + // Allocate memory for intermediate buffers at compile time. + // Specifically, we pre-allocate memory for intermediate buffers with static + // size and manage these buffers in the way we manage JIT constant tensors: + // push the buf args into the stack so NNC IR can access them at runtime. 
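// [Editorial note, an inference rather than a documented guarantee] This
// appears to be the pass enabled by the `pre_alloc` constructor flag described
// above; per the comment above, only intermediate buffers whose sizes are
// statically known are candidates for this compile-time allocation.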
+ std::vector preAllocIntermediateBufs( + const std::vector& interm_bufs); + + struct UnpackedTensorOptions { + c10::optional dtype; + c10::optional layout; + c10::optional device; + c10::optional pinned_memory; + + UnpackedTensorOptions(const c10::TensorOptions& opts) + : dtype(c10::optTypeMetaToScalarType(opts.dtype_opt())), + layout(opts.layout_opt()), + device(opts.device_opt()), + pinned_memory(opts.pinned_memory_opt()) {} + }; + + ExprHandle getVarForShape(const c10::ShapeSymbol& ss); + std::vector computeInputTensorDims( + const torch::jit::Value* input); + ExprHandle getStrideArg(size_t tensor_input, size_t stride_index); + std::vector sizesFromSymbolicShape( + const c10::SymbolicShape& shape); + std::vector getInputStrides( + const torch::jit::Value* input, + const std::vector& inputTensorDims); + std::vector& getSymbolicStrideDesc( + const torch::jit::Value* value); + + // Apply the optimizations to the graph owned by the current fusion group, + // like concatenation optimization, post-op fusion, and some other graph-level + // optimizations. + void optimizeOwningGraph(); + + int64_t nInputs_ = 0; + int64_t nOutputs_ = 0; + std::vector bufferArgs_; + std::vector> tensorOutputSizes_; + std::vector> tensorOutputStrides_; + std::vector tensorOutputStrideDesc_; + std::vector isOutputScalar_; + std::vector tensorOutputTensorOptions_; + std::unordered_set bufOutputs_; + std::unordered_set bufsToBeParallelized_; + std::unordered_map bufs_; + std::unordered_map scalars_; + std::unordered_map input_name_map_; + std::unique_ptr codegen_; + at::Device device_ = at::kCPU; + std::shared_ptr graph_; + Code code_; + bool allow_fallback_{false}; + bool use_fallback_{false}; + bool hasRandom_{false}; + bool hasBroadcast_{false}; + std::unordered_map> + known_sizes_; + + std::vector> tensorOutputSymbolicSizes_; + // A map from ShapeSymbol.value() to the corresponding Var. + std::unordered_map shapeSymbolToVar_; + std::unordered_map shapeSymbolInputPos_; + // List of values corresponding to the ShapeSymbols that are inputs to + // kernel being compiled. The order of these values correspond to the order + // of the symbolic inputs at the end of the list of inputs to the kernel. 
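// [Editorial illustration with hypothetical numbers] For a kernel with 3
// regular tensor inputs and 2 symbolic dims, the run-time input list has 5
// entries; the last 2 carry the integer values of the symbolic dims, in the
// order recorded in this vector.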
+ std::vector symbolic_shape_inputs_; + bool has_symbolic_shapes_{false}; + + std::vector unpacked_constant_tensors_; + std::vector constants_; + + std::unordered_map custom_lowerings_; + StmtPtr stmt_ = nullptr; + bool pre_alloc_{false}; + std::string kernel_func_name_; + + // index of stack, stride index of tensor that will be appended as a codegen + // arg + std::vector> input_stride_args_; + // map from to stride as arg VarHandle + std::unordered_map, VarHandle, SmallSizeTPairHash> + strideArgToVar_; + std::unordered_map< + const torch::jit::Value*, + std::vector> + symbolic_strides_; + + // Memory layout to be propagated with fusion group + MemoryLayoutPolicy memory_layout_policy_ = MemoryLayoutPolicy::kContiguous; +}; + +TORCH_API int& getTECudaPointwiseLoopLevels(); +TORCH_API int& getTECudaPointwiseBlockCount(); +TORCH_API int& getTECudaPointwiseBlockSize(); +TORCH_API bool& getTEGenerateBlockCode(); +TORCH_API bool& getTEMustUseLLVMOnCPU(); +TORCH_API bool fallbackAllowed(); +TORCH_API bool setFallbackAllowed(bool value); +TORCH_API bool& getCatWoConditionals(); +TORCH_API bool& getOptConditionals(); + +TORCH_API c10::optional pickDeviceType( + const at::ArrayRef& inputs); + +bool isContiguous( + const torch::jit::Value* v, + at::MemoryFormat memory_format = at::MemoryFormat::Contiguous); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_codegen.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_codegen.h new file mode 100644 index 0000000000000000000000000000000000000000..7ab506fa8fe1ece16af2d9be8e373d539b1ce8c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_codegen.h @@ -0,0 +1,143 @@ +#pragma once + +#ifdef TORCH_ENABLE_LLVM +#include + +#include +#include +#include + +#include + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class LLVMCodeGenImpl; +class LLVMCodeGenCallee; + +class TORCH_API LLVMCodeGen : public CodeGen { + public: + explicit LLVMCodeGen( + StmtPtr stmt, + const std::vector& args, + at::Device device = at::kCPU, + const std::string& kernel_func_name = "func", + Dtype dtype = kInt, + c10::optional triple = c10::nullopt, + c10::optional cpu = c10::nullopt, + c10::optional attrs = c10::nullopt); + explicit LLVMCodeGen(StmtPtr stmt); + + LLVMCodeGen() = delete; + ~LLVMCodeGen() override; + + // Cleans up all the memory used during LLVM code generation pass except + // the generated kernel. After calling this method, users should not call + // methods like `getCodeText` that require the LLVMCodeGenImpl data. However, + // users can continue to call this kernel using `call` and `call_raw`. 
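// [Editorial usage sketch; `cg`, `args` and `call_args` are hypothetical]
//   LLVMCodeGen cg(stmt, args);
//   auto text = cg.getCodeText(); // must be read before cleanup
//   cg.cleanup_memory();
//   cg.call(call_args);           // calling the kernel is still valid
//   // cg.getCodeText() must not be used after cleanup_memory().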
+ void cleanup_memory(); + + TORCH_API void call(const std::vector& args) override; + TORCH_API void call_raw(const std::vector& args) override; + TORCH_API void call_with_numel(void** args, int64_t numel) override; + + at::Tensor empty_strided( + c10::IntArrayRef size, + c10::IntArrayRef stride, + c10::optional dtype_opt, + c10::optional layout_opt, + c10::optional device_opt, + c10::optional pin_memory_opt) override; + + template + T value() { + return value(nullptr); + } + + template + T value(std::vector& args) { + return value(args.data()); + } + + template + T value(void** args) { + T (*fp)(void**) = (T(*)(void**))getKernelAddress(callee_.get()); + T rv = fp(args); + return rv; + } + + std::string getCodeText(const std::string& attr = "") override; + + private: + void* getKernelAddress(LLVMCodeGenCallee* callee); + + std::unique_ptr callee_; + std::unique_ptr impl_; +}; + +struct TORCH_API LLVMCodeGenBuilder { + using BufferArg = CodeGen::BufferArg; + + LLVMCodeGenBuilder(StmtPtr stmt, std::vector args) + : stmt_(stmt), args_(std::move(args)) {} + + LLVMCodeGenBuilder& device(at::Device device) { + device_ = device; + return *this; + } + + LLVMCodeGenBuilder& kernelFuncName(std::string name) { + kernelFuncName_ = std::move(name); + return *this; + } + + LLVMCodeGenBuilder& dtype(Dtype d) { + dtype_ = d; + return *this; + } + + LLVMCodeGenBuilder& triple(std::string triple) { + triple_ = std::move(triple); + return *this; + } + + LLVMCodeGenBuilder& cpu(std::string cpu) { + cpu_ = std::move(cpu); + return *this; + } + + LLVMCodeGenBuilder& attrs(std::string attrs) { + attrs_ = std::move(attrs); + return *this; + } + + std::unique_ptr build() { + return std::make_unique( + stmt_, args_, device_, kernelFuncName_, dtype_, triple_, cpu_, attrs_); + } + + private: + StmtPtr stmt_; + std::vector args_; + at::Device device_ = at::kCPU; + std::string kernelFuncName_ = "func"; + Dtype dtype_ = kInt; + c10::optional triple_ = c10::nullopt; + c10::optional cpu_ = c10::nullopt; + c10::optional attrs_ = c10::nullopt; +}; + +TORCH_API c10::optional& LLVMTargetTriple(); +TORCH_API c10::optional& LLVMTargetCPU(); +TORCH_API c10::optional& LLVMTargetAttrs(); +TORCH_API bool& LLVMAOTWorkflow(); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch + +#endif // TORCH_ENABLE_LLVM diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_jit.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_jit.h new file mode 100644 index 0000000000000000000000000000000000000000..4aca55a9abf47a4e78ce0e6047119ed937e5f732 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_jit.h @@ -0,0 +1,77 @@ +#pragma once + +#ifdef TORCH_ENABLE_LLVM +#include +#include +#include +#include + +C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override") +#include +C10_DIAGNOSTIC_POP() +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +inline std::string formatError(llvm::Error&& err, const char* msg) { + static constexpr const char* defaultErrorMsg = + "Unexpected failure in LLVM JIT"; + std::string errorMsg(msg ? 
msg : defaultErrorMsg); + llvm::raw_string_ostream ss(errorMsg); + ss << ": " << err; + return ss.str(); +} + +template +T assertSuccess(llvm::Expected valOrErr, const char* msg = nullptr) { + TORCH_INTERNAL_ASSERT(valOrErr, formatError(valOrErr.takeError(), msg)); + return std::move(*valOrErr); +} + +inline void assertSuccess(llvm::Error err, const char* msg = nullptr) { + TORCH_INTERNAL_ASSERT(!err, formatError(std::move(err), msg)); +} + +} // namespace tensorexpr +} // namespace jit +} // namespace torch + +namespace llvm { +namespace orc { + +class PytorchLLVMJITImpl; + +class TORCH_API PytorchLLVMJIT { + public: + PytorchLLVMJIT( + c10::optional triple, + c10::optional cpu, + c10::optional attrs); + ~PytorchLLVMJIT(); + + void addModule(std::unique_ptr M, std::unique_ptr C); + + JITSymbol findSymbol(const std::string Name); + + bool hasSymbol(const std::string& Name); + + TargetMachine& getTargetMachine(); + + const DataLayout& getDataLayout(); + + private: + // Use the PImpl idiom here to hide the no-rtti parts of the JIT structure. + std::unique_ptr impl_; +}; + +} // end namespace orc +} // end namespace llvm + +#endif // ENABLE LLVM diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest.h new file mode 100644 index 0000000000000000000000000000000000000000..5f8660d1a95389db55b48322c3347833b3d5d1c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest.h @@ -0,0 +1,606 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class Expr; +class Var; +class Buf; +class Tensor; +class Function; +class Stmt; +class For; +class Block; +class Store; +class Dtype; + +class TORCH_API LoopNest { + public: + // A constructor for building a LoopNest from a list of Tensors + LoopNest( + const std::vector& output_tensors, + const std::vector& tensors_to_compute); + + // A convenience constructor for the case when all tensors are output tensors + LoopNest(const std::vector& output_tensors); + + // A constructor for building a LoopNest from an Stmt and a list of output + // buffers. + LoopNest(StmtPtr stmt, std::unordered_set output_bufs); + + // A constructor for building a LoopNest from another loopnest. It clones the + // other loopnest's stmt. + LoopNest(const LoopNest& other); + + StmtPtr root_stmt() const { + return root_stmt_; + } + + std::vector getLoopStmtsFor(Tensor) const; + std::vector getLoopStmtsFor(BufPtr) const; + std::vector getLoopStmtsFor(StmtPtr) const; + StmtPtr getLoopBodyFor(Tensor) const; + StmtPtr getLoopBodyFor(BufPtr) const; + + // Returns the For stmt indexed by 'indices' in the 'root' For stmt. + //'indices' indicates the path to the returned loop from 'root' in AST, e.g., + // + // root: for(int i...){ + // j_loop: for (int j...){ + // k1_loop: for (int k1...){ + // A[i, j, k1] = .... + // } + // B[i, j] = ... + // k2_loop: for (int k2...){ + // A[i, j, k2] = ... + // } + // } + // } + // + // the path from 'root' to 'j_loop' is [0] + // the path from 'root' to 'k1_loop' is [0, 0] + // the path from 'root' to 'k2_loop' is [0, 2] + ForPtr getLoopAt(ForPtr root, const std::vector& indices) const; + + // Returns the For stmt that is immediately enclosing the given stmt. 
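// [Editorial example, reusing the root/j_loop/k1_loop nest shown above]
// For the store "A[i, j, k1] = ...", the immediately enclosing loop is
// k1_loop, and getParentLoop(k1_loop) is j_loop.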
+ static ForPtr getParentLoop(StmtPtr st); + + // Returns the list of For stmts corresponding to the loopnest that is + // enclosing the given stmt. + static std::vector getEnclosingLoopNest(StmtPtr st); + + // Returns a list of all Stmts that write to the given buf. + std::vector getAllWritesToBuf(BufPtr) const; + + // The following methods return the For loops that contain writes to + // the given buf. + // + // For example, consider the following code: + // for i1 + // for j1 + // a[i1,j1] = + // for i2 + // for j2 + // for k2 + // a[i2,j2] = + // for j3 + // a[i2,j3] = + + // Returns a list of For loops which directly contain a Stmt that writes + // to buf. + // For the above example: + // getAllInnermostLoopsWritingToBuf(a) => {j1, k2, j3} + std::vector getAllInnermostLoopsWritingToBuf(BufPtr) const; + + // Returns a list of For loopnests which contain a Stmt that writes to + // the given buf. Each loopnest here is a vector For loops. + // For the above example: + // getAllLoopNestsWritingToBuf(a) => {{i1,j1}, {i2,j2,k2}, {i2,j3}} + std::vector> getAllLoopNestsWritingToBuf(BufPtr) const; + + StmtPtr simplify(); + + // Sanitize variables and buffer names. + // The pass assigns predefined names for loop index variables + // (i,j,k,l,m,n,o,p,i1,j1,k1,...) and ensures these names are not conflicting + // anywhere. It also removes duplicates from other Buf nad Var names as well + // as replaces illegal characters in them with underscores. + // + // Note: since it's currently technically possible to use the same variable + // as index in two different loops, this transformation finds such cases and + // introduces new variables to avoid duplication. + static StmtPtr sanitizeNames(StmtPtr s); + + bool computeInline(StmtPtr s); + bool computeInline(BufPtr b); + void inlineIntermediateBufs(bool allow_duplicated_work); + + // Optimizes conditionals. + // + // Currently, only the following pattern of conditionals is optimized. + // This corresponds to the conditional format that is generated to handle + // `aten::cat` op. + // + // for (int i = 0; i < 20; i++) { + // A[i] = IfThenElse(i<5 ? 1 : 0, B[i], C[i-5]) + // } + // + // Constraints that must be satisfied for this optimization: + // * All conditions should be of the form "var < expr". + // * All conditions should have the same variable, say v. + // * The condition variable found should be the same as the inner-most + // loop variable. TODO: Remove this constraint. + // * If there are multiple stores that contain conditionals using the same + // loop variable, only the first conditional will be optimized. + // TODO: Remove this constraint. + bool optimizeConditionals(); + + // Splits the given loop into 2 nested loops with the given factor as the + // inner loop bound. If the factor does not evenly divide the loop bound, + // then the remaining iterations are extracted into a tail loop that is + // added after the given loop. + // + // For example, consider the following code: + // for (int i = 0; i < 100; ++i) { + // A[i] = + // } + // + // splitWithTail(i, 8, ...) will result in: + // for (int i_outer = 0; i_outer < 12; ++i_outer) { + // for (int i_inner = 0; i_inner < 8; ++i_inner) { + // A[i_outer * 8 + i_inner] = + // } + // } + // for (int i_tail = 0; i_tail < 4; ++i_tail) { + // A[i_tail + 96] = + // } + // + // The given loop will be transformed to the outer loop after splitting. + // So, the pointer to the input loop should be valid after splitting and + // will point to the outer loop. 
The `inner` and `tail` parameters will be + // set to point to the inner and tail loops that are generated. + static void splitWithTail(ForPtr f, int factor, ForPtr* inner, ForPtr* tail); + // A convenience wrapper when the caller does not need to access the + // split loops. + static void splitWithTail(ForPtr f, int factor); + + // Splits the given loop into 2 nested loops with the given factor as the + // inner loop bound. If the factor does not evenly divide the loop bound, + // then a conditional is inserted into the body to handle the remaining + // iterations appropriately. + // + // For example, consider the following code: + // for (int i = 0; i < 100; ++i) { + // A[i] = + // } + // + // splitWithMask(i, 8, ...) will result in: + // for (int i_outer = 0; i_outer < 13; ++i_outer) { + // for (int i_inner = 0; i_inner < 8; ++i_inner) { + // if (i_outer * 8 + i_inner < 100) { + // A[i_outer * 8 + i_inner] = + // } + // } + // } + // + // The given loop will be transformed to the outer loop after splitting. + // So, the pointer to the input loop should be valid after splitting and + // will point to the outer loop. The `inner` parameter will be set to point + // to the inner loop that is generated. + static void splitWithMask(ForPtr f, int factor, ForPtr* inner); + // A convenience wrapper when the caller does not need to access the + // split loops. + static void splitWithMask(ForPtr f, int factor); + + // The following methods support loop distribution. + // For example, consider the following code. This will be used to + // demonstrate the methods below. + // + // S0: for m + // S1: for i + // S2: A[i] = 0 + // S3: for j + // S4: A[i] = A[i] + + // S5: B[i] = A[i] + // S6: for k + // S7: B[i] = B[i] + + + // This method distributes the given loop over its body by splitting + // after every given pivot stmt. + // + // NOTE: Pivot stmts that are not in the given loop's body will be ignored. + // + // For the above example: + // distributeLoop(S1, {S3, S5}) + // will result in: + // S0: for m + // S1: for i + // S2: A[i] = 0 + // S3: for j + // S4: A[i] = A[i] + + // : for i + // S5: B[i] = A[i] + // : for i + // S6: for k + // S7: B[i] = B[i] + + static std::vector distributeLoop( + ForPtr loop, + const std::unordered_set& pivots); + + // This method distributes the given loop over every stmt in its body. + // + // For the above example: + // distributeLoop(S1) + // will result in: + // S0: for m + // S1: for i + // S2: A[i] = 0 + // : for i + // S3: for j + // S4: A[i] = A[i] + + // : for i + // S5: B[i] = A[i] + // : for i + // S6: for k + // S7: B[i] = B[i] + + static std::vector distributeLoop(ForPtr loop); + // Same as above, but also distribute parent loops. + // Returns the result of distributing the outermost loop. + // + // For the above example: + // distributeLoopAndParents(S1) will result in: + // S0: for m + // S1: for i + // S2: A[i] = 0 + // : for m + // : for i + // S3: for j + // S4: A[i] = A[i] + + // : for m + // : for i + // S5: B[i] = A[i] + // : for m + // : for i + // S6: for k + // S7: B[i] = B[i] + + static std::vector distributeLoopAndParents(ForPtr loop); + + // This method distributes the given loop over its body by splitting + // after every For stmt in its body. 
+ // + // For the above example: + // distributeLoopOverInnerLoops(S1) + // will result in: + // S0: for m + // S1: for i + // S2: A[i] = 0 + // S3: for j + // S4: A[i] = A[i] + + // : for i + // S5: B[i] = A[i] + // S6: for k + // S7: B[i] = B[i] + + static std::vector distributeLoopOverInnerLoops(ForPtr loop); + // Same as above, but also distribute parent loops. + // Returns the result of distributing the outermost loop. + // + // For the above example: + // distributeLoopAndParentsOverInnerLoops(S1) + // will result in: + // S0: for m + // S1: for i + // S2: A[i] = 0 + // S3: for j + // S4: A[i] = A[i] + + // : for m + // : for i + // S5: B[i] = A[i] + // S6: for k + // S7: B[i] = B[i] + + static std::vector distributeLoopAndParentsOverInnerLoops( + ForPtr loop); + + // This method performs loop fusion. + // For example, consider the following code. + // + // S1: for m + // S2: A[m] = 0 + // S3: for j + // S4: A[m] = A[m] + + // S5: for n + // S5: B[n] = A[n] + // S6: for k + // S7: B[n] = B[n] + + // + // fuseLoops({S1, S5}), will return the following loop: + // S1: for m + // S2: A[m] = 0 + // S3: for j + // S4: A[m] = A[m] + + // S5: B[m] = A[m] + // S6: for k + // S7: B[m] = B[m] + + // + // This transformation is unsafe as it simply add all loops into the body of + // the first loop for fusion without correctness checks. + // + // Below are the two requirements to apply unsafeFuseLoops: + // * All the loops have the same parent. + // * There are no statements between these loops in their parent body. + static bool unsafeFuseLoops(const std::vector& loops, ForPtr* fused); + + // Loop fusion is done only when all the conditions below are satisfied. + // * All the loops have the same parent. + // * There are no statements between these loops in their parent body. + // * The start bounds are the same for all loops. + // * The stop bounds are the same for all loops. + // * Fusing the loops does not violate or add any dependencies. + static bool fuseLoops(const std::vector& loops, ForPtr* fused); + + static void reorderAxis(ForPtr a, ForPtr b); + + // Reorder the given list of loops according to the permutation specified. + // Here `permutation[i]` represents the position of the loop in the input + // which will end up at position `i` after the reorder. + // + // For example, consider the following code: + // for p + // for q + // for r + // for s + // A[p,q,r,s] = + // + // reorder({p, q, r, s}, {2, 3, 0, 1}) will return the list of loops in the + // following form: + // for r + // for s + // for p + // for q + // A[p,q,r,s] = + static std::vector reorder( + const std::vector& loops, + const std::vector& permutation); + + // Tile takes a 2d domain (x, y) and splits it into small rectangular blocks + // each with shape (x_factor, y_factor). The traversal over the domain turns + // into an outer iteration over the blocks and an inner traversal over all + // points in the block. + // Note that if x dim % x_factor or y dim % y_factor does not equal to 0, the + // loop body will generate corresponding tailing loops. + // The transformation is in-place and returns 'xtail'. 
+ // + // For example, consider the following code: + // for i: [0, 64) + // for j: [0, 64) + // for k: [0, 32) + // A[i, j] = B[i, k] + C[j, k] + // + // tile(i, j, 4, 8) will transform "i" for-stmt into the following nested + // loop: + // for i_outer: [0, 16) + // for j_outer: [0, 8) + // for i_inner: [0, 4) + // for j_inner: [0, 8) + // for k: [0, 32) + // A[i_outer * 4 + i_inner, j_outer * 8 + j_inner] = + // B[i_outer * 4 + i_inner, k] + C[j_outer * 8 + j_inner, k] + // + // tile(i, j, 4, 9) will transform "i" for-stmt into the following nested + // loop: + // for i_outer: [0, 16) + // for j_outer: [0, 7) + // for i_inner: [0, 4) + // for j_inner: [0, 9) + // for k: (0, 32) + // A[i_outer * 4 + i_inner, j_outer * 9 + j_inner] = + // B[i_outer * 4 + i_inner, k] + C[j_outer * 9 + j_inner, k] + // for j_tail: [0, 1) + // for i_inner: [0, 4) + // for k: (0, 32) + // A[i_outer * 4 + i_inner, 7 * 9 + j_tail] = + // B[i_outer * 4 + i_inner, k] + C[7 * 9 + j_tail, k] + ForPtr tile(ForPtr x, ForPtr y, int x_factor, int y_factor); + + // Returns true if the given loops are perfectly nested, i.e., every loop + // (except the innermost) should have exactly one statement in its body + // and that statement must be the next inner loop. + static bool areLoopsPerfectlyNested(const std::vector& loops); + + // Returns true if the given loop has a loop-carried dependence. + static bool hasLoopCarriedDependence(ForPtr loop); + + // Unrolls all the iterations of the given loop. + // Requires that the loop bounds are constant. + static void fullUnroll(ForPtr f, StmtPtr* unrolled); + static void fullUnroll(ForPtr f); + + // Unrolls the given loop for the specified factor. + // This does not require constant bounds for the loop being unrolled. + static void unroll(ForPtr f, int factor, ForPtr* tail); + static void unroll(ForPtr f, int factor); + + static bool normalize(ForPtr f); + static bool isNormalized(ForPtr f); + + static bool flatten(const std::vector& f, ForPtr* flattened); + static bool flatten(const std::vector& f); + + // Compresses the given buffer based on its use in the given Stmts. + // + // NOTE: This API assumes that there are no accesses to the given buffer + // outside the given statement. So, this should be called with the entire + // kernel statement to avoid incorrect buffer compressions. + // + // For example, given the input: + // + // for (int i = 0; i < 100; ++i) { + // for (int j = 0; j < 200; ++j) { + // A[i,j] = sin(i*j) + // } + // for (int j = 0; j < 199; ++j) { + // B[i,j] = A[i,j] + A[i, j+1] + // } + // } + // + // compressBuffer(A, ...) will compress buffer A from + // [100, 200] to [1, 200] and modify the code as follows: + // + // for (int i = 0; i < 100; ++i) { + // for (int j = 0; j < 200; ++j) { + // A[0,j] = sin(i*j) + // } + // for (int j = 0; j < 199; ++j) { + // B[i,j] = A[0,j] + A[0, j+1] + // } + // } + static void compressBuffer(BufPtr buf, StmtPtr stmt); + + // Compresses all buffers in the given statement. + // + // NOTE: This API assumes that there are no accesses to buffers outside + // the given statement. So, this should be called with the entire + // kernel statement to avoid incorrect buffer compressions. + // + // TODO: Add an IR verifier check to detect invalidly compressed buffers. + static void compressAllBuffers(StmtPtr stmt); + + // Get 'num' loops from the loopnest starting at 'f'. + static std::vector getLoopStmtsInLoopNest(ForPtr f, size_t num); + + // LoopOptions are propagated to tail. 
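+  // A minimal usage sketch for the slice APIs declared below, assuming a
+  // ForPtr `loop` with extent 100 obtained from this LoopNest (the names
+  // `loop`, `head` and `tail` are hypothetical):
+  //
+  //   ForPtr head, tail;
+  //   LoopNest::sliceHead(loop, 4, &head, &tail);
+  //   // `head` now covers iterations [0, 4) and `tail` covers [4, 100);
+  //   // sliceTail would instead peel iterations off the end of the loop.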
+ static void sliceHead(ForPtr f, int factor, ForPtr* head, ForPtr* tail); + static void sliceHead(ForPtr f, int factor); + // LoopOptions are propagated to head. + static void sliceTail(ForPtr f, int factor, ForPtr* head, ForPtr* tail); + static void sliceTail(ForPtr f, int factor); + + using AccessResult = std::pair; + // Insert a cache for the consumer's usages of the buffer produced in + // consumer, and redirect reads and writes in the consumer to that cache. + // Returns a pair of the new cache buffer, and the new rewritten consumer. + static AccessResult cacheAccesses( + BufPtr producer, + const std::string& name, + StmtPtr consumer); + + // Insert a temporary computation of statement S in the scope of loop AT. + // S is assumed to be a Store or a Block containing a Store. Along with the + // computation itself, this transformation inserts Alloc/Free statements for + // the temporary buffer used in the computation. + static void computeAt(StmtPtr s, ForPtr at); + + // Rfactor a reduction axis into a normal axis. + // + // Requirements: + // * S is the reduction store + // * S is the only statement in the innermost loop + // * There is at least two reduction arguments in S + // * OUTER_REDUCTION_FOR loop corresponds to the outermost reduction variable + // used in the store and all other reduction variables are index variables of + // children loops of OUTER_REDUCTION_FOR + // * OUTER_REDUCTION_FOR is a perfect loop nest, i.e. it has only loops + // corresponding to the other reduction variables and the store, nested into + // each other + // + // What it does: + // * Introduce a new buffer with an extra dimension of a size equal to the + // span of the loop OUTER_REDUCTION_FOR (the new buffer is returned via + // RFAC_BUF_PTR) + // * Insert an initialization store for the new buffer in + // OUTER_REDUCTION_FOR before its nested loop + // * Replace the reduction store to the original buffer with the reduction + // store to the temp buffer, removing the index var of OUTER_REDUCTION_FOR + // from reduction arguments + // * Insert a final reduction store over the extra dimension of the new + // buffer to the original buffer + // * Returns TRUE if the transformation succeeded and FALSE otherwise + // + // Example: + // Original IR: + // S1: for i # normal axis + // S2: X[i] = 0 + // S3: for j # reduction axis + // S4: for k # reduction axis + // S5: X[i] = ReduceOp(X[i] + Y[i,j,k], reduce_axis={j,k}) + // + // After RFACTOR(S5, S3) + // S1: for i # normal axis + // S2: X[i] = 0 + // S3: for j # reduction axis for X, normal axis for X_rfac + // X_rfac[i,j] = 0 + // S4: for k # reduction axis + // X_rfac[i,j] = ReduceOp(X_rfac[i,j] + Y[i,j,k], reduce_axis={k}) + // X[i] = ReduceOp(X[i] + X_rfac[i,j], reduce_axis={j}) + static bool rfactor(StmtPtr s, ForPtr outer_reduction_for); + static bool rfactor( + StmtPtr s, + ForPtr outer_reduction_for, + BufPtr* rfac_buf_ptr); + + // Vectorize the given loop. This method requires that the given loop + // does not perform a reduction. + // It returns true if vectorization is successful and false otherwise. + static bool vectorize(ForPtr); + + // Find the inner-most loops and vectorize them. Currently, this only works + // for the LLVM backend, when no reductions are involved. 
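+  // A rough sketch of how these passes are typically chained once all loop
+  // transformations are done (the LoopNest `ln` and the exact ordering are
+  // illustrative, not prescriptive):
+  //
+  //   LoopNest ln(tensors_to_compute);
+  //   ... // splits, fusions, reorders, etc.
+  //   ln.simplify();
+  //   ln.vectorizeInnerLoops();
+  //   ln.eliminateDeadStores();
+  //   ln.prepareForCodegen();
+  //   StmtPtr s = ln.root_stmt();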
+ void vectorizeInnerLoops(); + + void eliminateDeadStores(); + + void prepareForCodegen(); + + const std::unordered_set getInputBufs() const; + const std::unordered_set getOutputBufs() const { + return output_bufs_; + } + std::vector getIntermediateBufs() const; + + // Finds which is the outer For between a and b for loops. If neither of the 2 + // Fors is an ancestor of the other, it returns nullptr. + static ForPtr findOuterFor(ForPtr a, ForPtr b); + + private: + void initialize( + const std::vector& output_tensors, + const std::vector& tensors_to_compute); + + StmtPtr root_stmt_; + + std::unordered_set output_bufs_; +}; + +TORCH_API StmtPtr FlattenIndexes(StmtPtr s); + +// TODO: Revisit this once we decide on how dependencies analysis should look +// like. Maybe we would choose to use a different API and BufUse would be +// removed, or if we decide to keep it we need to properly document its API. +struct BufLoadOrStoreUse { + StmtPtr s; + bool isStore; +}; + +/* + * Returns a map ( Buf -> uses of this Buf), uses are represented as vectors of + * BufUse elements, which are StmtPtr and a bool isStore flag. The order of uses + * in the vectors reflects the order in which the uses appear in the given + * statement. + */ +std::unordered_map> findLoadOrStoreUses( + StmtPtr s); + +// replaces all invalid characters with underscore +TORCH_API std::string sanitizeName(const std::string& input_name); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest_randomization.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest_randomization.h new file mode 100644 index 0000000000000000000000000000000000000000..9616f1c76cddc38f295b2db515cfe9eb8ffce0b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest_randomization.h @@ -0,0 +1,13 @@ +#pragma once + +namespace torch { +namespace jit { +namespace tensorexpr { + +// Applies a series of loop optimizations chosen randomly. This is only for +// testing purposes. This allows automatic stress testing of NNC loop +// transformations. +void loopnestRandomization(int64_t seed, LoopNest& l); +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/lowerings.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/lowerings.h new file mode 100644 index 0000000000000000000000000000000000000000..6d8b2c433ae3702675a89b7a490cd164018c4756 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/lowerings.h @@ -0,0 +1,49 @@ +// This file defines classes for registering standard lowerings from JIT to TE +// IR. 
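+// A registration typically pairs one or more operator schemas with a lowering
+// function, e.g. (illustrative sketch; the schema string and `computeMyOp`
+// are hypothetical, not part of the registry):
+//
+//   static RegisterNNCLoweringsFunction my_op_lowering(
+//       {"aten::my_op(Tensor self) -> Tensor"},
+//       computeMyOp);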
+#pragma once + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +using ArgNone = std::monostate; +using BufList = std::vector; +using DoubleList = std::vector; +using IntList = std::vector; +using ArgValue = std::variant< + tensorexpr::BufHandle, + tensorexpr::VarHandle, + double, + int64_t, + bool, + BufList, + DoubleList, + IntList, + std::string, + ArgNone>; + +using NNCLoweringFunction = std::function&, + const std::vector&, + const std::vector&, + const c10::optional&, + at::Device)>; + +TORCH_API FunctionSchemaMap& getNNCLoweringRegistry(); +TORCH_API NNCLoweringFunction getStandardLoweringFor(const std::string& op); + +struct RegisterNNCLoweringsFunction { + RegisterNNCLoweringsFunction( + const std::vector& schemas, + NNCLoweringFunction fn); +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/mem_dependency_checker.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/mem_dependency_checker.h new file mode 100644 index 0000000000000000000000000000000000000000..3b5bb538459c668481610d778c13a96e5142c23e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/mem_dependency_checker.h @@ -0,0 +1,415 @@ +#pragma once +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { +namespace analysis { + +enum class AccessType { + Input, + Output, + Load, + Store, + Call, + AtomicAdd, + Alloc, + Free +}; +const char* AccessToString(AccessType a); + +class AccessInfo; +using DependencySet = std::unordered_set>; + +/* AccessInfo + * + * Represents a single bounded memory access to a buffer, for instance a Load or + * a Store. Holds information relating to the specific access and links to + * connected accesses in the dependency graph. + */ +class TORCH_API AccessInfo { + public: + AccessInfo( + size_t id, + AccessType type, + StmtPtr stmt, + VarPtr var, + IndexBounds bounds) + : id_(id), + type_(type), + stmt_(std::move(stmt)), + expr_(nullptr), + var_(std::move(var)), + bounds_(std::move(bounds)) {} + + AccessInfo( + size_t id, + AccessType type, + ExprPtr expr, + StmtPtr stmt, + VarPtr var, + IndexBounds bounds) + : id_(id), + type_(type), + stmt_(std::move(stmt)), + expr_(std::move(expr)), + var_(std::move(var)), + bounds_(std::move(bounds)) {} + + // Id is a unique int representing the order this access occurred in the + // graph. + size_t id() const { + return id_; + } + + // The type of the access (Load, Store, etc). + AccessType type() const { + return type_; + } + + // The enclosing Stmt this access represents. E.g. if this is a Store then + // Stmt is the Store itself, while if the access is caused by an Expr, this is + // the most immediate parent Stmt. + StmtPtr stmt() const { + return stmt_; + } + + // If the access is represented by an Expr (such as Load or Call) then this is + // it, otherwise it's nullptr. + ExprPtr expr() const { + return expr_; + } + + // The Var representing the underlying Buffer. + VarPtr var() const { + return var_; + } + + // A vector of Bounds representing the start and end expression for each + // dimension. + IndexBounds& bounds() { + return bounds_; + } + + // Each access that this depends upon, + // eg. if this is a Load, then it contains every Store that immediately + // contributes to a load of the bounds. 
+ // or: if this is a Store, it contains all reads on the RHS of the Store. + const std::map>& dependencies() const { + return dependencies_; + } + + // Each access that depends on this one. + // ie. this access is present in the dependencies map of all accesses that are + // dependent. + std::map> dependents() const { + std::map> res; + for (const auto& kv : dependents_) { + res.emplace(kv.first, kv.second.lock()); + } + return res; + } + + // Returns the symbolic expression of the indices of this access. + std::vector getIndices() const; + + // Establishes a dependency or dependent relationship with another access. + void addDependency(const std::shared_ptr& write); + void addDependent(const std::shared_ptr& read); + + // helper for checking dependencies. + bool hasDependency(const std::shared_ptr& info) const; + + // Returns the set of all nodes that are direct (immediate) dependencies of + // this access. + DependencySet getDirectDependencies(); + // likewise, returns all nodes that directly depend on this one. + DependencySet getDirectDependents(); + + // Returns the full list of all nodes in the graph that this access depends + // on, and all nodes they depend on, and so forth, back to the inputs. + DependencySet getIndirectDependencies(); + // likewise, returns the full list of all nodes that depend on this node, and + // all nodes that depend on those nodes and so on down to the outputs. + DependencySet getIndirectDependents(); + + // Does this access represent a read of memory (Load, ReduceOp, Call, etc). + bool isRead() const; + // Does this access represent a write of memory (Store, etc). + bool isWrite() const; + + // Helpers for dumping accesses in various formats. + void print() const; + void dumpDOT(std::ostream& os) const; + const char* AccessTypeColour() const; + + private: + size_t id_; + AccessType type_; + StmtPtr stmt_; + ExprPtr expr_; + VarPtr var_; + IndexBounds bounds_; + + // Yes these should be sorted. + std::map> dependencies_; + std::map> dependents_; +}; + +using VarBoundMap = std::unordered_map; + +/* MemDependencyChecker analyses a IR fragment and builds a dependency graph of + * accesses contained within. + * + * It's possible to retrieve the entire graph in node-object form, or can be + * used as an oracle for answering dependency questions. e.g: + * + * analyzer.hasIndirectDependency(BufA, BufB); or, + * analyzer.hasDirectDependency(LoadA, StoreB); + */ +class TORCH_API MemDependencyChecker : public IRVisitor { + struct Scope; + + public: + MemDependencyChecker(); + MemDependencyChecker( + const std::unordered_set& inputs, + const std::unordered_set& outputs); + MemDependencyChecker( + const std::vector& inputs, + const std::vector& outputs); + + ~MemDependencyChecker() override = default; + + // Whether or not to allow loop execution order to influence dependency + // calculation. If the loop may later be parallelized you don't want this. + bool allowLoopExecutionOrderAnalysis(bool allow = true); + + // Dependency Checking API. + // The goal is to have enough overloads here so you don't really have to think + // about it. + + // Returns true if any read in A has a direct dependence on a write in B. + bool dependsDirectly(StmtPtr A, StmtPtr B); + bool dependsDirectly(ExprPtr A, StmtPtr B); + + // Returns true of the output depends directly on a write contained in B. + bool dependsDirectly(BufPtr output, StmtPtr B); + + // Returns true if a read in A depends directly on the provided input. 
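+  // A minimal usage sketch (the statement `stmt` and the buffers `a_buf` and
+  // `b_buf` are hypothetical):
+  //
+  //   std::unordered_set<BufPtr> inputs = {a_buf}, outputs = {b_buf};
+  //   MemDependencyChecker checker(inputs, outputs);
+  //   stmt->accept(&checker);
+  //   bool depends = checker.dependsIndirectly(b_buf, a_buf);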
+ bool dependsDirectly(StmtPtr A, BufPtr input); + bool dependsDirectly(ExprPtr A, BufPtr input); + + // Outputs/inputs cannot depend directly. + + // Returns true if the access A has B as an immediate dependency. + bool dependsDirectly( + const std::shared_ptr& A, + const std::shared_ptr& B); + + // Returns true if any read in A has an ancestor write contained in B. + bool dependsIndirectly(StmtPtr A, StmtPtr B); + bool dependsIndirectly(ExprPtr A, StmtPtr B); + + // Returns true of the output depends indirectly on a write contained in B. + bool dependsIndirectly(BufPtr output, StmtPtr B); + + // Returns true if a read in A depends indirectly on the provided input. + bool dependsIndirectly(StmtPtr A, BufPtr input); + bool dependsIndirectly(ExprPtr A, BufPtr input); + + // returns true if the output uses any load of the input. + bool dependsIndirectly(BufPtr output, BufPtr input); + + // Returns true if the access A has a dependency chain to access B. + bool dependsIndirectly( + const std::shared_ptr& A, + const std::shared_ptr& B); + + // Returns the AccessInfo + std::shared_ptr accessFor(StmtPtr A) const; + std::shared_ptr accessFor(ExprPtr A) const; + + // Returns all AccessInfos. + std::unordered_set> accessesWithin( + StmtPtr A) const; + // TODO: this will return only the AccessInfo for A. It's included for + // completeness but be aware it wont return accesses used in the computation + // of A. + std::unordered_set> accessesWithin( + ExprPtr A) const; + + // Accesses relating to input and output buffers. + std::shared_ptr input(BufPtr B) const; + std::shared_ptr output(BufPtr B) const; + + // Returns the full history of reads and writes. + const std::vector>& getHistory() const; + + // Dumps the dependency graph in DOT format. + void dumpDAG(const std::string& filename) const; + + private: + // Node visitors. + void visit(StorePtr v) override; + void visit(LoadPtr v) override; + void visit(ForPtr v) override; + void visit(CondPtr v) override; + void visit(IfThenElsePtr v) override; + void visit(CompareSelectPtr v) override; + void visit(BlockPtr v) override; + void visit(LetPtr v) override; + void visit(AtomicAddPtr v) override; + void visit(AllocatePtr v) override; + void visit(FreePtr v) override; + + using BoundRelationship = std::pair>; + + // An internal struct holding the accesses found within a scope Block. + struct Scope { + Scope(BlockPtr b, std::shared_ptr p) + : block(std::move(b)), parent(std::move(p)) {} + + BlockPtr block; + std::shared_ptr parent; + + std::unordered_map shadowedVarBounds; + std::unordered_set localVars; + + std::vector> accesses_; + + std::unordered_map> openWrites_; + }; + std::shared_ptr currentScope_; + + bool allowExecutionOrderAnalysis_{false}; + + std::unordered_multimap> stmtToAccess_; + std::unordered_multimap> exprToAccess_; + std::unordered_map>> + scopeToAccesses_; + + VarBoundMap knownVarBounds_; + + // Finds all accesses that are reads within the scope of v. + template + DependencySet getAllReadsWithin(StmtOrExprPtr v) { + DependencySet reads; + auto insertAllReads = [&](const auto& nodes) { + for (const auto& l : nodes) { + auto bound = exprToAccess_.equal_range(l); + for (auto it = bound.first; it != bound.second; ++it) { + if (it->second->isRead()) { + reads.insert(it->second); + } + } + } + }; + + // Look for and insert accesses belonging to all nodes that act like + // reads. 
+ insertAllReads(NodeFinder::find(v)); + insertAllReads(NodeFinder::find(v)); + + return reads; + } + + // Finds all accesses that are writes within the scope of v. + // Writes cannot occur in Exprs, so this is a little simpler. + DependencySet getAllWritesWithin(StmtPtr v) { + DependencySet writes; + + // writes just Store currently. + auto stores = NodeFinder::find(std::move(v)); + for (const auto& s : stores) { + auto bound = stmtToAccess_.equal_range(s); + for (auto it = bound.first; it != bound.second; ++it) { + if (it->second->isWrite()) { + writes.insert(it->second); + } + } + } + return writes; + } + + // Templated helpers to work on either Exprs or Stmts. + template + bool dependsDirectlyHelper(StmtOrExprPtr A, StmtPtr B) { + auto aReads = getAllReadsWithin(A); + auto bWrites = getAllWritesWithin(B); + + for (auto& read : aReads) { + for (auto& depPair : read->dependencies()) { + if (bWrites.count(depPair.second) != 0) { + return true; + } + } + } + + return false; + } + + template + bool dependsIndirectlyHelper(StmtOrExprPtr A, StmtPtr B) { + auto aReads = getAllReadsWithin(A); + auto bWrites = getAllWritesWithin(B); + + auto aDeps = getAllWriteDependencies(aReads); + + for (auto& dependency : aDeps) { + if (bWrites.count(dependency) != 0) { + return true; + } + } + + return false; + } + + DependencySet getAllWriteDependencies(const DependencySet& products); + + // Maps for inputs and outputs, since they aren't present directly in the IR. + std::unordered_map> inputs_; + std::unordered_map> outputs_; + std::unordered_map> intermediates_; + + // Inserts accesses for Buf's: specifically for inputs and outputs. + void insertBuffers( + std::unordered_map>& bufs, + AccessType type); + + // Update the write history with a new write, adding dependencies and closing + // any overlapped writes (if possible). + void updateWriteHistory( + std::list& writeHistory, + const std::shared_ptr& info, + size_t latestAccessToClose, + bool closeOverlapped = true, + bool insert = true); + + // Merge a child scope into a parent scope, adding dependencies for open + // writes in the parent to accesses in the child. + void mergeScope( + const std::shared_ptr& child, + const std::shared_ptr& parent, + bool closeOverlapped = true); + + // Binds symbolic vars in indices with the low and high bound for those vars. + std::vector getIndicesBounds(const std::vector& indices); + + size_t nextAccess_{0}; + StmtPtr lastStmt_{nullptr}; +}; + +} // namespace analysis +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/conv2d.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/conv2d.h new file mode 100644 index 0000000000000000000000000000000000000000..65902960192ab722d680f9e3d39127359df844ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/conv2d.h @@ -0,0 +1,105 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +// An API to compute 2D depthwise convolutions with bias. +TORCH_API Tensor conv2d_depthwise( + BufHandle input, + BufHandle weight, + BufHandle bias, + int stride, + int pad, + int groups); + +// An API to compute 2D depthwise convolutions without bias. 
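+// e.g. a 3x3 depthwise convolution over a [1, 72, 56, 56] input might be
+// built as (illustrative sketch; the BufHandle names and shapes are
+// hypothetical):
+//
+//   BufHandle input("input", {1, 72, 56, 56}, kFloat);
+//   BufHandle weight("weight", {72, 1, 3, 3}, kFloat);
+//   Tensor out =
+//       conv2d_depthwise(input, weight, /*stride=*/1, /*pad=*/1, /*groups=*/72);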
+TORCH_API Tensor conv2d_depthwise( + BufHandle input, + BufHandle weight, + int stride, + int pad, + int groups); + +TORCH_API Tensor conv2d_depthwise( + BufHandle input, + BufHandle weight, + BufHandle bias, + ExprHandle N, + ExprHandle C, + ExprHandle H, + ExprHandle W, + ExprHandle K, + ExprHandle CperG, + ExprHandle R, + ExprHandle S, + ExprHandle stride, + ExprHandle pad, + ExprHandle groups); + +TORCH_API Tensor conv2d_depthwise( + BufHandle input, + BufHandle weight, + ExprHandle N, + ExprHandle C, + ExprHandle H, + ExprHandle W, + ExprHandle K, + ExprHandle CperG, + ExprHandle R, + ExprHandle S, + ExprHandle stride, + ExprHandle pad, + ExprHandle groups); + +bool conv2dIsSupported( + const TensorInfo& input, + const TensorInfo& weight, + const TensorInfo& bias, + const std::vector& stride, + const std::vector& pad, + const std::vector& dilation, + int64_t groups); +bool mkldnnPrepackedConvIsSupported( + const TensorInfo& input, + const TensorInfo& weight, + const std::vector& stride, + const std::vector& pad, + const std::vector& dilation, + int64_t groups); +Tensor computeConv2d( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computeConv1d( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computePrepackedConv2dClampRun( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computePrepackedLinearClampRun( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computeMkldnnPrepackedConvRun( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/matmul.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/matmul.h new file mode 100644 index 0000000000000000000000000000000000000000..70f3f4bf7bf03f04379b781467db8d429a11abcc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/matmul.h @@ -0,0 +1,24 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +Tensor computeMatmul( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computeAddMM( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/misc.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/misc.h new file mode 100644 index 0000000000000000000000000000000000000000..5650b35147b17cbc62c30a33d2efadcd0f03f39b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/misc.h @@ -0,0 +1,98 @@ +#pragma once + +#include +#include +#include + +namespace 
torch { +namespace jit { +namespace tensorexpr { + +struct TensorInfo { + std::vector dims; + c10::ScalarType dtype; +}; +c10::optional getTensorInfo(BufHandle b); + +int64_t normalizeAndCheckIndex(int64_t idx, int64_t list_size); + +// Convert boolean to integer, if needed. +ExprHandle boolToInteger(const ExprHandle& x); +ExprHandle promoteToDtype(ExprHandle e, ScalarType dt); +void promoteInputs( + std::vector& inputs, + const int typeConstraints = kAllTypes); +ExprHandle promoteIntegerToDefaultType(const ExprHandle& e); +ExprHandle promoteHalfToFloat(const ExprHandle& e); +ExprHandle demoteOutput( + const ExprHandle& e, + const c10::optional type); + +std::vector broadcastShapes( + std::vector> shapes); +std::vector broadcastShapes( + const std::vector& a, + const std::vector& b); + +std::vector valueShape(const ArgValue& v); +ExprHandle tensorOrConstant( + const ArgValue& v, + const std::vector& axes); +ExprHandle scalarOrConstant(const ArgValue& v); +ExprHandle broadcast(BufHandle b, const std::vector& axes); +ExprHandle constant(const ArgValue& v); + +ExprHandle clamp( + const ExprHandle& cmin, + const ExprHandle& cmax, + const ExprHandle& input); + +Tensor computeChunk( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computeTranspose( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computeExpand( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computeReshape( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computeFlatten( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computeCatWoConditionals( + const std::vector& inputs, + const std::vector& outputShape); +Tensor computeCat( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computeEmbedding( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/norm.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/norm.h new file mode 100644 index 0000000000000000000000000000000000000000..7c8cc43387b0131f0641b274063fb9175d3a036e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/norm.h @@ -0,0 +1,18 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +Tensor computeBatchNorm( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/operators.h 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/operators.h new file mode 100644 index 0000000000000000000000000000000000000000..6298a6480149b9db1536ea408094e1d259c2605f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/operators.h @@ -0,0 +1,10 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/pointwise.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/pointwise.h new file mode 100644 index 0000000000000000000000000000000000000000..8de218dbb0383138bafbf42779c0c45caadb353b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/pointwise.h @@ -0,0 +1,86 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +TORCH_API Tensor computeSign( + const std::vector& inputs, + const std::vector& outputShape, + c10::optional> outputStrides = c10::nullopt); + +Tensor computeOneOperand( + const std::string& name, + const std::vector& inputValues, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + const std::function& innerExpr, + const int checkParamTypes = kAllTypes); +Tensor computeTwoOperand( + const std::string& name, + const std::vector& inputValues, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + const std::function& + innerExpr); +Tensor computeTwoOperandWithAlpha( + const std::string& name, + const std::vector& inputValues, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + const std::function& + innerExpr); +Tensor computeConditionWithTwoOperand( + const std::string& name, + const std::vector& inputValues, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + const std::function< + ExprHandle(const ExprHandle&, const ExprHandle&, const ExprHandle&)>& + innerExpr); +Tensor computeThreeOperand( + const std::string& name, + const std::vector& inputValues, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + const std::function< + ExprHandle(const ExprHandle&, const ExprHandle&, const ExprHandle&)>& + innerExpr, + bool promote_inputs = true); +Tensor computeFourOperand( + const std::string& name, + const std::vector& inputValues, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + const std::function& innerExpr); +Tensor computeNoop( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +Tensor computeScalar( + const std::string& name, + const std::vector& inputValues, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + const std::function& + innerExpr); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/quantization.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/quantization.h new file mode 100644 index 0000000000000000000000000000000000000000..019b2349b184056b4bb3769340debbe2985650f6 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/quantization.h @@ -0,0 +1,160 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +TORCH_API ExprHandle quantizePerTensorQParamFromArg(ArgValue arg); + +TORCH_API double immQScale(const BufHandle& qx); + +TORCH_API int64_t immQZero(const BufHandle& qx); + +TORCH_API ScalarType immQDType(const BufHandle& qx); + +TORCH_API bool isQuantized(const BufHandle& qx); + +TORCH_API Tensor computeQuantizePerTensor( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizePerTensorExternalCall( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedConv1d( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedConv2dPrepack( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedConv1d( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedConv2d( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedConv2dRelu( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedLinear( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedLinearRelu( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedAdd( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +Tensor computeQuantizedAddExternalCall( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedMul( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedMulScalar( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedCat( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedRelu( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeDequantize( + 
const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeDequantizeExternalCall( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeUpsampleNearest2d( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeUpsampleNearest2dExternalCall( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +TORCH_API Tensor computeQuantizedSigmoidExternalCall( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device); +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/reduction.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..6265c4d2658583ed5dc7b858e19d23d9160ae265 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/reduction.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +TORCH_API Tensor computeSum( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +TORCH_API Tensor computeMean( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +TORCH_API Tensor computeAdaptiveAvgPool2d( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); +Tensor computeMax( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + const c10::optional& outputType, + at::Device device); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/softmax.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/softmax.h new file mode 100644 index 0000000000000000000000000000000000000000..d5dd7fd429bed004a6e49e9a88090503aba30948 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/softmax.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +Tensor computeSoftmax( + const std::vector& inputs, + const std::vector& outputShape, + const std::vector& outputStrides, + bool log_softmax); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/reduction.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/reduction.h new file mode 100644 index 0000000000000000000000000000000000000000..28acaf87c0a64308d1e9e9579c2b63990ecc067c --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/reduction.h @@ -0,0 +1,305 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +using ParameterList = const std::vector; +using ReduceInteraction = std::function; + +// A Reducer is a user interface describing a particular reduction +// operation. It has three components: An initialization value, a way of +// interacting each value with the accumulation, and a method for obtaining the +// current value to be reduced. It is materialized into a ReduceOp when loop +// variables are known. +class TORCH_API Reducer { + public: + Reducer(ExprHandle init, ReduceInteraction& interaction) + : init_(init.node()), interaction_(interaction) {} + + template + Reducer(ExprHandle init, RI interaction) + : init_(init.node()), interaction_(std::move(interaction)) {} + + ExprPtr initializer() const { + return init_; + } + + ExprHandle operator()( + BufHandle result_buf, + ExprHandle body, + const std::vector& output, + const std::vector& inner) const; + + ReduceOpPtr operator()( + BufPtr result_buf, + ExprPtr body, + const std::vector& output, + const std::vector& inner) const; + + ExprHandle operator()( + BufHandle result_buf, + BufHandle acc_buf, + ExprHandle body, + const std::vector& output, + const std::vector& inner) const; + + // Polymorphic handling of Body functions with a variety of parameters. + static ExprHandle getReduceBody( + const std::function& func, + const std::vector& vars) { + return func(vars); + } + + static ExprHandle getReduceBody( + const std::function& func, + const std::vector& vars) { + if (vars.size() != 1) { + throw malformed_input("mismatch between reduce body and arg size (1)"); + } + + return func(vars[0]); + } + + static ExprHandle getReduceBody( + const std::function& func, + const std::vector& vars) { + if (vars.size() != 2) { + throw malformed_input("mismatch between reduce body and arg size (2)"); + } + return func(vars[0], vars[1]); + } + + static ExprHandle getReduceBody( + const std::function< + ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>& + func, + const std::vector& vars) { + if (vars.size() != 3) { + throw malformed_input("mismatch between reduce body and arg size (3)"); + } + return func(vars[0], vars[1], vars[2]); + } + + static ExprHandle getReduceBody( + const std::function& func, + const std::vector& vars) { + if (vars.size() != 4) { + throw malformed_input("mismatch between reduce body and arg size (4)"); + } + return func(vars[0], vars[1], vars[2], vars[3]); + } + + // Completes the reduction operator by applying the interaction function to + // the accumulation and the body expression. 
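+  // Since a Reducer is just an initial value plus an interaction function, a
+  // product reduction could be defined analogously to the Sum reducer later
+  // in this file (illustrative sketch):
+  //
+  //   class Product : public Reducer {
+  //    public:
+  //     Product()
+  //         : Reducer(ExprHandle(1), [](ExprHandle a, ExprHandle b) {
+  //             return a * b;
+  //           }) {}
+  //   };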
+ static ExprPtr complete( + BufPtr accumulator, + ReduceInteraction interaction, + ExprHandle body, + const std::vector& output_args, + const std::vector& reduce_args) { + ExprHandle accum = + ExprHandle(alloc(body.dtype(), accumulator, output_args)); + auto e = interaction(std::move(accum), std::move(body)); + return e.node(); + } + static ExprHandle complete( + BufHandle accumulator, + ReduceInteraction interaction, + ExprHandle body, + const std::vector& output_args, + const std::vector& reduce_args) { + ExprHandle accum = Load::make(body.dtype(), accumulator, output_args); + auto e = interaction(std::move(accum), std::move(body)); + return e; + } + + private: + ExprPtr init_; + ReduceInteraction interaction_; +}; + +// An expression representing a Reduction operation (e.g. Sum, Max) broken into +// it's component parts: initialization, accumulation var, acquisition of value +// to be reduced and interaction. +// +// This is intended to be expanded in the loopnest and not make it to codegen. +class TORCH_API ReduceOp : public ExprNode { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + ReduceOp( + ExprPtr body, + std::vector reduce_args, + const Reducer& reducer) + : ExprNodeBase(body->dtype()), + body_(body), + reduce_args_(std::move(reduce_args)), + reducer_(reducer) { + result_buf_ = nullptr; + acc_buf_ = nullptr; + ri_operand_ = nullptr; + } + + ReduceOp( + ExprPtr body, + std::vector reduce_args, + BufPtr result_buf, + BufPtr acc_buf, + ExprPtr ri_operand, + const Reducer& reducer) + : ExprNodeBase(body->dtype()), + body_(body), + reduce_args_(std::move(reduce_args)), + result_buf_(std::move(result_buf)), + acc_buf_(std::move(acc_buf)), + ri_operand_(std::move(ri_operand)), + reducer_(reducer) {} + + static ExprHandle make( + ExprHandle body, + std::vector reduce_args, + const Reducer& reducer); + + static ExprHandle make( + ExprHandle body, + std::vector reduce_args, + BufHandle result_buf, + BufHandle acc_buf, + ExprHandle ri_operand, + const Reducer& reducer); + + // return the body expression which obtains the value to be reduced. + ExprPtr body() const { + return body_; + } + + // Returns the original Reducer factory that can create ReduceOps. + const Reducer& reducer() const { + return reducer_; + } + + // returns variables associated with the axes of reduction. 
+ const std::vector& reduce_args() const { + return reduce_args_; + } + + void setAccBuf(BufHandle acc_buf) { + acc_buf_ = acc_buf.node(); + } + BufPtr getAccBuf() { + return acc_buf_; + } + + void setResultBuf(BufHandle buf) { + result_buf_ = buf.node(); + } + BufPtr getResultBuf() { + return result_buf_; + } + + void setRiOperand(ExprHandle ri_operand) { + ri_operand_ = ri_operand.node(); + } + ExprPtr getRiOperand() { + return ri_operand_; + } + + private: + // body_ = reducer_->interaction_(result_buf_, ri_operand_) + ExprPtr body_; + std::vector reduce_args_; + + BufPtr result_buf_; + BufPtr acc_buf_; + ExprPtr ri_operand_; + + const Reducer reducer_; +}; + +class Sum : public Reducer { + public: + Sum() + : Reducer(ExprHandle(0), [](ExprHandle a, ExprHandle b) { + return a + b; + }) {} +}; + +inline ExprHandle maximumVal(ScalarType type) { + switch (type) { +#define MAX_BY_TYPE_CASE(Type, Name) \ + case ScalarType::Name: \ + return ExprHandle(std::numeric_limits::max()); + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, MAX_BY_TYPE_CASE) +#undef MAX_BY_TYPE_CASE + default: + throw unsupported_dtype(); + } + return ExprHandle(); +} + +inline ExprHandle minimumVal(ScalarType type) { + switch (type) { +#define MAX_BY_TYPE_CASE(Type, Name) \ + case ScalarType::Name: \ + return ExprHandle(std::numeric_limits::min()); + AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, MAX_BY_TYPE_CASE) +#undef MAX_BY_TYPE_CASE + default: + throw unsupported_dtype(); + } +} + +class Maximum : public Reducer { + public: + // TODO possible to remove this arg by deferring the init value until we + // know the dtype of the body. + Maximum(Dtype dtype) + : Reducer( + minimumVal(dtype.scalar_type()), + [](ExprHandle a, ExprHandle b) { return Max::make(a, b, true); }) {} + Maximum(ExprHandle initializer) + : Reducer(initializer, [](ExprHandle a, ExprHandle b) { + return Max::make(a, b, true); + }) {} +}; + +class Minimum : public Reducer { + public: + Minimum(Dtype dtype) + : Reducer( + maximumVal(dtype.scalar_type()), + [](ExprHandle a, ExprHandle b) { return Min::make(a, b, true); }) {} + Minimum(ExprHandle initializer) + : Reducer(initializer, [](ExprHandle a, ExprHandle b) { + return Min::make(a, b, true); + }) {} +}; + +class ReductionExpander : public IRMutator { + public: + StmtPtr expand(StmtPtr s) { + return s->accept_mutator(this); + } + + ExprPtr mutate(ReduceOpPtr v) override { + return v->body(); + } +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/registerizer.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/registerizer.h new file mode 100644 index 0000000000000000000000000000000000000000..f73551be243be922f7e1eeafe086f76c2db025f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/registerizer.h @@ -0,0 +1,433 @@ +#pragma once +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { +namespace registerizer { + +/* The Registerizer performs scalar replacement by looking for common Stores and +Loads to a single item in a buffer and replacing them with a local temporary +scalar which is cheaper to write. 
+ +For example it can replace: + +{ + A[0] = 0; + for(const auto x : c10::irange(10)) { + A[0] = (A[0]) + x; + } +} + +with: + +{ + int A_ = 0; + for(const auto x : c10::irange(10)) { + A_ = x + A_; + } + A[0] = A_; +} + +This is particularly useful on GPUs when parallelizing, since after replacing +loops with metavars we have a lot of accesses like this. */ + +class Scope; + +/* Holds analysis information about accesses to a specific range of a + buffer, including the number of loads and stores and the lowest common parent + Block. + */ +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +class AccessInfo { + public: + AccessInfo() = default; + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AccessInfo( + SimplifierHashType h, + BufPtr b, + std::vector i, + size_t accessOrder) + : hash_(h), + buf_(std::move(b)), + indices_(std::move(i)), + store_cost_(alloc(0)), + load_cost_(alloc(0)), + accessOrder_(accessOrder) {} + + // Adds a Store to this access, which is in the provided scope. + void addStore(StorePtr store, const std::shared_ptr& scope); + + // Adds a Load to this access, which occurs in the usage Stmt in the provided + // scope. + void addLoad( + LoadPtr load, + const std::shared_ptr& scope, + StmtPtr usage); + + // Merge another AccessInfo into this one. + void merge(const std::shared_ptr& other); + + // Returns true if the other AccessInfo's bounds may overlap this one. + bool overlaps(const std::shared_ptr& other); + + // Returns true if the indices of this access depend on the provided Var. + bool dependsOnVar(VarPtr v); + + // Clone this AccessInfo, and set this as the new accesses' hiddenAccess. + static std::shared_ptr cloneWithHiddenInfo( + const std::shared_ptr& orig); + + // print for debugging. + void print() const; + + SimplifierHashType hash() const { + return hash_; + } + + BufPtr buf() const { + return buf_; + } + + const std::vector& indices() const { + return indices_; + } + + BlockPtr block() const { + return block_; + } + + void setEnclosingBlock(BlockPtr b) { + block_ = b; + } + + StmtPtr first_usage() const { + return first_usage_; + } + StmtPtr last_usage() const { + return last_usage_; + } + + void setUsageMarks(StmtPtr first, StmtPtr last) { + first_usage_ = first; + last_usage_ = last; + } + + bool firstUsageOverlapped() const { + return firstUsageOverlapped_; + } + + ExprPtr store_cost() const { + return store_cost_; + } + + ExprPtr load_cost() const { + return load_cost_; + } + + const std::vector& stores() const { + return stores_; + } + + const std::vector& loads() const { + return loads_; + } + + void hoistCosts(ExprPtr extent) { + store_cost_ = IRSimplifier::simplify(alloc(store_cost_, extent)); + load_cost_ = IRSimplifier::simplify(alloc(load_cost_, extent)); + } + + size_t conditionId() const { + return conditionId_; + } + + void setConditionId(size_t c) { + conditionId_ = c; + } + + size_t accessOrder() const { + return accessOrder_; + } + + std::shared_ptr hiddenAccess() const { + return hiddenAccess_; + } + + // Holds state relating to the scalar variable we will insert to replace some + // number of loads and stores. 
+ struct ScalarReplacement { + VarPtr var{nullptr}; + BufPtr var_wrapper{nullptr}; + LetPtr initializer{nullptr}; + }; + + ScalarReplacement& replacement() { + return replacement_; + } + + private: + SimplifierHashType hash_; + BufPtr buf_; + std::vector indices_; + BlockPtr block_{nullptr}; + + StmtPtr first_usage_{nullptr}; + StmtPtr last_usage_{nullptr}; + + // Whether or not this access is overlapped in the first Stmt it appears. This + // means we cannot use it's first Store as the initializer. + bool firstUsageOverlapped_{false}; + + // The cost in real ops that this access represents, to enable + // filtering accesses that wont save any loads or stores. + ExprPtr store_cost_; + ExprPtr load_cost_; + + // The actual Stores and Loads which represent this access. + // Be careful with these, any mutator will invalidate these pointers. + std::vector stores_; + std::vector loads_; + + // An identifier representing the conditional block, if any, this access + // depends on. + size_t conditionId_{0}; + + // An identifier representing the order this access was first encountered, for + // sorting returned results. + size_t accessOrder_{0}; + + // Sometimes when traversing the tree we need to record what would happen if + // we hoisted an access, but sometimes it doesn't work out. This lets us + // "undo" some mutation and return to the internal hidden AccessInfo. + // It will be removed after any further additions to this AccessInfo. + std::shared_ptr hiddenAccess_; + + ScalarReplacement replacement_; +}; + +using AccessHashMap = + std::unordered_map>; + +// Represents a scope block and holds all accesses contained within it. +class Scope { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Scope(BlockPtr b, std::shared_ptr parent, size_t conditionId = 0) + : block_(std::move(b)), + parent_(std::move(parent)), + conditionId_(conditionId) {} + + AccessHashMap& getAccessMapByBuf(BufPtr b); + + std::unordered_map& openAccesses() { + return openAccesses_; + } + + std::vector>& closedAccesses() { + return closedAccesses_; + } + + BlockPtr block() const { + return block_; + } + + std::shared_ptr parent() const { + return parent_; + } + + size_t conditionId() const { + return conditionId_; + } + + const std::unordered_set& localVars() const { + return localVars_; + } + void addLocalVar(VarPtr v) { + localVars_.insert(v); + } + + void closeAccess(const std::shared_ptr& info); + + void filterClosed(); + + private: + // Map of map to access, narrowing by Buf then by hash(Buf+Indices). + // This allows us to find a candidate access easily, and also check for + // overlap with other accesses to the same buf. Buf -> + // Hash -> + // Access + std::unordered_map openAccesses_; + std::vector> closedAccesses_; + + // The Block object this scope represents. + BlockPtr block_; + + // The enclosing scope object. + std::shared_ptr parent_; + + // An identifier representing the condition block this scope depends on. + size_t conditionId_; + + // A set of variables local to this scope (e.g. loop vars). + std::unordered_set localVars_; +}; + +/* Analyzes the graph and collects accesses to the same symbolic tensor element + * which can be replaced by a single local scalar. + * + * This works by recursively walking the tree in postfix order, building sets of + * accesses to the same symbolic element by scope and then merging lower scopes + * into their enclosing scope. 
+ * + * It is safe to move two accesses of the same Tensor element to a local scalar + * Var if between all usages of the element there are no other Loads or Stores + * that may refer to it. In the comments I refer to this as overlapping the + * access, or "cutting" the existing AccessInfo. In the case where a candidate + * for registerization is cut, it may be possible to finalize the access early + * by writing it back to the Tensor and then create a new scalar variable after + * the overlapping access is complete. We will attempt to do this when it saves + * memory accesses. + * + * There are a few cases that make this more challenging: + * + * - For: Loops change the number of real usages of a buffer by the loop + * extent, but only if we can pull the definition and finalization of the scalar + * variable out of the loop block. + * + * - Cond: Conditions complicate lifting scalars out of internal scopes. + * Generally we cannot lift an access outside of a conditional scope unless + * there is already a reference to that same access at the higher scope, since + * we don't know if the condition was guarding an array access not safe at the + * higher scope. In the comments I refer to this as the condition "hiding" the + * access, and the outer access "unhiding" it. + * + * - IfThenElse: Same situation as Cond, except since IfThenElse is an Expr + * rather than a Stmt we cannot insert the scalar definition or finalizer + * within the conditional scope. Accesses inside an IfThenElse can be safely + * combined with external accesses but cannot exist completely within. + * + * - Let: Accesses dependent on local variables via Let Stmts, or loop vars, + * cannot be raised outside of the scope of the dependent var. + */ +class TORCH_API RegisterizerAnalysis : public IRVisitor { + public: + RegisterizerAnalysis() + : currentScope_(std::make_shared(nullptr, nullptr, 0)) {} + ~RegisterizerAnalysis() override = default; + + void visit(ForPtr v) override; + + void visit(CondPtr v) override; + + void visit(BlockPtr v) override; + + void visit(StorePtr v) override; + + void visit(LoadPtr v) override; + + void visit(IfThenElsePtr v) override; + + void visit(LetPtr v) override; + +#define STMT_ON_STACK(Op) \ + void visit(Op##Ptr v) override { \ + stmtStack_.push_front(v); \ + IRVisitor::visit(v); \ + stmtStack_.pop_front(); \ + } + + STMT_ON_STACK(AtomicAdd); + STMT_ON_STACK(Allocate); + STMT_ON_STACK(Free); + +#undef STMT_ON_STACK + + std::vector> getCandidates(); + + private: + void mergeCurrentScopeIntoParent(); + void mergeHiddenScope(bool allowClosed); + void closeAccessIntoScope( + const std::shared_ptr& info, + const std::shared_ptr& scope); + + std::unordered_set exprConditionals_; + + // A stack of enclosing Stmts for tracking the usage Stmt of Loads. + std::deque stmtStack_; + + // The current scope being analyzed. + std::shared_ptr currentScope_; + + HashProvider hasher_; + + size_t conditionId_{0}; + size_t accessOrder_{0}; +}; + +/* Replaces each registerizable access with a Scalar variable, including + * definition, initializer and finalizer. 
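 *
 * Sketch of the emitted pattern (an annotation, not part of the original
 * comment; the scalar's name comes from getBufferAccessCount and is
 * illustrative only):
 *
 *   int A_1 = A[0];   // definition + initializer inserted at the first usage
 *   ...               // Loads/Stores of A[0] in the region are rewritten to A_1
 *   A[0] = A_1;       // finalizer written back after the last usage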
+ */ +class TORCH_API RegisterizerReplacer : public IRMutator { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + RegisterizerReplacer(std::vector>& vec) + : infoSet_(vec) { + buildReplacements(); + } + + ExprPtr mutate(LoadPtr v) override; + + StmtPtr mutate(StorePtr v) override; + + StmtPtr mutate(BlockPtr v) override; + + private: + struct ReplacerScope { + std::unordered_map>> + initializerPoints_; + std::unordered_map>> + finalizePoints_; + }; + + // Creates the various ReplacerScope objects and builds internal maps. + void buildReplacements(); + + // State relating to the accesses yet to be replaced. + std::vector>& infoSet_; + std::unordered_map> storeToAccess_; + std::unordered_map> loadToAccess_; + std::unordered_map parentToAccesses_; + + // Holds the set of Stores that should be pulled into an initializer, so they + // can be eliminated. + std::set eliminatedIntializers_; + + // Tracks the number of times we've seen each buffer, so we can name the + // scalar Vars appropriately. + std::unordered_map bufferAccessCounts_; + unsigned int getBufferAccessCount(BufPtr b) { + return ++bufferAccessCounts_[b]; + } +}; +} // namespace registerizer + +// Apply scalar replacement to all accesses in s. +// To produce safe code, this must occur after handling parallelized axes and +// atomics. +TORCH_API StmtPtr registerize(StmtPtr s); + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/stmt.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/stmt.h new file mode 100644 index 0000000000000000000000000000000000000000..be309d85625ea6e83e1ecbf997f6097e4cdf99a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/stmt.h @@ -0,0 +1,1024 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +namespace torch { +namespace jit { +namespace tensorexpr { + +// The common base between all statement node. +class TORCH_API Stmt : public std::enable_shared_from_this { + public: + Stmt() = default; + virtual ~Stmt() = default; + virtual void accept(IRVisitor* visitor) = 0; + virtual StmtPtr accept_mutator(IRMutator* mutator) = 0; + + StmtPtr get_parent() const { + return parent_ ? parent_->getptr() : nullptr; + } + + /* + * Make a deep copy of the given statement. + * + * All statements and expressions used in children of the statement are + * cloned. Note that the variables are not deep-copied since they are + * immutable. 
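 *
 * Usage sketch (an annotation, not part of the original comment):
 *
 *   StmtPtr copy = Stmt::clone(loop);
 *   // `copy` shares no Stmt/Expr nodes with `loop`, so mutating one does not
 *   // affect the other; Var nodes are shared because they are immutable.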
+ */ + static StmtPtr clone(StmtPtr s); + + protected: + static void set_parent(StmtPtr s, Stmt* new_parent) { + s->parent_ = new_parent; + } + std::shared_ptr getptr() { + return shared_from_this(); + } + + private: + Stmt* parent_ = nullptr; +}; + +template +class StmtNode : public Stmt { + public: + using StmtNodeBase = StmtNode; + void accept(IRVisitor* visitor) override { + visitor->visit(static_to(getptr())); + } + StmtPtr accept_mutator(IRMutator* mutator) override; + StmtNode() = default; +}; + +template +StmtPtr StmtNode::accept_mutator(IRMutator* mutator) { + return mutator->mutate(static_to(getptr())); +} + +// Concrete Stmt classes +class TORCH_API Block : public StmtNode { + public: + static BlockPtr make(const std::vector& stmts) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector valid_stmts; + for (auto& stmt : stmts) { + if (!stmt) { + continue; + } + valid_stmts.push_back(stmt); + } + if (valid_stmts.empty()) { + return nullptr; + } + return alloc(valid_stmts); + } + + int nstmts() const { + return stmts_.size(); + } + bool empty() const { + return stmts_.empty(); + } + + void prepend_stmt(StmtPtr s) { + if (s->get_parent()) { + throw malformed_input( + "Block prepend Stmt with existing parent", std::move(s)); + } + + stmts_.push_front(s); + set_parent(std::move(s), this); + } + void append_stmt(StmtPtr s) { + if (s->get_parent()) { + throw malformed_input( + "Block append Stmt with existing parent", std::move(s)); + } + + stmts_.push_back(s); + set_parent(std::move(s), this); + } + + void insert_stmt_before(StmtPtr s, StmtPtr before) { + if (s->get_parent()) { + throw malformed_input( + "Block append Stmt with existing parent", std::move(s)); + } + + auto pos = std::find(stmts_.begin(), stmts_.end(), before); + if (pos == stmts_.end()) { + throw malformed_input( + "Inserting after statement that is not in block", std::move(s)); + } + + stmts_.insert(pos, s); + set_parent(std::move(s), this); + } + + void insert_stmt_after(StmtPtr s, StmtPtr after) { + if (s->get_parent()) { + throw malformed_input( + "Block append Stmt with existing parent", std::move(s)); + } + + auto pos = std::find(stmts_.begin(), stmts_.end(), after); + if (pos == stmts_.end()) { + throw malformed_input( + "Inserting after statement that is not in block", std::move(s)); + } + + ++pos; + + stmts_.insert(pos, s); + set_parent(std::move(s), this); + } + + bool replace_stmt(StmtPtr old_stmt, StmtPtr new_stmt) { + if (new_stmt->get_parent()) { + throw malformed_input( + "Block replace Stmt with existing parent", std::move(new_stmt)); + } + + auto pos = std::find(stmts_.begin(), stmts_.end(), old_stmt); + if (pos == stmts_.end()) { + return false; + } + stmts_.insert(pos, new_stmt); + stmts_.erase(pos); + set_parent(std::move(old_stmt), nullptr); + set_parent(std::move(new_stmt), this); + return true; + } + + // Creates a new block by cloning `this` block and replacing the given + // statement with a new statement. Note that `old_stmt` refers to a statement + // in `this` block. If the `old_stmt` is not found, it will return `nullptr`. 
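  // Usage sketch for the member below (an annotation, not original code;
  // s0, s1, s2 and s1_new stand for pre-built StmtPtrs):
  //
  //   BlockPtr b = Block::make({s0, s1, s2});
  //   BlockPtr b2 = b->clone_and_replace(s1, s1_new);
  //   // `b` is untouched; `b2` holds clones of s0 and s2 with `s1_new`
  //   // spliced in between. clone_and_replace returns nullptr if s1 is not a
  //   // direct child of `b`, and `s1_new` must not already have a parent.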
+ BlockPtr clone_and_replace(StmtPtr old_stmt, StmtPtr new_stmt) { + if (new_stmt->get_parent()) { + throw malformed_input( + "Block replace Stmt with existing parent", std::move(new_stmt)); + } + + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector stmts(stmts_.begin(), stmts_.end()); + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector cloned_stmts(stmts.size()); + bool found = false; + for (int i = 0; i < static_cast(stmts.size()); ++i) { + if (stmts[i] == old_stmt) { + found = true; + cloned_stmts[i] = new_stmt; + } else { + cloned_stmts[i] = Stmt::clone(stmts[i]); + } + } + if (!found) { + return nullptr; + } + return alloc(cloned_stmts); + } + + bool remove_stmt(StmtPtr stmt) { + auto pos = std::find(stmts_.begin(), stmts_.end(), stmt); + if (pos == stmts_.end()) { + return false; + } + + set_parent(std::move(stmt), nullptr); + stmts_.erase(pos); + return true; + } + + std::list stmts() const { + return stmts_; + } + + void clear() { + for (const auto& s : stmts_) { + set_parent(s, nullptr); + } + stmts_.clear(); + } + + void set_stmts(const std::vector& stmts) { + clear(); + init(stmts); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + explicit Block(const std::vector& stmts) { + init(stmts); + } + + typedef std::list::iterator iterator; + typedef std::list::const_iterator const_iterator; + + iterator begin() { + return stmts_.begin(); + } + + const_iterator begin() const { + return stmts_.begin(); + } + + iterator end() { + return stmts_.end(); + } + + const_iterator end() const { + return stmts_.end(); + } + + StmtPtr front() { + return stmts_.front(); + } + + StmtPtr front() const { + return stmts_.front(); + } + + StmtPtr back() { + return stmts_.back(); + } + + StmtPtr back() const { + return stmts_.back(); + } + + void splice(Block::iterator it, BlockPtr other) { + for (const StmtPtr& s : *other) { + set_parent(s, this); + } + + stmts_.splice(it, other->stmts_); + } + + static BlockPtr getSharedParent(StmtPtr p1, StmtPtr p2) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::unordered_set enclosing; + + StmtPtr p1_p = std::move(p1); + while (p1_p) { + if (BlockPtr b = to(p1_p)) { + if (b) { + enclosing.insert(b); + } + } + p1_p = p1_p->get_parent(); + } + + StmtPtr p2_p = std::move(p2); + while (p2_p) { + if (BlockPtr b = to(p2_p)) { + if (enclosing.count(b) != 0) { + return b; + } + } + p2_p = p2_p->get_parent(); + } + + return nullptr; + } + + // returns the immediate child containing statement s. + StmtPtr getEnclosedRoot(StmtPtr s) const { + while (s && s->get_parent().get() != this) { + s = s->get_parent(); + } + return s; + } + + private: + std::list stmts_; + + void init(const std::vector& stmts) { + for (const StmtPtr& s : stmts) { + if (!s) { + continue; + } + if (!s->get_parent()) { + // If we get here, it's a bug, but we cannot throw an error from a + // constructor. But IR verifier would catch this. 
+ set_parent(s, this); + } + + stmts_.push_back(s); + } + } +}; + +class TORCH_API Store : public StmtNode { + public: + VarPtr base_handle() const { + return buf_->base_handle(); + } + std::vector indices() const { + return indices_; + } + ExprPtr flat_index() const { + TORCH_CHECK(indices_.size() == 1, "Indices haven't been flattened."); + return indices_[0]; + } + ExprPtr value() const { + return value_; + } + BufPtr buf() const { + return buf_; + } + + void set_buf(BufPtr buf) { + buf_ = std::move(buf); + } + + void set_indices(std::vector indices) { + indices_ = std::move(indices); + } + + void set_value(ExprPtr value) { + value_ = std::move(value); + } + + static StorePtr make( + const BufHandle& buf, + const std::vector& indices, + const ExprHandle& value); + + Store(BufPtr buf, std::vector indices, ExprPtr value); + + private: + BufPtr buf_; + std::vector indices_; + ExprPtr value_; +}; + +// Allocate a buffer of given shapes and dtypes and bind it with the given +// buffer var. The life span is at most through the current program, until it is +// explicitly freed. An unfreed memory is likely considered an error. +class TORCH_API Allocate : public StmtNode { + public: + static AllocatePtr make(const BufHandle& buf_handle) { + return alloc(buf_handle.node()); + } + + VarPtr buffer_var() const { + return buf_->base_handle(); + } + + Dtype dtype() const { + return buf_->dtype(); + } + + const std::vector dims() const { + return buf_->dims(); + } + + BufPtr buf() const { + return buf_; + } + + void set_buf(BufPtr buf) { + buf_ = std::move(buf); + } + + explicit Allocate(BufPtr buf) : buf_(std::move(buf)) {} + + private: + BufPtr buf_; + // TODO: add memory types. +}; + +// PlacementAllocate is a variation of the Allocate operator in NNC IR. It does +// not allocate memory but reuse the memory of another buffer for the given +// buffer. +class TORCH_API PlacementAllocate : public StmtNode { + public: + static PlacementAllocatePtr make( + const BufHandle& buf_handle, + const BufHandle& buf_handle_to_reuse) { + return alloc( + buf_handle.node(), buf_handle_to_reuse.node()); + } + + BufPtr buf() const { + return buf_; + } + + BufPtr buf_to_reuse() const { + return buf_to_reuse_; + } + + void set_buf(BufPtr buf) { + buf_ = std::move(buf); + } + + void set_buf_to_reuse(BufPtr buf) { + buf_to_reuse_ = std::move(buf); + } + + explicit PlacementAllocate(BufPtr buf, BufPtr buf_to_reuse) + : buf_(std::move(buf)), buf_to_reuse_(std::move(buf_to_reuse)) {} + + private: + BufPtr buf_; + BufPtr buf_to_reuse_; +}; + +// Free the specific buffer. It is an error. 
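// Usage sketch (an annotation, not original code): Allocate and Free are
// typically paired around the statements that use an intermediate buffer.
// `tmp` and `N` are hypothetical names.
//
//   BufHandle tmp("tmp", {N}, kFloat);
//   StmtPtr body = Block::make(
//       {Allocate::make(tmp),
//        /* ... Stores/Loads through `tmp` ... */
//        Free::make(tmp)});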
+class TORCH_API Free : public StmtNode { + public: + static FreePtr make(const BufHandle& buf_handle) { + return alloc(buf_handle.node()); + } + + VarPtr buffer_var() const { + return buf_->base_handle(); + } + + BufPtr buf() const { + return buf_; + } + + void set_buf(BufPtr buf) { + buf_ = std::move(buf); + } + + explicit Free(BufPtr buf) : buf_(std::move(buf)) {} + + private: + BufPtr buf_; +}; + +class TORCH_API FreeExt : public StmtNode { + public: + static FreeExtPtr make(const std::vector& bufs); + + std::vector bufs() const { + return bufs_; + } + + void set_bufs(std::vector bufs) { + bufs_ = std::move(bufs); + } + + explicit FreeExt(std::vector bufs) : bufs_(std::move(bufs)) {} + + private: + std::vector bufs_; +}; + +class TORCH_API Let : public StmtNode { + public: + static LetPtr make(const VarHandle& var, const ExprHandle& val) { + return alloc(var.node(), val.node()); + } + + Let(VarPtr var, ExprPtr val) : var_(std::move(var)), val_(std::move(val)) {} + + VarPtr var() const { + return var_; + } + + ExprPtr value() const { + return val_; + } + + void set_var(VarPtr var) { + var_ = std::move(var); + } + + void set_val(ExprPtr val) { + val_ = std::move(val); + } + + private: + VarPtr var_; + ExprPtr val_; +}; + +class TORCH_API Cond : public StmtNode { + public: + static CondPtr make( + const ExprHandle& condition, + StmtPtr true_stmt, + StmtPtr false_stmt) { + return alloc(condition.node(), true_stmt, false_stmt); + } + + ExprPtr condition() const { + return condition_; + } + + BlockPtr true_stmt() const { + return true_stmt_; + } + + BlockPtr false_stmt() const { + return false_stmt_; + } + + void set_condition(ExprPtr condition) { + condition_ = std::move(condition); + } + + void set_true_stmt(StmtPtr true_stmt) { + if (true_stmt) { + BlockPtr b = to(true_stmt); + if (!b) { + b = alloc(std::vector({std::move(true_stmt)})); + } + true_stmt_ = b; + set_parent(true_stmt_, this); + } + } + + void set_false_stmt(StmtPtr false_stmt) { + if (false_stmt) { + BlockPtr b = to(false_stmt); + if (!b) { + b = alloc(std::vector({std::move(false_stmt)})); + } + false_stmt_ = b; + set_parent(false_stmt_, this); + } + } + + Cond(ExprPtr condition, StmtPtr true_stmt, StmtPtr false_stmt) + : condition_(std::move(condition)) { + set_true_stmt(std::move(true_stmt)); + set_false_stmt(std::move(false_stmt)); + } + + CondPtr cloneWithNewBodies(StmtPtr true_stmt, StmtPtr false_stmt) { + return alloc(condition_, true_stmt, false_stmt); + } + + CondPtr cloneWithNewBody(StmtPtr true_stmt) { + return alloc(condition_, true_stmt, nullptr); + } + + private: + ExprPtr condition_; + BlockPtr true_stmt_ = nullptr; + BlockPtr false_stmt_ = nullptr; +}; + +class TORCH_API LoopOptions { + public: + enum { + IDX_UNSET = -1, + IDX_X = 0, + IDX_Y = 1, + IDX_Z = 2, + IDX_W = 3, + IDX_MAX = IDX_W, + }; + // GPU Block Index + bool is_gpu_block_index() const { + return gpu_block_index_ != IDX_UNSET; + } + + int gpu_block_index() const { + return gpu_block_index_; + } + + std::string gpu_block_index_str() const { + if (!is_gpu_block_index()) { + throw malformed_input("Has no GPU block index"); + } + + // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays) + static const char* kBlockIndexNames[] = { + "blockIdx.x", + "blockIdx.y", + "blockIdx.z", + "blockIdx.w", + }; + + if (gpu_block_index_ < IDX_X || gpu_block_index_ > IDX_MAX) { + throw malformed_input("invalid GPU block index"); + } + + return kBlockIndexNames[gpu_block_index_]; + } + + void set_gpu_block_index(int index) { + if (index == 
IDX_UNSET) { + gpu_block_index_ = IDX_UNSET; + } + + if (is_gpu_thread_index()) { + throw std::runtime_error("Cannot set both gpu block and thread index"); + } + if (is_gpu_block_index() && gpu_block_index() != index) { + throw std::runtime_error("Cannot set a previously set block index"); + } + gpu_block_index_ = index; + } + + // GPU Thread Index + bool is_gpu_thread_index() const { + return gpu_thread_index() != IDX_UNSET; + } + + int gpu_thread_index() const { + return gpu_thread_index_; + } + + std::string gpu_thread_index_str() const { + if (!is_gpu_thread_index()) { + throw malformed_input("has no GPU thread index"); + } + + // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays) + static const char* kThreadIndexNames[] = { + "threadIdx.x", "threadIdx.y", "threadIdx.z", "threadIdx.w"}; + + if (gpu_thread_index_ < IDX_X || gpu_thread_index_ > IDX_MAX) { + throw malformed_input("invalid GPU thread index"); + } + + return kThreadIndexNames[gpu_thread_index_]; + } + + void set_gpu_thread_index(int index) { + if (index == IDX_UNSET) { + gpu_thread_index_ = IDX_UNSET; + } + + if (is_gpu_block_index()) { + throw std::runtime_error("Cannot set both gpu thread and block index"); + } + if (is_gpu_thread_index() && gpu_thread_index() != index) { + throw std::runtime_error("Cannot set a previously set thread index"); + } + gpu_thread_index_ = index; + } + + void set_parallel() { + is_parallel_ = true; + } + + bool is_parallel() const { + return is_parallel_; + } + + std::string ToString() const { + if (is_gpu_block_index()) { + return gpu_block_index_str(); + } else if (is_gpu_thread_index()) { + return gpu_thread_index_str(); + } else if (is_parallel()) { + return "parallel"; + } + return ""; + } + + bool isDefault() const { + return gpu_block_index_ == IDX_UNSET && gpu_thread_index_ == IDX_UNSET && + !is_parallel_; + } + + void set_buffer_mapping(const std::unordered_map& map) { + map_input_to_tensor_bufs_ = map; + } + + std::unordered_map get_buffer_mapping() const { + return map_input_to_tensor_bufs_; + } + + private: + int gpu_block_index_{IDX_UNSET}; + int gpu_thread_index_{IDX_UNSET}; + bool is_parallel_{false}; + std::unordered_map map_input_to_tensor_bufs_; +}; + +class TORCH_API For : public StmtNode { + public: + VarPtr var() const { + return var_; + } + ExprPtr start() const { + return start_; + } + ExprPtr stop() const { + return stop_; + } + BlockPtr body() const { + return body_; + } + static ForPtr make( + const VarHandle& var, + const ExprHandle& start, + const ExprHandle& stop, + StmtPtr body) { + if (!body) { + return nullptr; + } + return alloc(var.node(), start.node(), stop.node(), body); + } + static ForPtr make( + const VarHandle& var, + const ExprHandle& start, + const ExprHandle& stop, + StmtPtr body, + const LoopOptions& loop_options) { + if (!body) { + return nullptr; + } + return alloc( + var.node(), start.node(), stop.node(), body, loop_options); + } + const LoopOptions loop_options() const { + return loop_options_; + } + + For(VarPtr var, ExprPtr start, ExprPtr stop, StmtPtr body) + : var_(std::move(var)), start_(std::move(start)), stop_(std::move(stop)) { + BlockPtr b = to(body); + if (!b) { + b = alloc(std::vector({std::move(body)})); + } + body_ = b; + set_parent(body_, this); + } + + For(VarPtr var, + ExprPtr start, + ExprPtr stop, + StmtPtr body, + LoopOptions loop_options) + : var_(var), + start_(start), + stop_(stop), + loop_options_(std::move(loop_options)) { + if (!var) { + throw malformed_input("invalid Var in For loop"); + } else if 
(!start) { + throw malformed_input("invalid Start in For loop"); + } else if (!stop) { + throw malformed_input("invalid Stop in For loop"); + } else if (!body || body->get_parent()) { + throw malformed_input("invalid Body in For loop"); + } + + BlockPtr b = to(body); + if (!b) { + b = alloc(std::vector({std::move(body)})); + } + body_ = b; + set_parent(body_, this); + } + + void set_gpu_block_index(int block_index) { + loop_options_.set_gpu_block_index(block_index); + } + + void set_gpu_thread_index(int thread_index) { + loop_options_.set_gpu_thread_index(thread_index); + } + + void set_parallel() { + loop_options_.set_parallel(); + } + + bool is_parallel() const { + return loop_options_.is_parallel(); + } + + void set_buffer_map(const std::unordered_map& map) { + loop_options_.set_buffer_mapping(map); + } + + ForPtr cloneWithNewBody(StmtPtr body) const { + return alloc(var_, start_, stop_, body, loop_options_); + } + + BlockPtr removeBody() { + auto res = body_; + set_parent(res, nullptr); + body_ = nullptr; + return res; + } + + void set_body(StmtPtr body) { + BlockPtr b = to(body); + if (!b) { + b = alloc(std::vector({std::move(body)})); + } + body_ = b; + set_parent(body_, this); + } + + void set_start(ExprPtr start) { + start_ = std::move(start); + } + + void set_stop(ExprPtr stop) { + stop_ = std::move(stop); + } + + void set_var(VarPtr var) { + var_ = std::move(var); + } + + private: + VarPtr var_; + ExprPtr start_; + ExprPtr stop_; + BlockPtr body_; + LoopOptions loop_options_; +}; + +// A backend specific IR Node that implements atomic-add. +// This node could only shows up as an internal with GPU backends. +// TODO: move to this an internal IR. +// TODO: make IR nodes extensible. +class TORCH_API AtomicAdd : public StmtNode { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + AtomicAdd(BufPtr buf, std::vector indices, ExprPtr value) + : buf_(std::move(buf)), + indices_(std::move(indices)), + value_(std::move(value)) {} + + VarPtr base_handle() const { + return buf_->base_handle(); + } + + BufPtr buf() const { + return buf_; + } + + ExprPtr flat_index() const { + TORCH_CHECK(indices_.size() == 1, "Indices haven't been flattened."); + return indices_[0]; + } + + ExprPtr value() const { + return value_; + } + + const std::vector& indices() const { + return indices_; + } + + void set_buf(BufPtr buf) { + buf_ = std::move(buf); + } + + void set_indices(std::vector indices) { + indices_ = std::move(indices); + } + + void set_value(ExprPtr value) { + value_ = std::move(value); + } + + private: + BufPtr buf_; + std::vector indices_; + ExprPtr value_; +}; + +class TORCH_API SyncThreads : public StmtNode { + public: + SyncThreads() = default; +}; + +/* + * ExternalCall statement represents a call to an external function that would + * compute the contents of the output buffer. An ExternalCall statement consists + * of: + * 1) output buffer - the buffer that'll be initialized by the call + * 2) external function name - a key from the NNC function registry to lookup + * the actual function to call + * 3) buffer arguments - the input buffers used by the function + * 4) non-buffer arguments - scalar arguments to pass to the function + * + * An example: + * A = nnc_conv2d(buf_args={Input, Weight, Bias}, args={1}) + * Here 'A' is the output buffer, "nnc_conv2d" is the function name, the buffer + * arguments are 'Input', 'Weight', and 'Bias', and there is a single non-buffer + * argument - 1. 
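 *
 * Construction sketch for the example above (an annotation, not part of the
 * original comment; assumes A, Input, Weight and Bias are BufHandles defined
 * elsewhere):
 *
 *   ExternalCallPtr call = ExternalCall::make(
 *       A, "nnc_conv2d", {Input, Weight, Bias}, {ExprHandle(1)});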
+ * + * The semantics of the scalar arguments is defined solely by the implementation + * of the external function. + */ +class TORCH_API ExternalCall : public StmtNode { + public: + static ExternalCallPtr make( + BufHandle buf, + const std::string& func_name, + const std::vector& buf_args, + const std::vector& args); + + BufPtr buf() const { + return buf_; + } + + std::string func_name() const { + return func_name_; + } + + std::vector buf_args() const { + return buf_args_; + } + + std::vector args() const { + return args_; + } + + void set_buf(BufPtr buf) { + buf_ = std::move(buf); + } + + void set_buf_args(std::vector buf_args) { + buf_args_ = std::move(buf_args); + } + + void set_args(std::vector args) { + args_ = std::move(args); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + ExternalCall( + BufPtr buf, + std::string func_name, + std::vector buf_args, + std::vector args) + : buf_(std::move(buf)), + func_name_(std::move(func_name)), + buf_args_(std::move(buf_args)), + args_(std::move(args)) {} + + private: + BufPtr buf_; + std::string func_name_; + std::vector buf_args_; + std::vector args_; +}; + +class TORCH_API ExternalCallWithAlloc : public StmtNode { + public: + static ExternalCallWithAllocPtr make( + const std::string& func_name, + const std::vector& buf_out_args, + const std::vector& buf_args, + const std::vector& args); + + std::vector buf_out_args() const { + return buf_out_args_; + } + + std::string func_name() const { + return func_name_; + } + + std::vector buf_args() const { + return buf_args_; + } + + std::vector args() const { + return args_; + } + + void set_buf_out_args(std::vector buf_out_args) { + buf_out_args_ = std::move(buf_out_args); + } + + void set_buf_args(std::vector buf_args) { + buf_args_ = std::move(buf_args); + } + + void set_args(std::vector args) { + args_ = std::move(args); + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + ExternalCallWithAlloc( + std::string func_name, + std::vector buf_out_args, + std::vector buf_args, + std::vector args) + : func_name_(std::move(func_name)), + buf_out_args_(std::move(buf_out_args)), + buf_args_(std::move(buf_args)), + args_(std::move(args)) {} + + private: + std::string func_name_; + std::vector buf_out_args_; + std::vector buf_args_; + std::vector args_; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/tensor.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..698de07f2be54f11f215bb464d98c69141de2ed1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/tensor.h @@ -0,0 +1,329 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class TORCH_API Tensor { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Tensor(BufPtr buf, const std::vector& args, ExprPtr body) + : buf_(std::move(buf)) { + stmt_ = constructStmt(args, std::move(body), {}, {}); + } + Tensor(BufHandle buf, const std::vector& args, ExprHandle body) + : Tensor(buf.node(), VarHandleVectorToVarVector(args), body.node()) {} + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + Tensor( + BufPtr buf, + const std::vector& args, + const std::vector& reduce_dims, + const std::vector& reduce_args, + ExprPtr body) + : buf_(std::move(buf)) { + stmt_ = 
constructStmt(args, std::move(body), reduce_dims, reduce_args); + } + Tensor( + BufHandle buf, + const std::vector& args, + const std::vector& reduce_dims, + const std::vector& reduce_args, + ExprHandle body) + : Tensor( + buf.node(), + VarHandleVectorToVarVector(args), + ExprHandleVectorToExprVector(reduce_dims), + VarHandleVectorToVarVector(reduce_args), + body.node()) {} + + Tensor(BufPtr buf, StmtPtr stmt) + : buf_(std::move(buf)), stmt_(std::move(stmt)) {} + + BufPtr buf() const { + return buf_; + } + + StmtPtr stmt() const { + return stmt_; + } + + template + inline ExprHandle load(const std::vector& args) const; + template + inline ExprHandle load(const Ts&... ts) const; + + private: + StmtPtr constructStmt( + const std::vector& args, + ExprPtr body, + const std::vector& reduce_dims, + const std::vector& reduce_args) const; + + BufPtr buf_; + StmtPtr stmt_; +}; + +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const std::function& body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + const std::function& body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const std::function& + body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + const std::function& + body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const std::function< + ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>& + body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + const std::function< + ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>& + body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const std::function& body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + const std::function& body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const std::function&)>& body_func); +TORCH_API Tensor Compute( + const std::string& func_name, + const std::vector& dims, + const std::function&)>& body_func); + +inline std::vector create_index_vars( + const std::vector& dims) { + std::vector vars; + vars.reserve(dims.size()); + for (const ExprHandle& dim : dims) { + vars.emplace_back(alloc( + "i", dim.dtype().scalar_type() == ScalarType::Long ? kLong : kInt)); + } + return vars; +} + +// Handle reductions over a Reducer and a body_func which produces values. 
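// Usage sketch (an annotation, not original code): a row-sum over a 2-D
// buffer using the Sum reducer from reduction.h and the BufHandle overload of
// Reduce declared further down. `b`, `M` and `N` are hypothetical.
//
//   BufHandle b("b", {M, N}, kFloat);
//   Tensor row_sum = Reduce("row_sum", {M}, Sum(), b, {N});
//   // row_sum(i) accumulates b(i, j) over j, starting from Sum()'s 0
//   // initializer.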
+template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const Reducer& reducer, + const InitFunc& init_func, + const BodyFunc& body_func, + const std::vector& reduce_dims) { + std::vector vars = create_index_vars(dims); + std::vector reduce_vars = create_index_vars(reduce_dims); + + // If reduce_vars is empty, then it's not a reduction, but rather a simple + // copy + if (reduce_vars.empty()) { + ExprHandle body = Reducer::getReduceBody(body_func, vars); + BufHandle func_result = Buf::make( + func_name, dims, body.dtype(), c10::nullopt, std::move(strides)); + return Tensor(std::move(func_result), vars, std::move(body)); + } + + std::vector all_vars; + all_vars.insert(all_vars.end(), vars.begin(), vars.end()); + all_vars.insert(all_vars.end(), reduce_vars.begin(), reduce_vars.end()); + + ExprHandle body = Reducer::getReduceBody(body_func, all_vars); + std::vector output_args(vars.begin(), vars.end()); + ExprHandle init_expr = Cast::make(body.dtype(), init_func(vars)); + BufHandle func_result = Buf::make(func_name, dims, body.dtype(), init_expr); + + ExprHandle reduce_op = reducer(func_result, body, output_args, reduce_vars); + if (body.dtype() == kBFloat16) { + ExprHandle init_expr_acc = Cast::make(kFloat, init_func(vars)); + BufHandle func_result_acc = + Buf::make(func_name + "_acc", dims, kFloat, init_expr_acc); + reduce_op = reducer( + func_result, + std::move(func_result_acc), + std::move(body), + output_args, + reduce_vars); + } + + Tensor t = Tensor( + std::move(func_result), + vars, + reduce_dims, + reduce_vars, + std::move(reduce_op)); + return t; +} +template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + const Reducer& reducer, + const InitFunc& init_func, + const BodyFunc& body_func, + const std::vector& reduce_dims) { + return Reduce( + func_name, + dims, + c10::nullopt, + reducer, + init_func, + body_func, + reduce_dims); +} + +template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const Reducer& reducer, + const BodyFunc& body_func, + const std::vector& reduce_dims) { + return Reduce( + func_name, + dims, + strides, + reducer, + [&](ParameterList p) { return ExprHandle(reducer.initializer()); }, + body_func, + reduce_dims); +} +template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + const Reducer& reducer, + const BodyFunc& body_func, + const std::vector& reduce_dims) { + return Reduce( + func_name, dims, c10::nullopt, reducer, body_func, reduce_dims); +} + +// Overload which allows inline lambda functions for the body_func. 
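// Usage sketch for the lambda-friendly overloads below (an annotation, not
// original code); `b`, `M` and `N` are hypothetical:
//
//   Tensor mx = Reduce(
//       "max_j",
//       {M},
//       Maximum(kFloat),
//       [&](const VarHandle& i, const VarHandle& j) { return b.load(i, j); },
//       {N});
//   // mx(i) is the maximum of b(i, j) over j, seeded with minimumVal(kFloat).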
+template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const Reducer& reducer, + const BodyFunc&& body_func, + const std::vector& reduce_dims) { + return Reduce(func_name, dims, strides, reducer, body_func, reduce_dims); +} +template +Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + const Reducer& reducer, + const BodyFunc&& body_func, + const std::vector& reduce_dims) { + return Reduce(func_name, dims, c10::nullopt, reducer, body_func, reduce_dims); +} + +TORCH_API Tensor Reduce( + const std::string& name, + const std::vector& dims, + c10::optional> strides, + const Reducer& reducer, + const BufHandle& buffer, + const std::vector& reduce_dims); +TORCH_API Tensor Reduce( + const std::string& name, + const std::vector& dims, + const Reducer& reducer, + const BufHandle& buffer, + const std::vector& reduce_dims); + +// Overload for the common case of all dimensions of a previously Computed +// Tensor. +TORCH_API Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + c10::optional> strides, + const Reducer& reducer, + Tensor tensor, + const std::vector& reduce_dims); +TORCH_API Tensor Reduce( + const std::string& func_name, + const std::vector& dims, + const Reducer& reducer, + Tensor tensor, + const std::vector& reduce_dims); + +template +inline ExprHandle Tensor::load(const Ts&... ts) const { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector params({ExprHandle(ts)...}); + return Load::make(BufHandle(this->buf()), params); +} + +template +inline ExprHandle Tensor::load(const std::vector& args) const { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector params(args.begin(), args.end()); + return Load::make(BufHandle(this->buf()), params); +} + +template +inline ExprHandle BufHandle::load(const Ts&... 
ts) const { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector params({ExprHandle(ts)...}); + return ExprHandle(alloc(node(), ExprHandleVectorToExprVector(params))); +} + +template +inline ExprHandle BufHandle::load(const std::vector& args) const { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector params(args.begin(), args.end()); + return ExprHandle(alloc(node(), ExprHandleVectorToExprVector(params))); +} + +inline ExprHandle BufHandle::load(const std::vector& args) const { + return this->template load(args); +} + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/tensorexpr_init.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/tensorexpr_init.h new file mode 100644 index 0000000000000000000000000000000000000000..d3893da99554eb5edf297aebcbe7c42fc31c1ae3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/tensorexpr_init.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include + +namespace torch { +namespace jit { +// Initialize Python bindings for Tensor Expressions +void initTensorExprBindings(PyObject* module); +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/types.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/types.h new file mode 100644 index 0000000000000000000000000000000000000000..ee1b8e213adb405b251fc22cc463f88667fbeea6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/types.h @@ -0,0 +1,162 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +using int32 = std::int32_t; + +class Dtype; +TORCH_API std::ostream& operator<<(std::ostream& stream, const Dtype& dtype); + +using ScalarType = c10::ScalarType; + +enum ElementType { + kAllTypes = 0, + kIntegralTypes = 1 << 0, + kFloatingPointTypes = 1 << 1, + kBoolType = 1 << 2, + kComplexTypes = 1 << 3, + kQintTypes = 1 << 4, + kNonComplexOrQintTypes = kIntegralTypes | kBoolType | kFloatingPointTypes, +}; + +// Data types for scalar and vector elements. 
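// Usage sketch (an annotation, not original code):
//
//   Dtype f = kFloat;                       // scalar float, lanes() == 1
//   Dtype f8(kFloat, 8);                    // 8-lane vector of the same type
//   Dtype p = promoteTypes(kInt, kFloat);   // c10 promotion rules -> float
//   // BinaryOpDtype() below additionally requires both operands to agree on
//   // the number of lanes.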
+class TORCH_API Dtype { + public: + explicit Dtype(int8_t type) + : scalar_type_(static_cast(type)), lanes_(1) {} + explicit Dtype(ScalarType type) : scalar_type_(type), lanes_(1) {} + Dtype(int8_t type, int lanes) + : scalar_type_(static_cast(type)), lanes_(lanes) {} + Dtype(ScalarType type, int lanes) : scalar_type_(type), lanes_(lanes) {} + Dtype(Dtype type, int lanes) + : scalar_type_(type.scalar_type_), lanes_(lanes) { + if (type.lanes() != 1) { + throw malformed_input("dtype lanes dont match"); + } + } + int lanes() const { + return lanes_; + } + ScalarType scalar_type() const { + return scalar_type_; + } + Dtype scalar_dtype() const; + bool operator==(const Dtype& other) const { + return scalar_type_ == other.scalar_type_ && lanes_ == other.lanes_; + } + bool operator!=(const Dtype& other) const { + return !(*this == other); + } + int byte_size() const; + std::string ToCppString() const; + + bool is_integral() const { + return c10::isIntegralType(scalar_type_, true); + } + bool is_floating_point() const { + return c10::isFloatingType(scalar_type_); + } + bool is_signed() const { + return c10::isSignedType(scalar_type_); + } + + Dtype cloneWithScalarType(ScalarType nt) const { + return Dtype(nt, lanes_); + } + + private: + friend TORCH_API std::ostream& operator<<( + std::ostream& stream, + const Dtype& dtype); + ScalarType scalar_type_; + int lanes_; // the width of the element for a vector time +}; + +extern TORCH_API Dtype kHandle; + +#define NNC_DTYPE_DECLARATION(ctype, name) extern TORCH_API Dtype k##name; + +AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, NNC_DTYPE_DECLARATION) +NNC_DTYPE_DECLARATION(c10::quint8, QUInt8); +NNC_DTYPE_DECLARATION(c10::qint8, QInt8); +#undef NNC_DTYPE_DECLARATION + +template +TORCH_API Dtype ToDtype(); + +#define NNC_TODTYPE_DECLARATION(ctype, name) \ + template <> \ + inline Dtype ToDtype() { \ + return k##name; \ + } +AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, NNC_TODTYPE_DECLARATION) +NNC_TODTYPE_DECLARATION(c10::quint8, QUInt8); +NNC_TODTYPE_DECLARATION(c10::qint8, QInt8); +#undef NNC_TODTYPE_DECLARATION + +TORCH_API Dtype ToDtype(ScalarType type); + +inline Dtype promoteTypes(Dtype a, Dtype b) { + if (a.lanes() != b.lanes()) { + throw malformed_input("promoting types with different lanes"); + } + return Dtype( + static_cast(c10::promoteTypes( + static_cast(a.scalar_type()), + static_cast(b.scalar_type()))), + a.lanes()); +} + +inline Dtype BinaryOpDtype( + Dtype op1_dtype, + Dtype op2_dtype, + ScalarType ret_type = ScalarType::Undefined) { + if (op1_dtype == op2_dtype) { + if (ret_type == ScalarType::Undefined) { + return op1_dtype; + } + + return ToDtype(ret_type); + } + + if (op1_dtype.lanes() != op2_dtype.lanes()) { + throw malformed_input("lanes dont match"); + } + int lanes = op1_dtype.lanes(); + + Dtype resultType = promoteTypes(op1_dtype, op2_dtype); + if (resultType.scalar_type() == ScalarType::Undefined) { + throw malformed_input("scalar type doesn't match"); + } + + if (lanes == 1) { + // Use the fixed scalar Dtypes. 
+ return ToDtype(resultType.scalar_type()); + } + + return resultType; +} + +} // namespace tensorexpr +} // namespace jit +} // namespace torch + +namespace std { + +using torch::jit::tensorexpr::Dtype; +std::string to_string(const Dtype& dtype); +using torch::jit::tensorexpr::ScalarType; +std::string to_string(const ScalarType& dtype); + +} // namespace std diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/unique_name_manager.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/unique_name_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..b6162e6168fd63967fb71b4abd3d5d56bbb70f7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/unique_name_manager.h @@ -0,0 +1,38 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +class VarHandle; +class Var; + +using VarNameMap = std::unordered_map; + +// A manager to get unique names from vars. +// It starts with the name hints of the var and append "_" + $counter until it +// hits a unique name. +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +class TORCH_API UniqueNameManager { + public: + const std::string& get_unique_name(const VarHandle& v); + + const std::string& get_unique_name(VarPtr v); + + private: + friend class ScopedVarName; + VarNameMap unique_name_mapping_; + std::unordered_map unique_name_count_; + std::unordered_set all_unique_names_; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/var_substitutor.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/var_substitutor.h new file mode 100644 index 0000000000000000000000000000000000000000..44416e43307de5b3a41f8a24676a7fba55f5df61 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/var_substitutor.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace tensorexpr { + +using VarMapping = std::vector>; + +class VarSubMutator : public IRMutator { + public: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + VarSubMutator(const VarMapping& var_mapping) { + for (auto& entry : var_mapping) { + VarPtr key_var = entry.first; + ExprPtr value = entry.second; + if (!key_var) { + throw malformed_input("missing key in VarSubMutator"); + } + var_mapping_[std::move(key_var)] = std::move(value); + } + } + + ExprPtr mutate(VarPtr var) override { + auto iter = var_mapping_.find(var); + if (iter == var_mapping_.end()) { + return var; + } + return iter->second; + } + + ExprPtr mutate(ReduceOpPtr var) override { + auto body = var->body()->accept_mutator(this); + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + std::vector new_inner; + + for (const auto& v : var->reduce_args()) { + ExprPtr e = v->accept_mutator(this); + if (VarPtr new_var = to(e)) { + new_inner.push_back(std::move(new_var)); + } else { + VarFinder varFinder; + e->accept(&varFinder); + auto varlist = varFinder.vars(); + new_inner.insert(new_inner.end(), varlist.begin(), varlist.end()); + } + } + + return alloc(body, new_inner, var->reducer()); + } + + private: + std::unordered_map var_mapping_; +}; + +} // namespace tensorexpr +} // namespace jit +} // namespace torch diff --git 
a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/api.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/api.h new file mode 100644 index 0000000000000000000000000000000000000000..8349803af26956155af4e7b54896839e50887132 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/api.h @@ -0,0 +1,17 @@ +#pragma once + +#include + +// There are some components which use these symbols. Until we migrate them +// we have to mirror them in the old autograd namespace. +namespace torch { +namespace autograd { +namespace profiler { +using torch::profiler::impl::ActivityType; +using torch::profiler::impl::getProfilerConfig; +using torch::profiler::impl::ProfilerConfig; +using torch::profiler::impl::profilerEnabled; +using torch::profiler::impl::ProfilerState; +} // namespace profiler +} // namespace autograd +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/collection.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/collection.h new file mode 100644 index 0000000000000000000000000000000000000000..3678e04bfbdacd430af863f9ffae1d81713bc107 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/collection.h @@ -0,0 +1,661 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch { +namespace profiler { +namespace impl { + +enum class EventType : uint8_t { + TorchOp = 0, + Backend, + Vulkan, + Allocation, + OutOfMemory, + PyCall, + PyCCall, + Kineto +}; + +// ============================================================================ +// == Value (Tensor, Scalar) summary ========================================== +// ============================================================================ +struct TORCH_API RawTensorMetadataBase { + RawTensorMetadataBase() = default; + explicit RawTensorMetadataBase(const at::Tensor& t); + + StorageImplData data_; + c10::ScalarType dtype_{c10::ScalarType::Undefined}; + c10::Layout layout_{c10::Layout::Strided}; + uint32_t dim_{0}; +}; + +// Collected during profiling. +struct TORCH_API RawTensorMetadata : RawTensorMetadataBase { + RawTensorMetadata() = default; + RawTensorMetadata(const RawTensorMetadata&) = default; + RawTensorMetadata(RawTensorMetadata&&) noexcept = default; + RawTensorMetadata& operator=(const RawTensorMetadata&) = default; + RawTensorMetadata& operator=(RawTensorMetadata&&) noexcept = default; + explicit RawTensorMetadata(const at::Tensor& t); + + // Wrap `weak_self_` in `c10::optional` and split device into components to + // keep struct default constructable. (which the std::array initializer needs) + c10::optional weak_self_; + c10::DeviceType device_type_{c10::DeviceType::CPU}; + c10::DeviceIndex device_index_{-1}; +}; + +// Used during post processing. +struct TORCH_API TensorMetadata : public RawTensorMetadataBase { + TensorMetadata( + const RawTensorMetadata& r, + std::vector sizes, + std::vector strides); + + TensorImplAddress impl() const { + return weak_self_.get(); + } + + WeakTensor weak_self_; + c10::Device device_; + std::vector sizes_; + std::vector strides_; + + // Set during `calculateUniqueTensorIDs`. 
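  // Post-processing sketch (an annotation, not original code): sizes and
  // strides are recorded in a separate flat stream and re-attached here.
  // `raw` stands for a RawTensorMetadata captured at profile time, and the
  // literal sizes/strides are illustrative.
  //
  //   TensorMetadata md(raw, /*sizes=*/{8, 16}, /*strides=*/{16, 1});
  //   TensorImplAddress addr = md.impl();  // address used as a key for the
  //                                        // underlying tensor
  //
  // The two optional ids below are filled in later by
  // `calculateUniqueTensorIDs` so that records referring to the same tensor
  // can be matched across events.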
+ c10::optional id_; + c10::optional allocation_id_; +}; + +using op_input_t = std::variant< + TensorMetadata, + std::vector, + c10::IValue, + c10::nullopt_t>; + +// ============================================================================ +// == ExtraFields ============================================================= +// ============================================================================ +template +struct ExtraFields; + +struct TorchOpBasicFields { + int64_t sequence_number_{0}; + uint64_t forward_tid_{0}; + at::RecordScope scope_{}; + bool is_async_{false}; + int64_t debug_handle_{0}; + std::string name_; + + // Set in the exit callback. + uint64_t end_tid_{0}; +}; + +using jit_stack_t = std::vector; +using jit_modules_t = std::vector; +using extra_args_t = std::unordered_map; +using extra_meta_t = std::unordered_map; + +struct FallbackPair { + ProfilerVoidEventStub device_event_start_ = nullptr; + ProfilerVoidEventStub device_event_end_ = nullptr; +}; + +template <> +struct ExtraFields : TorchOpBasicFields { + ExtraFields( + TorchOpBasicFields&& f, + uint64_t correlation_id, + c10::time_t end_time_ns, + std::vector&& inputs, + std::vector&& concrete_inputs, + jit_stack_t&& jit_stack, + jit_modules_t&& jit_modules, + extra_args_t&& extra_args, + extra_meta_t&& extra_meta, + FallbackPair&& device_fallback, + bool allow_tf32_cublas, + std::unique_ptr&& perf_event_counters) + : TorchOpBasicFields(std::move(f)), + correlation_id_{correlation_id}, + end_time_ns_{end_time_ns}, + inputs_{std::move(inputs)}, + concrete_inputs_{std::move(concrete_inputs)}, + jit_stack_{std::move(jit_stack)}, + jit_modules_{std::move(jit_modules)}, + extra_args_{std::move(extra_args)}, + extra_meta_{std::move(extra_meta)}, + device_fallback_{std::move(device_fallback)}, + allow_tf32_cublas_{allow_tf32_cublas}, + perf_event_counters_{std::move(perf_event_counters)} {} + uint64_t correlation_id_; + c10::time_t end_time_ns_; + std::vector inputs_; + std::vector concrete_inputs_; + jit_stack_t jit_stack_; + jit_modules_t jit_modules_; + extra_args_t extra_args_; + extra_meta_t extra_meta_; + FallbackPair device_fallback_; + bool allow_tf32_cublas_; + std::unique_ptr perf_event_counters_; +}; + +template <> +struct ExtraFields { + int64_t start_time_us_; + int64_t end_time_us_; + int64_t debug_handle_; + at::RecordScope scope_; + std::string name_; + std::string backend_; + jit_stack_t jit_stack_; + jit_modules_t jit_modules_; +}; + +template <> +struct ExtraFields { + using raw_event_t = std::pair; + std::string name_; + int64_t duration_ns_{0}; + // While building the event tree, we want to report a vulkan event's duration + // as 0 so that its end time doesn't exceed that of its parent cpu op + bool in_tree_building_{false}; +}; + +struct RawAllocation { + c10::approx_time_t start_time_; + void* ptr_; + int64_t alloc_size_; + size_t total_allocated_; + size_t total_reserved_; + c10::DeviceType device_type_; + c10::DeviceIndex device_index_; +}; + +// For performance. 
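// Annotation (not original code): the assert below keeps RawAllocation
// trivially copyable; these records are appended on the hot allocation path
// (see emplace_allocation_event further down), where a non-trivial type would
// add overhead. A roughly equivalent spelling, assuming c10::is_pod_v checks
// triviality plus standard layout:
//
//   static_assert(std::is_trivial_v<RawAllocation> &&
//                 std::is_standard_layout_v<RawAllocation>);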
+static_assert(c10::is_pod_v, "Non-POD member of RawAllocation."); + +template <> +struct ExtraFields : RawAllocation { + ExtraFields(const RawAllocation& allocation) : RawAllocation(allocation) {} + + c10::Device device() const { + return {device_type_, device_index_}; + } + + c10::optional id_; + c10::optional allocation_id_; +}; + +template <> +struct ExtraFields { + c10::approx_time_t start_time_; + int64_t alloc_size_; + size_t total_allocated_; + size_t total_reserved_; + c10::DeviceType device_type_; + c10::DeviceIndex device_index_; +}; + +// For performance. +static_assert( + c10::is_pod_v>, + "Non-POD member of ExtraFields."); + +struct PyFrameState { + int line_no_; + at::StringView filename_; + at::StringView funcname_; +}; + +template +using strong_t = strong:: + type, strong::hashable>; + +using PyModuleSelf = strong_t; +using PyModuleCls = strong_t; +using PyMethod = strong_t; +using PyOptimizerSelf = strong_t; +using PyOptimizerCls = strong_t; + +struct NNModuleInfo { + struct ParameterInfo { + std::string name_; + TensorMetadata metadata_; + c10::optional grad_metadata_; + }; + + PyModuleSelf self_; + PyModuleCls cls_; + at::StringView cls_name_; + + std::vector parameters_; + // Indicates that `self_` is the kth instance of `cls_` observed. + size_t id_{std::numeric_limits::max()}; +}; + +struct OptimizerInfo { + struct ParameterInfo { + TensorMetadata metadata_; + c10::optional grad_metadata_; + std::vector> state_; + }; + + PyOptimizerSelf self_; + PyOptimizerCls cls_; + at::StringView cls_name_; + + std::vector parameters_; +}; + +struct PyExtraFieldsBase { + PyExtraFieldsBase( + c10::time_t end_time_ns, + size_t python_tid, + PyFrameState caller) + : end_time_ns_{end_time_ns}, + python_tid_{python_tid}, + caller_{std::move(caller)} {} + + c10::time_t end_time_ns_; + size_t python_tid_; + PyFrameState caller_; + + // kth python event observed. (Used by TensorBoard) + size_t id_{std::numeric_limits::max()}; +}; + +template <> +struct ExtraFields : public PyExtraFieldsBase { + struct args_t { + PyFrameState frame_state_; + c10::optional module_info_; + c10::optional optimizer_info_; + }; + + ExtraFields( + c10::time_t end_time_ns, + size_t python_tid, + PyFrameState caller, + args_t args) + : PyExtraFieldsBase(end_time_ns, python_tid, std::move(caller)), + callsite_{std::move(args.frame_state_)}, + module_{std::move(args.module_info_)}, + optimizer_{std::move(args.optimizer_info_)} {} + + PyFrameState callsite_; + c10::optional module_; + c10::optional optimizer_; +}; + +template <> +struct ExtraFields : public PyExtraFieldsBase { + using args_t = at::StringView; + + ExtraFields( + c10::time_t end_time_ns, + size_t python_tid, + PyFrameState caller, + args_t args) + : PyExtraFieldsBase(end_time_ns, python_tid, std::move(caller)), + function_name_{std::move(args)} {} + + at::StringView function_name_; +}; + +template <> +struct ExtraFields { + // Mirrors `libkineto::GenericTraceActivity::Flow`. This information is used + // during post processing to properly embed Kineto events into the broader + // profiler tree structure. End users are not generally expected to use these + // fields directly, but they are available for debugging. 
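  // Annotation (not original code): during post processing these flow fields
  // and correlation_id_ are what allow a device-side Kineto activity to be
  // re-attached to the profiler op that launched it; once linked, the
  // launching op is reachable through linked_activity_, e.g.
  //
  //   if (auto op = kineto_fields.linked_activity_.lock()) {
  //     // `op` is the profiler Result for the corresponding CPU-side event.
  //   }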
+ struct Flow { + uint32_t id{0}; + uint32_t type{0}; + uint32_t start{0}; + }; + + std::string name_; + int64_t duration_us_{0}; + uint64_t correlation_id_{0}; + libkineto::ActivityType activity_type_; + Flow flow; + std::weak_ptr linked_activity_{}; +}; + +struct TORCH_API Result : public std::enable_shared_from_this { + template + [[nodiscard]] static std::shared_ptr create(Args... args) { + return std::shared_ptr(new Result(std::forward(args)...)); + } + + template + decltype(auto) visit(T&& visitor) { + return std::visit(std::forward(visitor), extra_fields_); + } + + template + decltype(auto) visit(T&& visitor) const { + return std::visit(std::forward(visitor), extra_fields_); + } + + template + void visit_if_base(Fn&& fn) const { + visit([&](const auto& extra_fields) { + using extra_fields_t = typename std::remove_cv_t< + typename std::remove_reference_t>; + + if constexpr (std::is_base_of_v) { + fn(extra_fields); + } + }); + } + + EventType tag() const { + return visit([](const auto& i) { return deduceTag(i); }); + } + + std::string name() const; + libkineto::ActivityType kinetoType() const; + uint64_t correlationID() const; + int64_t endTimeNS() const; + uint64_t endTID() const; + c10::DeviceType deviceType() const; + + int64_t start_time_ns_; + uint64_t start_tid_; + kineto::DeviceAndResource kineto_info_; + std::variant< + ExtraFields, + ExtraFields, + ExtraFields, + ExtraFields, + ExtraFields, + ExtraFields, + ExtraFields, + ExtraFields> + extra_fields_; + + std::weak_ptr parent_; + std::vector> children_; + bool finished_{false}; + + const torch::profiler::impl::kineto::activity_t* kineto_activity_{nullptr}; + + private: + template + Result( + int64_t start_time_ns, + uint64_t start_tid, + kineto::DeviceAndResource kineto_info, + ExtraFields&& extra_fields) + : start_time_ns_{start_time_ns}, + start_tid_{start_tid}, + kineto_info_{kineto_info}, + extra_fields_{std::move(extra_fields)} {} + + template + static EventType deduceTag(const ExtraFields&) { + return E; + } +}; + +struct KinetoObserverContext : public at::ObserverContext { + struct Event { + TorchOpBasicFields basic_fields_; + c10::approx_time_t start_time_; + + // Set in the exit callback. + c10::approx_time_t end_time_{ + std::numeric_limits::min()}; + + bool allow_tf32_cublas_; + std::unique_ptr counters_; + }; + + explicit KinetoObserverContext(Event* event) : event_{event} {} + + Event* event_; + FallbackPair* fallback_{nullptr}; +}; + +constexpr int IO_ENCODER_DEFAULT_BLOCK_SIZE = 1024; + +constexpr int SCALAR_LIST_LENGTH_LIMIT = 30; + +// InputOutputEncoder +// Stores each op_events' shapes and dtypes, and concrete values into a +// contiguous AppendOnlyList so that we no longer create vectors for shapes +// and dtypes on every op. Those vectors can be created during +// post-processing. +// It splits the data into two categories: input shapes and concrete inputs. +class InputOutputEncoder final { + public: + void push(c10::ArrayRef values); + + // Used during post-processing to unpack the encoded data. + // Each method returns a "supplier" lambda which takes no arguments; + // invoking the lambda once will return a list of args that represent + // the inputs for one op. + // The data is split into two streams: "input shapes" and "concrete inputs". + // Note: "auto" only works because these are only used in collection.cpp, + // where they are implemented. 
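Result (declared above) keeps its per-event payload in a std::variant of ExtraFields specializations and funnels all access through visit(), a thin wrapper around std::visit. The sketch below shows that dispatch pattern in isolation; MiniResult, TorchOpFields, AllocationFields, and the overloaded helper are simplified stand-ins for illustration, not the real profiler types.

#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <variant>

// Simplified stand-ins for two ExtraFields alternatives.
struct TorchOpFields    { std::string name_; uint64_t correlation_id_; };
struct AllocationFields { int64_t alloc_size_; };

// Classic overload-set helper so several lambdas form one visitor.
template <class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template <class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

struct MiniResult {
  std::variant<TorchOpFields, AllocationFields> extra_fields_;

  template <typename T>
  decltype(auto) visit(T&& visitor) const {
    return std::visit(std::forward<T>(visitor), extra_fields_);
  }
};

int main() {
  MiniResult r{TorchOpFields{"aten::add", 42}};
  r.visit(overloaded{
      [](const TorchOpFields& op) { std::cout << "op " << op.name_ << "\n"; },
      [](const AllocationFields& a) { std::cout << "alloc " << a.alloc_size_ << "\n"; }});
  return 0;
}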
+ auto getInputShapeGenerator(); + auto getConcreteInputGenerator(); + + bool isSupportedScalarList(const c10::IValue& list_candidate); + + void clear(); + + enum class Tag { + Tensor = 0, + UndefinedTensor, + TensorListBegin, // TODO: generalize to other lists. + ScalarList, + Scalar, + Other, + TERMINATOR + }; + + enum class IOType { Shapes, ConcreteInputs, None }; + + private: + void push(const at::Tensor& t); + + // Implementation detail for getInputShapeGenerator and + // getConcreteInputGenerator + auto getIValueGenerator(const IOType& io_type); + + AppendOnlyList tags_; + AppendOnlyList + tensor_metadata_; + AppendOnlyList tensor_sizes_strides_; + AppendOnlyList ivalues_; +}; + +using perf_profiler_t = torch::profiler::impl::linux_perf::PerfProfiler; + +class TORCH_API ThreadLocalSubqueue { + public: + ThreadLocalSubqueue(const uint64_t tid, ProfilerConfig config); + + std::unique_ptr begin_op(const at::RecordFunction& fn); + + template + void emplace_backend_event(Args&&... args) { + backend_events_.emplace_back(std::forward(args)...); + } + + template + void emplace_vulkan_event(Args&&... args) { + vulkan_events_.emplace_back(std::forward(args)...); + } + + template + void emplace_allocation_event(Args&&... args) { + allocations_.emplace_back(std::forward(args)...); + } + + template + void emplace_ooms_event(Args&&... args) { + ooms_.emplace_back(std::forward(args)...); + } + + template + void emplace_py_call(Args&&... args) { + py_calls_.emplace_back(std::forward(args)...); + } + + uint64_t tid() const { + return tid_; + } + + const kineto::DeviceAndResource& kineto_info() const { + return kineto_info_; + } + + inline void disable_perf_profiler(perf_counters_t& counters) const { + perf_profiler_->Disable(counters); + } + + private: + uint64_t tid_; + ProfilerConfig config_; + kineto::DeviceAndResource kineto_info_; + std::unique_ptr perf_profiler_; + + friend class RecordQueue; + // See `containers.h` for block size benchmarks. + static constexpr size_t BlockSize = 512; + + struct TorchOpStorage { + // NB: This is a destructive operation. + void materialize( + std::vector>& out, + const std::function& time_converter, + const uint64_t tid, + const kineto::DeviceAndResource& kineto_info); + + template + class EventBlock : public std::array { + public: + EventBlock(); + uint64_t correlation_id(const T* ptr) const; + + private: + uint64_t id_start_; + }; + + using event_t = KinetoObserverContext::Event; + class OpList : public AppendOnlyList { + public: + template + std::pair emplace_back(Args&&... args); + static uint64_t correlationID(const OpList::Iterator& e); + } op_events_; + + // report_input_shapes + InputOutputEncoder inputs_outputs_; + + // with_stack (JIT) + AppendOnlyList jit_stack_; + + // with_modules + AppendOnlyList jit_modules_; + + // with_flops + AppendOnlyList extra_args_; + + // report extra metadata, i.e. 
collective communication meta + AppendOnlyList extra_meta_; + + // ProfilerState::KINETO_GPU_FALLBACK or + // ProfilerState::KINETO_PRIVATEUSE1_FALLBACK + AppendOnlyList device_fallback_; + } torch_ops_; + + // reportBackendEventToActiveKinetoProfiler + AppendOnlyList, BlockSize> backend_events_; + + // _reportVulkanEventToProfiler + AppendOnlyList::raw_event_t, BlockSize> + vulkan_events_; + + // reportMemoryUsage + AppendOnlyList allocations_; + + // reportOOMs + AppendOnlyList, BlockSize> ooms_; + + // with_stack (Python) + AppendOnlyList< + std::pair, + BlockSize> + py_calls_; +}; + +class TORCH_API RecordQueue { + public: + RecordQueue(ProfilerConfig config, std::set activities); + + bool tracePython() const; + ThreadLocalSubqueue* getSubqueue(); + void stop(); + + // NB: This is a destructive operation. + std::pair< + std::vector>, + std::unique_ptr> + getRecords( + std::function time_converter, + uint64_t start_time_us, + uint64_t end_time_us); + + private: + uint32_t id_; + ProfilerConfig config_; + std::set activities_; + ska::flat_hash_map> + sub_queues_; + std::mutex sub_queue_mutex_; + std::unique_ptr python_tracer_; +}; + +TORCH_API bool get_record_concrete_inputs_enabled(); +TORCH_API void set_record_concrete_inputs_enabled_fn(std::function); +TORCH_API void set_record_concrete_inputs_enabled_val(bool); + +TORCH_API bool get_fwd_bwd_enabled(); +TORCH_API void set_fwd_bwd_enabled_fn(std::function); +TORCH_API void set_fwd_bwd_enabled_val(bool); + +TORCH_API bool get_cuda_sync_enabled(); +TORCH_API void set_cuda_sync_enabled_fn(std::function); +TORCH_API void set_cuda_sync_enabled_val(bool); + +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/combined_traceback.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/combined_traceback.h new file mode 100644 index 0000000000000000000000000000000000000000..e84a51e9c4a3af70b5dd0d67a7646c40f406a001 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/combined_traceback.h @@ -0,0 +1,76 @@ +#pragma once + +#include +#include + +namespace torch { + +// struct that holds the result of symbolizing multiple tracebacks +// each traceback is a list of indices into all_frames +// (lots of Frames get duplicated across traces) +struct TORCH_API SymbolizedTracebacks { + std::vector all_frames; + // index into all_frames, so that + // it is possible to dedupe frame objects in + // construction of python objects + std::vector> tracebacks; +}; + +struct TORCH_API CapturedTraceback : public c10::GatheredContext { + struct PyFrame { + void* code; // PyCodeObject*, but python headers not present + int lasti; + }; + + static std::shared_ptr gather( + bool python, + bool script, + bool cpp); + CapturedTraceback() = default; + CapturedTraceback(const CapturedTraceback&) = delete; + CapturedTraceback& operator=(const CapturedTraceback&) = delete; + CapturedTraceback(CapturedTraceback&&) noexcept = default; + CapturedTraceback& operator=(CapturedTraceback&&) noexcept = delete; + ~CapturedTraceback() override; + + using visitproc = int (*)(void* self, void* arg); + + struct Python { + virtual std::vector gather() = 0; + virtual void release(std::vector& frames) = 0; + virtual void appendSymbolized( + const std::vector& to_symbolize, + SymbolizedTracebacks& st) = 0; + // tp_traverse/tp_clear implementations + virtual int traverse( + std::vector& frames, + visitproc visit, + void* arg) = 0; + virtual int 
clear(std::vector& frames) = 0; + virtual ~Python() = default; + Python* next_ = nullptr; + }; + // called once by each python interpreter to + // register python stack recording functionality + // p cannot be deleted once added. + static void addPythonUnwinder(Python* p); + + int traversePython(visitproc visit, void* arg); + int clearPython(); + + private: + std::vector frames_; + std::vector cpp_frames_; + std::vector script_frames_; + friend TORCH_API SymbolizedTracebacks + symbolize(const std::vector& to_symbolize); + + // non-owning reference to one of the immortal Python* objects + // registered above. + Python* python_ = nullptr; +}; + +TORCH_API SymbolizedTracebacks +symbolize(const std::vector& to_symbolize); + +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/containers.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/containers.h new file mode 100644 index 0000000000000000000000000000000000000000..3de4930ad985709cc702ade8fac3dfe2082749c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/containers.h @@ -0,0 +1,206 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace torch { +namespace profiler { +namespace impl { + +// ============================================================================ +// == AppendOnlyList ========================================================== +// ============================================================================ +// During profiling, we have a very predictable access pattern: we only +// append to the end of the container. We can specialize and outperform both +// std::vector (which must realloc) and std::deque (which performs a double +// indirection), and this class of operation is sufficiently important to the +// profiling hot path to warrant specializing: +// https://godbolt.org/z/rTjozf1c4 +// https://quick-bench.com/q/mmfuu71ogwaiULDCJyHdKnHZms4 (Prototype #1, +// int) https://quick-bench.com/q/5vWDW6jjdXVdoffev2zst8D09no (Prototype +// #1, int pair) https://quick-bench.com/q/IfEkfAQMeJSNBA52xtMP6Agcl-Q +// (Prototype #2, int pair) +// https://quick-bench.com/q/wJV2lKmuXL4XyGJzcI5hs4gEHFg (Prototype #3, int +// pair) https://quick-bench.com/q/xiO8ZaBEkYRYUA9dFrMuPLlW9fo (Full impl, +// int pair) +// AppendOnlyList has 2x lower emplace overhead compared to more generic STL +// containers. +// +// The optimal value of `ChunkSize` will vary by use case, but testing shows +// that a value of 1024 does a good job amortizing the `malloc` cost of growth. +// Performance drops off for larger values, so testing on a case-by-case basis +// is recommended if performance is absolutely critical. + +template < + typename T, + size_t ChunkSize, + template class block_t = std::array> +class AppendOnlyList { + public: + using array_t = block_t; + static_assert( + std::is_base_of_v, array_t>, + "AppendOnlyList expects raw low level pointer storage."); + static_assert(ChunkSize > 0, "Block cannot be empty."); + + AppendOnlyList() : buffer_last_{buffer_.before_begin()} {} + AppendOnlyList(const AppendOnlyList&) = delete; + AppendOnlyList& operator=(const AppendOnlyList&) = delete; + + size_t size() const { + return n_blocks_ * ChunkSize - (size_t)(end_ - next_); + } + + template + T* emplace_back(Args&&... 
args) { + maybe_grow(); + if constexpr ( + std::is_trivially_destructible_v && + std::is_trivially_destructible_v) { + ::new ((void*)next_) T{std::forward(args)...}; + } else { + *next_ = T{std::forward(args)...}; + } + return next_++; + } + + template + typename std::enable_if< + std::is_same::value && std::is_trivially_copyable::value>::type + copy(c10::ArrayRef src) { + size_t n = src.size(); + if (C10_UNLIKELY(n == 0)) { + return; + } + maybe_grow(); + if (C10_LIKELY(next_ && (next_ + n <= end_))) { + std::memcpy((void*)next_, (void*)src.begin(), n * sizeof(T0)); + next_ += n; + } else { + // We could chunk this into several `memcpy`s, but because we expect this + // fallback to be infrequent (n << ChunkSize) the performance impact is + // negligible. + for (auto i : src) { + emplace_back(i); + } + } + } + + void clear() { + buffer_.clear(); + buffer_last_ = buffer_.before_begin(); + n_blocks_ = 0; + next_ = nullptr; + end_ = nullptr; + } + + struct Iterator { + using iterator_category = std::forward_iterator_tag; + using difference_type = std::ptrdiff_t; + using value_type = T; + using pointer = T*; + using reference = T&; + + Iterator(std::forward_list& buffer, const size_t size) + : block_{buffer.begin()}, size_{size} {} + + // End iterator. + Iterator() = default; + + bool exhausted() const { + return current_ >= size_; + } + + reference operator*() const { + return *current_ptr(/*checked=*/true); + } + pointer operator->() { + return current_ptr(/*checked=*/true); + } + + // Prefix increment + Iterator& operator++() { + if (!(++current_ % ChunkSize)) { + block_++; + } + return *this; + } + + // Postfix increment + Iterator operator++(int) { + Iterator tmp = *this; + ++(*this); + return tmp; + } + + friend bool operator==(const Iterator& a, const Iterator& b) { + return a.current_ptr() == b.current_ptr(); + } + friend bool operator!=(const Iterator& a, const Iterator& b) { + return a.current_ptr() != b.current_ptr(); + } + + std::pair address() const { + if (current_ >= size_) { + return {nullptr, 0}; + } + return {&(*block_), current_ % ChunkSize}; + } + + private: + T* current_ptr(bool checked = false) const { + auto a = address(); + if (a.first == nullptr) { + TORCH_INTERNAL_ASSERT(!checked, "Invalid access on AppendOnlyList."); + return nullptr; + } + return a.first->data() + a.second; + } + + typename std::forward_list::iterator block_; + size_t current_{0}; + size_t size_{0}; + }; + + Iterator begin() { + return Iterator(buffer_, size()); + } + Iterator end() { + return Iterator(); + } + // TODO: cbegin and cend() + + private: + void maybe_grow() { + if (C10_UNLIKELY(next_ == end_)) { + buffer_last_ = buffer_.emplace_after(buffer_last_); + n_blocks_++; + next_ = buffer_last_->data(); + end_ = next_ + ChunkSize; + } + } + + std::forward_list buffer_; + + // We maintain a pointer to the last element of `buffer_` so that we can + // insert at the end in O(1) time. 
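For reference, the intended usage of the container declared here is: append on the hot path, iterate once during post-processing, then clear. A usage sketch follows; it assumes torch/csrc/profiler/containers.h is included, and the element type and loop bounds are illustrative only.

#include <cstdint>
#include <utility>
// Assumes: #include <torch/csrc/profiler/containers.h>

void record_and_replay() {
  using torch::profiler::impl::AppendOnlyList;

  // 1024 elements per block amortizes the malloc cost of growth (see the
  // benchmark links in the class comment above).
  AppendOnlyList<std::pair<int64_t, int64_t>, 1024> events;

  for (int64_t i = 0; i < 10000; ++i) {
    events.emplace_back(i, i * 2);  // amortized O(1); old blocks are never reallocated
  }

  int64_t total = 0;
  for (const auto& e : events) {    // forward iteration across all blocks
    total += e.second;
  }
  (void)total;

  events.clear();                   // releases every block
}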
+ size_t n_blocks_{0}; + T* next_{nullptr}; + T* end_{nullptr}; + + protected: + typename std::forward_list::iterator buffer_last_; +}; + +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/data_flow.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/data_flow.h new file mode 100644 index 0000000000000000000000000000000000000000..535dcbc8b2ef9b053f7b32bc8ea238d30fa47173 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/data_flow.h @@ -0,0 +1,94 @@ +#pragma once + +#include + +#include +#include +#include +#include + +namespace torch { +namespace profiler { +namespace impl { + +// Identity is a complex concept in PyTorch. A Tensor might not have a +// an associated storage, multiple Tensors might share the same underlying +// storage, the storage of a Tensor might change over time, etc. +// +// For the purpose of profiling we're mostly interested in data flow +// analysis. As a result, we can take an expansive view of identity: +// Tensors share an ID if they share a TensorImpl or storage data. +// +// This identity equality is transitive; If Tensors T0 and T1 share a storage +// S0 and T1 later points to a different storage S1 then all Tensors which +// point to either S0 or S1 are considered to have the same identity. (Since +// profiler cannot reason beyond that.) +// +// The profiler will handle lifetime analysis to ensure that identities do +// not run afoul of the ABA problem. This does, however, mean that identities +// can only be assigned when memory profiling is enabled. +using TensorID = strong::type; + +// Uniquely identifies an allocation. (Generally a StorageImpl's data ptr.) +using AllocationID = strong::type< + size_t, + struct StorageID_, + strong::ordered, + strong::regular, + strong::hashable>; + +// We use a Tensor's TensorImpl adress and StorageImpl data start to build the +// data flow graph. We do not hold an owning reference so we wrap them in strong +// types to prevent direct access. +using TensorImplAddress = strong::type< + const c10::TensorImpl*, + struct TensorImplAddress_, + strong::regular, + strong::hashable, + strong::boolean>; + +using StorageImplData = strong::type< + const void*, + struct StorageImplData_, + strong::regular, + strong::hashable, + strong::boolean>; + +// ============================================================================ +// == weak_intrusive_ptr and the ABA problem for TensorImpl* ================== +// ============================================================================ +// Tracking `TensorImpl`s is an important part of identity tracking, because +// a Tensor might change storage; however when it does we want to retain the +// fact that the old and new storage belong to the same logical Tensor. We +// cannot take an owning reference to the Tensor because that would change +// program semantics by extending the lifetime of the Tensor. However if we +// store a raw TensorImpl* pointer the TensorImpl might be deleted and a new +// TensorImpl might be created that reuses the address. (ABA problem) +// +// Fortunately, there is a feature of `c10::intrusive_ptr` that we can use to +// prevent address reuse for the duration of profiling: the weak intrusive ptr. 
+// When a Tensor's refcount reaches zero but there are outstanding weak +// references (`weakcount_ > 0`) it will free the underlying managed resources +// by calling `target_->release_resources()`, but it will not call `delete`. +// (Instead, `delete` is called when the last weak reference is destroyed.) +// This means that we can safely use address identity to track `TensorImpls`. +class WeakTensor { + public: + explicit WeakTensor(const at::Tensor& t) : weak_self_(t.getIntrusivePtr()) {} + + auto get() const { + return TensorImplAddress{weak_self_._unsafe_get_target()}; + } + + private: + c10::weak_intrusive_ptr weak_self_; +}; + +struct Result; + +void calculateUniqueTensorIDs( + std::vector>& sorted_results); + +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/events.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/events.h new file mode 100644 index 0000000000000000000000000000000000000000..42642f2afa6c7e6b6fc256fd981cb9c174d1d0fc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/events.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace profiler { + +/* A vector type to hold a list of performance counters */ +using perf_counters_t = std::vector; + +/* Standard list of performance events independent of hardware or backend */ +constexpr std::array ProfilerPerfEvents = { + /* + * Number of Processing Elelement (PE) cycles between two points of interest + * in time. This should correlate positively with wall-time. Measured in + * uint64_t. PE can be non cpu. TBD reporting behavior for multiple PEs + * participating (i.e. threadpool). + */ + "cycles", + + /* Number of PE instructions between two points of interest in time. This + * should correlate positively with wall time and the amount of computation + * (i.e. work). Across repeat executions, the number of instructions should + * be more or less invariant. Measured in uint64_t. PE can be non cpu. + */ + "instructions"}; +} // namespace profiler +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/kineto_shim.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/kineto_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..e92cbf003d6a1c664a9699ac4126b595853747ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/kineto_shim.h @@ -0,0 +1,151 @@ +#pragma once + +#include +#include + +// Skip Kineto dependency on mobile unless explicitly asked for. +// When is it explicitly asked for? +// KinetoEdgeCPUProfiler uses KinetoProfiler for cpu +// event profiling. This has a dependency on cpu only libkineto +#if defined(USE_KINETO) && defined(C10_MOBILE) && \ + !defined(EDGE_PROFILER_USE_KINETO) +#undef USE_KINETO +#endif + +#include + +#include +#include + +#ifdef USE_KINETO +// Forward declarations so we don't have to include `libkineto.h` in a header. 
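The forward declarations that follow illustrate a standard way to keep a heavyweight dependency out of a widely-included header: declare the types, refer to them only through pointers, references, or smart pointers, and confine the full include to the implementation file. A minimal illustration with hypothetical names (heavylib, Engine, EngineHandle):

// fast_handle.h -- hypothetical example, not part of this header
namespace heavylib {
class Engine;  // forward declaration: no heavylib headers are pulled in
}

struct EngineHandle {
  heavylib::Engine* engine{nullptr};  // a pointer only needs the declaration
  void start();                       // defined where the full type is visible
};

// fast_handle.cpp -- hypothetical example
// #include <heavylib/engine.h>       // the expensive include is confined here
// void EngineHandle::start() { /* use *engine with its full definition */ }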
+namespace libkineto { +class GenericTraceActivity; +struct CpuTraceBuffer; +class ActivityTraceInterface; +} // namespace libkineto +#endif + +namespace torch { +namespace profiler { + +#ifdef USE_KINETO +constexpr bool kKinetoAvailable{true}; +#else +constexpr bool kKinetoAvailable{false}; +#endif + +namespace impl { +namespace kineto { + +// ---------------------------------------------------------------------------- +// -- Interface (Does not require Kineto) ------------------------------------- +// ---------------------------------------------------------------------------- +struct DeviceAndResource { + int32_t device; + int32_t resource; +}; +const DeviceAndResource kineto_ids(); + +#ifdef USE_KINETO +using trace_t = libkineto::CpuTraceBuffer; +using interface_trace_t = libkineto::ActivityTraceInterface; +using activity_t = libkineto::GenericTraceActivity; +#else +struct DummyTraceBuffer {}; +struct DummyTraceInterface {}; + +using trace_t = DummyTraceBuffer; +using interface_trace_t = DummyTraceBuffer; +struct activity_t; +#endif // USE_KINETO + +void addMetadata( + activity_t* activity, + const std::string& key, + const std::string& value); + +// Wraps: libkineto::CpuTraceBuffer +struct TraceWrapper { + TraceWrapper(const int64_t start_time, const std::string& name); + TraceWrapper(TraceWrapper&&) = default; + TraceWrapper(const TraceWrapper&) = delete; + ~TraceWrapper(); + + // The caller is expected to hold a mutex when calling `addCPUActivity`. + activity_t* addCPUActivity( + const std::string& name, + const libkineto::ActivityType type, + const DeviceAndResource device_and_resource, + const uint64_t correlation_id, + const int64_t start_time, + const int64_t end_time); + + void transferCpuTrace(int64_t end_time); + + explicit operator bool() const; + + std::unique_ptr& get() { + return cpu_trace_; + } + + private: + std::unique_ptr cpu_trace_; +}; + +// Wraps libkineto::ActivityTraceInterface +struct ActivityTraceWrapper { + explicit ActivityTraceWrapper(std::unique_ptr&& trace); + ActivityTraceWrapper() = default; + ActivityTraceWrapper(ActivityTraceWrapper&&) = default; + ActivityTraceWrapper(const ActivityTraceWrapper&) = delete; + explicit operator bool() const; + void save(const std::string& path); + + const std::unique_ptr& get() { + return trace_; + } + + private: + std::unique_ptr trace_; +#ifdef USE_KINETO + bool saved_ = false; // Kineto's save is destructive +#endif +}; + +using ActivitySet = std::set; +void prepareTrace( + const bool cpuOnly, + const ActivitySet& activities, + const torch::profiler::impl::ExperimentalConfig& config); +void startTrace(); +ActivityTraceWrapper stopTrace(); +void pushCorrelationId(uint64_t correlation_id); +void pushUserCorrelationId(uint64_t correlation_id); +void popCorrelationId(); +void popUserCorrelationId(); +void recordThreadInfo(); + +void logInvariantViolation( + const std::string& assertion, + const std::string& error, + const std::string& profile_id, + const std::string& group_profile_id); + +} // namespace kineto +} // namespace impl +} // namespace profiler + +namespace autograd { +namespace profiler { +c10::DeviceType deviceTypeFromActivity(libkineto::ActivityType activity_type); + +TORCH_API void addMetadataJson( + const std::string& key, + const std::string& value); + +TORCH_API void profilerStep(); + +} // namespace profiler +} // namespace autograd +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/perf-inl.h 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/perf-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..0dfa45ac6f2be0ac7106d232262f96161961ce0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/perf-inl.h @@ -0,0 +1,72 @@ +#pragma once + +#if defined(__ANDROID__) || defined(__linux__) + +#include + +#include +#include + +#include + +#endif /* __ANDROID__ || __linux__ */ + +#include + +namespace torch { +namespace profiler { +namespace impl { +namespace linux_perf { + +/* + * PerfEvent + * --------- + */ + +inline void PerfEvent::Disable() const { +#if defined(__ANDROID__) || defined(__linux__) + ioctl(fd_, PERF_EVENT_IOC_DISABLE, 0); +#endif /* __ANDROID__ || __linux__ */ +} + +inline void PerfEvent::Enable() const { +#if defined(__ANDROID__) || defined(__linux__) + ioctl(fd_, PERF_EVENT_IOC_ENABLE, 0); +#endif /* __ANDROID__ || __linux__ */ +} + +inline void PerfEvent::Reset() const { +#if defined(__ANDROID__) || defined(__linux__) + ioctl(fd_, PERF_EVENT_IOC_RESET, 0); +#endif /* __ANDROID__ || __linux__ */ +} + +/* + * PerfProfiler + * ------------ + */ + +inline uint64_t PerfProfiler::CalcDelta(uint64_t start, uint64_t end) const { + if (end < start) { // overflow + return end + (std::numeric_limits::max() - start); + } + // not possible to wrap around start for a 64b cycle counter + return end - start; +} + +inline void PerfProfiler::StartCounting() const { + for (auto& e : events_) { + e.Enable(); + } +} + +inline void PerfProfiler::StopCounting() const { + for (auto& e : events_) { + e.Disable(); + } +} + +} // namespace linux_perf +} // namespace impl +} // namespace profiler +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/util.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/util.h new file mode 100644 index 0000000000000000000000000000000000000000..df141dcb1e0a095c7523b4451623c88dc904de4f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/profiler/util.h @@ -0,0 +1,162 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +// TODO: replace with pytorch/rfcs#43 when it is ready. +#define SOFT_ASSERT(cond, ...) 
\ + [&]() -> bool { \ + if (C10_UNLIKELY(!(cond))) { \ + torch::profiler::impl::logSoftAssert( \ + __func__, \ + __FILE__, \ + static_cast(__LINE__), \ + #cond, \ + ::c10::str(__VA_ARGS__)); \ + if (torch::profiler::impl::softAssertRaises()) { \ + TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__); \ + } else { \ + TORCH_WARN(__VA_ARGS__); \ + } \ + return false; \ + } \ + return true; \ + }() + +namespace torch { +namespace profiler { +namespace impl { +TORCH_API bool softAssertRaises(); +TORCH_API void setSoftAssertRaises(c10::optional value); +TORCH_API void logSoftAssert( + const char* func, + const char* file, + uint32_t line, + const char* cond, + const char* args); +TORCH_API inline void logSoftAssert( + const char* func, + const char* file, + uint32_t line, + const char* cond, + ::c10::detail::CompileTimeEmptyString args) { + logSoftAssert(func, file, line, cond, (const char*)args); +} +TORCH_API void logSoftAssert( + const char* func, + const char* file, + uint32_t line, + const char* cond, + const std::string& args); + +using shape = + std::variant, std::vector>>; +constexpr int TENSOR_LIST_DISPLAY_LENGTH_LIMIT = 30; + +std::string getNvtxStr( + const char* name, + int64_t sequence_nr, + const std::vector>& shapes, + at::RecordFunctionHandle op_id = 0, + const std::list>& input_op_ids = + {}); + +struct TORCH_API FileLineFunc { + std::string filename; + size_t line; + std::string funcname; +}; + +TORCH_API std::vector prepareCallstack( + const std::vector& cs); +TORCH_API std::vector callstackStr( + const std::vector& cs); +TORCH_API std::string stacksToStr( + const std::vector& stacks, + const char* delim); +TORCH_API std::vector> inputSizes( + const at::RecordFunction& fn, + const bool flatten_list_enabled = false); +TORCH_API std::string variantShapesToStr(const std::vector& shapes); +TORCH_API std::string shapesToStr( + const std::vector>& shapes); +TORCH_API std::string strListToStr(const std::vector& types); +TORCH_API std::string inputOpIdsToStr( + const std::list>& input_op_ids); +TORCH_API std::string ivalueListToStr(const std::vector& list); +TORCH_API std::vector inputTypes(const at::RecordFunction& fn); + +std::unordered_map TORCH_API +saveExtraArgs(const at::RecordFunction& fn); +std::unordered_map TORCH_API +saveNcclMeta(const at::RecordFunction& fn); + +uint64_t TORCH_API computeFlops( + const std::string& op_name, + const std::unordered_map& extra_args); + +std::string shapeToStr(const std::vector& shape); + +template +class TORCH_API GlobalStateManager { + public: + static GlobalStateManager& singleton() { + static GlobalStateManager singleton_; + return singleton_; + } + + static void push(std::shared_ptr&& state) { + if (singleton().state_) { + LOG(WARNING) << "GlobalStatePtr already exists!"; + } else { + singleton().state_ = std::move(state); + } + } + + static auto* get() { + return singleton().state_.get(); + } + + static std::shared_ptr pop() { + auto out = singleton().state_; + singleton().state_.reset(); + return out; + } + + private: + GlobalStateManager() = default; + + std::shared_ptr state_; +}; + +struct HashCombine { + template + size_t operator()(const std::pair& i) { + return c10::get_hash((*this)(i.first), (*this)(i.second)); + } + + template + size_t operator()(const std::tuple& i) { + return c10::get_hash(i); + } + + template + size_t operator()(const T& i) { + return c10::get_hash(i); + } +}; + +} // namespace impl +} // namespace profiler +} // namespace torch
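A note on SOFT_ASSERT, defined at the top of util.h above: it expands to an immediately-invoked lambda that logs (and optionally raises, when softAssertRaises() is enabled) and returns false when the condition fails, so call sites can branch on the result instead of aborting. A usage sketch, assuming this header is included; record_shapes and its message are hypothetical:

#include <cstdint>
// Assumes: #include <torch/csrc/profiler/util.h>

void record_shapes(int64_t n_inputs) {
  // Returns true when the invariant holds; on failure it warns (or throws via
  // TORCH_INTERNAL_ASSERT when soft asserts are configured to raise) and
  // returns false, so profiling-only bookkeeping can bail out gracefully.
  if (!SOFT_ASSERT(n_inputs >= 0, "unexpected negative input count: ", n_inputs)) {
    return;
  }
  // ... proceed with n_inputs ...
}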