=========================================================================================================================================
SOURCE CODE FILE: python_engine.h
LINES: 1
SIZE: 1.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_engine.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
#include <torch/csrc/autograd/engine.h>
#include <torch/csrc/autograd/function.h>
bool THPEngine_initModule(PyObject* module);
namespace torch::autograd::python {
struct PythonEngine : public Engine {
static Engine& get_python_engine();
~PythonEngine() override;
void thread_init(
int device,
const std::shared_ptr<ReadyQueue>& ready_queue,
bool should_increment) override;
void thread_on_exception(
const std::shared_ptr<GraphTask>& graph_task,
const std::shared_ptr<Node>& fn,
std::exception& e) override;
variable_list execute(
const edge_list& roots,
const variable_list& inputs,
bool keep_graph,
bool create_graph,
bool accumulate_grad,
const edge_list& outputs = {}) override;
c10::intrusive_ptr<at::ivalue::Future> execute_with_graph_task(
const std::shared_ptr<GraphTask>& graph_task,
std::shared_ptr<Node> graph_root,
InputBuffer&& input_buffer) override;
std::unique_ptr<AnomalyMetadata> make_anomaly_metadata() override;
std::unique_ptr<SavedVariableHooks> get_default_saved_variable_hooks()
override;
private:
PythonEngine();
};
} // namespace torch::autograd::python
```
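Usage note: `PythonEngine::execute` is the GIL-aware specialization of `Engine::execute`; its `keep_graph`/`create_graph` arguments correspond to `retain_graph`/`create_graph` in the public API. A minimal sketch of driving the engine through the public `torch::autograd::grad` entry point (everything outside this header is the ordinary LibTorch API; the snippet is illustrative, not a reference):
```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Build a tiny graph and ask the autograd engine for dy/dx.
  auto x = torch::randn({3}, torch::requires_grad());
  auto y = (x * x).sum();
  auto grads = torch::autograd::grad(
      /*outputs=*/{y}, /*inputs=*/{x}, /*grad_outputs=*/{},
      /*retain_graph=*/false, /*create_graph=*/false);
  std::cout << grads[0] << "\n";  // equals 2 * x
}
```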
|
===========================================================================================================================================
SOURCE CODE FILE: python_enum_tag.h
LINES: 1
SIZE: 0.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_enum_tag.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
namespace torch::autograd {
void initEnumTag(PyObject* module);
}
```
|
================================================================================================================================================
SOURCE CODE FILE: python_fft_functions.h
LINES: 1
SIZE: 0.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_fft_functions.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/pythoncapi_compat.h>
namespace torch::autograd {
void initFFTFunctions(PyObject* module);
}
```
|
===========================================================================================================================================
SOURCE CODE FILE: python_function.h
LINES: 1
SIZE: 5.24 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_function.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/custom_function.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/saved_variable.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/utils/object_ptr.h>
#include <c10/core/DeviceGuard.h>
#include <optional>
#include <memory>
#include <vector>
namespace torch::jit {
struct Graph;
}
namespace torch::autograd {
// A Function which is implemented by a Python object (i.e., a THPFunction).
// Calls to 'apply' are forwarded to the Python method implementation.
// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions)
struct PyNode : public Node {
PyNode(THPObjectPtr obj) : obj(obj.release()) {}
PyObject* to_py_args(
const variable_list& inputs,
at::OptionalDeviceGuard* device_guard);
variable_list to_variable_list(
const PyObject* r,
const std::vector<bool>& is_variable_input);
variable_list apply(variable_list&& inputs) override;
variable_list apply_with_saved_impl(
const variable_list& inputs,
const SwapSavedVariables& saved);
void release_variables() override;
std::string name() const override;
bool is_traceable() override;
bool is_aot_backward() const override;
void compiled_args(CompiledNodeArgs& args) const override;
variable_list apply_with_saved(
const variable_list& inputs,
SwapSavedVariables& saved) override;
// THPFunction this Function is wrapping. Owning!
PyObject* obj;
// NOLINTNEXTLINE(bugprone-exception-escape)
~PyNode() override {
// Can't use THPObjectPtr as a field in this class; destructor won't take
// out GIL! When I forgot to do this by hand
// TestAutograd.test_inplace_view_python called me out about it.
// If python is already dead, leak the wrapped python objects
if (Py_IsInitialized()) {
pybind11::gil_scoped_acquire gil;
Py_DECREF(obj);
}
}
};
/**
* Cast an object into a tuple, if it is not a tuple already. Returns true
* if the original object was not a tuple.
*/
inline bool ensure_tuple(THPObjectPtr& obj) {
if (PyTuple_Check(obj.get()))
return false;
PyObject* tuple = PyTuple_New(1);
if (!tuple)
throw python_error();
PyTuple_SET_ITEM(tuple, 0, obj.release());
obj = tuple;
return true;
}
} // namespace torch::autograd
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct THPFunction {
PyObject_HEAD
PyObject* needs_input_grad;
// Python tuple of tensors whose variables we should save. Set
// by Python with 'save_for_backward'. If nullptr, no tensors were
// saved.
PyObject* to_save;
// Python tuple of tensors which are not differentiable. Set by
// Python with 'mark_non_differentiable'. If nullptr, no tensors were
// non-differentiable.
PyObject* non_differentiable;
// Python tuple of tensors which had inplace updates in the forward()
// pass. Set by Python with 'mark_dirty'. If nullptr, no tensors were
// modified inplace.
PyObject* dirty_tensors;
// boolean indicating whether to materialize undefined output grad tensors
// into tensors full of zeros. Set by Python with 'set_materialize_grads'.
// Default is true.
bool materialize_grads;
// boolean indicating whether to materialize output grad tensors
// corresponding to non-differentiable outputs. Normally, someone would
// already get this behavior by switching off materialize_grads,
// but there are certain use cases where that is not feasible:
// https://github.com/pytorch/pytorch/pull/98659#pullrequestreview-1376822560
bool materialize_non_diff_grads;
PyObject* compiled_autograd_backward_state;
std::vector<c10::SymInt> compiled_autograd_symints;
std::vector<torch::autograd::VariableInfo> output_info;
std::vector<torch::autograd::VariableInfo> input_info;
std::vector<torch::autograd::SavedVariable> saved_variables;
// For each input, true if the input is a THPVariable
std::vector<bool> is_variable_input;
char has_freed_buffers;
PyObject* saved_for_forward;
// The actual PyNode (in the autograd graph) that this data was
// saved for. This field may be NULL (because a user can construct
// a THPFunction directly from Python), but when this field is non-NULL,
// it is guaranteed that cdata.lock()->obj == this
//
// In most ordinary use, this field should always be non-NULL; e.g.,
// when we allocate a THPFunction because we are running Node.apply,
// after constructing a THPFunction, we immediately allocate a PyNode
// for it. We can't enforce this directly in the constructor of
// THPFunction though, because there's no way to keep it live long enough
// to save an owning reference to PyNode into the grad_fn of a Variable.
std::weak_ptr<torch::autograd::PyNode> cdata;
};
bool THPFunction_initModule(PyObject* module);
TORCH_PYTHON_API extern PyTypeObject THPFunctionType;
TORCH_PYTHON_API extern PyObject* THPFunctionClass;
TORCH_PYTHON_API extern PyObject* THPGradientEdgeClass;
inline bool THPFunction_Check(PyObject* obj) {
return PyObject_IsInstance(obj, (PyObject*)&THPFunctionType);
}
```
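`PyNode` forwards `apply` to a Python-defined `torch.autograd.Function` (the wrapped `THPFunction`). For comparison, the C++-side equivalent of such a custom function goes through `torch::autograd::Function` from the `custom_function.h` header included above; a hedged sketch (the `Square` class is illustrative only, not part of this header):
```cpp
#include <torch/torch.h>

// A custom autograd function defined in C++. Python-defined functions take the
// THPFunction/PyNode route declared in this header instead.
struct Square : public torch::autograd::Function<Square> {
  static torch::Tensor forward(
      torch::autograd::AutogradContext* ctx, torch::Tensor x) {
    ctx->save_for_backward({x});
    return x * x;
  }
  static torch::autograd::variable_list backward(
      torch::autograd::AutogradContext* ctx,
      torch::autograd::variable_list grad_output) {
    auto x = ctx->get_saved_variables()[0];
    return {grad_output[0] * 2 * x};  // d(x^2)/dx = 2x
  }
};

// Usage: auto y = Square::apply(torch::randn({3}, torch::requires_grad()));
```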
|
=======================================================================================================================================
SOURCE CODE FILE: python_hook.h
LINES: 1
SIZE: 2.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_hook.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/function_hook.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/object_ptr.h>
namespace torch::dynamo::autograd {
class SwapSavedVariables;
} // namespace torch::dynamo::autograd
namespace torch::autograd {
struct PyFunctionTensorPreHook : public FunctionPreHook {
PyFunctionTensorPreHook(PyObject* dict, size_t value_idx);
~PyFunctionTensorPreHook() override;
variable_list operator()(const variable_list& values) override;
void compiled_args(
torch::dynamo::autograd::CompiledNodeArgs& args) const override;
PyObject* dict;
size_t value_idx;
};
struct PyFunctionPreHook : public FunctionPreHook {
PyFunctionPreHook(PyObject* dict);
~PyFunctionPreHook() override;
variable_list operator()(const variable_list& values) override;
void compiled_args(
torch::dynamo::autograd::CompiledNodeArgs& args) const override;
PyObject* dict;
};
struct PyFunctionPostHook : public FunctionPostHook {
PyFunctionPostHook(PyObject* dict);
~PyFunctionPostHook() override;
variable_list operator()(
const variable_list& outputs,
const variable_list& inputs) override;
void compiled_args(
torch::dynamo::autograd::CompiledNodeArgs& args) const override;
PyObject* dict;
};
// PyFunctionTensorPostAccGradHooks is a dictionary of PostAccumulateGradHooks,
// and it is understandable if you are confused by why it's a subclass. We are
// simply following the precedent of PyFunctionPreHook and PyFunctionPostHook
// above to easily enroll into existing infrastructure.
struct PyFunctionTensorPostAccGradHooks : public PostAccumulateGradHook {
PyFunctionTensorPostAccGradHooks(PyObject* dict);
~PyFunctionTensorPostAccGradHooks() override;
void operator()(const Variable& tensor) override;
void compiled_args(
torch::dynamo::autograd::CompiledNodeArgs& args) const override;
void apply_with_saved(
Variable& tensor,
torch::dynamo::autograd::SwapSavedVariables& saved) override;
PyObject* dict;
};
} // namespace torch::autograd
```
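These structs adapt Python dicts of hooks to the engine's `FunctionPreHook`/`FunctionPostHook` interfaces. The same hook machinery is reachable directly from C++ via `Tensor::register_hook`; a small hedged sketch:
```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  auto x = torch::randn({2, 2}, torch::requires_grad());
  // The returned handle can later be passed to x.remove_hook(handle).
  auto handle = x.register_hook([](torch::Tensor grad) {
    return grad * 2;  // rescale the incoming gradient
  });
  (x * x).sum().backward();
  std::cout << x.grad() << "\n";  // 4 * x instead of 2 * x
  (void)handle;
}
```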
|
==================================================================================================================================================
SOURCE CODE FILE: python_legacy_variable.h
LINES: 1
SIZE: 0.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_legacy_variable.h
ENCODING: utf-8
```h
#pragma once
// Instantiates torch._C._LegacyVariableBase, which defines the Python
// constructor (__new__) for torch.autograd.Variable.
#include <torch/csrc/python_headers.h>
namespace torch::autograd {
void init_legacy_variable(PyObject* module);
}
```
|
===================================================================================================================================================
SOURCE CODE FILE: python_linalg_functions.h
LINES: 1
SIZE: 0.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_linalg_functions.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/pythoncapi_compat.h>
namespace torch::autograd {
void initLinalgFunctions(PyObject* module);
}
```
|
===================================================================================================================================================
SOURCE CODE FILE: python_nested_functions.h
LINES: 1
SIZE: 0.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_nested_functions.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/python_compat.h>
namespace torch::autograd {
PyMethodDef* get_nested_functions_manual();
void initNestedFunctions(PyObject* module);
} // namespace torch::autograd
```
|
===============================================================================================================================================
SOURCE CODE FILE: python_nn_functions.h
LINES: 1
SIZE: 0.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_nn_functions.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/python_compat.h>
namespace torch::autograd {
void initNNFunctions(PyObject* module);
}
```
|
=======================================================================================================================================================
SOURCE CODE FILE: python_saved_variable_hooks.h
LINES: 1
SIZE: 1.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_saved_variable_hooks.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <c10/core/SafePyObject.h>
#include <pybind11/pybind11.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/autograd/saved_variable_hooks.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pybind.h>
namespace py = pybind11;
namespace torch::autograd {
struct PySavedVariableHooks : public SavedVariableHooks {
PySavedVariableHooks(py::function& pack_hook, py::function& unpack_hook);
void call_pack_hook(const at::Tensor& tensor) override;
at::Tensor call_unpack_hook() override;
~PySavedVariableHooks() override;
std::optional<std::pair<c10::SafePyObject, c10::SafePyObject>>
retrieve_unpack_hook_data() const override;
private:
PyObject* pack_hook_;
PyObject* unpack_hook_;
PyObject* data_ = nullptr;
};
struct PyDefaultSavedVariableHooks {
static void push_hooks(py::function& pack_hook, py::function& unpack_hook);
static void pop_hooks();
static std::unique_ptr<SavedVariableHooks> get_hooks();
};
} // namespace torch::autograd
```
|
===================================================================================================================================================
SOURCE CODE FILE: python_sparse_functions.h
LINES: 1
SIZE: 0.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_sparse_functions.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/pythoncapi_compat.h>
namespace torch::autograd {
void initSparseFunctions(PyObject* module);
}
```
|
====================================================================================================================================================
SOURCE CODE FILE: python_special_functions.h
LINES: 1
SIZE: 0.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_special_functions.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/pythoncapi_compat.h>
namespace torch::autograd {
void initSpecialFunctions(PyObject* module);
}
```
|
==================================================================================================================================================
SOURCE CODE FILE: python_torch_functions.h
LINES: 1
SIZE: 0.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_torch_functions.h
ENCODING: utf-8
```h
#include <Python.h>
namespace torch::autograd {
extern PyObject* THPVariableFunctionsModule;
// Wrapper converts a raised TypeError into returning NotImplemented
// Used to implement binary arithmetic operators
template <PyObject* (*Func)(PyObject*, PyObject*, PyObject*)>
inline PyObject* TypeError_to_NotImplemented_(
PyObject* self,
PyObject* args,
PyObject* kwargs) {
PyObject* ret = Func(self, args, kwargs);
if (!ret && PyErr_ExceptionMatches(PyExc_TypeError)) {
PyErr_Clear();
Py_INCREF(Py_NotImplemented);
ret = Py_NotImplemented;
}
return ret;
}
void initTorchFunctions();
} // namespace torch::autograd
```
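`TypeError_to_NotImplemented_` implements the usual CPython convention for binary operators: if the wrapped function raises `TypeError`, the wrapper clears it and returns `Py_NotImplemented` so the interpreter can try the reflected operand (e.g. `__radd__`). A hedged sketch of registering such a wrapper; `my_add_impl` and the method table are hypothetical, not part of this header:
```cpp
#include <torch/csrc/autograd/python_torch_functions.h>

// Hypothetical (self, args, kwargs)-style implementation that rejects its
// operands with a TypeError.
static PyObject* my_add_impl(PyObject* self, PyObject* args, PyObject* kwargs) {
  (void)self; (void)args; (void)kwargs;
  PyErr_SetString(PyExc_TypeError, "unsupported operand types");
  return nullptr;  // the wrapper below turns this into NotImplemented
}

static PyMethodDef my_methods[] = {
    {"my_add",
     // Standard double cast for METH_VARARGS | METH_KEYWORDS functions.
     (PyCFunction)(void (*)(void))
         torch::autograd::TypeError_to_NotImplemented_<my_add_impl>,
     METH_VARARGS | METH_KEYWORDS,
     nullptr},
    {nullptr, nullptr, 0, nullptr}};
```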
|
===========================================================================================================================================
SOURCE CODE FILE: python_variable.h
LINES: 1
SIZE: 3.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_variable.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/Tensor.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pythoncapi_compat.h>
#include <ATen/core/function_schema.h>
#include <pybind11/pybind11.h>
#include <torch/csrc/Exceptions.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/utils/pybind.h>
namespace py = pybind11;
// Python object that backs torch.autograd.Variable
struct THPVariable {
PyObject_HEAD
// Payload
c10::MaybeOwned<at::Tensor> cdata;
// Hooks to be run on backwards pass (corresponds to Python attr
// '_backwards_hooks', set by 'register_hook')
PyObject* backward_hooks = nullptr;
// Hooks to be run in the backwards pass after accumulate grad,
// i.e., after the .grad has been set (corresponds to Python attr
// '_post_accumulate_grad_hooks', set by 'register_post_accumulate_grad_hook')
PyObject* post_accumulate_grad_hooks = nullptr;
};
TORCH_PYTHON_API void registerPythonTensorClass(
const std::string& device,
PyObject* python_tensor_class);
TORCH_PYTHON_API void activateGPUTrace();
TORCH_PYTHON_API extern PyObject* THPVariableClass;
TORCH_PYTHON_API extern PyObject* ParameterClass;
bool THPVariable_initModule(PyObject* module);
TORCH_PYTHON_API PyObject* THPVariable_Wrap(const at::TensorBase& var);
inline bool THPVariable_CheckTypeExact(PyTypeObject* tp) {
// Check that a python object is a `Tensor`, but not a `Tensor` subclass.
// (A subclass could have different semantics.) The one exception is
// Parameter, which is used for Python bookkeeping but is equivalent to
// Tensor as far as C++ is concerned.
return (
tp == (PyTypeObject*)THPVariableClass ||
tp == (PyTypeObject*)ParameterClass);
}
inline bool THPVariable_CheckExact(PyObject* obj) {
return THPVariable_CheckTypeExact(Py_TYPE(obj));
}
inline bool THPVariable_Check(PyObject* obj) {
if (!THPVariableClass)
return false;
// Fast path
if (THPVariable_CheckExact(obj)) {
return true;
}
const auto result = PyObject_IsInstance(obj, THPVariableClass);
if (result == -1)
throw python_error();
return result;
}
inline const at::Tensor& THPVariable_Unpack(THPVariable* var) {
return *var->cdata;
}
inline const at::Tensor& THPVariable_Unpack(PyObject* obj) {
return THPVariable_Unpack(reinterpret_cast<THPVariable*>(obj));
}
std::pair<py::object, py::dict> parseIValuesToPyArgsKwargs(
const c10::OperatorHandle& op,
const std::vector<c10::IValue>& arguments);
void pushPyOutToStack(
const c10::OperatorHandle& op,
torch::jit::Stack* stack,
py::object out,
const char* msg);
inline PyObject* THPVariable_WrapList(
const torch::autograd::variable_list& inputs) {
PyObject* pyinput = PyList_New(static_cast<Py_ssize_t>(inputs.size()));
for (const auto i : c10::irange(inputs.size())) {
PyList_SET_ITEM(pyinput, i, THPVariable_Wrap(inputs[i]));
}
return pyinput;
}
inline torch::autograd::variable_list THPVariable_UnpackList(
PyObject* pyresult) {
TORCH_CHECK(PyList_CheckExact(pyresult));
auto result_len = PyList_GET_SIZE(pyresult);
torch::autograd::variable_list result;
result.reserve(result_len);
for (const auto i : c10::irange(result_len)) {
PyObject* item = PyList_GET_ITEM(pyresult, i);
if (!Py_IsNone(item)) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(THPVariable_Check(item));
result.emplace_back(THPVariable_Unpack(item));
} else {
result.emplace_back();
}
}
return result;
}
```
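The `THPVariable_Check` / `THPVariable_Unpack` / `THPVariable_Wrap` trio is the usual bridge between `PyObject*` and `at::Tensor` in binding code. A hedged sketch of a hypothetical extension function using them (GIL held, error handling kept minimal):
```cpp
#include <torch/csrc/autograd/python_variable.h>

// Hypothetical METH_O extension function: unpack a tensor argument, work on it
// in C++, and wrap the result back into a Python tensor (new reference).
static PyObject* double_tensor(PyObject* /*module*/, PyObject* arg) {
  if (!THPVariable_Check(arg)) {
    PyErr_SetString(PyExc_TypeError, "expected a torch.Tensor");
    return nullptr;
  }
  const at::Tensor& t = THPVariable_Unpack(arg);
  return THPVariable_Wrap(t * 2);
}
```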
|
====================================================================================================================================================
SOURCE CODE FILE: python_variable_indexing.h
LINES: 1
SIZE: 2.78 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\python_variable_indexing.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/SymInt.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/utils/python_symnode.h>
namespace torch::autograd {
struct UnpackedSlice {
c10::SymInt start;
c10::SymInt stop;
c10::SymInt step;
};
// This mirrors CPython's PySlice_Unpack method
inline UnpackedSlice __PySlice_Unpack(PyObject* _r) {
PySliceObject* r = (PySliceObject*)_r;
/* this is harder to get right than you might think */
c10::SymInt start_sym, stop_sym, step_sym;
auto clip_val = [](Py_ssize_t val) {
if (val < c10::SymInt::min_representable_int()) {
auto r = PyErr_WarnEx(
PyExc_UserWarning,
"Truncating the start/stop/step "
"of slice. This is likely because of "
"saved old models when the start/stop/step were larger.",
1);
if (r != 0) {
throw python_error();
}
return (Py_ssize_t)(c10::SymInt::min_representable_int());
}
return val;
};
if (r->step == Py_None) {
step_sym = c10::SymInt(1);
} else {
if (torch::is_symint(r->step)) {
step_sym = py::handle(r->step).cast<c10::SymInt>();
} else {
Py_ssize_t step = 0;
if (!_PyEval_SliceIndex(r->step, &step)) {
throw python_error();
}
if (step == 0) {
PyErr_SetString(PyExc_ValueError, "slice step cannot be zero");
}
step = clip_val(step);
step_sym = c10::SymInt(step);
}
}
if (torch::is_symint(r->start)) {
start_sym = py::handle(r->start).cast<c10::SymInt>();
} else if (r->start == Py_None) {
start_sym = c10::SymInt(step_sym < 0 ? PY_SSIZE_T_MAX : 0);
} else {
Py_ssize_t start = 0;
if (!_PyEval_SliceIndex(r->start, &start)) {
throw python_error();
}
start = clip_val(start);
start_sym = c10::SymInt(start);
}
if (torch::is_symint(r->stop)) {
stop_sym = py::handle(r->stop).cast<c10::SymInt>();
} else if (r->stop == Py_None) {
stop_sym = c10::SymInt(
step_sym < 0 ? c10::SymInt::min_representable_int() : PY_SSIZE_T_MAX);
} else {
Py_ssize_t stop = 0;
if (!_PyEval_SliceIndex(r->stop, &stop)) {
throw python_error();
}
stop = clip_val(stop);
stop_sym = c10::SymInt(stop);
}
return UnpackedSlice{
std::move(start_sym), std::move(stop_sym), std::move(step_sym)};
}
Py_ssize_t THPVariable_length(PyObject* self);
PyObject* THPVariable_getitem(PyObject* self, PyObject* index);
int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* value);
Variable valueToTensor(
c10::TensorOptions options,
PyObject* value,
const at::Device& device);
} // namespace torch::autograd
```
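`__PySlice_Unpack` converts a Python `slice` into `SymInt` bounds, defaulting missing endpoints the way CPython does (0 and `PY_SSIZE_T_MAX` for a positive step). A hedged sketch, assuming it runs in a process where torch's Python module is initialized and the GIL is held:
```cpp
#include <torch/csrc/autograd/python_variable_indexing.h>

// Unpack the all-defaults slice `[:]`. CPython treats NULL arguments to
// PySlice_New as None.
static torch::autograd::UnpackedSlice unpack_full_slice() {
  PyObject* s =
      PySlice_New(/*start=*/nullptr, /*stop=*/nullptr, /*step=*/nullptr);
  auto unpacked = torch::autograd::__PySlice_Unpack(s);
  Py_DECREF(s);
  // start == 0, stop == PY_SSIZE_T_MAX, step == 1
  return unpacked;
}
```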
|
===============================================================================================================================================
SOURCE CODE FILE: record_function_ops.h
LINES: 1
SIZE: 0.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\record_function_ops.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/record_function.h>
#include <torch/custom_class.h>
#include <optional>
namespace torch::autograd::profiler {
struct PythonRecordFunction : public torch::CustomClassHolder {
at::RecordFunction record;
explicit PythonRecordFunction(
at::RecordScope scope = at::RecordScope::FUNCTION)
: record(scope) {}
};
// Creates a new profiling scope using RecordFunction and invokes its starting
// callbacks.
TORCH_API c10::intrusive_ptr<PythonRecordFunction> record_function_enter_new(
const std::string& name,
const std::optional<std::string>& args = std::nullopt);
// Schedules RecordFunction's end callbacks to be run on completion of a future.
TORCH_API c10::intrusive_ptr<c10::ivalue::Future> _call_end_callbacks_on_fut_new(
const c10::intrusive_ptr<PythonRecordFunction>& record,
const c10::intrusive_ptr<c10::ivalue::Future>& fut);
} // namespace torch::autograd::profiler
```
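`PythonRecordFunction` simply owns an `at::RecordFunction`, the same primitive behind the `RECORD_FUNCTION` macro used to profile a C++ scope. A hedged sketch of the C++-side usage (the Python-side analogue is `torch.profiler.record_function`):
```cpp
#include <ATen/record_function.h>
#include <torch/torch.h>

void my_op(const torch::Tensor& x) {
  // Fires profiler/observer start callbacks here and the end callbacks when
  // the scope exits, similar to record_function_enter_new followed by
  // _call_end_callbacks_on_fut_new.
  RECORD_FUNCTION("my_op", std::vector<c10::IValue>({x}));
  auto y = x.matmul(x.t());
  (void)y;
}
```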
|
==========================================================================================================================================
SOURCE CODE FILE: saved_variable.h
LINES: 1
SIZE: 5.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\saved_variable.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/SafePyObject.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/forward_grad.h>
#include <torch/csrc/autograd/saved_variable_hooks.h>
#include <ATen/core/Tensor.h>
#include <cstdint>
#include <memory>
namespace torch::autograd {
using Variable = at::Tensor;
struct Node;
TORCH_API extern const char* ERR_BACKWARD_TWICE;
/// A snapshot of a variable at a certain version. A `SavedVariable` stores
/// enough information to reconstruct a variable from a certain point in time.
class TORCH_API SavedVariable {
public:
SavedVariable() = default;
SavedVariable(
const Variable& variable,
bool is_output,
bool is_inplace_on_view = false);
SavedVariable(
const std::optional<Variable>& variable,
bool is_output,
bool is_inplace_on_view = false);
SavedVariable(const SavedVariable&) = delete;
SavedVariable(SavedVariable&&) = default;
SavedVariable& operator=(const SavedVariable&) = delete;
SavedVariable& operator=(SavedVariable&&) = default;
~SavedVariable() {
if (fw_grad_) {
// See note [ Using ForwardGrad ]
fw_grad_->clear();
}
}
/// Reconstructs the saved variable. Pass `saved_for` as the gradient
/// function if constructing the `SavedVariable` with it would have caused a
/// circular reference.
Variable unpack(std::shared_ptr<Node> saved_for = nullptr) const;
void register_hooks(std::unique_ptr<SavedVariableHooks>&& hooks);
void reset_data();
bool has_hooks() const {
return (bool)hooks_;
}
// Used by compiled autograd
std::optional<std::pair<c10::SafePyObject, c10::SafePyObject>>
retrieve_unpack_hook_data() const {
if (!hooks_) {
return std::nullopt;
}
return hooks_->retrieve_unpack_hook_data();
}
private:
// This field contains either:
// 1. the variable to save
// 2. or its tensor_data.
// If storing the variable itself would create a circular reference,
// we fall into the second case and its metadata is also saved separately.
// In that case, the grad_fn must be passed in to the unpack function when
// reconstructing the Variable (except when we are doing an inplace operation
// on a view, see below). The field saved_original_ below reflects the two
// cases: its value is true in the first case and false in the second case.
// The value data_.defined() can be false in three cases:
// 1. SavedVariable was constructed without a Tensor (the value to save is
// None), in that case was_default_constructed_ will be kept at true
// 2. The saved variable has been released by calling
// SavedVariable::reset_data(), typically during the backward pass
// 3. Hooks have been registered. In that case, hooks_ will be defined
// instead. Note that the value of saved_original_ only reflects what happened
// during the construction of the SavedVariable. If saved_original_ is true,
// we saved the original tensor in data_, but if the user registers hooks, we
// will no longer have it (despite the saved_original_ still being true)
at::Tensor data_;
// This field is used to store the forward AD gradients associated with
// the saved Tensor. Note that this shared_ptr must never be shared with
// either the saved Tensor or the unpacked Tensor. See note [ Using
// ForwardGrad ]
std::shared_ptr<ForwardGrad> fw_grad_;
// Weak version of grad_fn_ that prevents leaks in rebase_history() for
// inplace views.
// This variable is used when the user chooses to create a SavedVariable with
// is_inplace_on_view = true.
// In that case, the grad_fn passed in to the unpack function at unwrapping
// time is unused.
std::weak_ptr<Node> weak_grad_fn_;
uint32_t saved_version_ = 0;
uint32_t output_nr_ = 0;
bool was_default_constructed_ = true;
bool is_inplace_on_view_ = false;
bool saved_original_ = false;
bool is_leaf_ = false;
bool is_output_ = false;
// Hooks are a pair of functions pack_hook/unpack_hook that provides
// fine-grained control over how the SavedVariable should save its data.
// pack_hook is called upon registration, while unpack_hook is called when
// unpacking.
std::unique_ptr<SavedVariableHooks> hooks_;
// Fields grad_fn_, grad_accumulator_, and requires_grad_ are only used if
// hooks are defined. They are set before pack_hook is called and used after
// unpack_hook is called.
std::shared_ptr<Node> grad_fn_;
// For the usual case where a leaf tensor is the input, we expect its
// grad_acc to be kept alive by the graph. The reason SavedVariable holds
// an owning reference is to support the case where a custom autograd Function
// saves an intermediate.
std::shared_ptr<Node> grad_accumulator_;
bool requires_grad_ = false;
void save_metadata(const Variable& data);
static std::unique_ptr<SavedVariableHooks> get_default_hooks();
void set_hooks_and_pack_data(
std::unique_ptr<SavedVariableHooks>&& hooks,
const Variable& data);
};
} // namespace torch::autograd
```
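Typical life cycle: a `SavedVariable` is constructed from a tensor during the forward pass and `unpack()` reconstructs it during backward (with `saved_for` supplied when storing the grad_fn directly would create a cycle). A minimal hedged sketch using only the API declared above:
```cpp
#include <torch/torch.h>
#include <torch/csrc/autograd/saved_variable.h>

void sketch() {
  auto x = torch::randn({4}, torch::requires_grad());
  // Snapshot x at its current version (x is an input being saved, not an output).
  torch::autograd::SavedVariable saved(x, /*is_output=*/false);
  auto x0 = saved.unpack();  // reconstructs the saved tensor
  // If x had been mutated in place between saving and unpacking, unpack()
  // would raise the usual "modified by an inplace operation" error instead.
  (void)x0;
}
```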
|
================================================================================================================================================
SOURCE CODE FILE: saved_variable_hooks.h
LINES: 1
SIZE: 0.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\saved_variable_hooks.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/Tensor.h>
#include <c10/core/SafePyObject.h>
namespace torch::autograd {
struct TORCH_API SavedVariableHooks {
virtual void call_pack_hook(const at::Tensor& tensor) = 0;
virtual at::Tensor call_unpack_hook() = 0;
virtual ~SavedVariableHooks() = default;
virtual std::optional<std::pair<c10::SafePyObject, c10::SafePyObject>>
retrieve_unpack_hook_data() const {
throw std::runtime_error(
"Compiled Autograd only supports python saved tensor hooks ");
}
};
} // namespace torch::autograd
```
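`SavedVariableHooks` is the abstract interface behind saved-tensor hooks; `PySavedVariableHooks` above implements it for Python callables, and an instance can be attached via `SavedVariable::register_hooks` (declared in saved_variable.h above). A hedged sketch of a trivial C++ implementation, an identity pack/unpack pair for illustration only:
```cpp
#include <ATen/ATen.h>
#include <torch/csrc/autograd/saved_variable_hooks.h>

// Identity hooks: "pack" by holding the tensor, "unpack" by returning it.
struct IdentityHooks : torch::autograd::SavedVariableHooks {
  void call_pack_hook(const at::Tensor& tensor) override {
    stored_ = tensor;
  }
  at::Tensor call_unpack_hook() override {
    return stored_;
  }

 private:
  at::Tensor stored_;
};
```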
|
====================================================================================================================================
SOURCE CODE FILE: symbolic.h
LINES: 1
SIZE: 0.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\symbolic.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/onnx/onnx.h>
namespace torch::autograd {
struct SymbolicContext {
jit::Block* block;
};
struct symbolic_unconvertible : public std::runtime_error {
using std::runtime_error::runtime_error;
};
} // namespace torch::autograd
```
|
================================================================================================================================================
SOURCE CODE FILE: error_messages.h
LINES: 1
SIZE: 0.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\utils\error_messages.h
ENCODING: utf-8
```h
#pragma once
#include <sstream>
namespace torch::autograd::utils {
inline std::string requires_grad_leaf_error(bool requires_grad) {
std::ostringstream oss;
oss << "you can only change requires_grad flags of leaf variables.";
if (requires_grad == false) {
oss << " If you want to use a computed variable in a subgraph "
"that doesn't require differentiation use "
"var_no_grad = var.detach().";
}
return oss.str();
}
} // namespace torch::autograd::utils
```
|
======================================================================================================================================================
SOURCE CODE FILE: grad_layout_contract.h
LINES: 1
SIZE: 2.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\utils\grad_layout_contract.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Tensor.h>
namespace torch::autograd::utils {
// Helper functions to enforce the "Gradient Layout Contract" described in
// torch/csrc/autograd/functions/accumulate_grad.h.
// Checks if grad obeys the contract with variable.
inline bool obeys_layout_contract(
const at::Tensor& grad,
const at::Tensor& variable) {
TORCH_INTERNAL_ASSERT(!grad.is_sparse());
TORCH_INTERNAL_ASSERT(!grad.is_sparse_csr());
TORCH_INTERNAL_ASSERT(!variable.is_sparse_csr());
// NOLINTNEXTLINE(bugprone-branch-clone)
if (variable.is_nested()) {
// TODO: Nested Tensor does not have an implementation of detach. The
// current implementation of nested tensor likely does obey the gradient
// contract and should return true, but this would likely change in the
// future
return false;
} else if (variable.is_sparse()) {
// Gradient Layout Contract is not applicable for sparse layouts
return false;
} else if (variable.is_non_overlapping_and_dense()) {
// Only look at stride for dimensions that are not of size 1.
const auto& grad_sizes = grad.sym_sizes();
const auto& grad_strides = grad.sym_strides();
const auto& variable_strides = variable.sym_strides();
for (const auto idx : c10::irange(grad_sizes.size())) {
if (grad_sizes[idx] != 1) {
if (grad_strides[idx] != variable_strides[idx]) {
return false;
}
} else {
// This should not be needed but we don't check if a Tensor has views
// before stashing it. And 0-strided Tensors of size 1 are actually
// views for ops like cat.
// TODO: Actually detect views in the accumulateGrad function so that
// this Tensor is not considered at all.
if (grad_strides[idx] == 0) {
return false;
}
}
}
return true;
} else {
return grad.is_contiguous(at::MemoryFormat::Contiguous);
}
}
// Creates a clone of new_grad that obeys the contract with variable.
// The clone should attach to new_grad's history if GradMode::is_enabled().
inline at::Tensor clone_obey_contract(
const at::Tensor& new_grad,
const at::Tensor& variable) {
if (variable.is_non_overlapping_and_dense()) {
// (1)
// Does this dicey-looking sequence attach the result to new_grad's
// history if GradMode::is_enabled()? Yes, and @alband says it should.
return std::move(new_grad
.new_empty_strided_symint(
variable.sym_sizes(),
variable.sym_strides(),
variable.options().memory_format(std::nullopt))
.copy_(new_grad));
} else {
// (2)
return new_grad.clone(at::MemoryFormat::Contiguous);
}
}
} // namespace torch::autograd::utils
```
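In gradient accumulation these helpers make `param.grad` match `param`'s strides whenever the contract allows it. A hedged sketch of the check-then-clone pattern:
```cpp
#include <torch/torch.h>
#include <torch/csrc/autograd/utils/grad_layout_contract.h>

void sketch() {
  // A dense but non-contiguous "parameter" and a contiguous incoming gradient.
  auto variable = torch::randn({4, 3}).t();  // sizes {3, 4}, strides {1, 3}
  auto grad = torch::ones({3, 4});           // strides {4, 1}
  if (!torch::autograd::utils::obeys_layout_contract(grad, variable)) {
    // Case (1) above: clone into a tensor laid out like `variable`.
    grad = torch::autograd::utils::clone_obey_contract(grad, variable);
  }
  TORCH_INTERNAL_ASSERT(grad.strides() == variable.strides());
}
```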
|
==================================================================================================================================================
SOURCE CODE FILE: lambda_post_hook.h
LINES: 1
SIZE: 1.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\utils\lambda_post_hook.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/function_hook.h>
#include <torch/csrc/dynamo/compiled_autograd.h>
namespace torch::autograd::utils {
// Turns lambda into a torch::autograd::FunctionPostHook.
class LambdaPostHook : public torch::autograd::FunctionPostHook {
using variable_list = std::vector<torch::autograd::Variable>;
using fn_type =
std::function<variable_list(const variable_list&, const variable_list&)>;
using compiled_fn_type = std::function<void(CompiledNodeArgs&)>;
public:
// The lambda function takes as arguments the outputs and inputs of the
// autograd function and can modify the outputs of the autograd function by
// returning a new output if needed.
/* implicit */ LambdaPostHook(fn_type fn) : fn_(std::move(fn)) {}
LambdaPostHook(fn_type fn, compiled_fn_type compiled_fn)
: fn_(std::move(fn)), compiled_fn_(std::move(compiled_fn)) {}
variable_list operator()(
const variable_list& outputs,
const variable_list& inputs) override {
return fn_(outputs, inputs);
}
void compiled_args(CompiledNodeArgs& args) const override {}
protected:
std::function<variable_list(const variable_list&, const variable_list&)> fn_;
compiled_fn_type compiled_fn_{};
};
} // namespace torch::autograd::utils
```
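`LambdaPostHook` is how C++ callers (the distributed reducer, for instance) attach a callback that runs after a node fires. A hedged sketch attaching one to a leaf tensor's gradient accumulator; `impl::grad_accumulator` and `Node::add_post_hook` come from other autograd headers and are assumed available:
```cpp
#include <torch/torch.h>
#include <torch/csrc/autograd/utils/lambda_post_hook.h>
#include <memory>

void sketch() {
  auto t = torch::randn({2, 2}, torch::requires_grad());
  auto accumulator = torch::autograd::impl::grad_accumulator(t);
  accumulator->add_post_hook(
      std::make_unique<torch::autograd::utils::LambdaPostHook>(
          [](const std::vector<torch::autograd::Variable>& outputs,
             const std::vector<torch::autograd::Variable>& /*inputs*/) {
            // Runs right after the gradient has been accumulated into t.grad().
            return outputs;
          }));
  (t * t).sum().backward();
}
```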
|
====================================================================================================================================================
SOURCE CODE FILE: python_arg_parsing.h
LINES: 1
SIZE: 1.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\utils\python_arg_parsing.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/Tensor.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/python_arg_parser.h>
namespace torch::autograd::utils {
// The parameter allow_copy is to accept copy for Tensor.to (and by proxy
// PackedSequences.to) but not nn.Module.to.
inline std::tuple<
std::optional<at::Device>,
std::optional<at::ScalarType>,
bool,
bool,
std::optional<at::MemoryFormat>>
parse_to_conversion(PythonArgs& r, bool allow_copy) {
if (r.idx == 0) {
if (!allow_copy && !r.isNone(3))
throw std::runtime_error(".to() does not accept copy argument");
return std::make_tuple(
r.deviceOptional(0),
r.scalartypeOptional(1),
r.toBool(2),
r.toBool(3),
r.memoryformatOptional(4));
} else if (r.idx == 1) {
if (!allow_copy && !r.isNone(2))
throw std::runtime_error(".to() does not accept copy argument");
return std::make_tuple(
std::nullopt,
r.scalartype(0),
r.toBool(1),
r.toBool(2),
r.memoryformatOptional(3));
} else {
auto tensor = r.tensor(0);
if (!allow_copy && !r.isNone(2))
throw std::runtime_error(".to() does not accept copy argument");
return std::make_tuple(
tensor.device(),
tensor.scalar_type(),
r.toBool(1),
r.toBool(2),
r.memoryformatOptional(3));
}
}
} // namespace torch::autograd::utils
```
|
==========================================================================================================================================
SOURCE CODE FILE: warnings.h
LINES: 1
SIZE: 0.59 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\utils\warnings.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/Exception.h>
#include <mutex>
#include <vector>
namespace torch::autograd::utils {
// Warning handler for multi-threaded contexts. Gather warnings from
// all threads into a single queue, then process together at the end
// in the main thread.
class DelayWarningHandler : public at::WarningHandler {
public:
~DelayWarningHandler() override = default;
void replay_warnings();
private:
void process(const c10::Warning& warning) override;
std::vector<c10::Warning> warnings_;
std::mutex mutex_;
};
} // namespace torch::autograd::utils
```
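The intended pattern: each worker thread installs the same `DelayWarningHandler` as its warning handler while it runs (the internal mutex makes the queue safe to share), and the main thread replays everything at the end. A hedged single-thread sketch using the c10 handler guard:
```cpp
#include <c10/util/Exception.h>
#include <torch/csrc/autograd/utils/warnings.h>

void sketch() {
  torch::autograd::utils::DelayWarningHandler delayed;
  {
    // Route warnings raised on this thread into the handler's queue.
    c10::WarningUtils::WarningHandlerGuard guard(&delayed);
    TORCH_WARN("queued, not emitted yet");
  }
  delayed.replay_warnings();  // emitted here, on the calling thread
}
```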
|
==============================================================================================================================================
SOURCE CODE FILE: wrap_outputs.h
LINES: 1
SIZE: 3.80 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\utils\wrap_outputs.h
ENCODING: utf-8
```h
#pragma once
// Wrap tensor operation outputs as PyObject*
#include <ATen/ScalarOps.h>
#include <ATen/core/Tensor.h>
#include <c10/util/irange.h>
#include <torch/csrc/python_headers.h>
#include <initializer_list>
#include <tuple>
#include <torch/csrc/Dtype.h>
#include <torch/csrc/DynamicTypes.h>
#include <torch/csrc/Layout.h>
#include <torch/csrc/QScheme.h>
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/tensor_qschemes.h>
namespace torch::autograd::utils {
inline PyObject* wrap(bool value) {
if (value) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
}
}
inline PyObject* wrap(c10::DeviceIndex value) {
return THPUtils_packDeviceIndex(value);
}
inline PyObject* wrap(int64_t value) {
return THPUtils_packInt64(value);
}
inline PyObject* wrap(double value) {
return PyFloat_FromDouble(value);
}
inline PyObject* wrap(c10::complex<double> value) {
// I could probably also use FromComplex with a reinterpret cast,
// but... eh.
return PyComplex_FromDoubles(value.real(), value.imag());
}
inline PyObject* wrap(void* value) {
return PyLong_FromVoidPtr(value);
}
inline PyObject* wrap(THPDtype* dtype) {
return Py_NewRef(dtype);
}
inline PyObject* wrap(at::ScalarType scalarType) {
return Py_NewRef(getTHPDtype(scalarType));
}
inline PyObject* wrap(THPLayout* layout) {
return Py_NewRef(layout);
}
inline PyObject* wrap(at::Layout layout) {
return Py_NewRef(getTHPLayout(layout));
}
inline PyObject* wrap(const at::Tensor& tensor) {
return THPVariable_Wrap(tensor);
}
inline PyObject* wrap(const at::Scalar& scalar) {
return wrap(scalar_to_tensor(scalar));
}
inline PyObject* wrap(at::QScheme qscheme) {
auto* thp_qscheme = torch::utils::getTHPQScheme(qscheme);
Py_INCREF(thp_qscheme);
return thp_qscheme;
}
inline PyObject* wrap(at::TensorList tl) {
auto r = THPObjectPtr{PyTuple_New(static_cast<Py_ssize_t>(tl.size()))};
if (!r)
throw python_error();
for (const auto i : c10::irange(tl.size())) {
PyTuple_SET_ITEM(r.get(), i, wrap(tl[i]));
}
return r.release();
}
inline PyObject* wrap(at::IntArrayRef list) {
auto r = THPObjectPtr{PyTuple_New(static_cast<Py_ssize_t>(list.size()))};
if (!r)
throw python_error();
for (const auto i : c10::irange(list.size())) {
PyTuple_SET_ITEM(r.get(), i, wrap(list[i]));
}
return r.release();
}
inline PyObject* wrap(at::Stream stream) {
return THPStream_Wrap(stream);
}
namespace detail {
template <typename F, typename Tuple, size_t... Is>
void apply_with_idx_impl(
const F& f,
Tuple& t,
std::index_sequence<Is...> /*indices*/) {
(void)std::initializer_list<int>{(f(std::get<Is>(t), Is), 0)...};
}
// For tuple(a, b, c), calls f(a, 0), f(b, 1), f(c, 2)
template <typename F, typename... Ts>
void apply_with_idx(const F& f, std::tuple<Ts...>& t) {
apply_with_idx_impl(f, t, std::index_sequence_for<Ts...>{});
}
} // namespace detail
template <typename... Ts>
PyObject* wrap(std::tuple<Ts...> values) {
auto r = THPObjectPtr{PyTuple_New(sizeof...(Ts))};
if (!r)
throw python_error();
detail::apply_with_idx(
[&](auto& value, size_t idx) {
PyTuple_SET_ITEM(r.get(), idx, wrap(std::move(value)));
},
values);
return r.release();
}
template <typename... Ts>
PyObject* wrap(PyTypeObject* type, std::tuple<Ts...> values) {
auto r = THPObjectPtr{PyStructSequence_New(type)};
if (!r)
throw python_error();
detail::apply_with_idx(
[&](auto& value, size_t idx) {
PyStructSequence_SET_ITEM(r.get(), idx, wrap(std::move(value)));
},
values);
return r.release();
}
} // namespace torch::autograd::utils
```
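These overloads let generated binding code turn C++ return values into `PyObject*`; the tuple overload wraps each element and packs them into a Python tuple. A hedged sketch (GIL held, as at any Python boundary):
```cpp
#include <torch/torch.h>
#include <torch/csrc/autograd/utils/wrap_outputs.h>

// Wrap a (Tensor, int64_t, bool) triple into a Python tuple; returns a new
// reference.
PyObject* wrap_example() {
  auto result = std::make_tuple(torch::ones({2, 2}), int64_t{3}, true);
  return torch::autograd::utils::wrap(std::move(result));
}
```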
|
====================================================================================================================================
SOURCE CODE FILE: variable.h
LINES: 1
SIZE: 40.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\variable.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/python_stub.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/cpp_hook.h>
#include <torch/csrc/autograd/edge.h>
#include <torch/csrc/autograd/forward_grad.h>
#include <torch/csrc/autograd/function_hook.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/core/Tensor.h>
#include <ATen/core/VariableHooksInterface.h>
#include <c10/util/Exception.h>
#include <cstdint>
#include <memory>
#include <mutex>
#include <string>
#include <utility>
#include <vector>
namespace torch::autograd {
/// `Variable` is exactly the same as `Tensor` (i.e. we have `using Variable =
/// at::Tensor`). This means you can perform all the usual mathematical and
/// other operations you can perform on `Tensor`s also on `Variable`s.
///
/// The only reason we are keeping the `Variable` class is backward
/// compatibility with external user's legacy C++ frontend code. Our intention
/// is to eliminate the `Variable` class in the near future.
using Variable = at::Tensor;
} // namespace torch::autograd
// The following are all internal APIs and should not be shown in libtorch docs.
// Therefore, we wrap the following code with `#ifndef DOXYGEN_SHOULD_SKIP_THIS
// ... #endif`
#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace torch::autograd {
/// Check if this type is supported by the autograd engine.
/// If you change this, update the doc at the top of the
/// torch/autograd/__init__.py file and
/// "test_set_requires_grad_only_for_continuous_types" in test/test_autograd.py
static inline bool isDifferentiableType(at::ScalarType t) {
return isFloatingType(t) || isComplexType(t);
}
struct Node;
///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Variable
///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// A `Variable` augments a `Tensor` with the ability to interact in our
/// autograd machinery. Conceptually, `Variable`s travel along `Edge`s between
/// `Node`s in the autograd graph. A `Variable` can either be a leaf, like a
/// weight in a neural network, or an interior variable, when it is the result
/// of an operation between variables. Every `Variable` also stores another
/// `Variable` called its `grad` (gradient). If the variable is a leaf, its
/// gradient will be accumulated into this variable.
///
/// Every Tensor is a Variable, but sometimes we colloquially refer to Variables
/// that don't require gradients as Tensors (since none of the autograd
/// machinery for Variables applies). Historically, Variables and Tensors
/// were separate concepts, but now they are exactly the same (i.e. we have
/// `using Variable = at::Tensor`).
///
/// Gradient Edges
///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Furthermore, `Variable`s have the notion of a `gradient_edge`, which is the
/// edge in the autograd graph that connects the variable to a particular input
/// of the gradient function that will be invoked with the variable during the
/// backward pass. More precisely, this gradient function can be one of two
/// things:
/// 1. A `grad_fn`, if the variable is in the interior of the graph. This is the
/// gradient of the function that produced the variable.
/// 2. A `grad_accumulator`, if the variable is a leaf, which accumulates a
/// scalar gradient value into its `grad` variable.
///
/// Versioning
///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Another major feature of `Variable`s are *versions*. Versions are
/// incremented when an in-place mutation of a variable occurs. Versions are
/// useful when constructing `SavedVariable`s, which take a snapshot of a
/// `Variable` at a certain version. You can retrieve a `Variable`'s version
/// through its `current_version()` method.
///
/// Views
///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// It is possible for a `Variable` to be a *view* of another `Variable`, in
/// which case it tracks that `Variable`'s data and autograd history. Beyond
/// construction, the interface of a view is identical to that of a regular
/// `Variable`. You can determine whether `Variable` is in fact a view by
/// probing its `is_view()` method. Note that the *view* semantics are only
/// meaningful for `Variable` relations that are relevant to autograd.
/// See NOTE [ Autograd View Variables ] for more details.
///~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
struct AutogradMeta;
struct DifferentiableViewMeta;
// Private-ish functions for manipulating variables; we don't want to put them
// on Tensor proper
namespace impl {
// WARNING: This may return a nullptr. If you require AutogradMeta to return
// a materialized structure, use materialize_autograd_meta instead.
TORCH_API AutogradMeta* get_autograd_meta(const at::TensorBase&);
// WARNING: This will return a nullptr if the Tensor is not a view.
TORCH_API DifferentiableViewMeta* get_view_autograd_meta(const at::TensorBase&);
// Returns the current autograd meta, materializing it if it was previously
// none. This counts as a *mutating* operation, so do not call it on
// "read-only" operators; in particular, this is NOT thread safe
TORCH_API AutogradMeta* materialize_autograd_meta(const at::TensorBase&);
/// Set the gradient accumulator of the `Variable`. This is only applicable to
/// leaf variables. Interior variables should call `set_gradient_edge()`.
TORCH_API void set_grad_accumulator(
const Variable&,
std::weak_ptr<Node> grad_accumulator);
/// Attempts to get a pointer to the gradient accumulator of the `Variable`,
/// if it still exists. If the gradient accumulator function has been
/// destroyed, returns a `nullptr`.
TORCH_API std::shared_ptr<Node> try_get_grad_accumulator(const Variable&);
/// Gets the gradient accumulator of the `Variable` if it has one, or else
/// create one on the fly and return it.
TORCH_API std::shared_ptr<Node> grad_accumulator(const Variable&);
/// Returns the "canonical" gradient edge of this `Variable`, i.e. either the
/// gradient function if this is an interior `Variable`, or the gradient
/// accumulator otherwise. If the `Variable` is interior, the returned `Edge`
/// will store the input index of the `Node` to which this variable is
/// connected in its `input_nr` field. For leaves, the `input_nr` is always
/// zero. Note that `set_gradient_edge` and `gradient_edge` are not
/// symmetric. You must use `set_gradient_edge` to set the `grad_fn` and
/// `set_grad_accumulator` to set the accumulator.
TORCH_API Edge gradient_edge(const Variable&);
/// Set the gradient edge -- i.e. `grad_fn` and `input_nr` -- of the
/// `Variable`.
/// NOTE: This will always set the `grad_fn`, even if this is a leaf variable,
/// and never the `grad_accumulator`. For the latter, use
/// `set_grad_accumulator`. This allows late construction of an interior
/// `Variable`.
TORCH_API void set_gradient_edge(const Variable&, Edge edge);
// Autograd Graph Interaction
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Update the `grad_fn` of an existing Variable. Called after in-place
/// modifications.
///
/// For View Variables:
/// Called after in-place modifications. Modifies the grad_fn of the base
/// Variable.
TORCH_API void rebase_history(const Variable&, Edge gradient_edge);
/// Gets the raw gradient function pointer, whatever it currently is.
TORCH_API Node* grad_fn_unsafe(const Variable&);
/// Increments the version count of this `Variable`.
TORCH_API void bump_version(const Variable&);
TORCH_API void set_version_counter(
const Variable&,
const c10::VariableVersion& version_counter);
/// Retrieves this `Variable`s version counter.
TORCH_API const c10::VariableVersion& version_counter(const Variable&);
TORCH_API void set_name(const Variable&, const std::string& name);
TORCH_API void add_hook(
const at::TensorBase&,
std::unique_ptr<FunctionPreHook> hook);
TORCH_API std::vector<std::unique_ptr<FunctionPreHook>>& hooks(const Variable&);
TORCH_API void clear_hooks(const at::TensorBase&);
TORCH_API void set_post_acc_grad_hooks(
const at::TensorBase&,
std::unique_ptr<PostAccumulateGradHook> dict);
TORCH_API std::unique_ptr<PostAccumulateGradHook>& post_acc_grad_hooks(
const Variable&);
TORCH_API void create_cpp_hook(
const at::TensorBase&,
bool is_retains_grad_hooks = false);
} // namespace impl
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// AutogradMeta
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Each `Variable` has one unique `AutogradMeta` struct, which stores autograd
/// metadata fields that are necessary for tracking the Variable's autograd
/// history. As an optimization, a Variable may store a nullptr, in lieu of a
/// default constructed AutogradMeta.
struct TORCH_API AutogradMeta : public c10::AutogradMetaInterface {
std::string name_;
Variable grad_;
std::shared_ptr<Node> grad_fn_;
std::weak_ptr<Node> grad_accumulator_;
// This field is used to store all the forward AD gradients
// associated with this AutogradMeta (and the Tensor it corresponds to)
// There is a semantic 1:1 correspondence between AutogradMeta and
// ForwardGrad but:
// - This field is lazily populated.
// - This field is a shared_ptr but it must never be
// shared by multiple Tensors. See Note [ Using ForwardGrad ]
// Any transition from not_initialized to initialized
// must be protected by mutex_
mutable std::shared_ptr<ForwardGrad> fw_grad_;
// The hooks_ field is actually reused by both python and cpp logic
// For both cases, we have a data structure, cpp_hooks_list_ (cpp)
// or dict (python) which is the canonical copy.
// Then, for both cases, we always register a single hook to
// hooks_ which wraps all the hooks in the list/dict.
// And, again in both cases, if the grad_fn exists on that tensor
// we will additionally register a single hook to the grad_fn.
//
// Note that the cpp and python use cases aren't actually aware of
// each other, so using both is not defined behavior.
std::vector<std::unique_ptr<FunctionPreHook>> hooks_;
std::shared_ptr<hooks_list> cpp_hooks_list_;
// The post_acc_grad_hooks_ field stores only Python hooks
// (PyFunctionTensorPostAccGradHooks) that are called after the
// .grad field has been accumulated into. This is less complicated
// than the hooks_ field, which encapsulates a lot more.
std::unique_ptr<PostAccumulateGradHook> post_acc_grad_hooks_ = nullptr;
// Only meaningful on leaf variables (must be false otherwise)
bool requires_grad_{false};
// Only meaningful on non-leaf variables (must be false otherwise)
bool retains_grad_{false};
bool is_view_{false};
// The "output number" of this variable; e.g., if this variable
// was the second output of a function, then output_nr == 1.
// We use this to make sure we can setup the backwards trace
// correctly when this variable is passed to another function.
uint32_t output_nr_;
// Mutex to ensure that concurrent read operations that modify internal
// state are still thread-safe. Used by grad_fn(), grad_accumulator(),
// fw_grad() and set_fw_grad()
// This is mutable because we need to be able to acquire this from const
// version of this class for the functions above
mutable std::mutex mutex_;
/// Sets the `requires_grad` property of `Variable`. This should be true for
/// leaf variables that want to accumulate gradients, and false for all other
/// variables.
void set_requires_grad(bool requires_grad, at::TensorImpl* self_impl) final {
TORCH_CHECK(
!requires_grad ||
isDifferentiableType(at::typeMetaToScalarType(self_impl->dtype())),
"Only Tensors of floating point and complex dtype can require gradients");
requires_grad_ = requires_grad;
}
bool requires_grad() const override {
return requires_grad_ || grad_fn_;
}
/// Accesses the gradient `Variable` of this `Variable`.
Variable& mutable_grad() override {
return grad_;
}
const Variable& grad() const override {
return grad_;
}
const Variable& fw_grad(uint64_t level, const at::TensorBase& self)
const override;
void set_fw_grad(
const at::TensorBase& new_grad,
const at::TensorBase& self,
uint64_t level,
bool is_inplace_op) override;
AutogradMeta(
at::TensorImpl* self_impl = nullptr,
bool requires_grad = false,
Edge gradient_edge = Edge())
: grad_fn_(std::move(gradient_edge.function)),
output_nr_(gradient_edge.input_nr) {
// set_requires_grad also checks error conditions.
if (requires_grad) {
TORCH_INTERNAL_ASSERT(self_impl);
set_requires_grad(requires_grad, self_impl);
}
TORCH_CHECK(
!grad_fn_ || !requires_grad_,
"requires_grad should be false if grad_fn is set");
}
~AutogradMeta() override {
// If AutogradMeta is being destroyed, it means that there is no other
// reference to its corresponding Tensor. It implies that no other thread
// can be using this object and so there is no need to lock mutex_ here to
// guard the check if fw_grad_ is populated.
if (fw_grad_) {
// See note [ Using ForwardGrad ]
fw_grad_->clear();
}
}
};
/// Base class for view functions, providing reapplication of a view on a new
/// base. Each view op should get a codegenerated subclass of this class
/// containing any state needed to reconstruct the view. The class also provides
/// convenience accessors for saved SymInts / tensor state. This is useful for
/// e.g. fake-ification, where we want to use symbolic values or fake tensors
/// instead.
struct TORCH_API ViewFunc {
virtual ~ViewFunc() = default;
/// Returns any SymInts in the saved state.
virtual std::vector<c10::SymInt> get_symints() const {
return {};
}
/// Returns the number of SymInts in the saved state.
virtual size_t num_symints() const {
return 0;
}
/// Returns any tensors in the saved state.
virtual std::vector<at::Tensor> get_tensors() const {
return {};
}
/// Returns the number of tensors in the saved state.
virtual size_t num_tensors() const {
return 0;
}
/// Reapplies the view on the given base using the saved state.
virtual at::Tensor operator()(const at::Tensor&) const = 0;
/// Returns a clone of this ViewFunc, optionally with the specified saved
/// state.
virtual std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = std::nullopt,
std::optional<std::vector<at::Tensor>> = std::nullopt) const = 0;
protected:
/// Sets the values of any SymInts in the saved state. The input vector size
/// must match the number of SymInts in the saved state (i.e. the size of the
/// list returned by get_symints()).
/// NOLINTNEXTLINE(performance-unnecessary-value-param)
virtual void set_symints(std::vector<c10::SymInt>) {}
/// Sets the values of any Tensors in the saved state. The input vector size
/// must match the number of Tensors in the saved state (i.e. the size of the
/// list returned by get_tensors()).
/// NOLINTNEXTLINE(performance-unnecessary-value-param)
virtual void set_tensors(std::vector<at::Tensor>) {}
};
/// ViewFunc that represents a chain of two ViewFuncs.
struct ChainedViewFunc : public ViewFunc {
ChainedViewFunc(
std::unique_ptr<ViewFunc> first,
std::unique_ptr<ViewFunc> second)
: first(std::move(first)), second(std::move(second)) {}
~ChainedViewFunc() override = default;
std::vector<c10::SymInt> get_symints() const override;
size_t num_symints() const override {
return first->num_symints() + second->num_symints();
}
std::vector<at::Tensor> get_tensors() const override;
size_t num_tensors() const override {
return first->num_tensors() + second->num_tensors();
}
at::Tensor operator()(const at::Tensor&) const override;
std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = std::nullopt,
std::optional<std::vector<at::Tensor>> = std::nullopt) const override;
private:
std::unique_ptr<ViewFunc> first;
std::unique_ptr<ViewFunc> second;
};
/// ViewFunc that errors with a specified error message when called.
struct ErroringViewFunc : public ViewFunc {
ErroringViewFunc(std::string error_msg) : error_msg(std::move(error_msg)) {}
~ErroringViewFunc() override = default;
at::Tensor operator()(const at::Tensor&) const override {
TORCH_CHECK(false, error_msg);
}
std::unique_ptr<ViewFunc> clone_and_set(
std::optional<std::vector<c10::SymInt>> = std::nullopt,
std::optional<std::vector<at::Tensor>> = std::nullopt) const override {
return std::make_unique<ErroringViewFunc>(error_msg);
}
private:
std::string error_msg;
};
struct TORCH_API ViewInfo {
/// The base `Variable`
/// If this ViewInfo represents a forward (respectively backward) AD gradient,
/// then this Tensor cannot be a forward (respectively backward) view.
Variable base_;
/// By default we use as_strided to recover views, which is more efficient.
/// view_fn is only saved when as_strided is not supported.
/// If view_fn has a value, we use it to recover views in backward.
std::unique_ptr<ViewFunc> view_fn_;
/// Analogue of view_fn but in reverse: given a view -> produce the base by
/// applying the inverse view.
std::function<Variable(const Variable&)> rev_view_fn_;
/// Accessors for the view function
bool has_view_fn() const {
// assume either BOTH or NEITHER of view_fn_ and rev_view_fn_ exist
return view_fn_ != nullptr;
}
const ViewFunc& view_fn() const {
TORCH_CHECK(
has_view_fn(), "Can only access the view function if it exists.");
return *view_fn_;
}
std::function<Variable(const Variable&)> rev_view_fn() const {
TORCH_CHECK(
has_view_fn(),
"Can only access the reverse view function if it exists.");
return rev_view_fn_;
}
/// The chain function can be used to build a new ViewInfo for a
/// differentiable view function. It will return a new view info that
/// accurately represents how "tensor" is a view of this instance's "base_".
/// The "base" and "tensor" are respectively the input and output of the
/// differentiable view function that happened. They are required to properly
/// set the optional view_fn_ when it is not provided. The "view_func", if
/// provided, should be a function that re-applies the view between
/// "base" and "tensor".
ViewInfo chain(
const Variable& base,
const Variable& tensor,
std::unique_ptr<ViewFunc> view_func = nullptr,
std::function<Variable(const Variable&)> rev_view_func = nullptr) const;
ViewInfo(
Variable base,
std::unique_ptr<ViewFunc> view_fn,
std::function<Variable(const Variable&)> rev_view_fn)
: base_(std::move(base)),
view_fn_(std::move(view_fn)),
rev_view_fn_(std::move(rev_view_fn)) {
TORCH_CHECK(base_.defined(), "base is undefined");
}
};
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// DifferentiableViewMeta
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// NOTE [ Autograd View Variables ]
///
/// Many operations return Variable that shares storage with an input Variable.
/// The returned Variable is called a **view** Variable on the input **base**
/// Variable.
///
/// In PyTorch, we have two types of views: differentiable views, and
/// non-differentiable views. In either type, to support proper version
/// checking, the base and view Variables must always share the same
/// version_counter.
///
///
/// Differentiable Views
/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// This class allows tracking of both forward and backward AD differentiable
/// views. These views can have different bases, because what counts as a
/// non-differentiable view is not the same for forward and backward mode AD.
///
/// Most functions are either both forward and backward differentiable views
/// (for example: view, select, narrow, transpose, etc.) or neither forward nor
/// backward differentiable views (for example: indices, values, eq, lt, etc.).
/// But there are also functions that are forward but not backward
/// differentiable views (only detach for now) or functions that are backward
/// but not forward differentiable views (only make_dual and unpack_dual for
/// now).
///
/// A concrete example of two views with different bases is as follows:
///
/// # Have:
/// # dual is a dual Tensor that is neither a forward nor a backward view
/// detached_dual = dual.detach()
/// view = detached_dual.view_as(dual)
/// # The forward base of view is dual
/// # The backward base of view is detached_dual
///
/// - Backward Mode View
/// Differentiable views are the view variables where you want gradients to flow
/// back to the base variables. Out-of-place operations on views are quite
/// straightforward, but in-place ones are very tricky. Even if the base
/// variable may not require grad when we create the view, we still need to
/// track the view relation because future in-place ops may require backpropagating
/// through it. For example, we need to support
///
/// (1) in-place operation on view, e.g.,
///
/// # Have:
/// # base.requires_grad = False
/// # var.requires_grad = True
/// base[1] = var # i.e., base[1].copy_(var)
/// torch.autograd.grad(base.sum(), var) <- should return an all ones
/// tensor
///
/// (2) in-place operation on base after view is created, e.g.,
///
/// # Have:
/// # base.requires_grad = False
/// # var.requires_grad = True
/// view = base[1]
/// base.copy_(var)
/// torch.autograd.grad(view.sum(), var) <- should return a tensor with
/// var[1] filled with all ones and
/// zeros everywhere else
///
/// - Forward Mode View
/// Forward differentiable views follow the same semantics as backward ones but
/// show up differently as they are computed along with the forward evaluation.
/// The hard examples above are thus very similar:
///
/// (1) in-place operation on view, e.g.,
///
/// # Have:
/// # base is a regular Tensor
/// # var is a dual Tensor whose tangent is all ones
/// base[1] = var # i.e., base[1].copy_(var)
/// # Now, base is a dual Tensor
/// _, fw_grad = fwAD.unpack_dual(base) <- fw_grad should be a tensor with
/// fw_grad[1] filled with all ones
/// and zeros everywhere else
///
/// (2) in-place operation on base after view is created, e.g.,
///
/// # Have:
/// # base is a regular Tensor
/// # var is a dual Tensor whose tangent is all ones
/// view = base[1]
/// base.copy_(var)
/// _, fw_grad = fwAD.unpack_dual(view) <- fw_grad should be an all ones
/// tensor
///
/// See Note [Forward Grad View/inplace] for more details on how we handle these
/// hard cases.
///
///
/// DifferentiableViewMeta is created to support gradient tracking of
/// such **in-place** operations. In particular,
/// + if an in-place op is done on base, the grad_fn field of the view may
/// become stale. So accesses should always go through grad_fn(), which
/// reconstructs an updated grad_fn if the version_counter has incremented.
/// All other fields are always valid.
/// + if an in-place op is done on view, in rebase_history() of view, which is
/// called after every in-place op in VariableType.cpp, the grad_fn of base
/// is updated.
/// + if a single autograd Node returns multiple differentiable views and any
/// output is modified by an in-place operation, the autograd engine will
/// rebuild an equivalent graph (corresponding to the view operations) without
/// using the provided grad_fn, where each output is treated as if it were
/// produced by a distinct view operation. This discards the original (e.g.,
/// user provided) grad_fn. If the provided grad_fn does more than the
/// backward of the view, then the DifferentiableViewMeta must be created
/// with creation_meta=CreationMeta::MULTI_OUTPUT_NODE to prevent the
/// engine from ignoring the provided grad_fn.
///
/// Interaction with GradMode:
/// The particular case that we consider here is:
///
/// # Have:
/// # base.requires_grad = True or False
/// with torch.no_grad():
/// view = base[1]
/// base.requires_grad_()
/// view.copy_(var)
/// torch.autograd.grad(base.sum(), var) <- what should it return?
///
/// Given that this particular code example is ambiguous and can easily be
/// replaced by moving both statements inside the no_grad block or both outside,
/// we explicitly forbid it. For now, it is only deprecated via a warning. This is
/// achieved by setting creation_meta=CreationMeta::NO_GRAD_MODE for all
/// differentiable views created in no_grad mode.
///
/// See Note [View + Inplace update for base tensor]
/// and Note [View + Inplace update for view tensor] for the details how
/// autograd handles inplace update with view ops.
///
/// Non-Differentiable Views
/// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// In certain cases, although function outputs share storage with inputs, they
/// will **never** require gradient history tracking. Instead of registering the
/// view relation via DifferentiableViewMeta in autograd, the views use the
/// usual AutogradMeta and just share the version counters with the base
/// Variables.
/// Such views include:
/// 1. Views created from .detach()
/// 2. Views that are non-differentiable by their nature.
/// E.g., `sparse_tensor.indices()` is an integral view on a (possibly)
/// floating point tensor.
/// See top of `derivatives.yaml` on how to specify that outputs of a
/// function are non-differentiable.
/// These are called non-differentiable views as the gradients do not flow
/// through the view relation.
///
/// Relevant logic for both differentiable and non-differentiable views is
/// implemented in make_variable_(non_)differentiable_view below, and
/// wrap_output of gen_variable_type.py.
/// NOTE [ View + Inplace detection ]
///
/// We want to detect views followed by in-place operations, as they are often
/// forbidden to ensure correctness of the computed gradients. But since we only
/// want to notify the user when both happen, we tag the DifferentiableViewMeta
/// when the view is created via the `make_variable_*_view()` functions. This tag
/// is then checked by the `check_inplace()` function from `VariableTypeUtils.h`,
/// which should be called before every in-place operation. To detect cases where
/// other views are modified and this one is rebased by side effect, we also
/// check in `VariableHooks::grad_fn()`.
/// Flag that gives more information about when this view was created:
/// - IN_CUSTOM_FUNCTION should be set when a view created inside a custom
/// autograd Function is returned.
/// - NO_GRAD_MODE should be set when a view is created while GradMode is
/// disabled.
/// - MULTI_OUTPUT_NODE should be set when a Node created by codegen code
/// returns multiple differentiable views.
/// - INFERENCE_MODE should be set when a view of a normal tensor is created in
/// InferenceMode.
/// - DEFAULT is for all other cases
enum class CreationMeta : uint8_t {
DEFAULT,
IN_CUSTOM_FUNCTION,
MULTI_OUTPUT_NODE,
NO_GRAD_MODE,
INFERENCE_MODE
};
/// Handles correctly propagating CreationMeta when a new view is created from a
/// previous view. In general, we don't want the new view to be _less_
/// restrictive than the previous view (it's okay to be _more_ restrictive). A
/// CreationMeta value of DEFAULT is currently the least restrictive, as the
/// behavior for all other CreationMeta values is to error out for in-place ops.
/// A CreationMeta value of INFERENCE_MODE is currently the most restrictive, so
/// it takes precedence in propagation. If this changes, the logic here will
/// need to be updated to properly handle the new semantics.
inline CreationMeta propagate_creation_meta(
CreationMeta prev_view_creation_meta,
CreationMeta new_view_creation_meta) {
return (new_view_creation_meta == CreationMeta::DEFAULT)
? prev_view_creation_meta
: (prev_view_creation_meta == CreationMeta::INFERENCE_MODE
? prev_view_creation_meta
: new_view_creation_meta);
}
/// Unified function to handle error checking when rebase happens
/// indirect=true means that the caller is not doing the inplace, but the
/// inplace happened somewhere else.
TORCH_API void handle_view_on_rebase(
DifferentiableViewMeta* diff_view_meta,
bool indirect = false);
struct TORCH_API DifferentiableViewMeta : public AutogradMeta {
private:
/// Information about the views
std::optional<ViewInfo> backward_info_;
std::optional<ViewInfo> forward_info_;
// Optimization to reduce the number of ViewInfo we create.
// In the (very common) case where backward_info_ == forward_info_, we only
// populate backward_info_ (that should be used as both the forward and
// backward view information) and set shared_view_info_ = true. Invariants:
// - If shared_view_info_ is false, there are no special constraints on
// backward_info_ and forward_info_
// - If shared_view_info_ is true, we must have:
// - backward_info_.has_value() == true
// - forward_info_.has_value() == false
bool shared_view_info_;
/// The two following fields are extra information that we track to ensure
/// that any operation on this backward view is valid.
/// The value of the version_counter at the time grad_fn was created. The
/// grad_fn field is stale if attr_version_ !=
/// version_counter.current_version().
uint32_t attr_version_;
CreationMeta creation_meta_;
public:
/// requires_grad is a backward AD field so we only use the view specific
/// logic for backward differentiable views
bool requires_grad() const override {
return requires_grad_ || grad_fn_ ||
(has_bw_view() && get_backward_view().base_.requires_grad());
}
bool shared_view_info() const {
return shared_view_info_;
}
bool has_bw_view() const {
return backward_info_.has_value();
}
const ViewInfo& get_backward_view() const {
TORCH_CHECK(
has_bw_view(), "backward view info can only exist for backward views.");
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
return backward_info_.value();
}
uint32_t get_attr_version() const {
TORCH_CHECK(
has_bw_view(), "attr_version can only exist for backward views.");
return attr_version_;
}
void set_attr_version(uint32_t new_attr_version) {
TORCH_CHECK(
has_bw_view(), "attr_version can only exist for backward views.");
attr_version_ = new_attr_version;
}
CreationMeta get_creation_meta() const {
TORCH_CHECK(
has_bw_view(), "creation_meta can only exist for backward views.");
return creation_meta_;
}
void set_creation_meta(CreationMeta new_creation_meta) {
TORCH_CHECK(
has_bw_view(), "creation_meta can only exist for backward views.");
creation_meta_ = new_creation_meta;
}
bool has_fw_view() const {
return shared_view_info_ || forward_info_.has_value();
}
const ViewInfo& get_forward_view() const {
TORCH_CHECK(
has_fw_view(), "forward view info can only exist for forward views.");
TORCH_CHECK(
!shared_view_info_ || has_bw_view(),
"forward view info can only exist for forward views.");
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
return shared_view_info_ ? backward_info_.value() : forward_info_.value();
}
DifferentiableViewMeta(
at::TensorImpl* self_impl,
std::optional<ViewInfo> backward_info,
std::optional<ViewInfo> forward_info,
bool shared_view_info,
CreationMeta creation_meta = CreationMeta::DEFAULT);
};
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Variable Implementation
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Factory Functions
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/// Creates a `Variable` that is a *view* of another (*base*) variable.
/// The `gradient_edge` is an optional (gradient_function, input_number) pair.
/// `is_differentiable` is a bool that specifies whether this view is
/// differentiable, i.e., whether the relation should be tracked by autograd.
/// See NOTE [ Autograd View Variables ] for details.
/// NOTE: `allow_tensor_metadata_change` is set to true by default, because
/// there are a lot of call sites to these factory functions that need to change
/// the variable's size or storage afterwards, and they don't expect the
/// original tensor (where the variable is created from) to be updated. Setting
/// `allow_tensor_metadata_change_` to false by default would unnecessarily
/// prevent those changes from happening and is undesirable.
// See NOTE [ Autograd View Variables ] for details.
// Differentiable view. Track history with DifferentiableViewMeta.
inline Variable make_variable_differentiable_view(
const at::Tensor& data,
std::optional<ViewInfo> backward_info,
std::optional<ViewInfo> forward_info,
bool shared_view_info,
CreationMeta creation_meta,
bool allow_tensor_metadata_change = true) {
if (data.defined()) {
TORCH_CHECK(
data.getIntrusivePtr()->autograd_meta() == nullptr,
"Attempted to make a tensor into a differentiable view, but the "
"tensor already had autograd metadata associated with it. If you are "
"using a __torch_dispatch__ mode, the most common cause for this "
"problem is that you used torch.overrides.enable_reentrant_dispatch() "
"improperly; tensors created within the extent of reentrant dispatch "
"MUST NOT be directly returned from __torch_dispatch__; instead, they "
"must be wrapped into fresh tensors that serve as the output. If you "
"are not using wrappers, you probably don't need reentrant dispatch. "
"If this doesn't seem applicable, please file a bug to PyTorch.");
at::TensorImpl* data_impl = data.unsafeGetTensorImpl();
data_impl->set_allow_tensor_metadata_change(allow_tensor_metadata_change);
data_impl->set_autograd_meta(std::make_unique<DifferentiableViewMeta>(
data_impl,
std::move(backward_info),
std::move(forward_info),
shared_view_info,
creation_meta));
return data;
}
return Variable();
}
// See NOTE [ Autograd View Variables ] for details.
// Non-differentiable view. Just share version counter.
inline Variable make_variable_non_differentiable_view(
const Variable& base,
const at::Tensor& data,
bool allow_tensor_metadata_change = true) {
if (data.defined()) {
// Currently all non-differentiable view ops (detach/_indices/_values)
// share the same TensorImpl as their base Tensor. Thus a new TensorImpl
// allocation here is required.
auto data_impl_copy = data.getIntrusivePtr()->shallow_copy_and_detach(
/*version_counter=*/impl::version_counter(base),
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
data_impl_copy->set_autograd_meta(nullptr);
return Variable(data_impl_copy);
}
return Variable();
}
/// Creates a `Variable` from the given `Tensor`, copying its underlying
/// `TensorImpl`. `requires_grad` should be set only for leaves, and determines
/// whether the `Variable` will accumulate gradients. NOTE: `data` must *not* be
/// a `Variable` already. Its dynamic type *must* be `Tensor`.
///
/// TODO: Eliminate this function as much as possible, as it can be expressed
/// more clearly as detach() or a no-op in most call sites (especially when
/// there is only one use of the variable).
inline Variable make_variable(
at::Tensor data,
bool requires_grad = false,
bool allow_tensor_metadata_change = true) {
if (data.defined()) {
if (data.getIntrusivePtr().use_count() == 1 &&
data.getIntrusivePtr()->unique_version()) {
auto data_impl = data.unsafeReleaseIntrusivePtr();
data_impl->set_allow_tensor_metadata_change(allow_tensor_metadata_change);
if (requires_grad) {
data_impl->set_autograd_meta(
std::make_unique<AutogradMeta>(data_impl.get(), requires_grad));
} else {
data_impl->set_autograd_meta(nullptr);
}
return Variable(std::move(data_impl));
} else {
auto data_impl_copy = data.getIntrusivePtr()->shallow_copy_and_detach(
/*version_counter=*/0,
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
if (requires_grad) {
data_impl_copy->set_autograd_meta(std::make_unique<AutogradMeta>(
data_impl_copy.get(), requires_grad));
} else {
data_impl_copy->set_autograd_meta(nullptr);
}
return Variable(std::move(data_impl_copy));
}
}
return Variable();
}
/// Creates a `Variable` from the given `Tensor`, copying its underlying
/// `TensorImpl`. `gradient_edge` should be a (function, input_nr) pair
/// specifying the function in the autograd graph, and which particular input of
/// that function this variable is connected to.
inline Variable make_variable(
const at::Tensor& data,
Edge gradient_edge,
bool allow_tensor_metadata_change = true) {
if (data.defined()) {
auto data_impl_copy = data.getIntrusivePtr()->shallow_copy_and_detach(
/*version_counter=*/0,
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
data_impl_copy->set_autograd_meta(std::make_unique<AutogradMeta>(
data_impl_copy.get(), false, std::move(gradient_edge)));
return Variable(data_impl_copy);
}
return Variable();
}
struct VariableHooks final : at::impl::VariableHooksInterface {
at::TensorBase tensor_data(const at::TensorBase&) const override;
at::TensorBase variable_data(const at::TensorBase&) const override;
const std::shared_ptr<torch::autograd::Node>& grad_fn(
const at::TensorBase&) const override;
unsigned _register_hook(
const at::TensorBase&,
std::function<at::TensorBase(const at::TensorBase&)> hook) const override;
void remove_hook(const at::TensorBase&, unsigned pos) const override;
bool is_view(const at::TensorBase&) const override;
const at::TensorBase& base(const at::TensorBase&) const override;
const std::string& name(const at::TensorBase&) const override;
bool is_leaf(const at::TensorBase&) const override;
int64_t output_nr(const at::TensorBase&) const override;
void set_data(const at::TensorBase& self, const at::TensorBase& new_data)
const override;
at::TensorBase data(const at::TensorBase& self) const override;
int64_t _version(const at::TensorBase& self) const override;
void retain_grad(const at::TensorBase& self) const override;
bool retains_grad(const at::TensorBase& self) const override;
void _backward(
const at::Tensor& self,
at::TensorList inputs,
const std::optional<at::Tensor>& gradient,
std::optional<bool> keep_graph,
bool create_graph) const override;
void requires_grad_(const at::TensorBase& self, bool _requires_grad)
const override;
void basic_autograd_not_implemented_fallback(
const c10::OperatorHandle& op,
c10::DispatchKeySet dispatch_keys,
torch::jit::Stack* stack) const override;
};
namespace utils {
TORCH_API bool has_same_meta(const Variable& base, const Variable& other);
} // namespace utils
} // namespace torch::autograd
#endif /* DOXYGEN_SHOULD_SKIP_THIS */
```
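The semantics spelled out in NOTE [ Autograd View Variables ] are easiest to check from Python. Below is a minimal sketch (not part of the header) of backward-mode case (1), where an in-place write into a view routes gradients back to the source tensor, followed by the forbidden no_grad-view pattern; tensor names are illustrative, and recent PyTorch versions reject the second pattern while older ones may only warn.

```python
import torch

# Case (1): in-place op on a differentiable view of a base that does not
# require grad. Gradients still flow back to `var` through the view.
base = torch.zeros(4)                        # base.requires_grad == False
var = torch.ones(2, requires_grad=True)
base[1:3] = var                              # i.e. base[1:3].copy_(var)
(grad,) = torch.autograd.grad(base.sum(), var)
print(grad)                                  # tensor([1., 1.])

# Interaction with GradMode: a view created under no_grad that is later
# modified in-place once the base requires grad (CreationMeta::NO_GRAD_MODE).
base = torch.zeros(4)
with torch.no_grad():
    view = base[1]
base.requires_grad_()
try:
    view.copy_(var[0])
except RuntimeError as err:                  # raised on recent PyTorch
    print("rejected:", err)
```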
|
=========================================================================================================================================
SOURCE CODE FILE: variable_info.h
LINES: 1
SIZE: 0.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\autograd\variable_info.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/variable.h>
namespace torch::autograd {
struct TORCH_API VariableInfo {
explicit VariableInfo();
explicit VariableInfo(const Variable& var, bool use_zeros_like = false);
Variable zeros(at::OptionalDeviceGuard& device_guard) const;
at::Layout layout = at::Layout::Strided;
at::Device device = at::kCPU;
at::ScalarType scalar_type = at::kFloat;
std::vector<c10::SymInt> size;
bool requires_grad;
bool is_empty;
// needed for e.g. NJTs since they only support zeros_like()
std::optional<Variable> the_var;
};
} // namespace torch::autograd
```
|
=============================================================================================================================
SOURCE CODE FILE: copy_utils.h
LINES: 1
SIZE: 1.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\copy_utils.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Types.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils.h>
#include <functional>
#include <vector>
typedef std::function<void(PyObject*, PyObject*, bool)> THPCopyFunction;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct THPCopyInfo {
PyTypeObject* srcType; // Python type of src tensor/storage
THPCopyFunction copy; // copy function
bool non_blocking; // true if the copy implements a 'non_blocking' copy
bool broadcast; // true if the copy implements a broadcast copy
};
typedef std::vector<THPCopyInfo> THPCopyList;
inline bool tryTHPCopy(
const THPCopyList& v,
PyObject* dst,
PyObject* src,
bool non_blocking,
bool broadcast) {
for (auto& i : v) {
if (i.non_blocking == non_blocking &&
PyType_IsSubtype(Py_TYPE(src), i.srcType)) {
(i.copy)(dst, src, broadcast);
return true;
}
}
return false;
}
inline bool THPCopy(
const THPCopyList& v,
PyObject* dst,
PyObject* src,
bool non_blocking,
bool broadcast) {
// NOLINTNEXTLINE(bugprone-branch-clone)
if (tryTHPCopy(v, dst, src, non_blocking, broadcast)) {
return true;
} else if (non_blocking && tryTHPCopy(v, dst, src, false, broadcast)) {
return true;
}
THPUtils_setError(
"copy from %s to %s isn't implemented",
THPUtils_typename(src),
THPUtils_typename(dst));
return false;
}
```
|
==============================================================================================================================================
SOURCE CODE FILE: CUDAPluggableAllocator.h
LINES: 1
SIZE: 6.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\CUDAPluggableAllocator.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/Allocator.h>
#include <c10/cuda/CUDAGraphsC10Utils.h>
#include <c10/cuda/CUDAMacros.h>
#include <c10/cuda/CUDAStream.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <mutex>
namespace torch::cuda::CUDAPluggableAllocator {
using MallocFuncType = void*(size_t, int, cudaStream_t);
using FreeFuncType = void(void*, size_t, int, cudaStream_t);
// A CUDAPluggableAllocatorDeleterContext object is used as the `ctx`
// argument for DataPtr. We need context because a user can use
// multiple allocators in the same PyTorch program, and
// the allocators can have different free functions, such as:
// free, cudaFree, cudaFreeAsync, ncclMemFree etc.
struct TORCH_CUDA_CPP_API CUDAPluggableAllocatorDeleterContext {
explicit CUDAPluggableAllocatorDeleterContext(
std::function<FreeFuncType> free_fn,
void* data,
size_t size,
int device,
cudaStream_t stream);
void free();
private:
std::function<FreeFuncType> free_fn_;
void* data_;
size_t size_;
int device_;
cudaStream_t stream_{};
};
#if defined(TORCH_HIP_VERSION)
using streamType = c10::hip::HIPStream;
#else
using streamType = c10::cuda::CUDAStream;
#endif
TORCH_CUDA_CPP_API std::shared_ptr<
c10::cuda::CUDACachingAllocator::CUDAAllocator>
getCurrentAllocator();
TORCH_CUDA_CPP_API std::shared_ptr<
c10::cuda::CUDACachingAllocator::CUDAAllocator>
createCustomAllocator(
std::function<MallocFuncType> alloc_fn,
std::function<FreeFuncType> free_fn);
TORCH_CUDA_CPP_API void changeCurrentAllocator(
const std::shared_ptr<c10::cuda::CUDACachingAllocator::CUDAAllocator>&
allocator);
struct _AllocationMetadata {
_AllocationMetadata();
_AllocationMetadata(
size_t size,
c10::DeviceIndex device_idx,
cudaStream_t stream);
size_t size;
c10::DeviceIndex device_idx;
cudaStream_t stream{};
};
struct TORCH_CUDA_CPP_API CUDAPluggableAllocator
: public c10::cuda::CUDACachingAllocator::CUDAAllocator {
CUDAPluggableAllocator(
std::function<MallocFuncType> alloc_fn,
std::function<FreeFuncType> free_fn);
CUDAPluggableAllocator(CUDAPluggableAllocator& other);
CUDAPluggableAllocator(CUDAPluggableAllocator&& other) = delete;
CUDAPluggableAllocator& operator=(const CUDAPluggableAllocator& other) =
delete;
CUDAPluggableAllocator& operator=(CUDAPluggableAllocator&& other) = delete;
~CUDAPluggableAllocator() override = default;
void set_init_fn(std::function<void(int)> init_fn);
void set_reset_fn(std::function<void()> reset_fn);
void set_memory_fraction_fn(
std::function<void(double, int)> memory_fraction_fn);
void set_base_alloc_fn(std::function<void*(void*, size_t*)> base_alloc_fn);
void set_record_stream_fn(
std::function<void(void* ptr, cudaStream_t stream)> record_stream_fn);
void set_begin_allocate_to_pool(
std::function<
void(int, c10::cuda::MempoolId_t, std::function<bool(cudaStream_t)>)>
capture_begin_fn);
void set_end_allocate_to_pool_fn(
std::function<void(int, c10::cuda::MempoolId_t)> capture_about_to_end_fn);
void set_release_pool(
std::function<void(int, c10::cuda::MempoolId_t)> capture_destroy_fn);
void* malloc(size_t size, c10::DeviceIndex device, cudaStream_t stream);
c10::DataPtr allocate(size_t size) override;
c10::DeleterFnPtr raw_deleter() const override;
void* raw_alloc(size_t nbytes) override;
void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) override;
void raw_delete(void* ptr) override;
void init(int device_count) override;
bool initialized() override;
double getMemoryFraction(c10::DeviceIndex device) override;
void setMemoryFraction(double fraction, c10::DeviceIndex device) override;
void emptyCache() override;
void enable(bool) override {}
bool isEnabled() const override {
return true;
}
void cacheInfo(c10::DeviceIndex device, size_t* largestBlock) override;
void* getBaseAllocation(void* ptr, size_t* size) override;
void recordStream(const c10::DataPtr&, streamType stream) override;
c10::CachingDeviceAllocator::DeviceStats getDeviceStats(
c10::DeviceIndex device) override;
void resetAccumulatedStats(c10::DeviceIndex device) override;
void resetPeakStats(c10::DeviceIndex device) override;
c10::cuda::CUDACachingAllocator::SnapshotInfo snapshot() override;
void beginAllocateToPool(
c10::DeviceIndex device,
c10::cuda::MempoolId_t mempool_id,
std::function<bool(cudaStream_t)>) override;
void endAllocateToPool(
c10::DeviceIndex device,
c10::cuda::MempoolId_t mempool_id) override;
void releasePool(c10::DeviceIndex device, c10::cuda::MempoolId_t mempool_id)
override;
std::shared_ptr<void> getIpcDevPtr(std::string handle) override;
c10::cuda::CUDACachingAllocator::ShareableHandle shareIpcHandle(
void*) override;
void recordHistory(
bool enabled,
c10::cuda::CUDACachingAllocator::CreateContextFn context_recorder,
size_t alloc_trace_max_entries,
c10::cuda::CUDACachingAllocator::RecordContext when) override;
void attachOutOfMemoryObserver(
c10::cuda::CUDACachingAllocator::OutOfMemoryObserver observer) override;
void attachAllocatorTraceTracker(
c10::cuda::CUDACachingAllocator::AllocatorTraceTracker tracker) override;
std::shared_ptr<c10::cuda::CUDACachingAllocator::AllocatorState>
getCheckpointState(c10::DeviceIndex device, at::cuda::MempoolId_t id)
override;
c10::cuda::CUDACachingAllocator::CheckpointDelta setCheckpointPoolState(
c10::DeviceIndex device,
std::shared_ptr<c10::cuda::CUDACachingAllocator::AllocatorState> pps)
override;
void enablePeerAccess(c10::DeviceIndex dev, c10::DeviceIndex dev_to_access)
override;
cudaError_t memcpyAsync(
void* dst,
int dstDevice,
const void* src,
int srcDevice,
size_t count,
cudaStream_t stream,
bool p2p_enabled) override;
std::string name() override;
void copy_data(void* dest, const void* src, std::size_t count) const final;
protected:
std::function<MallocFuncType> alloc_fn_;
std::function<FreeFuncType> free_fn_;
std::function<void(int)> init_fn_;
std::function<void()> reset_fn_;
std::function<void(double, int)> memory_fraction_fn_;
std::function<void*(void*, size_t*)> base_alloc_fn_;
std::function<void(void* ptr, cudaStream_t stream)> record_stream_fn_;
std::function<
void(int, c10::cuda::MempoolId_t, std::function<bool(cudaStream_t)>)>
begin_allocate_to_pool_fn_;
std::function<void(int, c10::cuda::MempoolId_t)> end_allocate_to_pool_fn_;
std::function<void(int, c10::cuda::MempoolId_t)> relase_pool_fn_;
std::mutex allocator_mutex_;
// We do the bookkeeping here in order to simplify custom allocators
std::unordered_map<void*, _AllocationMetadata> allocation_metadata_;
bool initialized_ = false;
};
} // namespace torch::cuda::CUDAPluggableAllocator
```
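For reference, this pluggable allocator is normally driven from the Python side. A hedged sketch, assuming a shared library `./alloc.so` that exports `my_malloc` / `my_free` with the `MallocFuncType` / `FreeFuncType` signatures declared above (library path and symbol names are placeholders):

```python
import torch

# Assumes ./alloc.so exports:
#   void* my_malloc(size_t size, int device, cudaStream_t stream);
#   void  my_free(void* ptr, size_t size, int device, cudaStream_t stream);
new_alloc = torch.cuda.memory.CUDAPluggableAllocator(
    "./alloc.so", "my_malloc", "my_free"
)

# Swap in the custom allocator; this must happen before the default caching
# allocator has serviced any allocation.
torch.cuda.memory.change_current_allocator(new_alloc)

x = torch.empty(1024, device="cuda")  # now serviced by my_malloc
```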
|
=============================================================================================================================
SOURCE CODE FILE: Event.h
LINES: 1
SIZE: 0.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\Event.h
ENCODING: utf-8
```h
#ifndef THCP_EVENT_INC
#define THCP_EVENT_INC
#include <ATen/cuda/CUDAEvent.h>
#include <torch/csrc/Event.h>
#include <torch/csrc/python_headers.h>
struct THCPEvent : THPEvent {
at::cuda::CUDAEvent cuda_event;
};
extern PyObject* THCPEventClass;
void THCPEvent_init(PyObject* module);
inline bool THCPEvent_Check(PyObject* obj) {
return THCPEventClass && PyObject_IsInstance(obj, THCPEventClass);
}
#endif // THCP_EVENT_INC
```
|
===============================================================================================================================
SOURCE CODE FILE: GdsFile.h
LINES: 1
SIZE: 0.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\GdsFile.h
ENCODING: utf-8
```h
#ifndef THCP_GDSFILE_INC
#define THCP_GDSFILE_INC
#include <torch/csrc/python_headers.h>
void initGdsBindings(PyObject* module);
#endif // THCP_GDSFILE_INC
```
|
==============================================================================================================================
SOURCE CODE FILE: Module.h
LINES: 1
SIZE: 0.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\Module.h
ENCODING: utf-8
```h
#ifndef THCP_CUDA_MODULE_INC
#define THCP_CUDA_MODULE_INC
#include <torch/csrc/utils/pythoncapi_compat.h>
PyObject* THCPModule_getDevice_wrap(PyObject* self);
PyObject* THCPModule_setDevice_wrap(PyObject* self, PyObject* arg);
PyObject* THCPModule_getDeviceName_wrap(PyObject* self, PyObject* arg);
PyObject* THCPModule_getDriverVersion(PyObject* self);
PyObject* THCPModule_isDriverSufficient(PyObject* self);
PyObject* THCPModule_getCurrentBlasHandle_wrap(PyObject* self);
#endif
```
|
==============================================================================================================================
SOURCE CODE FILE: Stream.h
LINES: 1
SIZE: 0.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\Stream.h
ENCODING: utf-8
```h
#ifndef THCP_STREAM_INC
#define THCP_STREAM_INC
#include <c10/cuda/CUDAStream.h>
#include <torch/csrc/Stream.h>
#include <torch/csrc/python_headers.h>
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct THCPStream : THPStream {
at::cuda::CUDAStream cuda_stream;
};
extern PyObject* THCPStreamClass;
void THCPStream_init(PyObject* module);
inline bool THCPStream_Check(PyObject* obj) {
return THCPStreamClass && PyObject_IsInstance(obj, THCPStreamClass);
}
#endif // THCP_STREAM_INC
```
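THCPStream and THCPEvent (above and in Event.h) back `torch.cuda.Stream` and `torch.cuda.Event` on the Python side. A small usage sketch, assuming a CUDA device is available:

```python
import torch

s = torch.cuda.Stream()                       # backed by a THCPStream
start = torch.cuda.Event(enable_timing=True)  # backed by a THCPEvent
end = torch.cuda.Event(enable_timing=True)

with torch.cuda.stream(s):                    # enqueue work on the side stream
    start.record()
    y = torch.randn(1 << 20, device="cuda").sin()
    end.record()

end.synchronize()                             # wait for the recorded work
print(start.elapsed_time(end), "ms")
```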
|
============================================================================================================================
SOURCE CODE FILE: THCP.h
LINES: 1
SIZE: 0.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\THCP.h
ENCODING: utf-8
```h
#ifndef THCP_H
#define THCP_H
#include <torch/csrc/THP.h>
#include <torch/csrc/cuda/Event.h>
#include <torch/csrc/cuda/Module.h>
#include <torch/csrc/cuda/Stream.h>
#include <torch/csrc/python_headers.h>
#endif
```
|
============================================================================================================================
SOURCE CODE FILE: comm.h
LINES: 1
SIZE: 1.53 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\comm.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDAContext.h>
#include <torch/csrc/Export.h>
#include <optional>
#include <cstddef>
#include <vector>
namespace torch::cuda {
using tensor_list2d = std::vector<std::vector<at::Tensor>>;
TORCH_CUDA_CU_API std::vector<at::Tensor>& broadcast_out(
const at::Tensor& tensor,
std::vector<at::Tensor>& out_tensors);
TORCH_CUDA_CU_API std::vector<at::Tensor> broadcast(
const at::Tensor& tensor,
at::IntArrayRef devices);
TORCH_CUDA_CU_API tensor_list2d broadcast_coalesced(
at::TensorList tensors,
at::IntArrayRef devices,
size_t buffer_size);
TORCH_CUDA_CU_API std::vector<at::Tensor>& scatter_out(
const at::Tensor& tensor,
std::vector<at::Tensor>& out_tensors,
int64_t dim = 0,
const std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>&
streams = std::nullopt);
TORCH_CUDA_CU_API std::vector<at::Tensor> scatter(
const at::Tensor& tensor,
at::IntArrayRef devices,
const std::optional<std::vector<int64_t>>& chunk_sizes = std::nullopt,
int64_t dim = 0,
const std::optional<std::vector<std::optional<at::cuda::CUDAStream>>>&
streams = std::nullopt);
TORCH_CUDA_CU_API at::Tensor& gather_out(
at::TensorList tensors,
at::Tensor& out_tensor,
int64_t dim);
TORCH_CUDA_CU_API at::Tensor gather(
at::TensorList tensors,
int64_t dim,
std::optional<int32_t> destination_index);
} // namespace torch::cuda
```
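These broadcast/scatter/gather primitives are exposed in Python as `torch.cuda.comm`. A sketch assuming at least two CUDA devices:

```python
import torch
import torch.cuda.comm as comm

t = torch.arange(6.0, device="cuda:0")
copies = comm.broadcast(t, devices=[0, 1])          # one replica per device
chunks = comm.scatter(t, devices=[0, 1], dim=0)     # split along dim 0
merged = comm.gather(chunks, dim=0, destination=0)  # gather back onto cuda:0
print([c.device for c in copies], merged.device)
```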
|
==================================================================================================================================
SOURCE CODE FILE: device_set.h
LINES: 1
SIZE: 0.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\device_set.h
ENCODING: utf-8
```h
#pragma once
#include <c10/cuda/CUDAMacros.h>
#include <bitset>
#include <cstddef>
namespace torch {
using device_set = std::bitset<C10_COMPILE_TIME_MAX_GPUS>;
} // namespace torch
```
|
=======================================================================================================================================
SOURCE CODE FILE: memory_snapshot.h
LINES: 1
SIZE: 0.78 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\memory_snapshot.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <cstdint>
#include <optional>
#include <string>
namespace torch::cuda {
// C++-only versions of these; for Python, use those defined in
// cuda/Module.cpp, which also record Python state.
TORCH_CUDA_CU_API void _record_memory_history(
bool enabled,
bool record_context = true,
int64_t trace_alloc_max_entries = 1,
bool trace_alloc_record_context = false,
bool record_cpp_context = false);
TORCH_CUDA_CU_API void _record_memory_history(
std::optional<std::string> enabled = "all",
std::optional<std::string> context = "all",
const std::string& stacks = "all",
size_t max_entries = SIZE_MAX);
TORCH_CUDA_CU_API std::string _memory_snapshot_pickled();
} // namespace torch::cuda
```
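These are the C++-only entry points; the usual workflow goes through the Python wrappers in `torch.cuda.memory`, which also record Python stacks. A sketch (the output file name is a placeholder):

```python
import torch

# Start recording allocator events, keeping up to `max_entries` recent entries.
torch.cuda.memory._record_memory_history(max_entries=100_000)

x = torch.randn(4096, 4096, device="cuda")
del x
torch.cuda.empty_cache()

# Dump a pickled snapshot; it can be inspected at https://pytorch.org/memory_viz
torch.cuda.memory._dump_snapshot("snapshot.pickle")
torch.cuda.memory._record_memory_history(enabled=None)  # stop recording
```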
|
============================================================================================================================
SOURCE CODE FILE: nccl.h
LINES: 1
SIZE: 5.95 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\nccl.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cstddef>
#include <optional>
#include <vector>
// NCCL BFloat16 is enabled only for CUDA 11+ and NCCL versions 2.10+, or for
// HIP 3.1+
#if defined(__CUDA_BF16_TYPES_EXIST__)
#define HAS_NCCL_BF16_DATATYPE \
((NCCL_MAJOR > 2) || (NCCL_MAJOR == 2) && (NCCL_MINOR >= 10))
#elif defined(USE_ROCM) && (TORCH_HIP_VERSION >= 301)
#define HAS_NCCL_BF16_DATATYPE 1
#else
#define HAS_NCCL_BF16_DATATYPE 0
#endif
namespace torch::cuda::nccl {
/* The following are copied from <nccl.h> and redefined in torch::cuda::nccl
* namespace */
/* pytorch should only use the following definition within pytorch scope */
/* Opaque handle to a communicator; this will be reinterpreted as ncclComm*
 * in nccl.cpp */
typedef void* ncclComm_t;
/** Redefine the NCCL unique ID in torch scope. This should be identical to the
 * native NCCL impl. */
#define NCCL_UNIQUE_ID_BYTES 128
typedef struct {
// NOLINTNEXTLINE(*array*)
char internal[NCCL_UNIQUE_ID_BYTES];
} ncclUniqueId;
/* Error type */
enum class ncclResult {
Success = 0,
UnhandledCudaError = 1,
SystemError = 2,
InternalError = 3,
InvalidArgument = 4,
InvalidUsage = 5,
RemoteError = 6,
InProgress = 7,
NumResults = 8
};
/* Reduction operation selector */
enum class ncclRedOp { Sum = 0, Prod = 1, Max = 2, Min = 3, NumOps = 4 };
/* Data types */
enum class ncclDataType {
Int8 = 0,
Char = 0,
Uint8 = 1,
Int32 = 2,
Int = 2,
Uint32 = 3,
Int64 = 4,
Uint64 = 5,
Float16 = 6,
Half = 6,
Float32 = 7,
Float = 7,
Float64 = 8,
Double = 8,
Bfloat16 = 9,
NumTypes = 10
};
// RAII helper class to manage NCCL group API and CUDA free mutex.
// The destructor is allowed to throw since this helper class only
// manages group and lock lifetimes.
struct TORCH_CUDA_CPP_API AutoNcclGroup {
AutoNcclGroup();
AutoNcclGroup(ncclComm_t comm, bool comm_nonblocking);
~AutoNcclGroup() noexcept(false);
ncclComm_t comm_;
bool comm_nonblocking_;
};
// NOTE: this is exposed only so that python_nccl.cpp can use some of these helpers.
// Don't use them outside of these files.
namespace detail {
TORCH_CUDA_CPP_API void throw_nccl_error(ncclResult status);
inline void NCCL_CHECK(ncclResult status) {
if (status != ncclResult::Success) {
throw_nccl_error(status);
}
}
TORCH_CUDA_CPP_API at::ArrayRef<ncclComm_t> get_communicators(
at::TensorList inputs);
TORCH_CUDA_CPP_API void check_inputs(
at::TensorList inputs,
at::TensorList outputs,
size_t input_multiplier,
size_t output_multiplier);
TORCH_CUDA_CPP_API void check_inputs(
at::TensorList inputs,
const at::Tensor& output,
int root,
size_t input_multiplier,
size_t output_multiplier);
} // namespace detail
using comm_list = std::vector<ncclComm_t>;
using stream_list = std::vector<std::optional<at::cuda::CUDAStream>>;
TORCH_CUDA_CPP_API std::uint64_t version();
TORCH_CUDA_CPP_API const char* version_suffix();
bool is_available(at::TensorList tensors);
TORCH_CUDA_CPP_API void get_unique_id(ncclUniqueId& id);
TORCH_CUDA_CPP_API ncclComm_t
comm_init_rank(int nranks, const ncclUniqueId& comm_id, int rank);
TORCH_CUDA_CPP_API void comm_destroy(ncclComm_t comm);
TORCH_CUDA_CPP_API void broadcast(
at::TensorList tensors,
const stream_list& streams = {},
const comm_list& user_comms = {});
size_t get_max_count();
TORCH_CUDA_CPP_API void reduce(
const std::vector<at::Tensor>& inputs,
at::Tensor& output,
int32_t root = 0,
int32_t op = static_cast<int>(ncclRedOp::Sum),
const stream_list& streams = {},
const comm_list& user_comms = {});
TORCH_CUDA_CPP_API void reduce(
std::vector<at::Tensor>& inputs,
int32_t root = 0,
int32_t op = static_cast<int>(ncclRedOp::Sum),
const stream_list& streams = {},
const comm_list& user_comms = {});
TORCH_CUDA_CPP_API void all_reduce(
const std::vector<at::Tensor>& inputs,
std::vector<at::Tensor>& outputs,
int32_t op = static_cast<int>(ncclRedOp::Sum),
const stream_list& streams = {},
const comm_list& user_comms = {});
TORCH_CUDA_CPP_API void reduce_scatter(
const std::vector<at::Tensor>& inputs,
std::vector<at::Tensor>& outputs,
int32_t op = static_cast<int>(ncclRedOp::Sum),
const stream_list& streams = {},
const comm_list& user_comms = {});
TORCH_CUDA_CPP_API void scatter(
const std::vector<at::Tensor>& inputs,
at::Tensor& outputs,
ncclComm_t comm,
at::cuda::CUDAStream& stream,
int32_t root = 0);
TORCH_CUDA_CPP_API void all_gather(
const std::vector<at::Tensor>& inputs,
std::vector<at::Tensor>& outputs,
const stream_list& streams = {},
const comm_list& user_comms = {});
TORCH_CUDA_CPP_API void gather(
const at::Tensor& inputs,
std::vector<at::Tensor>& outputs,
ncclComm_t comm,
at::cuda::CUDAStream& stream,
int32_t root = 0);
TORCH_CUDA_CPP_API void all2all_single_equal_split(
at::Tensor& input,
at::Tensor& output,
int size,
ncclComm_t comm,
at::cuda::CUDAStream& stream);
TORCH_CUDA_CPP_API void all2all_single_unequal_split(
void* sendbuff,
const size_t* sendcounts,
const size_t* senddispls,
void* recvbuff,
const size_t* recvcounts,
const size_t* recvdispls,
size_t size,
c10::ScalarType type,
ncclComm_t comm,
at::cuda::CUDAStream& stream);
TORCH_CUDA_CPP_API void all2all(
std::vector<at::Tensor>& outputTensors,
std::vector<at::Tensor>& inputTensors,
ncclComm_t _comm,
at::cuda::CUDAStream& stream);
TORCH_CUDA_CPP_API void send(
const at::Tensor& input,
ncclComm_t comm,
at::cuda::CUDAStream stream,
int dst);
TORCH_CUDA_CPP_API void recv(
at::Tensor& output,
ncclComm_t comm,
at::cuda::CUDAStream stream,
int src);
} // namespace torch::cuda::nccl
```
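The same collectives are reachable from Python through `torch.cuda.nccl`. A minimal all_reduce sketch, assuming an NCCL-enabled build and two CUDA devices:

```python
import torch
import torch.cuda.nccl as nccl

tensors = [torch.ones(4, device=f"cuda:{i}") * (i + 1) for i in range(2)]
if nccl.is_available(tensors):
    nccl.all_reduce(tensors)   # in-place sum across both devices
    print(tensors[0])          # tensor([3., 3., 3., 3.], device='cuda:0')
```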
|
===================================================================================================================================
SOURCE CODE FILE: python_comm.h
LINES: 1
SIZE: 0.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\python_comm.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/pythoncapi_compat.h>
namespace torch::cuda::python {
void initCommMethods(PyObject* module);
} // namespace torch::cuda::python
```
|
===================================================================================================================================
SOURCE CODE FILE: python_nccl.h
LINES: 1
SIZE: 0.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\cuda\python_nccl.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
PyObject* THCPModule_nccl_version(PyObject* self, PyObject* args);
PyObject* THCPModule_nccl_version_suffix(PyObject* self, PyObject* args);
PyObject* THCPModule_nccl_unique_id(PyObject* self, PyObject* args);
PyObject* THCPModule_nccl_init_rank(PyObject* self, PyObject* args);
PyObject* THCPModule_nccl_reduce(PyObject* self, PyObject* args);
PyObject* THCPModule_nccl_all_reduce(PyObject* self, PyObject* args);
PyObject* THCPModule_nccl_broadcast(PyObject* self, PyObject* args);
PyObject* THCPModule_nccl_all_gather(PyObject* self, PyObject* args);
PyObject* THCPModule_nccl_reduce_scatter(PyObject* self, PyObject* args);
```
|
=========================================================================================================================================================
SOURCE CODE FILE: container.h
LINES: 1
SIZE: 6.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\context\container.h
ENCODING: utf-8
```h
#pragma once
#include <mutex>
#include <unordered_map>
#include <torch/csrc/distributed/autograd/context/context.h>
namespace torch::distributed::autograd {
// Singleton class per worker which is responsible for storing the distributed
// autograd context for each autograd pass and also cleans up data for an
// autograd pass once it's done.
//
// Each autograd pass is assigned a unique autograd_context_id and all data for
// that pass (DistAutogradContext) is stored in this container indexed by the
// autograd_context_id. The autograd_context_id itself is a 64 bit globally
// unique id. The first 16 bits is the worker_id and the next 48 bits is an
// auto-incrementing id for each worker.
//
// This container is also responsible for maintaining a globally unique message
// id, which is used to associate send/recv autograd function pairs. The format
// is similar to the autograd_context_id where we have a 64 bit integer with
// the first 16 bits being the worker id and the next 48 bits auto-incrementing.
class TORCH_API DistAutogradContainer {
public:
explicit DistAutogradContainer(uint32_t num_shards);
// One time initialization of the container.
static DistAutogradContainer& init(int64_t worker_id);
// Retrieve the singleton instance of the container, ensures we have
// initialized the container.
static DistAutogradContainer& getInstance();
// Create a new context for a distributed autograd pass.
const ContextPtr newContext();
// Clean up resources for a given context_id once the autograd pass is done.
// Sends RPC to other workers this worker knows about, telling them to clean
// up their context as well. Throws an exception if the context_id does not
// exist.
void releaseContext(int64_t context_id);
// Releases an autograd context if it is present on this node. Also sends RPC
// to other workers this worker knows about, telling them to clean up their
// context. Does nothing if it is not present.
void releaseContextIfPresent(int64_t context_id);
// Checks if the passed in context_id is valid.
void isValidContext(int64_t context_id);
// Retrieve the autograd context for a given context_id.
ContextPtr retrieveContext(int64_t context_id);
// Retrieves the currently active autograd context for the current thread.
ContextPtr currentContext();
// Checks whether or not the current thread has a valid autograd context.
bool hasValidContext() const;
// Generate a new autograd_message_id for send/recv autograd functions.
int64_t newAutogradMessageId();
// Creates a new autograd context with the provided context_id. If a context
// already exists with the provided context_id, we just return it.
// This does not set the current context for the current thread.
ContextPtr getOrCreateContext(int64_t context_id);
// Retrieves the maximum possible autograd_context_id/autograd_message_id that
// can be generated by this worker.
int64_t getMaxId();
// Retrieves the worker ID for this node
rpc::worker_id_t getWorkerId() const;
// Can set current context id if there is no valid context yet
static void setCurrentContextId(int64_t contextId);
// Forcibly sets the thread local current context id. Should only be used in
// cases where you know what you're doing and need to override the thread
// local. Otherwise, use setCurrentContextId instead.
static void forceCurrentContextId(int64_t contextId);
// Clear current context id
void clearCurrentContext();
// Returns the number of autograd contexts in the container.
size_t numAutogradContexts() const;
// Returns the current thread local context id for this thread.
static int64_t currentContextId();
DistAutogradContainer() = delete;
~DistAutogradContainer() = default;
DistAutogradContainer(const DistAutogradContainer&) = delete;
DistAutogradContainer& operator=(const DistAutogradContainer&) = delete;
DistAutogradContainer(DistAutogradContainer&&) = delete;
DistAutogradContainer& operator=(DistAutogradContainer&&) = delete;
private:
// Number of shards for the map storing autograd contexts. We'd like this
// to be a power of 2, and we don't expect a value much higher than the
// number of cores to provide much benefit.
static constexpr uint32_t kNumDefaultShards = 128;
// Use cache line size for alignment.
static constexpr int kCacheLineSize = 64;
// Structure holding one shard of the sharded autograd context map with its
// associated lock. Align to cache line size to avoid contention between
// adjacent entries.
struct alignas(kCacheLineSize) ContextsShard {
// Lock for this shard.
mutable std::mutex lock;
// Map storing autograd contexts for this shard.
std::unordered_map<int64_t, ContextPtr> contexts;
};
static DistAutogradContainer& getInstanceInternal();
// Retrieve the shard for given context_id.
ContextsShard& getShard(int64_t context_id);
// Sends an RPC to the workers that have a context corresponding to passed in
// context_id. This function should be called with the lock.
void sendReleaseContextRpc(
const std::unordered_set<rpc::worker_id_t>& workerIds,
int64_t context_id);
// Erase context_id from the autograd context map, and reset the thread local
// current context id if it corresponds to the passed in context id. This
// function should be called with the lock.
void eraseContextIdAndReset(ContextsShard& shard, int64_t context_id);
// Compute the number of shards for the autograd_contexts_ map.
static uint32_t computeNumShards();
// Auto incrementing context id used to identify unique autograd passes.
// Initialized with the first 16 bits being the worker_id.
std::atomic<int64_t> next_context_id_;
// Unique id to identify a worker in the distributed setting.
int16_t worker_id_;
// Whether or not the container has been initialized appropriately.
bool initialized_;
// Sharded autograd context map.
std::vector<ContextsShard> autograd_contexts_;
// Number of shards for the sharded autograd_contexts_ map.
uint32_t num_shards_;
// Autograd message id to identify unique send/recv autograd function pairs.
std::atomic<int64_t> next_autograd_message_id_;
// Maximum allowed value for autograd_context_id or autograd_message_id.
int64_t max_id_;
};
} // namespace torch::distributed::autograd
```
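From Python, the container and its per-pass contexts are driven through `torch.distributed.autograd`. A sketch assuming RPC has already been initialized on this worker (e.g. via `rpc.init_rpc("worker0", rank=0, world_size=2)`) and that a peer named "worker1" exists; both names are placeholders:

```python
import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc

t = torch.ones(2, requires_grad=True)

# Entering the context calls into newContext(); exiting releases it
# (releaseContext) once the distributed backward pass is done.
with dist_autograd.context() as context_id:
    loss = rpc.rpc_sync("worker1", torch.add, args=(t, t)).sum()
    dist_autograd.backward(context_id, [loss])
    grads = dist_autograd.get_gradients(context_id)
    print(grads[t])            # tensor([2., 2.])
```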
|
=======================================================================================================================================================
SOURCE CODE FILE: context.h
LINES: 1
SIZE: 6.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\context\context.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <functional>
#include <ATen/core/Dict.h>
#include <torch/csrc/autograd/engine.h>
#include <torch/csrc/distributed/autograd/functions/recvrpc_backward.h>
#include <torch/csrc/distributed/autograd/functions/sendrpc_backward.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
namespace torch::distributed::autograd {
class RecvRpcBackward;
// DistAutogradContext which stores information for a single distributed
// autograd pass on a worker.
class TORCH_API DistAutogradContext {
public:
using GradCallback = std::function<bool(torch::Tensor&)>;
explicit DistAutogradContext(int64_t contextId);
~DistAutogradContext() = default;
// Retrieves the autograd context id for this context.
int64_t contextId() const;
// Records a 'send' autograd function for this context with the provided
// message id.
void addSendFunction(
const std::shared_ptr<SendRpcBackward>& func,
int64_t autograd_message_id);
// Records a 'recv' autograd function for this context with the provided
// message id.
void addRecvFunction(
std::shared_ptr<RecvRpcBackward>& func,
int64_t autograd_message_id);
// Given an autograd_message_id, retrieve the appropriate send function.
std::shared_ptr<SendRpcBackward> retrieveSendFunction(
int64_t autograd_message_id);
// Return all send functions for this context.
std::unordered_map<int64_t, std::shared_ptr<SendRpcBackward>> sendFunctions()
const;
// Return all recv functions for this context.
std::unordered_map<int64_t, std::shared_ptr<RecvRpcBackward>> recvFunctions()
const;
// Adds a future message recording an outstanding RPC.
void addOutstandingRpc(const c10::intrusive_ptr<rpc::JitFuture>& jitFuture);
// Returns all gradients.
const c10::Dict<torch::Tensor, torch::Tensor> getGradients() const;
// This function gives a mutable grad reference to the callback.
// If the callback returns true, it means the grad in the context
// needs to be updated.
void runGradCallbackForVariable(
const torch::autograd::Variable& variable,
const GradCallback& cb);
DistAutogradContext(const DistAutogradContext&) = delete;
DistAutogradContext& operator=(const DistAutogradContext&) = delete;
DistAutogradContext(DistAutogradContext&&) = delete;
DistAutogradContext& operator=(DistAutogradContext&&) = delete;
// records the workerID of a node that we sent an RPC to.
// workerIDs are added here when we attach a send function to this autograd
// context
void addKnownWorkerId(const rpc::worker_id_t workerId);
// Retrieves a set containing the known workerIds for this context
// These are the different workers that this context has sent RPCs to.
std::unordered_set<rpc::worker_id_t> getKnownWorkerIds() const;
private:
friend class BackwardPassCleanupGuard;
friend class DistEngine;
friend class RecvRpcBackward;
friend class DistAccumulateGradCaptureHook;
// Record that we would like to accumulate the provided gradient on the given
// variable.
void accumulateGrad(
const torch::autograd::Variable& variable,
const torch::Tensor& grad,
size_t num_expected_refs);
// Retrieve the GraphTask.
std::shared_ptr<torch::autograd::GraphTask> retrieveGraphTask();
// Set the appropriate graph task for the backward pass. Can be called only
// once.
void setGraphTask(std::shared_ptr<torch::autograd::GraphTask> graphTask);
// Resets the graph task to ensure we can run another distributed backward
// pass for the same autograd context.
void resetGraphTask();
// Waits for all outstanding RPCs for this context to finish and clears all
// outstanding rpcs held in this context. This should be called only once.
c10::intrusive_ptr<c10::ivalue::Future> clearAndWaitForOutstandingRpcsAsync();
void clearOutstandingRpcs();
// Record an event to mark the completion of gradient computation. These
// events will later help to properly synchronize gradients consumptions
// in getGradients(). We need these events because backward and
// optimizer.step are separate RPC calls, and will occur on different CUDA
// streams. Without synchronization, it is possible that gradients are
// consumed before they are ready.
void recordGradEvent(c10::Device device);
const int64_t contextId_;
// Set containing known worker IDs, used in cleaning up autograd context.
// Whenever a sendRpcBackward is attached to the autograd graph for this
// context, the destination is added here.
std::unordered_set<rpc::worker_id_t> knownWorkerIds_;
// Map from autograd_message_id to appropriate 'send' autograd function.
std::unordered_map<int64_t, std::shared_ptr<SendRpcBackward>>
sendAutogradFunctions_;
// Map from autograd_message_id to appropriate 'recv' autograd function.
std::unordered_map<int64_t, std::shared_ptr<RecvRpcBackward>>
recvAutogradFunctions_;
// Gradients accumulated in this context so far. The key is the variable on
// which the gradient needs to be accumulated and the value is the gradient
// that needs to be accumulated on that variable.
c10::Dict<torch::Tensor, torch::Tensor> accumulatedGrads_;
// See comments for recordGradEvent(c10::Device device);
std::unordered_map<c10::Device, c10::Event> gradReadyEvents_;
const c10::impl::VirtualGuardImpl impl_;
// The autograd GraphTask for the backward pass on this node for this context.
std::shared_ptr<torch::autograd::GraphTask> graphTask_;
// List of futures for RPCs initiated by this node to propagate gradients to
// other nodes. The distributed autograd engine on this node can return
// successfully only if all these futures are done and are successful.
std::vector<c10::intrusive_ptr<rpc::JitFuture>> outStandingRpcs_;
// Lock to protect concurrent modification of the context.
mutable std::mutex lock_;
};
using ContextPtr = std::shared_ptr<DistAutogradContext>;
// This class stores a shared_ptr to a DistAutogradContext instance in a
// thread local variable. The instance is given by the call site. The class
// doesn't know the current context. It's just a util class.
class TORCH_API ThreadLocalDistAutogradContext {
public:
// Store 'new_context' to the thread local variable maintained by this class.
explicit ThreadLocalDistAutogradContext(ContextPtr&& new_context);
~ThreadLocalDistAutogradContext();
// Retrieve the stored DistAutogradContext instance.
static ContextPtr getContextPtr();
private:
ContextPtr prev_context_ptr_;
};
} // namespace torch::distributed::autograd
```
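The header above only declares the interface; the following is a minimal, hypothetical usage sketch of the thread-local guard, not part of the PyTorch sources. The helper name `runWithContext` is invented, and the include path is assumed to be the context header's usual location.
```cpp
// Hypothetical sketch, not part of the original header.
#include <utility>

#include <torch/csrc/distributed/autograd/context/context.h>

using torch::distributed::autograd::ContextPtr;
using torch::distributed::autograd::ThreadLocalDistAutogradContext;

void runWithContext(ContextPtr ctx) {  // made-up helper name
  // Install `ctx` as the thread-local context for the duration of this scope.
  ThreadLocalDistAutogradContext guard(std::move(ctx));
  // Code running in this scope can look up the active context.
  ContextPtr active = ThreadLocalDistAutogradContext::getContextPtr();
  // ... perform work that needs the distributed autograd context ...
}  // the previously stored context (if any) is restored when `guard` is destroyed
```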
|
==================================================================================================================================================================
SOURCE CODE FILE: recvrpc_backward.h
LINES: 1
SIZE: 1.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\functions\recvrpc_backward.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/distributed/autograd/context/context.h>
#include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
namespace torch::distributed::autograd {
// Forward declarations.
class DistAutogradContext;
// As part of our distributed autograd implementation, whenever we receive an
// RPC from a node, we add a 'RecvRpcBackward' autograd function to the
// autograd graph. This is more or less a placeholder function that is used to
// pass gradients to the remote host during the backward pass. The inputs to the
// RPC function are the inputs to this autograd function.
class TORCH_API RecvRpcBackward : public torch::autograd::Node {
public:
explicit RecvRpcBackward(
const AutogradMetadata& autogradMetadata,
const std::shared_ptr<DistAutogradContext>& autogradContext,
rpc::worker_id_t fromWorkerId,
rpc::DeviceMap deviceMap);
torch::autograd::variable_list apply(
torch::autograd::variable_list&& grads) override;
private:
const AutogradMetadata autogradMetadata_;
// Hold a weak reference to the autograd context to avoid circular
// dependencies with the context (since it holds a reference to
// RecvRpcBackward).
std::weak_ptr<DistAutogradContext> autogradContext_;
// The worker id from which the RPC was received. During the backward pass,
// we need to propagate the gradients to this workerId.
rpc::worker_id_t fromWorkerId_;
// Device mapping for tensors sent over RPC.
const rpc::DeviceMap deviceMap_;
};
} // namespace torch::distributed::autograd
```
|
==================================================================================================================================================================
SOURCE CODE FILE: sendrpc_backward.h
LINES: 1
SIZE: 1.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\functions\sendrpc_backward.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/function.h>
namespace torch::distributed::autograd {
// As part of our distributed autograd implementation, whenever we send an RPC
// from one node to another, we add a 'SendRpcBackward' autograd function to the
// autograd graph. This is more or less a placeholder function that is used to
// kick off the autograd engine on the current worker during the backward pass.
// The
// edges for this autograd function are the inputs to the RPC method.
//
// During the backward pass, this function is queued for execution in the
// autograd engine which eventually runs the rest of the autograd graph.
struct TORCH_API SendRpcBackward : public torch::autograd::Node {
public:
torch::autograd::variable_list apply(
torch::autograd::variable_list&& inputs) override;
// SendRpcBackward is actually the root of an autograd graph on the local
// node. As a result, it doesn't receive any 'inputs', but rather the RPC
  // framework passes gradients over to this function to kick off local autograd
// computation.
void setGrads(const torch::autograd::variable_list& grads);
// Retrieve the grads for the function.
const torch::autograd::variable_list& getGrads() const;
private:
torch::autograd::variable_list grads_;
};
} // namespace torch::distributed::autograd
```
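As a complement to the comment above, here is a small, hypothetical sketch, not part of the PyTorch sources, of how the RPC layer is described to drive this root: gradients arriving over the wire are stored with setGrads() before the autograd engine executes the function. The helper name `feedRemoteGrads` is invented.
```cpp
// Hypothetical sketch, not part of the original header.
#include <torch/csrc/distributed/autograd/functions/sendrpc_backward.h>

using torch::autograd::variable_list;
using torch::distributed::autograd::SendRpcBackward;

void feedRemoteGrads(SendRpcBackward& sendFn, const variable_list& grads) {
  // SendRpcBackward has no autograd inputs of its own; the gradients received
  // over RPC are stashed here and handed to the engine when apply() runs.
  sendFn.setGrads(grads);
}
```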
|
======================================================================================================================================================================
SOURCE CODE FILE: autograd_metadata.h
LINES: 1
SIZE: 0.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\rpc_messages\autograd_metadata.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <cstdint>
namespace torch::distributed::autograd {
// This structure represents autograd metadata that we need to pass across
// different nodes when we call an RPC which needs autograd computation.
struct TORCH_API AutogradMetadata {
AutogradMetadata(int64_t autogradContextId, int64_t autogradMessageId);
// autogradContextId_ is a globally unique integer that identifies a
// particular distributed autograd pass.
int64_t autogradContextId;
// autogradMessageId_ is a globally unique integer that identifies a pair
// of send/recv autograd functions.
int64_t autogradMessageId;
};
} // namespace torch::distributed::autograd
```
|
=================================================================================================================================================================================
SOURCE CODE FILE: cleanup_autograd_context_req.h
LINES: 1
SIZE: 0.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\rpc_messages\cleanup_autograd_context_req.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
namespace torch::distributed::autograd {
// Used to request other workers to clean up their autograd context.
class TORCH_API CleanupAutogradContextReq : public rpc::RpcCommandBase {
public:
explicit CleanupAutogradContextReq(int64_t context_id);
// Serialization and deserialization methods.
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
static std::unique_ptr<CleanupAutogradContextReq> fromMessage(
const rpc::Message& message);
// Retrieve the context id we are cleaning up with this message.
int64_t getContextId();
private:
int64_t context_id_;
};
} // namespace torch::distributed::autograd
```
|
==================================================================================================================================================================================
SOURCE CODE FILE: cleanup_autograd_context_resp.h
LINES: 1
SIZE: 0.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\rpc_messages\cleanup_autograd_context_resp.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
namespace torch::distributed::autograd {
// Empty response for CleanupAutogradContextReq. Sent to acknowledge receipt of
// a CleanupAutogradContextReq.
class TORCH_API CleanupAutogradContextResp : public rpc::RpcCommandBase {
public:
CleanupAutogradContextResp() = default;
// Serialization and deserialization methods.
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
static std::unique_ptr<CleanupAutogradContextResp> fromMessage(
const rpc::Message& message);
};
} // namespace torch::distributed::autograd
```
|
============================================================================================================================================================================
SOURCE CODE FILE: propagate_gradients_req.h
LINES: 1
SIZE: 1.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\rpc_messages\propagate_gradients_req.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <vector>
namespace torch::distributed::autograd {
// Used to propagate gradients from one node to another during a distributed
// backwards pass. This RPC call is invoked when we hit a `recv` autograd
// function during backward pass execution.
class TORCH_API PropagateGradientsReq : public rpc::RpcCommandBase {
public:
PropagateGradientsReq(
const AutogradMetadata& autogradMetadata,
std::vector<torch::autograd::Variable> grads,
bool retainGraph = false);
const AutogradMetadata& getAutogradMetadata();
const std::vector<torch::autograd::Variable>& getGrads();
// Serialization and deserialization methods.
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
static std::unique_ptr<PropagateGradientsReq> fromMessage(
const rpc::Message& message);
// Whether or not to retain the autograd graph.
bool retainGraph();
private:
AutogradMetadata autogradMetadata_;
std::vector<torch::autograd::Variable> grads_;
bool retainGraph_;
};
} // namespace torch::distributed::autograd
```
|
=============================================================================================================================================================================
SOURCE CODE FILE: propagate_gradients_resp.h
LINES: 1
SIZE: 0.76 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\rpc_messages\propagate_gradients_resp.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
namespace torch::distributed::autograd {
// Response for the PropagateGradients call. Currently, this class is mostly
// just a placeholder and sends an empty message over the wire. The purpose of
// this RPC command is to indicate whether or not the PropagateGradientsReq
// call was successful.
class TORCH_API PropagateGradientsResp : public rpc::RpcCommandBase {
public:
PropagateGradientsResp() = default;
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
static std::unique_ptr<PropagateGradientsResp> fromMessage(
const rpc::Message& message);
};
} // namespace torch::distributed::autograd
```
|
======================================================================================================================================================================
SOURCE CODE FILE: rpc_with_autograd.h
LINES: 1
SIZE: 3.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\rpc_messages\rpc_with_autograd.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/autograd/rpc_messages/autograd_metadata.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
namespace torch::distributed::autograd {
// Represents an RPC that includes autograd information. This class basically
// wraps another `RpcCommandBase` object which represents the actual RPC and has
// additional autograd information associated with that RPC.
class TORCH_API RpcWithAutograd final : public rpc::RpcCommandBase {
public:
// Used when we are sending an RPC over the wire.
RpcWithAutograd(
rpc::worker_id_t fromWorkerId,
rpc::MessageType messageType,
const AutogradMetadata& autogradMetadata,
c10::intrusive_ptr<rpc::Message> wrappedMessage,
rpc::DeviceMap deviceMap = {});
// Used when receiving an RPC over the wire.
RpcWithAutograd(
rpc::worker_id_t fromWorkerId,
rpc::MessageType messageType,
const AutogradMetadata& autogradMetadata,
std::unique_ptr<rpc::RpcCommandBase> wrappedRpc,
rpc::MessageType wrappedMessageType,
std::vector<torch::Tensor> tensors,
rpc::DeviceMap deviceMap = {});
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
static std::unique_ptr<RpcWithAutograd> fromMessage(
const rpc::Message& message);
// Retrieves tensors as part of this RPC, which need to be considered for
// autograd computations.
std::vector<torch::Tensor>& tensors();
const AutogradMetadata& autogradMetadata() const;
RpcCommandBase& wrappedRpc();
void setWrappedRpc(std::unique_ptr<RpcCommandBase> wrappedRpc);
std::unique_ptr<RpcCommandBase> moveWrappedRpc() &&;
// Message type of the wrapped RPC.
rpc::MessageType wrappedMessageType() const;
// Retrieve the worker id from which the RPC originated.
rpc::worker_id_t fromWorkerId() const;
// Retrieve the device map.
const rpc::DeviceMap& deviceMap();
private:
// WorkerId from which this RPC originated. This is necessary for knowing
// which worker we need to contact during the backward pass.
rpc::worker_id_t fromWorkerId_;
// Message type for this call.
rpc::MessageType messageType_;
AutogradMetadata autogradMetadata_;
  // Since wrappedMessage_ is destructively constructed from wrappedRpc_, only
  // one of the two is valid at any given time, and they serve different
  // purposes: wrappedRpc_ is used when constructing a received RpcWithAutograd,
  // while wrappedMessage_ is used when constructing one to send.
  // When a received RpcWithAutograd is built via fromMessage, wrappedRpc_ is
  // valid; when an outgoing RpcWithAutograd is built before toMessage, it is
  // nullptr.
std::unique_ptr<RpcCommandBase> wrappedRpc_;
// Serialized message representing wrappedRpc_. Used mostly as a cache to
// avoid serializing the request twice.
  // When a received RpcWithAutograd is built via fromMessage, wrappedMessage_
  // is nullptr; when an outgoing RpcWithAutograd is built before toMessage, it
  // is valid.
c10::intrusive_ptr<rpc::Message> wrappedMessage_;
// message type of the wrappedMessage, this is stored separately since
// wrappedMessage_ is not always guaranteed to be populated.
rpc::MessageType wrappedMessageType_;
// Tensors part of the wrappedRpc that need to be considered for autograd.
std::vector<torch::Tensor> tensors_;
// Device mapping for tensors that are sent across an RPC to another node.
rpc::DeviceMap deviceMap_;
};
} // namespace torch::distributed::autograd
```
|
===========================================================================================================================================================================
SOURCE CODE FILE: rpc_with_profiling_req.h
LINES: 1
SIZE: 2.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\rpc_messages\rpc_with_profiling_req.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/types.h>
namespace torch::distributed::autograd {
class TORCH_API RpcWithProfilingReq : public rpc::RpcCommandBase {
public:
// For sending RPCs, invoked when client is creating this RPC command.
RpcWithProfilingReq(
rpc::MessageType messageType,
c10::intrusive_ptr<rpc::Message> wrappedMessage,
torch::autograd::profiler::ProfilerConfig&& profilerConfig,
rpc::ProfilingId profilingKeyId);
// For receiving an RPC
// Used in fromMessage.
RpcWithProfilingReq(
rpc::MessageType messageType,
std::unique_ptr<rpc::RpcCommandBase> wrappedRpc,
rpc::MessageType wrappedMessageType,
std::vector<torch::Tensor> tensors,
torch::autograd::profiler::ProfilerConfig&& profilerConfig,
rpc::ProfilingId profilingKeyId);
// Convert this RPC Command to a Message that can be sent over the wire.
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
static std::unique_ptr<RpcWithProfilingReq> fromMessage(
const rpc::Message& message);
// Retrieve the profiling data that is associated with this command.
torch::autograd::profiler::ProfilerConfig getProfilingConfig() const;
// Retrieve the globally unique profiling ID corresponding to this command.
const rpc::ProfilingId& getProfilingId() const;
// Retrieve the original RPC which this ProfilingRPC wraps.
RpcCommandBase& wrappedRpc();
// Destructively move the wrapped RPC.
std::unique_ptr<RpcCommandBase> moveWrappedRpc() &&;
// Message type of the wrapped RPC
rpc::MessageType wrappedMessageType() const;
void setWrappedRpc(std::unique_ptr<RpcCommandBase> wrappedRpc);
private:
// message type
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const rpc::MessageType messageType_;
// wrapped message
c10::intrusive_ptr<rpc::Message> wrappedMessage_;
std::unique_ptr<RpcCommandBase> wrappedRpc_;
rpc::MessageType wrappedMessageType_;
std::vector<torch::Tensor> tensors_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const torch::autograd::profiler::ProfilerConfig profilerConfig_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const rpc::ProfilingId profilingKeyId_;
};
} // namespace torch::distributed::autograd
```
|
============================================================================================================================================================================
SOURCE CODE FILE: rpc_with_profiling_resp.h
LINES: 1
SIZE: 2.47 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\rpc_messages\rpc_with_profiling_resp.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/types.h>
namespace torch::distributed::autograd {
class TORCH_API RpcWithProfilingResp : public rpc::RpcCommandBase {
public:
// For sending RPCs over the wire
RpcWithProfilingResp(
rpc::MessageType messageType,
c10::intrusive_ptr<rpc::Message> wrappedMessage,
std::vector<torch::autograd::profiler::LegacyEvent> profiledEvents,
rpc::ProfilingId profilingId);
// For receiving RPCs. Used in from message when converting a message received
// over the wire.
RpcWithProfilingResp(
rpc::MessageType messageType,
std::unique_ptr<rpc::RpcCommandBase> wrappedRpc,
rpc::MessageType wrappedMessageType,
std::vector<torch::Tensor> tensors,
std::vector<torch::autograd::profiler::LegacyEvent> profiledEvents,
rpc::ProfilingId profilingId);
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
static std::unique_ptr<RpcWithProfilingResp> fromMessage(
const rpc::Message& message);
// Retrieve remote Events
std::vector<torch::autograd::profiler::LegacyEvent> getProfiledEvents() const;
// Retrieve the globally unique profiling ID corresponding to this command.
const rpc::ProfilingId& getProfilingId() const;
// Retrieve the original RPC which this ProfilingRPC wraps.
RpcCommandBase& wrappedRpc();
// Destructively move the wrapped RPC.
std::unique_ptr<RpcCommandBase> moveWrappedRpc() &&;
// Message type of the wrapped RPC
rpc::MessageType wrappedMessageType() const;
// Set the wrapped RPC for this RPC.
void setWrappedRpc(std::unique_ptr<RpcCommandBase> wrappedRpc);
private:
// message type
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const rpc::MessageType messageType_;
// wrapped message
c10::intrusive_ptr<rpc::Message> wrappedMessage_;
std::unique_ptr<RpcCommandBase> wrappedRpc_;
rpc::MessageType wrappedMessageType_;
std::vector<torch::Tensor> tensors_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::vector<torch::autograd::profiler::LegacyEvent> profiledEvents_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const rpc::ProfilingId profilingId_;
};
} // namespace torch::distributed::autograd
```
|
======================================================================================================================================================================
SOURCE CODE FILE: rref_backward_req.h
LINES: 1
SIZE: 1.20 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\rpc_messages\rref_backward_req.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/types.h>
namespace torch::distributed::autograd {
// Internal system RPC to invoke distributed backward pass on remote nodes when
// 'rref.backward()' is invoked.
class TORCH_API RRefBackwardReq : public rpc::RpcCommandBase {
public:
RRefBackwardReq(
const rpc::RRefId& rrefId,
int64_t autogradContextId,
bool retainGraph = false);
const rpc::RRefId& getRRefId() const;
int64_t getAutogradContextId() const;
bool retainGraph() const;
// Serialization and deserialization methods.
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
static std::unique_ptr<RRefBackwardReq> fromMessage(
const rpc::Message& message);
private:
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const rpc::RRefId rrefId_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const int64_t autogradContextId_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const bool retainGraph_;
};
} // namespace torch::distributed::autograd
```
|
=======================================================================================================================================================================
SOURCE CODE FILE: rref_backward_resp.h
LINES: 1
SIZE: 0.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\autograd\rpc_messages\rref_backward_resp.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
namespace torch::distributed::autograd {
// Response for the RRefBackwardReq.
class TORCH_API RRefBackwardResp : public rpc::RpcCommandBase {
public:
RRefBackwardResp() = default;
c10::intrusive_ptr<rpc::Message> toMessageImpl() && override;
static std::unique_ptr<RRefBackwardResp> fromMessage(
const rpc::Message& message);
};
} // namespace torch::distributed::autograd
```
|
===========================================================================================================================================================
SOURCE CODE FILE: CUDASymmetricMemory-inl.h
LINES: 1
SIZE: 14.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\c10d\CUDASymmetricMemory-inl.h
ENCODING: utf-8
```h
#pragma once
#include <atomic>
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900) && CUDART_VERSION >= 12010
#define NVCC_SUPPORTS_MULTICAST 1
#endif
#include <ATen/ATen.h>
#if !defined(USE_ROCM)
#include <cuda_bf16.h>
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
#include <cuda/atomic>
#endif
#endif
namespace c10d::symmetric_memory {
template <typename T>
__inline__ size_t get_alignment(T ptr_or_size) {
auto val = reinterpret_cast<uintptr_t>(ptr_or_size);
if (val % 16 == 0) {
return 16;
} else if (val % 8 == 0) {
return 8;
} else if (val % 4 == 0) {
return 4;
} else if (val % 2 == 0) {
return 2;
} else {
return 1;
}
}
template <>
__inline__ size_t get_alignment<size_t>(size_t size) {
return get_alignment(reinterpret_cast<void*>(size));
}
template <bool Value, class... Args>
inline constexpr bool dependent_bool_value = Value;
template <class... Args>
inline constexpr bool dependent_false = dependent_bool_value<false, Args...>;
template <std::memory_order Sem>
__device__ __forceinline__ uint32_t
cas(uint32_t* addr, uint32_t compare, uint32_t val) {
#if !defined(USE_ROCM) && defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
cuda::atomic_ref<uint32_t, cuda::thread_scope_system> ref(*addr);
ref.compare_exchange_strong(compare, val, cuda::std::memory_order(Sem));
return compare;
#else
CUDA_KERNEL_ASSERT(false);
return 0;
#endif
}
__device__ __forceinline__ void trap() {
#if defined(USE_ROCM)
assert(0);
#else
__trap();
#endif
}
__device__ __forceinline__ size_t global_timer_ns() {
#if defined(USE_ROCM)
CUDA_KERNEL_ASSERT(false);
return 0;
#else
size_t val;
asm volatile("mov.u64 %0, %globaltimer;" : "=l"(val) : : "memory");
return val;
#endif
}
constexpr size_t ns_per_ms = 1e6;
template <std::memory_order Sem>
__device__ __forceinline__ bool try_put_signal(
uint32_t* addr,
size_t timeout_ms) {
size_t deadline = global_timer_ns() + timeout_ms * ns_per_ms;
while (cas<Sem>(addr, 0, 1) != 0) {
if (timeout_ms != 0 && global_timer_ns() > deadline) {
return false;
}
}
return true;
}
template <std::memory_order Sem>
__device__ __forceinline__ bool try_wait_signal(
uint32_t* addr,
size_t timeout_ms) {
size_t deadline = global_timer_ns() + timeout_ms * ns_per_ms;
while (cas<Sem>(addr, 1, 0) != 1) {
if (timeout_ms != 0 && global_timer_ns() > deadline) {
return false;
}
}
return true;
}
template <std::memory_order Sem>
__device__ __forceinline__ void put_signal(uint32_t* addr) {
while (cas<Sem>(addr, 0, 1) != 0)
;
}
template <std::memory_order Sem>
__device__ __forceinline__ void wait_signal(uint32_t* addr) {
while (cas<Sem>(addr, 1, 0) != 1)
;
}
// Synchronizes blocks with matching blockIdx across participating devices.
// Note: sync_remote_blocks itself is not a system level barrier/fence. It is a
// building block for expressing different synchronization patterns.
//
// Pattern 0: Ensures that all writes to symm_mem buffers from previous
// kernels across all devices are visible to the current kernel:
//
// sync_remote_blocks<std::memory_order_relaxed>(...);
// __syncthreads();
//
// Pattern 1: Ensures that all writes to symm_mem buffers from the current
// block are visible to all remote blocks with matching blockIdx:
//
// __syncthreads();
// sync_remote_blocks<std::memory_order_acq_rel>(...);
// __syncthreads();
//
// Pattern 2: Ensures that symm_mem buffers read by the current kernel are safe
// for writing by subsequent kernels across all devices.
//
// __syncthreads();
// sync_remote_blocks<std::memory_order_relaxed>(...);
template <std::memory_order Sem>
__device__ __forceinline__ void sync_remote_blocks(
uint32_t** signal_pads,
size_t rank,
size_t world_size);
template <>
__device__ __forceinline__ void sync_remote_blocks<std::memory_order_relaxed>(
uint32_t** signal_pads,
size_t rank,
size_t world_size) {
if (threadIdx.x < world_size) {
auto target_rank = threadIdx.x;
put_signal<std::memory_order_relaxed>(
signal_pads[target_rank] + blockIdx.x * world_size + rank);
wait_signal<std::memory_order_relaxed>(
signal_pads[rank] + blockIdx.x * world_size + target_rank);
}
}
template <>
__device__ __forceinline__ void sync_remote_blocks<std::memory_order_acq_rel>(
uint32_t** signal_pads,
size_t rank,
size_t world_size) {
if (threadIdx.x < world_size) {
auto target_rank = threadIdx.x;
put_signal<std::memory_order_release>(
signal_pads[target_rank] + blockIdx.x * world_size + rank);
wait_signal<std::memory_order_acquire>(
signal_pads[rank] + blockIdx.x * world_size + target_rank);
}
}
template <int Size>
union Vec;
template <>
union Vec<4> {
uint16_t u16[2];
uint32_t u32, as_scalar;
float f32;
};
template <>
union Vec<8> {
uint16_t u16[4];
uint32_t u32[2];
uint64_t u64, as_scalar;
float f32[2];
};
template <>
union alignas(16) Vec<16> {
uint16_t u16[8];
uint32_t u32[4];
uint64_t u64[2];
uint4 u128, as_scalar;
float f32[4];
};
template <typename T>
struct MultimemLdReduce {
template <int Alignment>
__device__ __inline__ Vec<Alignment> operator()(T* mc_ptr) {
static_assert(dependent_false<T>);
}
};
template <int Alignment, typename T>
__device__ __inline__ Vec<Alignment> multimem_ld_reduce_add(T* mc_ptr) {
MultimemLdReduce<T> functor;
return functor.template operator()<Alignment>(mc_ptr);
}
#if defined(USE_ROCM) || !defined(NVCC_SUPPORTS_MULTICAST)
#define SPECIALIZE_MULTIMEM_LD_REDUCE_VEC_32(type, asm_type, acc_prec) \
template <> \
struct MultimemLdReduce<type> { \
template <int Alignment> \
__device__ __inline__ Vec<Alignment> operator()(type* mc_ptr) { \
CUDA_KERNEL_ASSERT(false); \
} \
};
#else
#define SPECIALIZE_MULTIMEM_LD_REDUCE_VEC_32(type, asm_type, acc_prec) \
template <> \
struct MultimemLdReduce<type> { \
template <int Alignment> \
__device__ __inline__ Vec<Alignment> operator()(type* mc_ptr) { \
Vec<Alignment> vec; \
if constexpr (Alignment == 16) { \
asm("multimem.ld_reduce.relaxed.sys.global.add" acc_prec \
".v4" asm_type " {%0,%1,%2,%3}, [%4];" \
: "=r"(vec.u32[0]), \
"=r"(vec.u32[1]), \
"=r"(vec.u32[2]), \
"=r"(vec.u32[3]) \
: "l"(mc_ptr) \
: "memory"); \
} else if constexpr (Alignment == 8) { \
asm("multimem.ld_reduce.relaxed.sys.global.add" acc_prec \
".v2" asm_type " {%0,%1}, [%2];" \
: "=r"(vec.u32[0]), "=r"(vec.u32[1]) \
: "l"(mc_ptr) \
: "memory"); \
} else if constexpr (Alignment == 4) { \
asm("multimem.ld_reduce.relaxed.sys.global.add" acc_prec asm_type \
" %0, [%1];" \
: "=r"(vec.u32) \
: "l"(mc_ptr) \
: "memory"); \
} \
return vec; \
} \
};
#endif
SPECIALIZE_MULTIMEM_LD_REDUCE_VEC_32(at::BFloat16, ".bf16x2", ".acc::f32");
SPECIALIZE_MULTIMEM_LD_REDUCE_VEC_32(float, ".f32", "");
template <int Alignment, typename T>
__device__ __inline__ void multimem_st(T* mc_ptr, Vec<Alignment>& vec) {
#if defined(USE_ROCM) || !defined(NVCC_SUPPORTS_MULTICAST)
CUDA_KERNEL_ASSERT(false);
#else
if constexpr (Alignment == 16) {
asm("multimem.st.relaxed.sys.global.v4.f32 [%0], {%1,%2,%3,%4};"
:
: "l"(mc_ptr),
"r"(vec.u32[0]),
"r"(vec.u32[1]),
"r"(vec.u32[2]),
"r"(vec.u32[3])
: "memory");
} else if constexpr (Alignment == 8) {
asm("multimem.st.relaxed.sys.global.v2.f32 [%0], {%1,%2};"
:
: "l"(mc_ptr), "r"(vec.u32[0]), "r"(vec.u32[1])
: "memory");
} else if constexpr (Alignment == 4) {
asm("multimem.st.relaxed.sys.global.f32 [%0], %1;"
:
: "l"(mc_ptr), "r"(vec.u32)
: "memory");
} else {
static_assert(dependent_false<T>);
}
#endif
}
template <int Alignment, typename T>
__device__ __inline__ Vec<Alignment> ld_vec(const T* addr) {
#if defined(USE_ROCM) || (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))
CUDA_KERNEL_ASSERT(false);
#else
Vec<Alignment> vec;
if constexpr (Alignment == 16) {
asm("ld.global.v4.u32 {%0,%1,%2,%3}, [%4];"
: "=r"(vec.u32[0]), "=r"(vec.u32[1]), "=r"(vec.u32[2]), "=r"(vec.u32[3])
: "l"(addr)
: "memory");
} else if constexpr (Alignment == 8) {
asm("ld.global.v2.u32 {%0,%1}, [%2];"
: "=r"(vec.u32[0]), "=r"(vec.u32[1])
: "l"(addr)
: "memory");
} else if constexpr (Alignment == 4) {
asm("ld.global.u32 %0, [%1];" : "=r"(vec.u32) : "l"(addr) : "memory");
} else {
static_assert(dependent_false<T>);
}
return vec;
#endif
}
template <int Alignment, typename T>
__device__ __inline__ void st_vec(T* addr, const Vec<Alignment>& vec) {
#if defined(USE_ROCM) || !defined(NVCC_SUPPORTS_MULTICAST)
CUDA_KERNEL_ASSERT(false);
#else
if constexpr (Alignment == 16) {
asm("st.global.v4.u32 [%0], {%1,%2,%3,%4};"
:
: "l"(addr),
"r"(vec.u32[0]),
"r"(vec.u32[1]),
"r"(vec.u32[2]),
"r"(vec.u32[3])
: "memory");
} else if constexpr (Alignment == 8) {
asm("st.global.v2.u32 [%0], {%1,%2};"
:
: "l"(addr), "r"(vec.u32[0]), "r"(vec.u32[1])
: "memory");
} else if constexpr (Alignment == 4) {
asm("st.global.u32 [%0], %1;" : : "l"(addr), "r"(vec.u32) : "memory");
} else {
static_assert(dependent_false<T>);
}
#endif
}
#if defined(USE_ROCM)
using __nv_bfloat162 = uint32_t;
#endif
template <typename T>
__device__ __inline__ T add_bf16x2(T a, T b) {
static_assert(sizeof(T) == 4);
#if defined(USE_ROCM) || (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 800))
CUDA_KERNEL_ASSERT(false);
return T{};
#else
auto res = __hadd2(
*reinterpret_cast<__nv_bfloat162*>(&a),
*reinterpret_cast<__nv_bfloat162*>(&b));
return *reinterpret_cast<T*>(&res);
#endif
}
template <int Alignment, typename T>
__device__ __inline__ Vec<Alignment> add_vec(
const Vec<Alignment>& a,
const Vec<Alignment>& b) {
Vec<Alignment> c{};
if constexpr (std::is_same_v<T, float>) {
if constexpr (Alignment == 16) {
c.f32[0] = a.f32[0] + b.f32[0];
c.f32[1] = a.f32[1] + b.f32[1];
c.f32[2] = a.f32[2] + b.f32[2];
c.f32[3] = a.f32[3] + b.f32[3];
} else if constexpr (Alignment == 8) {
c.f32[0] = a.f32[0] + b.f32[0];
c.f32[1] = a.f32[1] + b.f32[1];
} else if constexpr (Alignment == 4) {
c.f32 = a.f32 + b.f32;
} else {
static_assert(dependent_false<T>);
}
} else if constexpr (std::is_same_v<T, at::BFloat16>) {
if constexpr (Alignment == 16) {
c.u32[0] = add_bf16x2(a.u32[0], b.u32[0]);
c.u32[1] = add_bf16x2(a.u32[1], b.u32[1]);
c.u32[2] = add_bf16x2(a.u32[2], b.u32[2]);
c.u32[3] = add_bf16x2(a.u32[3], b.u32[3]);
} else if constexpr (Alignment == 8) {
c.u32[0] = add_bf16x2(a.u32[0], b.u32[0]);
c.u32[1] = add_bf16x2(a.u32[1], b.u32[1]);
} else if constexpr (Alignment == 4) {
c.u32 = add_bf16x2(a.u32, b.u32);
} else {
static_assert(dependent_false<T>);
}
} else {
static_assert(dependent_false<T>);
}
return c;
}
// With world_size specialization: perform balanced load from all peers before
// performing reduction.
template <typename T, int alignment, int k_world_size>
__device__ inline std::enable_if_t<(k_world_size > 0), Vec<alignment>>
load_and_reduce(T** ptrs, size_t rank, size_t world_size, size_t offset) {
Vec<alignment> vecs[k_world_size];
#pragma unroll k_world_size
for (size_t step = 0; step < k_world_size; ++step) {
size_t remote_rank = (rank + step) % k_world_size;
vecs[remote_rank] = ld_vec<alignment>(ptrs[remote_rank] + offset);
}
auto acc = vecs[0];
#pragma unroll k_world_size - 1
for (size_t r = 1; r < world_size; ++r) {
acc = add_vec<alignment, T>(acc, vecs[r]);
}
return acc;
}
// Without world_size specialization: perform ordered (unbalanced) load and
// accumulate on each load.
template <typename T, int alignment, int k_world_size>
__device__ inline std::enable_if_t<(k_world_size <= 0), Vec<alignment>>
load_and_reduce(T** ptrs, size_t rank, size_t world_size, size_t offset) {
Vec<alignment> acc{};
for (size_t step = 0; step < world_size; ++step) {
auto vec = ld_vec<alignment>(ptrs[step] + offset);
acc = add_vec<alignment, T>(acc, vec);
}
return acc;
}
} // namespace c10d::symmetric_memory
```
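The synchronization patterns documented above are easiest to read in context; below is a hypothetical kernel skeleton, not part of the PyTorch sources, illustrating Pattern 1. The kernel name and buffer arguments are invented, and the file is assumed to be compiled as CUDA C++.
```cuda
// Hypothetical sketch, not part of the original header.
#include <torch/csrc/distributed/c10d/CUDASymmetricMemory-inl.h>

using namespace c10d::symmetric_memory;

__global__ void example_symm_mem_kernel(
    float** buffers,        // per-rank symmetric memory buffers (made up)
    uint32_t** signal_pads, // per-rank signal pads
    size_t rank,
    size_t world_size) {
  // ... write this block's portion of buffers[rank] ...
  __syncthreads();
  // Pattern 1: make this block's writes visible to remote blocks with the
  // same blockIdx before any of them read the data.
  sync_remote_blocks<std::memory_order_acq_rel>(signal_pads, rank, world_size);
  __syncthreads();
  // ... now it is safe for this block to read its peers' buffers ...
}
```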
|
==============================================================================================================================================
SOURCE CODE FILE: TraceUtils.h
LINES: 12
SIZE: 10.30 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\c10d\TraceUtils.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/ScalarType.h>
#include <c10/util/ApproximateClock.h>
#include <c10/util/irange.h>
#include <c10/util/string_view.h>
#include <torch/csrc/distributed/c10d/Store.hpp>
#include <torch/csrc/distributed/c10d/Types.hpp>
#include <torch/csrc/distributed/c10d/Utils.hpp>
#include <torch/csrc/jit/serialization/pickler.h>
#include <torch/csrc/profiler/combined_traceback.h>
#include <sys/types.h>
#include <cstdlib>
#include <string>
#include <vector>
namespace c10d {
// A struct to hold the latest status of the process group.
struct ProcessGroupStatus {
// the sequential number of the last collective enqueued into workMetaList_
  // This is useful for identifying a rank that has not joined a collective
// initialized to be -1 to indicate no collective has been enqueued
int64_t lastEnqueuedSeq{-1};
// the sequential number of the last collective started as the kernel
int64_t lastStartedSeq{-1};
// the sequential number of the last collective completed marked by
// the watchdog thread
// initialized to be -1 to indicate no collective has been completed
int64_t lastCompletedSeq{-1};
// the name of the last collective enqueued into workMetaList_
std::string lastEnqueuedWorkName;
// the name of the last collective started as the kernel
std::string lastStartedWorkName;
// the name of the last collective completed
std::string lastCompletedWorkName;
// the sizes of the last work enqueued
size_t lastEnqueuedNumelIn;
size_t lastEnqueuedNumelOut;
// the sizes of the last work completed
size_t lastCompletedNumelIn;
size_t lastCompletedNumelOut;
// the sizes of the last work started
size_t lastStartedNumelIn;
size_t lastStartedNumelOut;
};
inline std::string getTraceStartKey(const std::string& pgName, int rank) {
return pgName + "_" + std::to_string(rank) + "_trace_start";
}
inline std::string getTraceEndKey(const std::string& pgName, int rank) {
return pgName + "_" + std::to_string(rank) + "_trace_end";
}
inline bool traceUpdate(
c10::intrusive_ptr<Store>& store,
const std::string& key,
uint64_t seq,
const std::string& col) {
std::vector<uint8_t> value(col.size() + sizeof(seq) + 1);
memcpy(value.data(), &seq, sizeof(seq));
memcpy(value.data() + sizeof(seq), col.data(), col.size());
try {
store->set(key, value);
return true;
} catch (...) {
LOG(ERROR) << "Store is down while updating #" << seq << " with key "
<< key;
return false;
}
return true;
}
enum TraceDebugEvent {
kEventStart,
kEventEnd,
};
// <seq, <rank, <col, start/end>>>
using TraceMap =
std::map<uint64_t, std::map<int, std::pair<std::string, TraceDebugEvent>>>;
inline std::string ranksToString(const std::vector<int>& ranks) {
std::string str;
for (int rank : ranks) {
if (str.empty()) {
str = std::to_string(rank);
} else {
str += ", " + std::to_string(rank);
}
}
return str;
}
inline std::string ranksFromTrace(
const std::vector<std::pair<int, std::string>>& items) {
std::string ranks;
for (auto& p : items) {
if (ranks.empty()) {
ranks = std::to_string(p.first);
} else {
ranks += ", " + std::to_string(p.first);
}
}
return ranks;
}
inline std::string analyzeMissingRanks(const std::vector<int>& missingRanks) {
return c10::str(
"\n\t - To our best knowledge, ranks [",
ranksToString(missingRanks),
"] are the lagging ranks that caused this timeout. "
"They never joined any collectives");
}
inline std::string analyzeLaggingRanks(const TraceMap& traceMap) {
uint64_t lagSeq = traceMap.begin()->first;
std::vector<int> startRanks;
std::vector<int> endRanks;
for (auto& p : traceMap.begin()->second) {
if (p.second.second == kEventStart) {
startRanks.push_back(p.first);
} else {
endRanks.push_back(p.first);
}
}
std::string report =
"\n\t - To our best knowledge, the lagging/dead/mismatched ranks "
"that caused the desync are:";
if (!startRanks.empty()) {
report += c10::str(
"\n\t - [",
ranksToString(startRanks),
"] joined but didn't finish collective #",
lagSeq,
" (count from 1)");
}
if (!endRanks.empty()) {
report += c10::str(
"\n\t [",
ranksToString(endRanks),
"] finished collective #",
lagSeq,
", but didn't join collective #",
lagSeq + 1,
" (count from 1)");
}
return report;
}
inline std::string dumpSnapshot(TraceMap& traceMap) {
std::string report = "\n\t - Snapshot of ranks' latest states:";
for (auto& tracePair : traceMap) {
uint64_t seq = tracePair.first;
std::map<int, std::pair<std::string, TraceDebugEvent>>& subMap =
tracePair.second;
std::unordered_map<std::string, std::vector<int>> collectivesStart;
std::unordered_map<std::string, std::vector<int>> collectivesEnd;
for (auto& p : subMap) {
int rank = p.first;
const std::string& col = p.second.first;
if (p.second.second == kEventStart) {
collectivesStart[col].push_back(rank);
} else {
collectivesEnd[col].push_back(rank);
}
}
if (!collectivesStart.empty()) {
report += c10::str("\n\t #", seq, " started ranks:");
for (auto& mapPair : collectivesStart) {
report += c10::str(
"\n\t [",
ranksToString(mapPair.second),
"] started ",
mapPair.first);
}
}
if (!collectivesEnd.empty()) {
report += c10::str("\n\t #", seq, " finished ranks:");
for (auto& mapPair : collectivesEnd) {
report += c10::str(
"\n\t [",
ranksToString(mapPair.second),
"] finished ",
mapPair.first);
}
}
}
return report;
}
inline bool parseTraceValue(
c10::intrusive_ptr<Store>& store,
const std::string& key,
uint64_t& seq,
std::string& col) {
try {
std::vector<uint8_t> traceValue = store->get(key);
memcpy(&seq, traceValue.data(), sizeof(seq));
std::string colName((char*)traceValue.data() + sizeof(seq));
col = colName;
return true;
} catch (...) {
LOG(ERROR) << "Store is down while getting key " << key;
return false;
}
return true;
}
inline std::string retrieveDesyncReport(
c10::intrusive_ptr<Store>& store,
const std::string& pgName,
int myRank,
int worldSize) {
std::string report;
uint64_t thisSeq = 0;
std::string thisCol;
std::vector<int> missingRanks;
TraceMap traceMap;
for (const auto rank : c10::irange(worldSize)) {
// Build traceMapStart.
uint64_t seqStart = 0;
{
std::string traceKeyStart = getTraceStartKey(pgName, rank);
if (!store->check({traceKeyStart})) {
missingRanks.push_back(rank);
continue;
}
std::string col;
if (!parseTraceValue(store, traceKeyStart, seqStart, col)) {
return report;
}
traceMap[seqStart].emplace(rank, std::make_pair(col, kEventStart));
if (rank == myRank) {
thisSeq = seqStart;
thisCol = std::move(col);
}
}
// Build traceMapEnd.
{
std::string traceKeyEnd = getTraceEndKey(pgName, rank);
if (!store->check({traceKeyEnd})) {
continue;
}
uint64_t seq = 0;
std::string col;
if (!parseTraceValue(store, traceKeyEnd, seq, col)) {
return report;
}
if (seq == seqStart) {
traceMap[seq][rank].second = kEventEnd;
}
}
}
TORCH_INTERNAL_ASSERT(
!missingRanks.empty() || !traceMap.empty(),
"Trace shouldn't be empty while enabled GLOO_ASYNC_TIMEOUT_DEBUG");
TORCH_INTERNAL_ASSERT(
!thisCol.empty(),
"Timeout rank [",
myRank,
"] must have collective tracking iteam in c10::Store trace");
TORCH_INTERNAL_ASSERT(
traceMap[thisSeq][myRank].second == kEventStart,
"Timeout rank [",
myRank,
"] last trace item must be kEventStart. thisSeq = ",
thisSeq,
", col = ",
thisCol);
report += c10::str(
"\n\t - [", myRank, "] Timeout at collective: ", thisCol, ", #", thisSeq);
if (!missingRanks.empty()) {
report += analyzeMissingRanks(missingRanks);
} else {
report += analyzeLaggingRanks(traceMap);
report += dumpSnapshot(traceMap);
}
return report;
}
inline std::string pickle_str(const c10::IValue& v) {
std::vector<char> result;
{
auto writer = [&](const char* data, size_t size) {
result.insert(result.end(), data, data + size);
};
torch::jit::Pickler pickler(
writer, nullptr, nullptr, nullptr, nullptr, false);
pickler.protocol();
pickler.pushIValue(v);
pickler.stop();
}
return std::string(result.begin(), result.end());
}
inline std::string get_python_cpp_trace() {
// usage:
// LOG(INFO) << "stacktrace: "
// << get_python_cpp_trace();
// warn: might be slow in getting cpp traces
// because of slow/broken addr2line
// in different system libs
std::shared_ptr<torch::CapturedTraceback> tb =
torch::CapturedTraceback::gather(
/*python=*/true, /*script=*/true, /*cpp=*/true);
torch::SymbolizedTracebacks s_tbs = torch::symbolize({tb.get()});
const auto& s_tb = s_tbs.tracebacks.at(0);
std::stringstream oss;
for (auto idx : c10::irange(s_tb.size())) {
auto frame_id = s_tb[idx];
const auto& frame = s_tbs.all_frames.at(frame_id);
oss << "#" << idx << " " << frame.funcname << " from " << frame.filename
<< ":" << frame.lineno << '\n';
}
return oss.str();
}
inline c10::Dict<c10::IValue, c10::IValue> new_dict() {
return c10::Dict<c10::IValue, c10::IValue>(
c10::AnyType::get(), c10::AnyType::get());
}
inline c10::List<c10::IValue> new_list() {
return c10::List<c10::IValue>(c10::AnyType::get());
}
inline std::string ranks_str(const std::vector<uint64_t>& ranks) {
std::string str;
for (const auto& rank : ranks) {
if (str.empty()) {
str = std::to_string(rank);
} else {
str += ", " + std::to_string(rank);
}
}
return c10::str("[", str, "]");
}
} // namespace c10d
```
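For orientation, a hypothetical caller of the trace helpers above might look like the sketch below; it is not part of the PyTorch sources. The process-group name "default_pg" and collective name "allreduce" are made-up values, and `store` is assumed to be a live c10d Store shared by all ranks.
```cpp
// Hypothetical sketch, not part of the original header.
#include <cstdint>
#include <string>

#include <torch/csrc/distributed/c10d/TraceUtils.h>

void recordCollectiveTrace(
    c10::intrusive_ptr<c10d::Store>& store,
    int rank,
    uint64_t seq) {
  // Keys look like "default_pg_0_trace_start" / "default_pg_0_trace_end".
  const std::string startKey = c10d::getTraceStartKey("default_pg", rank);
  const std::string endKey = c10d::getTraceEndKey("default_pg", rank);
  // The stored value is sizeof(seq) bytes of the sequence number followed by
  // the collective's name.
  c10d::traceUpdate(store, startKey, seq, "allreduce");
  // ... run the collective ...
  c10d::traceUpdate(store, endKey, seq, "allreduce");
}
```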
|
========================================================================================================================================
SOURCE CODE FILE: c10d.h
LINES: 1
SIZE: 0.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\c10d\c10d.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
namespace torch::distributed::c10d {
PyMethodDef* python_functions();
} // namespace torch::distributed::c10d
```
|
=========================================================================================================================================
SOURCE CODE FILE: debug.h
LINES: 1
SIZE: 0.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\c10d\debug.h
ENCODING: utf-8
```h
// Copyright (c) Meta Platforms, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <c10/macros/Macros.h>
namespace c10d {
enum class DebugLevel { Off = 0, Info = 1, Detail = 2 };
TORCH_API void setDebugLevel(DebugLevel level);
// Sets the debug level based on the value of the `TORCH_DISTRIBUTED_DEBUG`
// environment variable.
TORCH_API void setDebugLevelFromEnvironment();
TORCH_API DebugLevel debug_level() noexcept;
} // namespace c10d
```
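A minimal, hypothetical initialization path using these functions; not part of the PyTorch sources, and the function name `maybeEnableVerboseLogging` is invented.
```cpp
// Hypothetical sketch, not part of the original header.
#include <torch/csrc/distributed/c10d/debug.h>

void maybeEnableVerboseLogging() {
  // Honor TORCH_DISTRIBUTED_DEBUG if it is set in the environment.
  c10d::setDebugLevelFromEnvironment();
  if (c10d::debug_level() == c10d::DebugLevel::Detail) {
    // ... enable extra per-collective diagnostics ...
  }
}
```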
|
=========================================================================================================================================
SOURCE CODE FILE: error.h
LINES: 1
SIZE: 1.36 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\c10d\error.h
ENCODING: utf-8
```h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <cstring>
#include <system_error>
#include <fmt/format.h>
namespace fmt {
template <>
struct formatter<std::error_category> {
constexpr decltype(auto) parse(format_parse_context& ctx) const {
return ctx.begin();
}
template <typename FormatContext>
decltype(auto) format(const std::error_category& cat, FormatContext& ctx)
const {
if (std::strcmp(cat.name(), "generic") == 0) {
return fmt::format_to(ctx.out(), "errno");
} else {
return fmt::format_to(ctx.out(), "{} error", cat.name());
}
}
};
template <>
struct formatter<std::error_code> {
constexpr decltype(auto) parse(format_parse_context& ctx) const {
return ctx.begin();
}
template <typename FormatContext>
decltype(auto) format(const std::error_code& err, FormatContext& ctx) const {
return fmt::format_to(
ctx.out(), "({}: {} - {})", err.category(), err.value(), err.message());
}
};
} // namespace fmt
namespace c10d::detail {
inline std::error_code lastError() noexcept {
return std::error_code{errno, std::generic_category()};
}
} // namespace c10d::detail
```
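These formatter specializations let std::error_code values be passed straight to fmt. A hypothetical sketch (the function name `describeLastError` is invented); with the generic category the output reads roughly like "(errno: 2 - No such file or directory)", with platform-dependent message text.
```cpp
// Hypothetical sketch, not part of the original header.
#include <string>

#include <fmt/format.h>

#include <torch/csrc/distributed/c10d/error.h>

std::string describeLastError() {
  // lastError() captures errno; the formatters above render the category,
  // value, and message.
  return fmt::format("last socket error: {}", c10d::detail::lastError());
}
```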
|
=============================================================================================================================================
SOURCE CODE FILE: exception.h
LINES: 1
SIZE: 1.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\c10d\exception.h
ENCODING: utf-8
```h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
// Utility macro similar to C10_THROW_ERROR, the major difference is that this
// macro handles exception types defined in the c10d namespace, whereas
// C10_THROW_ERROR requires an exception to be defined in the c10 namespace.
#define C10D_THROW_ERROR(err_type, ...) \
throw ::c10d::err_type( \
{__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
c10::str(__VA_ARGS__))
#define C10D_CHECK_WITH(error_t, cond, ...) \
if (C10_UNLIKELY_OR_CONST(!(cond))) { \
C10D_THROW_ERROR( \
error_t, TORCH_CHECK_MSG(cond, "", c10::str(__VA_ARGS__))); \
}
namespace c10d {
using c10::DistNetworkError;
using c10::DistStoreError;
class TORCH_API SocketError : public DistNetworkError {
using DistNetworkError::DistNetworkError;
};
class TORCH_API TimeoutError : public DistNetworkError {
using DistNetworkError::DistNetworkError;
};
} // namespace c10d
```
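A small, hypothetical example of how these macros are typically used to raise c10d exception types; not part of the PyTorch sources, and the helper name `validatePort` is invented.
```cpp
// Hypothetical sketch, not part of the original header.
#include <torch/csrc/distributed/c10d/exception.h>

void validatePort(int port) {
  // Throws c10d::SocketError with source location and a formatted message
  // when the condition does not hold.
  C10D_CHECK_WITH(SocketError, port > 0 && port <= 65535, "invalid port: ", port);
}
```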
|
===========================================================================================================================================
SOURCE CODE FILE: logging.h
LINES: 1
SIZE: 1.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\c10d\logging.h
ENCODING: utf-8
```h
// Copyright (c) Meta Platforms, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <string>
#include <c10/macros/Macros.h>
#include <c10/util/Logging.h>
#include <fmt/format.h>
namespace c10d::detail {
enum class LogLevel { Trace, Debug, Info, Warning, Error };
TORCH_API bool isLogLevelEnabled(LogLevel level) noexcept;
template <typename... T>
// NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
std::string formatLogMessage(fmt::string_view fmt, T&&... args) {
return fmt::vformat(fmt, fmt::make_format_args(args...));
}
} // namespace c10d::detail
#define C10D_ERROR(...) \
if (c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Error)) \
LOG(ERROR) << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
#define C10D_WARNING(...) \
if (c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Warning)) \
LOG(WARNING) << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
#define C10D_INFO(...) \
if (c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Info)) \
LOG(INFO) << "[c10d] " << c10d::detail::formatLogMessage(__VA_ARGS__)
#define C10D_DEBUG(...) \
if (c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Debug)) \
LOG(INFO) << "[c10d - debug] " << c10d::detail::formatLogMessage(__VA_ARGS__)
#define C10D_TRACE(...) \
if (c10d::detail::isLogLevelEnabled(c10d::detail::LogLevel::Trace)) \
LOG(INFO) << "[c10d - trace] " << c10d::detail::formatLogMessage(__VA_ARGS__)
```
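Typical use of the logging macros, as a hypothetical sketch that is not part of the PyTorch sources (the function name `logJoin` is invented); the format strings follow fmt syntax.
```cpp
// Hypothetical sketch, not part of the original header.
#include <torch/csrc/distributed/c10d/logging.h>

void logJoin(int rank, int worldSize) {
  C10D_INFO("rank {} joined a process group of size {}", rank, worldSize);
  if (worldSize <= 0) {
    C10D_ERROR("invalid world size: {}", worldSize);
  }
}
```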
|
====================================================================================================================================================
SOURCE CODE FILE: python_comm_hook.h
LINES: 1
SIZE: 1.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\c10d\python_comm_hook.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/c10d/comm.hpp>
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
#include <torch/csrc/utils/pybind.h>
namespace c10d {
class TORCH_PYTHON_API PythonCommHook : public CommHookInterface {
public:
// Takes a state and a callable hook. The inputs are Python objects.
  // The state is passed to the hook in the runHook method, and it can be used
  // to maintain and update any state information during the execution of the
  // hook. The hook performs user-specified processing and returns a future
  // indicating asynchronous communication of gradients.
PythonCommHook(py::object state, py::object hook)
: state_(std::move(state)), hook_(std::move(hook)) {}
~PythonCommHook() override;
c10::intrusive_ptr<c10::ivalue::Future> runHook(GradBucket& bucket) override;
at::Tensor parseHookResult(const c10::IValue& result) override;
private:
// Only needed for stateful communication.
py::object state_;
py::object hook_;
};
} // namespace c10d
```
|
==========================================================================================================================================
SOURCE CODE FILE: socket.h
LINES: 1
SIZE: 2.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\c10d\socket.h
ENCODING: utf-8
```h
// Copyright (c) Meta Platforms, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <chrono>
#include <cstdint>
#include <memory>
#include <string>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <torch/csrc/distributed/c10d/Backoff.hpp>
#include <torch/csrc/distributed/c10d/exception.h>
namespace c10d::detail {
class SocketOptions {
public:
SocketOptions& prefer_ipv6(bool value) noexcept {
prefer_ipv6_ = value;
return *this;
}
bool prefer_ipv6() const noexcept {
return prefer_ipv6_;
}
SocketOptions& connect_timeout(std::chrono::milliseconds value) noexcept {
connect_timeout_ = value;
return *this;
}
std::chrono::milliseconds connect_timeout() const noexcept {
return connect_timeout_;
}
// Sets the backoff policy to use for socket connect ops.
SocketOptions& connect_backoff(std::shared_ptr<Backoff> value) noexcept {
connect_backoff_ = std::move(value);
return *this;
}
const std::shared_ptr<Backoff>& connect_backoff() const noexcept {
return connect_backoff_;
}
private:
bool prefer_ipv6_ = true;
std::chrono::milliseconds connect_timeout_{std::chrono::seconds{30}};
std::shared_ptr<Backoff> connect_backoff_{
std::make_shared<FixedBackoff>(std::chrono::milliseconds(1000))};
};
class SocketImpl;
class Socket {
public:
// This function initializes the underlying socket library and must be called
// before any other socket function.
static void initialize();
static Socket listen(std::uint16_t port, const SocketOptions& opts = {});
static Socket listenFromFd(int fd, std::uint16_t expected_port);
static Socket connect(
const std::string& host,
std::uint16_t port,
const SocketOptions& opts = {});
Socket() noexcept = default;
Socket(const Socket& other) = delete;
Socket& operator=(const Socket& other) = delete;
Socket(Socket&& other) noexcept;
Socket& operator=(Socket&& other) noexcept;
~Socket();
Socket accept() const;
int handle() const noexcept;
std::uint16_t port() const;
bool waitForInput(std::chrono::milliseconds timeout);
std::string repr() const;
private:
explicit Socket(std::unique_ptr<SocketImpl>&& impl) noexcept;
std::unique_ptr<SocketImpl> impl_;
};
} // namespace c10d::detail
```
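A hypothetical client-side sketch using the builder-style SocketOptions above; not part of the PyTorch sources, and the host, port, and function name are made up.
```cpp
// Hypothetical sketch, not part of the original header.
#include <chrono>

#include <torch/csrc/distributed/c10d/socket.h>

c10d::detail::Socket connectToStore() {
  // initialize() must be called before any other socket function.
  c10d::detail::Socket::initialize();
  auto opts = c10d::detail::SocketOptions{}
                  .prefer_ipv6(true)
                  .connect_timeout(std::chrono::seconds{30});
  return c10d::detail::Socket::connect("127.0.0.1", 29500, opts);
}
```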
|
==============================================================================================================================================
SOURCE CODE FILE: socket_fmt.h
LINES: 1
SIZE: 0.71 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\c10d\socket_fmt.h
ENCODING: utf-8
```h
// (c) Meta Platforms, Inc. and affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
/*
This file should not be included from other .h files and should only be used in
.cpp files, as it exposes the underlying platform-specific socket headers.
*/
#include <string>
#ifdef _WIN32
#include <mutex>
#include <winsock2.h>
#include <ws2tcpip.h>
#else
#include <netinet/in.h>
#endif
namespace c10d::detail {
// Returns a human-readable representation of the given socket address.
std::string formatSockAddr(const struct ::sockaddr* addr, socklen_t len);
} // namespace c10d::detail
```
|
==============================================================================================================================================
SOURCE CODE FILE: agent_utils.h
LINES: 1
SIZE: 1.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\agent_utils.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/c10d/PrefixStore.hpp>
#include <torch/csrc/distributed/rpc/utils.h>
namespace torch::distributed::rpc {
// All RPC peers should call into this function at the same time. Each peer
// provides its own id and name, and this function uses the given Store to
// gather global name-to-id mapping on all peers.
TORCH_API std::unordered_map<std::string, worker_id_t> collectNames(
::c10d::PrefixStore store,
const worker_id_t selfId,
const std::string& selfName,
const int worldSize);
// Ranks in dynamic RPC groups will initially call into this to establish the
// name-to-id mapping for the current peers in the group. The current rank will
// put its own worker info in the store and discover all the ranks that came
// before it. NOTE: This needs to be called with the Dynamic RPC group
// membership management token held.
TORCH_API std::unordered_map<std::string, worker_id_t> collectCurrentNames(
::c10d::PrefixStore store,
const worker_id_t selfId,
const std::string& selfName);
// Removes name from Store, used in dynamic RPC groups.
// NOTE: This needs to be called with the Dynamic RPC group
// membership management token held.
TORCH_API void removeCurrentName(
::c10d::PrefixStore store,
const worker_id_t selfId,
const std::string& selfName);
// This performs a synchronization of all call counts by using store.
// All RPC peers wait for others to join to exit at the same time.
TORCH_API int syncCallCount(
::c10d::PrefixStore store,
const int worldSize,
int activeCalls = 0);
} // namespace torch::distributed::rpc
```
|
==========================================================================================================================================
SOURCE CODE FILE: message.h
LINES: 1
SIZE: 7.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\message.h
ENCODING: utf-8
```h
#pragma once
#include <torch/types.h>
#include <vector>
namespace torch::distributed::rpc {
// An enum denoting common RPC errors to allow specific error handling for them.
// NOLINTNEXTLINE(performance-enum-size)
enum RPCErrorType {
UNKNOWN_ERROR = 0, /* Indicates that error type could not be parsed */
TIMEOUT = 1, /* Indicates that the RPC has timed out */
INTENTIONAL_FAILURE = 2 /* Deliberate failure, such as those injected by
FaultyAgent for testing */
};
// The enum values are bitwise ORed with MessageType
// They are bit flags starting from 0x100 and should have
// values such as 0x100, 0x200, 0x400, 0x800, 0x1000, etc.
// NOLINTNEXTLINE(performance-enum-size)
enum MessageTypeFlags {
REQUEST_TYPE = 0x100,
RESPONSE_TYPE = 0x200,
};
// Message types must have values between 0x00 to 0xff
// NOLINTNEXTLINE(performance-enum-size)
enum MessageType {
// messages for dist.rpc on builtin operators
SCRIPT_CALL = 0x00 | MessageTypeFlags::REQUEST_TYPE,
SCRIPT_RET = 0x01 | MessageTypeFlags::RESPONSE_TYPE,
// messages for dist.rpc on Python UDF
PYTHON_CALL = 0x02 | MessageTypeFlags::REQUEST_TYPE,
PYTHON_RET = 0x03 | MessageTypeFlags::RESPONSE_TYPE,
// messages for dist.remote on builtin operators and Python UDF
SCRIPT_REMOTE_CALL = 0x04 |
MessageTypeFlags::REQUEST_TYPE, // A remote call on a builtin operator
PYTHON_REMOTE_CALL =
0x05 | MessageTypeFlags::REQUEST_TYPE, // A remote call on a Python UDF
REMOTE_RET =
0x06 | MessageTypeFlags::RESPONSE_TYPE, // Response for remote calls for
// UDF, builtin, or script
// RRef related internal messages
SCRIPT_RREF_FETCH_CALL =
0x07 | MessageTypeFlags::REQUEST_TYPE, // A UserRRef<IValue> fetches value
// from owner
PYTHON_RREF_FETCH_CALL =
0x08 | MessageTypeFlags::REQUEST_TYPE, // A UserRRef<py::object> fetches
// value from owner
SCRIPT_RREF_FETCH_RET = 0x09 |
MessageTypeFlags::RESPONSE_TYPE, // An OwnerRRef sends ivalue to user
PYTHON_RREF_FETCH_RET = 0x0a |
MessageTypeFlags::RESPONSE_TYPE, // An OwnerRRef sends py::object to user
RREF_USER_DELETE = 0x0b |
MessageTypeFlags::REQUEST_TYPE, // A UserRRef tells the owner to deref
RREF_FORK_REQUEST =
0x0c | MessageTypeFlags::REQUEST_TYPE, // A child UserRRef tells the owner
// about itself
RREF_CHILD_ACCEPT =
0x0d | MessageTypeFlags::REQUEST_TYPE, // A child UserRRef tells parent
// that owner knows it
RREF_ACK =
0x0e | MessageTypeFlags::RESPONSE_TYPE, // ACK to internal RRef messages
// Messages with autograd info
FORWARD_AUTOGRAD_REQ = 0x0f | MessageTypeFlags::REQUEST_TYPE,
FORWARD_AUTOGRAD_RESP = 0x10 | MessageTypeFlags::RESPONSE_TYPE,
// Messages to propagate gradients on the backward pass.
BACKWARD_AUTOGRAD_REQ = 0x11 | MessageTypeFlags::REQUEST_TYPE,
BACKWARD_AUTOGRAD_RESP = 0x12 | MessageTypeFlags::RESPONSE_TYPE,
// Messages to tell workers to clean up their autograd context.
CLEANUP_AUTOGRAD_CONTEXT_REQ = 0x13 | MessageTypeFlags::REQUEST_TYPE,
CLEANUP_AUTOGRAD_CONTEXT_RESP = 0x14 | MessageTypeFlags::RESPONSE_TYPE,
// Messages that tell workers to run requests with profiling enabled.
RUN_WITH_PROFILING_REQ = 0x15 | MessageTypeFlags::REQUEST_TYPE,
RUN_WITH_PROFILING_RESP = 0x16 | MessageTypeFlags::RESPONSE_TYPE,
// Messages to support RRef.backward().
RREF_BACKWARD_REQ = 0x17 | MessageTypeFlags::REQUEST_TYPE,
RREF_BACKWARD_RESP = 0x18 | MessageTypeFlags::RESPONSE_TYPE,
// Other internal message types
EXCEPTION = 0x37 | MessageTypeFlags::RESPONSE_TYPE,
UNKNOWN = 0x3c
};
// A message to be sent/received by an RpcAgent.
//
// A Message object contains 4 fields:
// payload (std::vector<char>): a binary chunk of data.
// tensors (std::vector<torch::Tensor>): all tensors. Tensor data are not
// included in the payload, and it is up to the RpcAgent implementation
// to determine how to serialize them. This design is helpful for
// communicating super large tensors where serializing all the data at
//      once leads to an excessively large memory footprint. An implementation
//      can then serialize and send tensors chunk-by-chunk, in a streaming
//      fashion.
// type (MessageType): type of the message.
// id (int64_t): message id, this is used to match request and response.
//      Other implementations can ignore it if they have their own
// ways to do matching.
//
// Layers above ``RpcAgent`` only convert ScriptCall, ScriptResp, PythonCall,
// and PythonResp into a Message, and it is up to the RpcAgent
// implementation to determine how to serialize a message.
class TORCH_API Message final : public torch::CustomClassHolder {
private:
// Keep these private in order to force users to go through make_intrusive and
// thus prevent creating a Message that's not held by an intrusive_ptr.
Message();
Message(
std::vector<char>&& payload,
std::vector<torch::Tensor>&& tensors,
MessageType type);
Message(
std::vector<char>&& payload,
std::vector<torch::Tensor>&& tensors,
MessageType type,
int64_t id);
friend c10::intrusive_ptr<Message>;
public:
Message(const Message& other) = delete;
Message(Message&& other) = delete;
Message& operator=(Message const& rhs) = delete;
Message& operator=(Message&& rhs) = delete;
~Message() override = default;
// Destructively retrieves the payload.
std::vector<char>&& movePayload() &&;
std::vector<torch::Tensor>&& moveTensors() &&;
std::vector<char>& payload();
const std::vector<char>& payload() const;
std::vector<torch::Tensor>& tensors();
const std::vector<torch::Tensor>& tensors() const;
MessageType type() const;
bool isRequest() const;
bool isResponse() const;
bool isShutdown() const;
// id is an optional field to match request/response. If an RpcAgent
// implementation is able to do the matching without using this id, it can be
// dropped during message serialization.
int64_t id() const;
void setId(int64_t id);
std::vector<c10::weak_intrusive_ptr<c10::StorageImpl>> getStorages() const;
private:
std::vector<char> payload_;
std::vector<torch::Tensor> tensors_;
MessageType type_ = MessageType::UNKNOWN;
int64_t id_ = -1;
};
// Create a response Message of type Exception.
// The exception string representation will be used as the message's payload.
// A message ID corresponding to the request that resulted in this response can
// be provided for matching requests/responses.
TORCH_API c10::intrusive_ptr<Message> createExceptionResponse(
const std::exception& e,
int64_t id);
// Create a response Message of type Exception.
// The passed in string representation will be used as the message's payload.
// A message ID corresponding to the request that resulted in this response can
// be provided for matching requests/responses.
TORCH_API c10::intrusive_ptr<Message> createExceptionResponse(
const std::string& exceptionStr,
int64_t id);
inline std::tuple<
c10::intrusive_ptr<Message>,
std::vector<c10::weak_intrusive_ptr<c10::StorageImpl>>>
withStorages(c10::intrusive_ptr<Message> message) {
auto storages = message->getStorages();
return std::make_tuple(std::move(message), std::move(storages));
}
using JitFuture = c10::ivalue::Future;
} // namespace torch::distributed::rpc
```
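A minimal sketch of how the REQUEST_TYPE/RESPONSE_TYPE bit flags surface through isRequest()/isResponse(); construction goes through c10::make_intrusive as the comment above requires, and the payload bytes are placeholders rather than a real serialized ScriptCall.
```cpp
#include <torch/csrc/distributed/rpc/message.h>

#include <cassert>

using namespace torch::distributed::rpc;

c10::intrusive_ptr<Message> makeDummyRequest() {
  std::vector<char> payload = {'h', 'i'}; // placeholder, not a real ScriptCall
  std::vector<torch::Tensor> tensors;     // tensors travel outside the payload
  // Ctors are private, so construction must go through make_intrusive.
  auto msg = c10::make_intrusive<Message>(
      std::move(payload), std::move(tensors), MessageType::SCRIPT_CALL);
  // SCRIPT_CALL has the REQUEST_TYPE bit set, so this is a request.
  assert(msg->isRequest() && !msg->isResponse());
  return msg;
}
```
An agent would typically answer such a request with a SCRIPT_RET message, or with createExceptionResponse(e, msg->id()) on failure.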
|
==========================================================================================================================================
SOURCE CODE FILE: py_rref.h
LINES: 1
SIZE: 2.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\py_rref.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/rref_impl.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::distributed::rpc {
// NOLINTNEXTLINE(performance-enum-size)
enum RRefProxyType { RPC_SYNC, RPC_ASYNC, REMOTE };
// Python wrapper of an RRef shared_ptr that supports Python
// pickle and unpickle.
class PYBIND11_EXPORT PyRRef {
public:
// The first ctor can only be called while holding the GIL. See its
// implementation for more details.
explicit PyRRef(const py::object& value, const py::object& type_hint);
explicit PyRRef(c10::intrusive_ptr<RRef> rref);
PyRRef(const PyRRef&) = default;
~PyRRef();
bool isOwner() const;
bool confirmedByOwner() const;
WorkerInfo owner() const;
std::string ownerName() const;
py::object toHere(
const float timeoutSeconds =
torch::distributed::rpc::kUnsetRpcTimeout) const;
py::object localValue() const;
std::string str() const;
py::tuple pickle() const;
static PyRRef unpickle(const py::tuple& t);
c10::IValue toIValue() const;
// Future that is associated with the creation of this RRef on the remote end.
// This is only used to get the future corresponding to the rref for profiling
// use cases.
c10::intrusive_ptr<JitFuture> getFuture() const;
// Keeps track of the future responsible for profiling owner creation
// acknowledgement
c10::intrusive_ptr<JitFuture> getProfilingFuture() const;
// Sets the future responsible for profiling owner creation acknowledgement.
// This future is set from python to be a future that returns when profiling
// callbacks have been run.
void setProfilingFuture(c10::intrusive_ptr<JitFuture> profilingFuture);
// create a proxy on this RRef, which can be used to launch RPC on the owner
// of this RRef to run functions on the object referenced by this RRef.
py::object createRRefProxy(
const RRefProxyType& mode,
float timeoutSeconds = rpc::kUnsetRpcTimeout) const;
// get the type of the data object referenced by this RRef. Timeout argument
// is only used in the first invocation of this function as an argument to the
// RPC to the owner node of the RRef.
py::object getRRefType(
float timeout = rpc::kUnsetRpcTimeout,
bool blocking = true);
// Run the backward pass with the RRef as the root.
void backward(int64_t autogradContextId, bool retainGraph);
// Helper static function to run backward on a given rref.
static void backward(
int64_t autogradContextId,
bool retainGraph,
const c10::intrusive_ptr<RRef>& rref);
// Specialization of backward if the rref is an OwnerRRef.
static void backwardOwnerRRef(
int64_t autogradContextId,
bool retainGraph,
IValue value);
private:
c10::intrusive_ptr<RRef> rref_;
std::optional<c10::intrusive_ptr<JitFuture>> profilingFuture_;
std::optional<py::object> type_;
};
} // namespace torch::distributed::rpc
```
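A hedged sketch of wrapping a local Python object in an RRef; it assumes an RPC agent has already been initialized on this worker and uses py::none() as the type hint so the type is inferred from the value.
```cpp
#include <torch/csrc/distributed/rpc/py_rref.h>

using namespace torch::distributed::rpc;

py::object wrapAndReadBack(const py::object& value) {
  // The (value, type_hint) ctor must be called with the GIL held.
  py::gil_scoped_acquire guard;
  PyRRef rref(value, /*type_hint=*/py::none());
  // This worker owns the RRef it just created, so localValue() is immediate;
  // a remote peer would instead call toHere().
  return rref.localValue();
}
```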
|
==============================================================================================================================================
SOURCE CODE FILE: python_call.h
LINES: 1
SIZE: 0.82 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\python_call.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/types.h>
namespace torch::distributed::rpc {
// RPC call representing calling a Python function over RPC.
class TORCH_API PythonCall final : public RpcCommandBase {
public:
PythonCall(SerializedPyObj&& serializedPyObj, bool isAsyncExecution);
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::unique_ptr<PythonCall> fromMessage(const Message& message);
const SerializedPyObj& serializedPyObj() const;
inline bool isAsyncExecution() const {
return isAsyncExecution_;
}
private:
SerializedPyObj serializedPyObj_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const bool isAsyncExecution_;
};
} // namespace torch::distributed::rpc
```
|
===================================================================================================================================================
SOURCE CODE FILE: python_functions.h
LINES: 1
SIZE: 2.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\python_functions.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/py_rref.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
#include <torch/csrc/jit/python/pybind_utils.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::distributed::rpc {
// Converts an internal ivalue::Future of Message into a user-facing
// ivalue::Future of py::object type, by creating a new ivalue::Future and
// calling its markCompleted from a callback attached to the given
// ivalue::Future.
// If hasValue is true, the Message will be converted into a py::object and
// then wrapped in an IValue. If hasValue is false, this ivalue::Future is only
// used for signaling and launching callbacks. In this case, the message will
// be discarded and the ivalue::Future will be set using an empty IValue or the
// given FutureError if there is an error.
c10::intrusive_ptr<JitFuture> toPyJitFuture(
const c10::intrusive_ptr<JitFuture>& messageJitFuture,
bool hasValue = true);
c10::intrusive_ptr<JitFuture> pyRpcBuiltin(
const WorkerInfo& dst,
const std::string& opName,
const py::args& args,
const py::kwargs& kwargs,
const float rpcTimeoutSeconds);
c10::intrusive_ptr<JitFuture> pyRpcPythonUdf(
const WorkerInfo& dst,
std::string& pickledPythonUDF,
std::vector<torch::Tensor>& tensors,
const float rpcTimeoutSeconds,
const bool isAsyncExecution);
c10::intrusive_ptr<JitFuture> pyRpcTorchscript(
const std::string& dstWorkerName,
const std::string& qualifiedNameStr,
const py::tuple& argsTuple,
const py::dict& kwargsDict,
const float rpcTimeoutSeconds,
const bool isAsyncExecution);
PyRRef pyRemoteBuiltin(
const WorkerInfo& dst,
const std::string& opName,
const float rpcTimeoutSeconds,
const py::args& args,
const py::kwargs& kwargs);
PyRRef pyRemotePythonUdf(
const WorkerInfo& dst,
std::string& pickledPythonUDF,
std::vector<torch::Tensor>& tensors,
const float rpcTimeoutSeconds,
const bool isAsyncExecution);
PyRRef pyRemoteTorchscript(
const std::string& dstWorkerName,
const std::string& qualifiedNameStr,
const float rpcTimeoutSeconds,
const bool isAsyncExecution,
const py::args& args,
const py::kwargs& kwargs);
} // namespace torch::distributed::rpc
```
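A sketch of consuming the future returned by toPyJitFuture, assuming the caller already holds a Message future obtained from RpcAgent::send(); torch::jit::toPyObject (declared in the included pybind_utils.h) is used for the IValue-to-Python conversion.
```cpp
#include <torch/csrc/distributed/rpc/python_functions.h>

using namespace torch::distributed::rpc;

py::object waitForPyResult(
    const c10::intrusive_ptr<JitFuture>& messageFuture) {
  // hasValue=true: once the response Message arrives, it is converted into a
  // py::object wrapped in an IValue.
  auto pyFuture = toPyJitFuture(messageFuture, /*hasValue=*/true);
  pyFuture->waitAndThrow();
  py::gil_scoped_acquire guard;
  return torch::jit::toPyObject(pyFuture->value());
}
```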
|
=====================================================================================================================================================
SOURCE CODE FILE: python_remote_call.h
LINES: 1
SIZE: 1.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\python_remote_call.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/types.h>
#include <torch/csrc/jit/serialization/pickler.h>
namespace torch::distributed::rpc {
class TORCH_API PythonRemoteCall : public RpcCommandBase {
public:
PythonRemoteCall(
SerializedPyObj&& serializedPyObj,
at::IValue retRRefId,
at::IValue retForkId,
const bool isAsyncExecution);
inline const SerializedPyObj& serializedPyObj() const {
return serializedPyObj_;
}
inline const at::IValue& retRRefId() const {
return retRRefId_;
}
inline const at::IValue& retForkId() const {
return retForkId_;
}
inline bool isAsyncExecution() const {
return isAsyncExecution_;
}
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::unique_ptr<PythonRemoteCall> fromMessage(const Message& message);
private:
SerializedPyObj serializedPyObj_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const at::IValue retRRefId_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const at::IValue retForkId_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const bool isAsyncExecution_;
};
} // namespace torch::distributed::rpc
```
|
==============================================================================================================================================
SOURCE CODE FILE: python_resp.h
LINES: 1
SIZE: 0.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\python_resp.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/types.h>
namespace torch::distributed::rpc {
// RPC call representing the response of a Python UDF over RPC.
class TORCH_API PythonResp final : public RpcCommandBase {
public:
explicit PythonResp(SerializedPyObj&& serializedPyObj);
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::unique_ptr<PythonResp> fromMessage(const Message& message);
const SerializedPyObj& serializedPyObj() const;
private:
SerializedPyObj serializedPyObj_;
};
} // namespace torch::distributed::rpc
```
|
=====================================================================================================================================================
SOURCE CODE FILE: python_rpc_handler.h
LINES: 1
SIZE: 4.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\python_rpc_handler.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/types.h>
#include <torch/csrc/jit/frontend/script_type_parser.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::distributed::rpc {
// Singleton class that provides the interface to execute Python UDF remote
// calls and to deserialize the returned results by running Python functions
// in internal_rpc_utilities.
// The singleton object is first constructed when the RPC agent is
// constructed, at which point the Python functions in
// torch/distributed/internal_rpc_utils.py are imported only once.
class PYBIND11_EXPORT PythonRpcHandler {
public:
struct RRefProxyFunctions {
py::object rrefProxyCtor_;
py::object rpcSync_;
py::object rpcAsync_;
py::object remote_;
};
struct RRefTypeFunctions {
py::object onOwner_;
py::object onUser_;
};
static PythonRpcHandler& getInstance();
// Run a pickled Python UDF and return the result py::object
py::object runPythonUdf(const py::object& pythonUdf);
// Serialize a py::object into a string
SerializedPyObj serialize(const py::object& obj);
// Deserialize a string into a py::object
py::object deserialize(const SerializedPyObj& serializedObj);
// Check if obj is a RemoteException; if so, throw it
void handleException(const py::object& obj);
// Alternative if the caller is already holding the GIL.
void handleExceptionGILHeld(const py::object& obj);
// Check if obj is a RemoteException instance.
bool isRemoteException(const py::object& obj);
// Explicitly clean up py::objects to avoid segmentation faults when
// py::objects are cleaned up by CPython later at program exit.
// See similar issues reported https://github.com/pybind/pybind11/issues/1598
// and https://github.com/pybind/pybind11/issues/1493
// Our local tests also caught these segmentation faults if py::objects are
// cleaned up at program exit. The explanation is: CPython cleans up most
// critical utilities before cleaning up the PythonRpcHandler singleton, so
// when the PythonRpcHandler singleton cleans up its py::objects and calls
// dec_ref(), it will crash.
// The solution is to clean up py::objects earlier, when the RPC agent joins.
// Note that py::objects cannot be cleaned up when the RPC agent is destroyed
// either, as the RPC agent is a global variable and it will have the same
// issue as PythonRpcHandler.
void cleanup();
std::shared_ptr<torch::jit::CompilationUnit> jitCompilationUnit();
// Parse the string to recover the jit_type; this is used for type recovery
// when pickling/unpickling RRefs in Python. The type string inference rules
// are as follows:
// 1. First try to parse it as a primitive type,
//    i.e. TensorType, IntType, PyObjectType, etc.
// 2. If it is not a primitive type, query the python_cu to see whether it is
//    a class type or interface type registered in Python.
// We use a ScriptTypeParser instance with a custom PythonTypeResolver
// to resolve types according to the above rules.
TypePtr parseTypeFromStr(const std::string& typeStr);
// Return a set of Python functions for RRef helpers.
const RRefProxyFunctions& getRRefProxyFunctions() const;
// Return a set of Python functions to retrieve the type of the object
// referenced by a given RRef.
const RRefTypeFunctions& getRRefTypeFunctions() const;
PythonRpcHandler(const PythonRpcHandler&) = delete;
PythonRpcHandler& operator=(const PythonRpcHandler&) = delete;
PythonRpcHandler(PythonRpcHandler&&) = delete;
PythonRpcHandler& operator=(PythonRpcHandler&&) = delete;
private:
void init();
PythonRpcHandler();
~PythonRpcHandler() = default;
// Ref to `torch.distributed.rpc.internal._run_function`.
py::object pyRunFunction_;
// Ref to `torch.distributed.rpc.internal.serialize`.
py::object pySerialize_;
// Ref to `torch.distributed.rpc.internal.deserialize`.
py::object pyDeserialize_;
// Ref to 'torch.distributed.rpc.internal._handle_exception'
py::object pyHandleException_;
// Python functions for RRef proxy
RRefProxyFunctions rrefProxyFunctions_;
// Ref to 'torch.distributed.rpc.api._rref_typeof_on_'
RRefTypeFunctions rrefTypeFunctions_;
// Shared ptr to the Python compilation unit in jit; it is constructed on the
// Python side (see _python_cu = torch._C.CompilationUnit() in jit/__init__.py)
// and imported in C++ (see get_python_cu() in
// csrc/jit/python/pybind_utils.h). We import the compilation unit here only
// once, to reduce cost and for thread safety.
std::shared_ptr<torch::jit::CompilationUnit> jitCompilationUnit_;
// jit type parser to parse type_str back to TypePtr for RRef type
// recovery when pickling and unpickling RRef
std::shared_ptr<jit::ScriptTypeParser> typeParser_;
// Indicates whether or not we have properly initialized the handler.
bool initialized_;
// Lock to protect initialization.
std::mutex init_lock_;
};
} // namespace torch::distributed::rpc
```
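A minimal round-trip sketch through the serialize/deserialize helpers; it assumes the RPC module has been initialized so the internal Python helpers are importable, and that GIL acquisition is handled inside the handler.
```cpp
#include <torch/csrc/distributed/rpc/python_rpc_handler.h>

using namespace torch::distributed::rpc;

py::object roundTrip(const py::object& obj) {
  auto& handler = PythonRpcHandler::getInstance();
  // Pickle the object into a payload string plus a list of tensors.
  SerializedPyObj serialized = handler.serialize(obj);
  // Unpickle it back into a py::object.
  py::object result = handler.deserialize(serialized);
  // Raise if the deserialized object turns out to be a RemoteException.
  handler.handleException(result);
  return result;
}
```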
|
===================================================================================================================================================
SOURCE CODE FILE: request_callback.h
LINES: 1
SIZE: 1.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\request_callback.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
namespace torch::distributed::rpc {
// Functor which is invoked to process an RPC message. This is an abstract class
// with some common functionality across all request handlers. Users need to
// implement this interface to perform the actual business logic.
class TORCH_API RequestCallback {
public:
// Invoke the callback.
c10::intrusive_ptr<JitFuture> operator()(
Message& request,
std::vector<c10::Stream> streams) const;
virtual ~RequestCallback() = default;
protected:
// RpcAgent implementations should invoke ``RequestCallback`` to process
// received requests. There is no restriction on the implementation's
// threading model. This function takes a mutable reference to the Message
// object. It is expected to return a future of a response message or of a
// message containing an exception. Different RPC agent implementations are
// expected to ensure delivery of the response/exception based on their
// implementation-specific mechanisms.
virtual c10::intrusive_ptr<JitFuture> processMessage(
Message& request,
std::vector<c10::Stream> streams) const = 0;
};
} // namespace torch::distributed::rpc
```
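A sketch of the kind of subclass this interface enables: a hypothetical LoggingCallback that wraps another RequestCallback and delegates the actual work to it. The class name and the logging are illustrative only.
```cpp
#include <torch/csrc/distributed/rpc/request_callback.h>

#include <iostream>
#include <memory>
#include <utility>

namespace torch::distributed::rpc {

// Hypothetical wrapper: logs each request, then delegates to another callback.
class LoggingCallback : public RequestCallback {
 public:
  explicit LoggingCallback(std::unique_ptr<RequestCallback> inner)
      : inner_(std::move(inner)) {}

 protected:
  c10::intrusive_ptr<JitFuture> processMessage(
      Message& request,
      std::vector<c10::Stream> streams) const override {
    std::cerr << "handling message of type " << request.type() << '\n';
    // Delegate the actual business logic to the wrapped callback.
    return (*inner_)(request, std::move(streams));
  }

 private:
  std::unique_ptr<RequestCallback> inner_;
};

} // namespace torch::distributed::rpc
```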
|
========================================================================================================================================================
SOURCE CODE FILE: request_callback_impl.h
LINES: 1
SIZE: 2.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\request_callback_impl.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/request_callback_no_python.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/jit/python/pybind.h>
namespace torch::distributed::rpc {
class TORCH_API RequestCallbackImpl : public RequestCallbackNoPython {
public:
std::unique_ptr<RpcCommandBase> deserializePythonRpcCommand(
std::unique_ptr<RpcCommandBase> rpc,
const MessageType& messageType) const override;
c10::intrusive_ptr<JitFuture> processPythonCall(
RpcCommandBase& rpc,
const std::vector<c10::Stream>& streams) const override;
c10::intrusive_ptr<JitFuture> processScriptCall(
RpcCommandBase& rpc,
const std::vector<c10::Stream>& streams) const override;
c10::intrusive_ptr<JitFuture> processScriptRemoteCall(
RpcCommandBase& rpc,
const std::vector<c10::Stream>& streams) const override;
c10::intrusive_ptr<JitFuture> processPythonRemoteCall(
RpcCommandBase& rpc,
const std::vector<c10::Stream>& streams) const override;
c10::intrusive_ptr<JitFuture> processPythonRRefFetchCall(
RpcCommandBase& rpc) const override;
void handleRRefDelete(c10::intrusive_ptr<RRef>& rref) const override;
c10::intrusive_ptr<JitFuture> processRpcWithErrors(
RpcCommandBase& rpc,
const MessageType& messageType,
const std::vector<c10::Stream>& streams) const override;
bool cudaAvailable() const override;
c10::intrusive_ptr<JitFuture> processRRefBackward(
RpcCommandBase& rpc) const override;
// Helpers to run user-defined functions, operators and other computations.
c10::intrusive_ptr<JitFuture> runJitFunction(
const c10::QualifiedName& name,
std::vector<at::IValue>& stack,
const std::vector<c10::Stream>& streams,
bool isAsyncExecution) const;
c10::intrusive_ptr<JitFuture> runPythonFunction(
const py::object& function,
const std::vector<c10::Stream>& streams,
bool isAsyncExecution) const;
};
} // namespace torch::distributed::rpc
```
|
=============================================================================================================================================================
SOURCE CODE FILE: request_callback_no_python.h
LINES: 1
SIZE: 3.92 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\request_callback_no_python.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/request_callback.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/rref_impl.h>
#include <torch/csrc/distributed/rpc/script_call.h>
#include <torch/csrc/distributed/rpc/script_remote_call.h>
namespace torch::distributed::rpc {
// RequestCallback implementation with no Python dependencies.
class TORCH_API RequestCallbackNoPython : public RequestCallback {
public:
c10::intrusive_ptr<JitFuture> processMessage(
Message& request,
std::vector<c10::Stream> streams) const override;
protected:
virtual std::unique_ptr<RpcCommandBase> deserializePythonRpcCommand(
std::unique_ptr<RpcCommandBase> rpc,
const MessageType& messageType) const;
virtual c10::intrusive_ptr<JitFuture> processScriptCall(
RpcCommandBase& rpc,
const std::vector<c10::Stream>& streams) const;
virtual c10::intrusive_ptr<JitFuture> processPythonCall(
RpcCommandBase& rpc,
const std::vector<c10::Stream>& streams) const;
c10::intrusive_ptr<JitFuture> assignOwnerRRef(
const RRefId& rrefId,
const RRefId& forkId,
const c10::intrusive_ptr<JitFuture>& valueFuture) const;
virtual c10::intrusive_ptr<JitFuture> processScriptRemoteCall(
RpcCommandBase& rpc,
const std::vector<c10::Stream>& streams) const;
virtual c10::intrusive_ptr<JitFuture> processPythonRemoteCall(
RpcCommandBase& rpc,
const std::vector<c10::Stream>& streams) const;
c10::intrusive_ptr<JitFuture> retrieveOwnerRRef(const RRefId& rrefId) const;
c10::intrusive_ptr<JitFuture> processScriptRRefFetchCall(
RpcCommandBase& rpc) const;
virtual c10::intrusive_ptr<JitFuture> processPythonRRefFetchCall(
RpcCommandBase& rpc) const;
c10::intrusive_ptr<JitFuture> processRRefUserDelete(
RpcCommandBase& rpc) const;
c10::intrusive_ptr<JitFuture> processRRefChildAccept(
RpcCommandBase& rpc) const;
c10::intrusive_ptr<JitFuture> processRRefForkRequest(
RpcCommandBase& rpc) const;
c10::intrusive_ptr<JitFuture> processForwardAutogradReq(
RpcCommandBase& rpc,
const std::vector<c10::Stream>& streams) const;
c10::intrusive_ptr<JitFuture> processBackwardAutogradReq(
RpcCommandBase& rpc,
const std::vector<c10::Stream>& streams) const;
c10::intrusive_ptr<JitFuture> processCleanupAutogradContextReq(
RpcCommandBase& rpc) const;
c10::intrusive_ptr<JitFuture> processRunWithProfilingReq(
RpcCommandBase& rpc) const;
virtual void handleRRefDelete(c10::intrusive_ptr<RRef>& rref) const;
c10::intrusive_ptr<JitFuture> processRpc(
RpcCommandBase& rpc,
const MessageType& messageType,
const std::vector<c10::Stream>& streams) const;
virtual c10::intrusive_ptr<JitFuture> processRpcWithErrors(
RpcCommandBase& rpc,
const MessageType& messageType,
const std::vector<c10::Stream>& streams) const;
c10::intrusive_ptr<Message> handleError(
const std::exception& e,
const MessageType messageType,
int64_t messageId) const;
virtual bool cudaAvailable() const;
virtual c10::intrusive_ptr<JitFuture> processRRefBackward(
RpcCommandBase& rpc) const;
// Helpers to run user-defined functions, operators and other computations.
c10::intrusive_ptr<JitFuture> runJitOperator(
const jit::Operator& op,
std::vector<at::IValue>& stack,
const std::vector<c10::Stream>& streams) const;
// Helpers to convert various kinds of objects into already-completed futures.
c10::intrusive_ptr<JitFuture> asFuture(IValue value, TypePtr type) const;
c10::intrusive_ptr<JitFuture> asFuture(
c10::intrusive_ptr<Message> message) const;
c10::intrusive_ptr<JitFuture> asFuture(std::exception_ptr err) const;
};
} // namespace torch::distributed::rpc
```
|
======================================================================================================================================
SOURCE CODE FILE: rpc.h
LINES: 1
SIZE: 0.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\rpc.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/python_headers.h>
namespace torch::distributed::rpc {
PyMethodDef* python_functions();
} // namespace torch::distributed::rpc
```
|
============================================================================================================================================
SOURCE CODE FILE: rpc_agent.h
LINES: 1
SIZE: 13.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\rpc_agent.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/request_callback.h>
#include <torch/csrc/distributed/rpc/types.h>
#include <cctype>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>
namespace torch::distributed::rpc {
using DeviceMap = std::unordered_map<c10::Device, c10::Device>;
// Default RPC timeout
constexpr float kDefaultRpcTimeoutSeconds = 60;
// Unset RPC timeout. This is the value agent::send() will have if user does not
// pass in a specific timeout, and indicates that we must use the default
// timeout for RPCs.
constexpr float kUnsetRpcTimeout = -1;
constexpr auto kDefaultInitMethod = "env://";
constexpr float kSecToMsConversion = 1000;
constexpr auto kRpcTimeoutErrorStr =
"RPC ran for more than set timeout ({} ms) and will now be marked with an error";
using steady_clock_time_point =
std::chrono::time_point<std::chrono::steady_clock>;
// Input is qualified name string, output is JIT StrongTypePtr
// Same as jit::TypeResolver, did not import jit::TypeResolver to here
// because it could introduce cyclic dependencies.
using TypeResolver =
std::function<c10::StrongTypePtr(const c10::QualifiedName&)>;
struct TORCH_API RpcBackendOptions {
RpcBackendOptions()
: RpcBackendOptions(kDefaultRpcTimeoutSeconds, kDefaultInitMethod) {}
RpcBackendOptions(float rpcTimeoutSeconds, std::string initMethod)
: rpcTimeoutSeconds(rpcTimeoutSeconds),
initMethod(std::move(initMethod)) {
TORCH_CHECK(rpcTimeoutSeconds >= 0, "RPC Timeout must be non-negative");
}
float rpcTimeoutSeconds;
std::string initMethod;
};
// A globally unique ID to identify an RpcAgent
struct TORCH_API WorkerInfo : torch::CustomClassHolder {
WorkerInfo(std::string name, int64_t id);
WorkerInfo(std::string name, worker_id_t id);
bool operator==(const WorkerInfo& rhs) {
return (id_ == rhs.id_) && (name_ == rhs.name_);
}
static constexpr size_t MAX_NAME_LEN = 128;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::string name_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const worker_id_t id_;
};
struct TORCH_API RegisterWorkerInfoOnce {
RegisterWorkerInfoOnce();
};
TORCH_API std::ostream& operator<<(
std::ostream& os,
const WorkerInfo& workerInfo);
// Struct for options to configure the RPC Retry protocol.
struct TORCH_API RpcRetryOptions {
// Using a default constructor like all other Options structs in the RPC
// codebase. TORCH_CHECKs for input validation are done in the
// sendWithRetries function.
RpcRetryOptions() = default;
// Maximum number of times we will retry the RPC
int maxRetries{5};
// Initial duration between consecutive RPC send attempts
std::chrono::milliseconds rpcRetryDuration{std::chrono::milliseconds(1000)};
// Constant for exponential backoff used while calculating future wait
// durations
float retryBackoff{1.5};
};
// Struct that stores all the metadata needed to retry a given RPC.
struct TORCH_API RpcRetryInfo {
RpcRetryInfo(
const WorkerInfo& to,
c10::intrusive_ptr<Message> message,
c10::intrusive_ptr<JitFuture> originalFuture,
int retryCount,
RpcRetryOptions options)
: to_(to),
message_(std::move(message)),
originalFuture_(std::move(originalFuture)),
retryCount_(retryCount),
options_(options) {}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const WorkerInfo& to_;
c10::intrusive_ptr<Message> message_;
// Future that is returned to the caller of sendWithRetries().
c10::intrusive_ptr<JitFuture> originalFuture_;
// Number of send attempts completed so far.
int retryCount_;
RpcRetryOptions options_;
};
// ``RpcAgent`` is the base class for sending and receiving RPC messages. It
// provides a unified ``send`` API for both request and response messages, and
// will invoke the given ``RequestCallback`` to process received requests. It
// should immediately become ready to serve request and accept response after
// construction.
class TORCH_API RpcAgent {
public:
// `WorkerInfo` is the globally unique identifier for this RpcAgent instance.
// It contains a ``name_`` field and an ``id_`` field. ``name_`` is the
// globally unique name for this ``RpcAgent``. It is up to the ``RpcAgent``
// implementation to determine how to resolve names. ``id_`` is the globally
// unique ID for this ``RpcAgent``. This should be determined by the
// ``RpcAgent`` implementation.
// The ``RequestCallback`` will be invoked to handle received requests. This
// ``RpcAgent`` base class makes no assumptions about the thread-safety of the
// ``RequestCallback``. ``RpcAgent`` implementations need to make sure that
// their threading model conforms to the ``RequestCallback``'s requirements.
// NB: RpcAgent implementations should not start serving requests until
// ``start()`` is called, as there could be other contexts that have not been
// initialized yet at this time.
RpcAgent(
WorkerInfo id,
std::unique_ptr<RequestCallback> cb,
std::chrono::milliseconds rpcTimeout);
virtual ~RpcAgent();
// Send a message to the ``RpcAgent`` of id ``to`` and returns a
// ``JitFuture`` ptr. The implementation must be asynchronous, i.e., it
// cannot block until it receives the response.
//
// If ``message.isRequest()`` is true, the ``JitFuture`` will be
// completed when the response arrives. For other message types, the Future
// should be ignored by the caller.
virtual c10::intrusive_ptr<JitFuture> send(
const WorkerInfo& to,
c10::intrusive_ptr<Message> message,
const float rpcTimeoutSeconds = kUnsetRpcTimeout,
const DeviceMap& deviceMap = {}) = 0;
// Retries sending the message up to maxRetries times until an ACK is
// received. The duration between consecutive sends is increased over
// time using an exponential backoff algorithm.
//
// Sends ``message`` to the ``RpcAgent`` of id ``to`` and returns a
// ``JitFuture`` ptr, just like send(). Caller can specify the maximum
// number of retries for this RPC (default is 5), initial duration between
// sends (default is 1000ms), and backoff constant (default is 1.5) by
// passing in the RpcRetryOptions struct. This API might end up
// executing a method twice on the remote end (it does not guarantee
// exactly-once semantics). Therefore, the user must ensure their requests
// are idempotent.
c10::intrusive_ptr<JitFuture> sendWithRetries(
const WorkerInfo& to,
c10::intrusive_ptr<Message> message,
RpcRetryOptions retryOptions = RpcRetryOptions());
// Return a reference to the ``WorkerInfo`` of this RpcAgent.
// NB: not using ``std::optional<const std::string&>`` here because we might
// need to create a separate RPC API lib and avoid forcing all ``RpcAgent``
// implementations to depend on libtorch.
const WorkerInfo& getWorkerInfo() const;
// Return a reference to the ``WorkerInfo`` of the given ``workerName``.
virtual const WorkerInfo& getWorkerInfo(
const std::string& workerName) const = 0;
virtual const WorkerInfo& getWorkerInfo(worker_id_t id) const = 0;
virtual std::vector<WorkerInfo> getWorkerInfos() const = 0;
// Retrieve the timeout for all RPCs.
inline std::chrono::milliseconds getRpcTimeout() const {
return rpcTimeout_.load();
}
// Set the timeout for all RPCs
inline void setRpcTimeout(const std::chrono::milliseconds& rpcTimeout) {
rpcTimeout_.store(rpcTimeout);
}
// Call sync and join all internal threads. This method should be called
// before every RPC process exits.
virtual void join(bool shutdown = false, float timeout = 0) = 0;
// Synchronize this process with other ``RpcAgent`` processes. Block until
// all ``RpcAgent``s reach this method and send all pending messages.
virtual void sync() = 0;
// Sets up backend-agnostic state for accepting requests. Currently, this
// entails setting rpcAgentRunning_ to true, creating the retry thread, and
// calling the backend's startImpl.
void start();
// Derived classes must override this function to start accepting requests.
// This is used to initialize any backend-specific state. Users must call
// start, not startImpl, to initialize the RPC Agent.
virtual void startImpl() = 0;
// Stop accepting requests and shutdown the RPC framework as soon as possible
// by terminating all RPC threads.
void shutdown();
// Derived classes must override this function to stop accepting requests.
// This is used to clean up any backend-specific state. Users must call
// shutdown, not shutdownImpl, to shut down the RPC Agent.
virtual void shutdownImpl() = 0;
// Check if current RPC agent is set.
static bool isCurrentRpcAgentSet();
// Retrieve the valid current RPC agent.
static std::shared_ptr<RpcAgent> getCurrentRpcAgent();
// Set the current RPC agent.
static void setCurrentRpcAgent(std::shared_ptr<RpcAgent> rpcAgent);
// Retrieve metrics as KV map
virtual std::unordered_map<std::string, std::string> getMetrics() = 0;
// Retrieve debug info in addition to metrics as KV map
virtual std::unordered_map<std::string, std::string> getDebugInfo();
// Flag to control whether GIL wait times
// should be profiled or not.
void enableGILProfiling(bool flag);
// Retrieve whether we should profile GIL wait times or not.
bool isGILProfilingEnabled();
// Set the type resolver that will be passed to the JIT pickler to resolve a
// TypePtr based on the type str.
void setTypeResolver(std::shared_ptr<TypeResolver> typeResolver);
// Get the type resolver
std::shared_ptr<TypeResolver> getTypeResolver();
// Retrieves the device map for the provided destination worker.
virtual DeviceMap getDeviceMap(const WorkerInfo& dst) const;
// Retrieve the (non-CPU) devices that are supported by the agent.
virtual const std::vector<c10::Device>& getDevices() const;
protected:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
const WorkerInfo workerInfo_;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
const std::unique_ptr<RequestCallback> cb_;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::atomic<std::chrono::milliseconds> rpcTimeout_;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::atomic<bool> profilingEnabled_;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::shared_ptr<TypeResolver> typeResolver_;
// Atomic boolean indicating whether this agent is running. It controls
// whether several background threads should be running. It is set in
// RpcAgent::start() and unset in the derived class shutdown().
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::atomic<bool> rpcAgentRunning_;
private:
static std::shared_ptr<RpcAgent> currentRpcAgent_;
// Add GIL wait time data point to metrics
virtual void addGilWaitTime(const std::chrono::microseconds gilWaitTime) = 0;
friend class PythonRpcHandler;
// Map that stores metadata for RPCs that may need to be retried, as well as
// the timepoint at which we should retry them.
std::map<
steady_clock_time_point,
std::unordered_set<std::shared_ptr<RpcRetryInfo>>>
rpcRetryMap_;
// Thread that checks for retryable RPCs in the rpcRetryMap_ and sleeps until
// the next unACKed RPC's timeout has expired.
std::thread rpcRetryThread_;
// Function that rpcRetryThread_ calls in a loop as long as RpcAgent is
// running.
void retryExpiredRpcs();
// This is the callback attached to futures corresponding to send retries.
// This handles 3 cases: 1). send was completed, 2). send failed with an
// error and we've done maxRetries failed send attempts, and 3). send
// failed with an error and we have more retries to go. In case 1, we mark
// the original future as complete. In case 2, we mark the future with an
// error and do not retry again. In case 3, we move the RpcRetryInfo struct
// to another time point in the map to schedule the RPC for a future send.
void rpcRetryCallback(
JitFuture& message,
steady_clock_time_point newTime,
std::shared_ptr<RpcRetryInfo> earliestRpc);
// Function that uses the exponential backoff algorithm to compute the next
// time point to retry a given RPC.
inline steady_clock_time_point computeNewRpcRetryTime(
RpcRetryOptions& options,
int retryCount) {
// The exponential backoff algorithm being used here is:
// newTime = timeNow + (retryDuration * (backoffConstant ^ retryCount)).
std::chrono::milliseconds timedelta =
std::chrono::duration_cast<std::chrono::milliseconds>(
options.rpcRetryDuration * pow(options.retryBackoff, retryCount));
return std::chrono::time_point_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() + timedelta);
}
// Condition Variable to signal when the rpcRetryMap_ has been populated.
std::condition_variable rpcRetryMapCV_;
// Mutex to protect RpcRetryMap_.
std::mutex rpcRetryMutex_;
};
} // namespace torch::distributed::rpc
namespace std {
template <>
struct hash<torch::distributed::rpc::WorkerInfo> {
std::size_t operator()(
const torch::distributed::rpc::WorkerInfo& worker_info) const noexcept {
return worker_info.id_;
}
};
} // namespace std
```
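A sketch of using the retry protocol described above; the destination name "worker1", the option values, and the origin of the message are assumptions. With these options the delays between attempts grow roughly as 500 ms, 1 s, 2 s, following newTime = now + retryDuration * backoff^retryCount.
```cpp
#include <torch/csrc/distributed/rpc/rpc_agent.h>

using namespace torch::distributed::rpc;

c10::intrusive_ptr<JitFuture> sendWithBackoff(
    c10::intrusive_ptr<Message> message) {
  auto agent = RpcAgent::getCurrentRpcAgent();
  const WorkerInfo& dst = agent->getWorkerInfo("worker1"); // illustrative name

  RpcRetryOptions options;
  options.maxRetries = 3;                                    // at most 3 retries
  options.rpcRetryDuration = std::chrono::milliseconds(500); // first retry delay
  options.retryBackoff = 2.0f;                               // 500ms, 1s, 2s, ...

  // The callee may observe the request more than once, so it must be
  // idempotent.
  return agent->sendWithRetries(dst, std::move(message), options);
}
```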
|
===================================================================================================================================================
SOURCE CODE FILE: rpc_command_base.h
LINES: 1
SIZE: 0.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\rpc_command_base.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/types.h>
namespace torch::distributed::rpc {
// Base class for all RPC request and responses.
class RpcCommandBase {
public:
// Need to override this to serialize the RPC. This should destructively
// create a message for the RPC (Hence the &&).
c10::intrusive_ptr<Message> toMessage() && {
JitRRefPickleGuard jitPickleGuard;
return std::move(*this).toMessageImpl();
}
virtual c10::intrusive_ptr<Message> toMessageImpl() && = 0;
virtual ~RpcCommandBase() = 0;
};
inline RpcCommandBase::~RpcCommandBase() = default;
} // namespace torch::distributed::rpc
```
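A small sketch of the destructive toMessage() pattern; PythonResp is just one concrete RpcCommandBase chosen for illustration.
```cpp
#include <torch/csrc/distributed/rpc/python_resp.h>

using namespace torch::distributed::rpc;

c10::intrusive_ptr<Message> respToMessage(SerializedPyObj&& obj) {
  PythonResp resp(std::move(obj));
  // toMessage() is &&-qualified: serialization may steal the command's
  // buffers, so `resp` must not be used after this call.
  return std::move(resp).toMessage();
}
```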
|
===============================================================================================================================================
SOURCE CODE FILE: rref_context.h
LINES: 1
SIZE: 15.73 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\rref_context.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
#include <torch/csrc/distributed/rpc/rref_impl.h>
#include <torch/csrc/distributed/rpc/types.h>
#include <torch/csrc/distributed/rpc/utils.h>
#include <atomic>
#include <optional>
namespace torch::distributed::rpc {
namespace callback {
// It's the callback for RemoteCall.
void TORCH_API
confirmPendingUser(const JitFuture& jitFuture, const ForkId& expectedForkId);
// It's the callback for finishing the creation of an owner RRef; it returns the
// deletedRRef, so that the deletedRRef can be handled under the GIL in
// python_functions.cpp if it contains a Python object.
c10::intrusive_ptr<RRef> TORCH_API
finishCreatingOwnerRRef(const JitFuture& jitFuture, const RRefId& rrefId);
} // namespace callback
// Manages RRef lifetime and keeps track of RRef forks.
class TORCH_API RRefContext {
public:
static RRefContext& getInstance();
// NB: This method must be called before destructing the RRefContext singleton.
// Similar to delForkOfOwner, this method returns a vector of OwnerRRefs that
// hold py::objects. The call site is also responsible for resetting those
// shared_ptr objects with the GIL held. See comments at delForkOfOwner() for
// more details.
static std::vector<c10::intrusive_ptr<RRef>> destroyInstance(
bool ignoreRRefLeak = true);
static void handleException(const JitFuture& jitFuture);
// handle exception without throw ::c10::Error again
static void handleExceptionSilent(const JitFuture& jitFuture);
RRefContext(const RRefContext&) = delete;
RRefContext(RRefContext&& other) = delete;
void operator=(const RRefContext&) = delete;
RRefContext& operator=(RRefContext&& other) = delete;
~RRefContext();
// get the worker id of the current worker
inline worker_id_t getWorkerId() const {
return agent_->getWorkerInfo().id_;
}
// get the worker name of the current worker
inline const std::string& getWorkerName() const {
return agent_->getWorkerInfo().name_;
}
// generate a globally unique ID
inline GloballyUniqueId genGloballyUniqueId() {
return GloballyUniqueId(getWorkerId(), nextLocalId_++);
}
inline const std::shared_ptr<RpcAgent>& agent() const {
return agent_;
}
// create a ``UserRRef`` owned by the worker ``ownerId``
c10::intrusive_ptr<UserRRef> createUserRRef(
worker_id_t ownerId,
const TypePtr& type);
// Convert an RRefForkData into an RRef. This RRef could be user or owner.
// This RRef could have already existed before, or could be created in this
// method, we pass type here to validate or help the rref creation.
c10::intrusive_ptr<RRef> getOrCreateRRef(
const RRefForkData& rfd,
const TypePtr& type);
// Get the ``OwnerRRef`` of id ``rrefId``. If it does not exist, create a new
// one. This function is called in two places:
// 1. when processing ``rpc.remote()``, i.e., ``SCRIPT_REMOTE_CALL`` or
// ``PYTHON_REMOTE_CALL``.
// 2. when unpickling ``OwnerRRef``.
// What's common in these two cases is that 1) the RRefId has already been
// generated and 2) the TypePtr is present. So it can always create the
// ``OwnerRRef`` if it is not yet available.
c10::intrusive_ptr<OwnerRRef> getOrCreateOwnerRRef(
const RRefId& rrefId,
const TypePtr& type);
// Create an empty owner rref of type.
// This method is called when generating an ``OwnerRRef`` for the first time,
// e.g.,
// 1) ``rpc.RRef(obj)``
// 2) creating the ``OwnerRRef`` on the `rpc.remote()` caller side.
// What's common in these two cases is that 1) the RRefId hasn't been generated
// yet and 2) the TypePtr is present.
c10::intrusive_ptr<OwnerRRef> createOwnerRRef(const TypePtr& type);
// Returns a Future of the OwnerRRef, which will be marked completed when
// ``OwnerRRef`` is created. This method is used when the TypePtr is not
// available, e.g., when processing to_here(). The forceCreated flag can be
// used to ensure that the rref is created on the owner; otherwise this throws
// in cases where the user of this API expects this to return a completed
// future. Note that the return value is an intrusive_ptr to a
// c10::ivalue::Future that holds the RRef.
c10::intrusive_ptr<JitFuture> getOwnerRRef(
const RRefId& rrefId,
bool forceCreated = false);
// Adding the RRefId of an OwnerRRef into the forks_ map. This is useful when
// making a remote call to self, which, as of now, still goes through serde
// and invokes the request callback. In this case, the OwnerRRef has already been
// created on the send side, and we need to pass it to the receive side,
// instead of creating a new OwnerRRef. This is done by adding the OwnerRRef
// into owners_. However, that alone is not enough, as it could be deleted
// when all UserRRefs die, which would then remove the OwnerRRef from owners_
// and this could happen before the self remote call finishes. To prevent
// that, this API adds the RRefId as a ForkId, which will then delete the
// ForkId when the self remote is done.
void addSelfAsFork(c10::intrusive_ptr<OwnerRRef>& rref);
// Register a fork of the ``OwnerRRef``, and insert an intrusive_ptr of the
// ``OwnerRRef`` into a map to keep it alive.
void addForkOfOwner(const RRefId& rrefId, const ForkId& forkId);
// Performs the same function as addForkOfOwner but ignores duplicate
// requests. This idempotent function is used with RREF_FORK_REQUEST calls,
// whereas all other message types use the non-idempotent variant.
void addForkOfOwnerIfNotPresent(const RRefId& rrefId, const ForkId& forkId);
// Delete a fork of the ``OwnerRRef``. NB: this could trigger deletion of the
// IValue or py::object. For the latter, this method will acquire the GIL.
// NB: If this fork deletion triggers deleting the OwnerRRef, this method will
// return a shared_ptr to the OwnerRRef, which is likely to be the last
// shared_ptr instance for it. Therefore, deleting this shared_ptr<OwnerRRef>
// will also trigger deleting the object it points to. If the OwnerRRef holds a
// py::object, deleting it requires the GIL. The call site should guard it with
// the GIL and reset the shared_ptr. The GIL-guarded deletion is intentionally
// left out of this function to avoid creating dependency on pybind.
c10::intrusive_ptr<RRef> delForkOfOwner(
const RRefId& rrefId,
const ForkId& forkId);
// Invoked when pickling an RRef to setup child/fork properly
RRefForkData prepareChildFork(const c10::intrusive_ptr<RRef>& rref);
// Invoked when unpickling an RRef to send RREF_FORK_REQUEST to owner and
// send RREF_CHILD_ACCEPT to the parent.
// NB: forkId is necessary here as the rref could be an OwnerRRef
void notifyOwnerAndParentOfFork(
const ForkId& forkId,
worker_id_t parent,
const c10::intrusive_ptr<RRef>& rref);
// When a UserRRef is forked to another worker (user or owner), it is added
// into pendingChildren_ to be held alive until it receives RREF_CHILD_ACCEPT
// from the child.
// NB: This is necessary for both user and owner child. As we do not have FIFO
// communication between workers, we need this strategy to make sure that all
// previously submitted rpc/remote calls are acked before sending out the
// RREF_USER_DELETE message. Otherwise, the OwnerRRef could be deleted too
// soon.
void addPendingChild(
const ForkId& forkId,
const c10::intrusive_ptr<RRef>& rref);
void delPendingChild(const ForkId& forkId);
// When a UserRRef is created, it is added into pendingUsers_ to be held alive
// until it receives RREF_USER_ACCEPT from the owner.
void addPendingUser(
const ForkId& forkId,
const c10::intrusive_ptr<RRef>& rref);
void delPendingUser(const ForkId& forkId);
void addConfirmedUser(
const ForkId& forkId,
const c10::intrusive_ptr<RRef>& rref);
// Retrieve a pending user given the fork ID. Throws if the user has already
// been confirmed (i.e. is no longer in the pendingUsers_ map).
c10::intrusive_ptr<RRef> getPendingUser(const ForkId& forkId);
// Start recording new pending UserRRefs. All pending UserRRefs introduced
// after this point will be put into the thread_local userTable_, which will
// then be consumed and cleared in waitForThreadLocalPendingRRefs().
void recordThreadLocalPendingRRefs();
// End recording new pending UserRRefs, and clear the thread_local userTable_.
// Returns a Future which will be marked as completed when all pending
// UserRRefs in the current userTable_ are confirmed by their owners. The bool
// value in the Future is unused.
// This method is useful to make sure RRefs in user function arguments are
// confirmed before launching user code.
// NB: Callers of this method do not need to keep the returned Future alive,
// because this Future is already captured in callbacks of the
// PendingUserState. If there is no pending UserRRefs, this method returns a
// completed future.
c10::intrusive_ptr<JitFuture> waitForThreadLocalPendingRRefs();
// Only call this function when there are errors during a recording session,
// and it is likely that waitForThreadLocalPendingRRefs() cannot be invoked
// properly.
// TODO: make this a context guard
void clearRecordedPendingRRefsOnError();
void delUser(
const worker_id_t owner,
const RRefId& rrefId,
const ForkId& forkId);
void delAllUsersAndUnforkedOwners(std::chrono::milliseconds timeoutMillis);
std::unordered_map<std::string, std::string> getDebugInfo();
private:
struct PendingUserState {
PendingUserState(c10::intrusive_ptr<RRef> rref)
: rref_(std::move(rref)),
confirmationFuture_(c10::make_intrusive<JitFuture>(BoolType::get())) {
}
inline void confirm() {
c10::static_intrusive_pointer_cast<UserRRef>(rref_)->confirm();
confirmationFuture_->markCompleted();
}
c10::intrusive_ptr<RRef> rref_;
// Use Future.wait() and Future.markCompleted() to block and unblock user
// functions. The bool value wrapped by the future_ is not used.
c10::intrusive_ptr<JitFuture> confirmationFuture_;
};
RRefContext(std::shared_ptr<RpcAgent>);
c10::intrusive_ptr<UserRRef> createUserRRef(
worker_id_t ownerId,
const RRefId& rrefId,
const ForkId& forkId,
const TypePtr& type);
void finishForkRequest(const ForkId& forkId, worker_id_t parent);
// If there is any leak on any RRef, this method will throw an error.
void checkRRefLeaks(bool ignoreRRefLeak);
static std::atomic<local_id_t> nextLocalId_;
const std::shared_ptr<RpcAgent> agent_;
mutable std::mutex mutex_;
// Keep OwnerRRefs alive until there is no living UserRRefs.
std::unordered_map<RRefId, c10::intrusive_ptr<RRef>, RRefId::Hash> owners_;
// A map to track OwnerRRefs that are requested but not yet created. This can
// happen if the to_here() message is processed on the owner before the
// corresponding creator rpc.remote() message. If this happens, instead of
// having the to_here() RPC thread block waiting for the OwnerRRef creation, the
// RRefContext returns a Future, so that the RPC request processing logic can
// attach subsequent code as a callback to that Future.
// NB: the OwnerRRefs in this map must be cleared when the corresponding
// OwnerRRef is created. Note that the values in this map are intrusive_ptrs
// to c10::ivalue::Future that will be marked completed with the owner RRef.
std::unordered_map<RRefId, c10::intrusive_ptr<JitFuture>, RRefId::Hash>
pendingOwners_;
// Tracks known living UserRRefs of an OwnerRRef
std::unordered_map<
RRefId,
std::unordered_set<ForkId, ForkId::Hash>,
RRefId::Hash>
forks_;
// This cond var is used by deleteAllUsers(); an event notification is sent if
// the number of pending UserRRefs or UserRRef children is reduced, or the
// number of owned OwnerRRefs is reduced.
std::condition_variable deleteAllUsersCV_;
// The following 3 maps keep UserRRefs alive by holding an intrusive_ptr to the
// RRef instances. A UserRRef must be added into one of these maps if any of
// the following two conditions is true:
//
// (1) A UserRRef has not been accepted by owner yet.
//
// It can be used or shared, but cannot be deleted, and hence kept alive
// in this map. A message of type RREF_USER_ACCEPT will move the
// corresponding RRef from pendingUsers_ map to confirmedUsers_ map.
std::unordered_map<ForkId, std::shared_ptr<PendingUserState>, ForkId::Hash>
pendingUsers_;
// UserRRefs are added into this map when they are confirmed by the owner.
// When destroying RRefContext this map helps to find local UserRRefs
// and send delete messages if they are still not deleted by Python
// garbage collection.
std::unordered_map<ForkId, c10::weak_intrusive_ptr<RRef>, ForkId::Hash>
confirmedUsers_;
// (2) A UserRRef has forked a child UserRRef which has not been accepted by
// the owner yet.
//
// In this case, this UserRRef cannot send out the RREF_USER_DELETE message,
// as it could potentially trigger the OwnerRRef being deleted before the
// owner learns about the forked child.
std::unordered_map<ForkId, c10::intrusive_ptr<RRef>, ForkId::Hash>
pendingChildren_;
// The RRef context performs its operations through async RPC requests, in
// order to not block the user code. Therefore the RRef context's state may be
// lagging a bit behind what it is intended to be, while it waits for these
// requests to complete. To allow syncing when needed, we store the count of
// these pending requests, so that users can wait for it to reach zero.
std::atomic<int64_t> numPendingFutures_{0};
std::mutex destroyedMutex_;
bool destroyed_{false};
// Thread local states to keep UserRRefs deserialized from user function
// arguments.
static thread_local std::vector<std::shared_ptr<PendingUserState>> userTable_;
// A flag indicating whether subsequently created UserRRefs should be added to
// the thread_local userTable_. The flag is set to true before deserializing
// RPC arguments and then set to false before running the corresponding
// user code. See addPendingUser and delPendingUser for more details.
// NB: The reason for having this flag is that addPendingUser is called in
// two cases, and we only want to track the 2nd case.
// (1) RRef as the return value: when calling rpc.remote, the UserRRef on the
// caller side is added to the context using addPendingUser.
// (2) RRef as an argument: When running an RPC using RRefs as arguments, the
// RRef is forwarded to the callee as new UserRRefs (if the callee is not
// the owner). In this case, we block running the user function until all
// UserRRefs are confirmed by the owner.
// This contract guarantees that no UserRRefs can be used remotely without
// confirmation. Note that, however, the UserRRef created by rpc.remote can
// still be passed to local functions as arguments and used there. This is by
// design, because this feature is especially useful when, say, a master node
// creates multiple UserRRefs in a loop and then shares them with other nodes.
// Blocking every iteration in the loop until RRefs are confirmed will slow
// this down. This nuance on UserRRefs can be interpreted as: we only make an
// exception for UserRRef creators. And using the UserRRef on its creator
// without confirmation is OK, because the creator would either call to_here
// or forward the UserRRef, and both would then require confirmations from the
// owner.
static thread_local bool recording_;
};
} // namespace torch::distributed::rpc
```
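The `recording_` flag and `userTable_` above follow a common thread-local toggle pattern: recording is switched on while RPC arguments are being deserialized, so that UserRRefs created from those arguments get tracked, and switched off before the user function runs. Below is a minimal, self-contained C++ sketch of that pattern only; `FakePendingUser`, `userTable`, and `addPendingUser` are illustrative stand-ins, not the real PyTorch types.

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Stand-in for PendingUserState: the real one wraps an RRef plus a
// confirmation future.
struct FakePendingUser {
  std::string forkId;
};

// Thread-local table and recording flag, mirroring userTable_ / recording_.
thread_local std::vector<std::shared_ptr<FakePendingUser>> userTable;
thread_local bool recording = false;

// Called whenever a UserRRef shows up while processing RPC arguments; it is
// only tracked when recording is enabled.
void addPendingUser(const std::string& forkId) {
  if (recording) {
    userTable.push_back(std::make_shared<FakePendingUser>(FakePendingUser{forkId}));
  }
}

int main() {
  recording = true;          // enabled while RPC arguments are deserialized
  addPendingUser("fork-1");  // UserRRefs found in the arguments get tracked
  addPendingUser("fork-2");
  recording = false;         // disabled before running the user function

  addPendingUser("fork-3");  // created by user code itself: not tracked

  std::cout << "tracked pending users: " << userTable.size() << '\n';  // prints 2
  return 0;
}
```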
|
============================================================================================================================================
SOURCE CODE FILE: rref_impl.h
LINES: 1
SIZE: 16.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\rref_impl.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/jit_type.h>
#include <ATen/core/rref_interface.h>
#include <c10/core/Event.h>
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
#include <torch/csrc/distributed/rpc/types.h>
#include <optional>
#include <atomic>
namespace torch::distributed::rpc {
class RRef;
class RRefContext;
class UserRRef;
constexpr int OWNER_IDX = 0; // index of ownerId in the tuple
constexpr int RREFID_ON_IDX = 1; // index of RRefId.createdOn_ in the tuple
constexpr int RREFID_ID_IDX = 2; // index of RRefId.localId_ in the tuple
constexpr int FORKID_ON_IDX = 3; // index of ForkId.createdOn_ in the tuple
constexpr int FORKID_ID_IDX = 4; // index of ForkId.localId_ in the tuple
constexpr int PARENT_IDX = 5; // index of parent in the tuple
constexpr int TYPE_IDX = 6; // index of typeStr in the tuple
// NB: if more fields are added, make sure RFD_TUPLE_SIZE below is also bumped
constexpr int RFD_TUPLE_SIZE = 7; // number of RRefForkData fields in py::tuple
// Represents fork of an RRef to be sent over the wire.
struct TORCH_API RRefForkData {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const worker_id_t ownerId_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const RRefId rrefId_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const ForkId forkId_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const worker_id_t parent_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::string typeStr_;
RRefForkData(
worker_id_t ownerId,
const RRefId& rrefId,
const ForkId& forkId,
worker_id_t parent,
std::string typeStr);
};
// Note [RRef Protocol]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// [Background]
//
// RRef stands for Remote REFerence. Each RRef is owned by a single worker
// (i.e., owner) and can be used by multiple users. The owner stores the real
// data referenced by its RRefs. RRef needs to support fast and scalable RPC.
// Hence, in the design, we avoid using a single global master to keep RRef
// states, instead owners will keep track of the global reference counts
// for its RRefs. Every RRef can be uniquely identified by a global RRefId,
// which is assigned at the time it is first created either on a user or on the
// owner.
//
// On the owner worker, there is only one OwnerRRef instance, which contains the
// real data, while on user workers, there can be as many UserRRefs as
// necessary, and UserRRef does not hold the data. All usage on the OwnerRRef
// should retrieve the unique OwnerRRef instance using the globally unique
// RRefId. A UserRRef will be created when it is used as an argument or return
// value in dist.rpc or dist.remote call, but RRef forking and reference
// counting (RC) are completely transparent to applications. Every UserRRef will
// also have its globally unique ForkId.
//
// [Assumptions]
//
// 1. Transient Network Failures
//
// TODO: current RRef implementation does not tolerate failures
//
// The RRef design handles transient network failures by retrying
// messages. Node crashes or permanent network partitions are beyond the scope.
// When those incidents occur, the application may take down all workers, revert
// to the previous checkpoint, and resume training.
//
// 2. Non-idempotent UDFs
//
// We assume UDFs are not idempotent and therefore cannot be retried. However,
// internal RRef control messages are idempotent and retried upon message
// failure.
//
// TODO: RRef internal messages are not yet idempotent
//
// 3. Out of Order Message Delivery
//
// We do not assume message delivery order between any pair of nodes, because
// both sender and receiver are using multiple threads. There is no guarantee on
// which message will be processed first.
//
// [RRef Lifetime]
//
// The goal of the protocol is to delete an OwnerRRef at an appropriate time.
// The right time to delete an OwnerRRef is when there are no living UserRRefs
// and Python GC also agrees to delete the OwnerRRef instance on the owner. The
// tricky part is to determine if there are any living UserRRefs.
//
// A user can get a UserRRef in three situations:
//
// (1). Receiving a UserRRef from the owner.
// (2). Receiving a UserRRef from another user.
// (3). Creating a new UserRRef owned by another worker.
//
// (1) is the simplest case where the owner initiates the fork, and hence it can
// easily increment local RC. The only requirement is that any UserRRef must
// notify the owner before destruction. Hence, we need the first guarantee:
//
// G1. The owner will be notified when any UserRRef is deleted.
//
// As messages might come delayed or out-of-order, we need one more guarantee to
// make sure the delete message is not sent out too soon. Let us first introduce
// a new concept. If A sends an RPC to B that involves an RRef, we call the RRef
// on A the parent RRef and the RRef on B the child RRef.
//
// G2. Parent RRef cannot be deleted until the child RRef is confirmed by the
// owner.
//
// Under (1), where the caller is UserRRef and callee is OwnerRRef, it simply
// means that the user will not send out the delete message until all previous
// messages are ACKed. Note that ACKed does not mean the owner has finished
// executing the function; instead, it only means the owner has retrieved its
// local OwnerRRef and is about to pass it to the function, which is sufficient to
// keep the OwnerRRef alive even if the delete message from the user arrives at
// the owner before the function finishes execution.
//
// With (2) and (3), it is possible that the owner only partially knows the RRef
// fork graph, or does not know it at all. For example, the RRef could be
// constructed on a user, and before the owner receives the RPC call, the
// creator user might have already shared the RRef with other users, and those
// users could further share the RRef. One invariant is that the fork graph of
// any RRef is always a tree rooted at the owner, because forking an RRef always
// creates a new RRef instance, and hence every RRef has a single parent. One
// nasty detail is that when an RRef is created on a user, technically the owner
// is not its parent but we still consider it that way and it does not break the
// argument below.
//
// The owner's view on any node (fork) in the tree has three stages:
//
// 1) unknown -> 2) known -> 3) deleted.
//
// The owner's view on the entire tree keeps changing. The owner deletes its
// OwnerRRef instance when it thinks there are no living UserRRefs, i.e., when
// the OwnerRRef is deleted, every UserRRef is either indeed deleted or unknown
// to the owner. The dangerous case is when some forks are unknown and others are
// deleted.
//
// G2 trivially guarantees that no parent UserRRef Y can be deleted before the
// owner knows all of Y's children UserRRefs.
//
// However, it is possible that the child UserRRef Z may be deleted before the
// owner knows its parent Y. More specifically, this can happen when all of Z's
// messages are processed by the owner before all messages from Y, including the
// delete message. Nevertheless, this does not cause any problem, because at
// least one of Y's ancestors will be alive, and it will prevent the owner from
// deleting the OwnerRRef. Consider the following example: (NB: this scenario
// will no longer be relevant when we block UDFs until all RRefs are confirmed by
// the owner)
//
// OwnerRRef -> A -> Y -> Z
//
// OwnerRRef forks to A, then A forks to Y, and Y forks to Z. Z can be deleted
// without OwnerRRef knowing Y. However, the OwnerRRef will at least know A, as
// the owner directly forks the RRef to A. A won't die before the owner knows Y.
//
// Things get a little trickier if the RRef is created on a user:
//
// OwnerRRef
// ^
// |
// A -> Y -> Z
//
// If Z calls to_here on the UserRRef, the owner at least knows A when Z is
// deleted, because otherwise to_here wouldn't finish. If Z does not call
// to_here, it is possible that the owner receives all messages from Z before
// any message from A and Y. In this case, as the real data of the OwnerRRef has
// not been created yet, there is nothing to be deleted either. It is the same
// as if Z did not exist at all. Hence, it's still OK.
//
// See #26759 for more details and discussions.
//
// TODO: make RRef an IValue, and edit createStackForSchema accordingly
// TODO: make RRef system messages idempotent and retry on failures.
//
// ``RRef`` is the base type for both ``UserRRef`` and ``OwnerRRef``.
// Each ``RRef`` has a globally unique ``RRefId``.
class TORCH_API RRef : public RRefInterface {
public:
// RRef is made NOT copyable and NOT movable to prevent messing up reference
// counting.
explicit RRef(const RRef& other) = delete;
explicit RRef(RRef&& other) = delete;
RRef& operator=(RRef&& other) = delete;
~RRef() override = default;
// returns the worker id of the owner
inline worker_id_t owner() const override {
return ownerId_;
}
// returns the worker name of the owner
inline std::string ownerName() const override {
return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_).name_;
}
// returns the worker info of the owner
inline WorkerInfo ownerWorkerInfo() const {
return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_);
}
// Returns the globally unique RRefId of this RRef
inline const RRefId& rrefId() const {
return rrefId_;
}
inline bool isPyObj() const {
return type_ == PyObjectType::get();
}
inline const TypePtr type() const override {
return type_;
}
// Save the future corresponding to the creation of this RRef on a remote
// node. Note that this is only set when processing requests invoked with
// rpc.remote. This is only used to get the future corresponding to the rref
// for profiling use cases.
inline void registerOwnerCreationFuture(c10::intrusive_ptr<JitFuture> fut) {
ownerCreationFuture_ = std::move(fut);
}
// Get the future corresponding to the creation of this rref.
inline c10::intrusive_ptr<JitFuture> getOwnerCreationFuture() const {
return ownerCreationFuture_;
}
// Check if creation of this RRef on owner node has timed out.
inline bool getTimedOut() const {
return timedOut_.load();
}
// Dispatches an error to the correct handler based on its RPCErrorType.
void handleError(RPCErrorType errorType, const JitFuture& JitFuture);
// Send delete UserRRef request to Owner,
// if the request hasn't been sent yet.
// There are 2 cases in which it is called:
// 1. Python GC decides the end of the UserRRef's lifetime and calls the destructor.
// 2. RPC module graceful shutdown calls it on all UserRRefs tracked
// in the RRefContext.
virtual void tryDel() {}
protected:
// Indicates that the creation of this RRef on owner node has timed out.
inline void setTimedOut() {
timedOut_ = true;
}
friend class RRefContext;
RRef(worker_id_t ownerId, const RRefId& rrefId, TypePtr type);
virtual RRefForkData fork() const;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
const worker_id_t ownerId_;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
const RRefId rrefId_;
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
std::atomic<bool> timedOut_{false};
// type field to denote the type of the element that the RRef is holding;
// it could be any TypePtr that JIT supports, including PyObjectType
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
const TypePtr type_;
// Future corresponding to request to create RRef on remote node.
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
c10::intrusive_ptr<JitFuture> ownerCreationFuture_;
};
// ``UserRRef`` represents a user of an RRef. Besides the ``RRefId``, each user
// also has a globally unique ``ForkId`` to identify this user. ``UserRRef``
// never owns the real value; the only way to get the value of the ``RRef`` is
// to call ``to_here()`` and get a copy.
class TORCH_API UserRRef final : public RRef {
public:
UserRRef(const UserRRef& other) = delete;
UserRRef(UserRRef&& other) = delete;
UserRRef& operator=(const UserRRef& other) = delete;
UserRRef& operator=(UserRRef&& other) = delete;
UserRRef(
worker_id_t ownerId,
const RRefId& rrefId,
const ForkId& forkId,
TypePtr type);
inline bool isOwner() const override {
return false;
}
inline bool confirmedByOwner() const override {
return confirmedByOwner_;
}
// Returns the globally unique ForkId of this RRef
const ForkId& forkId() const;
// Get a copy of the value from the ``OwnerRRef``. If the value is not ready
// yet, this call will block.
IValue toHere(
const float timeoutSeconds =
torch::distributed::rpc::kUnsetRpcTimeout) const;
void tryDel() override;
// Will be called when refcount reaches 0.
// Upon destruction, this ``UserRRef`` will tell the owner to deref.
void release_resources() override;
// Will be called when both refcount and weakcount reach 0. See
// https://github.com/pytorch/pytorch/blob/9116f02bebf3a5260feef5732d36c54ecb3b4033/c10/util/intrusive_ptr.h#L204
// This is called on destructing the wrapping intrusive_ptr_target instance
// and its data members.
~UserRRef() override;
private:
friend class RRefContext;
RRefForkData fork() const override;
inline void confirm() {
confirmedByOwner_ = true;
}
const ForkId forkId_;
// Indicates if this user has sent the delete message to its owner.
// Note, thread safety is needed because the delete message could be sent by
// either the destructor called by Python garbage collection or RRefContext
// proactive cleanup on RPC graceful shutdown.
std::mutex deletedOnOwnerMutex_;
bool deletedOnOwner_{false};
// Indicating whether this UserRRef has been confirmed by its owner.
std::atomic<bool> confirmedByOwner_;
};
// Keep the template only on the derived class because ``RRefContext`` needs to
// erase the type on ``RRef`` and keep them in one map.
class TORCH_API OwnerRRef final : public RRef {
public:
OwnerRRef(const OwnerRRef& other) = delete;
OwnerRRef(OwnerRRef&& other) = delete;
OwnerRRef& operator=(const OwnerRRef& other) = delete;
OwnerRRef& operator=(OwnerRRef&& other) = delete;
OwnerRRef(
worker_id_t ownerId,
const RRefId& rrefId,
TypePtr type,
std::vector<c10::Device> devices);
OwnerRRef(
worker_id_t ownerId,
const RRefId& rrefId,
TypePtr type,
std::optional<IValue> value,
std::vector<c10::Device> devices);
inline bool isOwner() const override {
return true;
}
// OwnerRRef is always confirmed, while UserRRef is only confirmed when the
// owner knows about it.
inline bool confirmedByOwner() const override {
return true;
}
// Get a constant reference of the real value. This method will block if the
// value is not ready. This method does not need GIL as it does not create
// any new py::object. It will throw if there is an error.
const IValue& getValue() const;
// Set the value of this ``OwnerRRef``. This method does not need GIL as it
// does not create any new py::object.
void setValue(IValue&& value);
// Sets the value of this ``OwnerRRef`` to contain an exception.
void setError(std::exception_ptr eptr);
// Has a value or error been set?
bool hasValue() const;
// Gets a future that is satisfied when the value or error is set.
c10::intrusive_ptr<JitFuture> getFuture();
private:
friend class RRefContext;
c10::intrusive_ptr<JitFuture> future_;
};
TORCH_API std::ostream& operator<<(std::ostream& os, const RRef& rref);
// Helper function that casts from c10::RRefInterface to OwnerRRef
inline TORCH_API c10::intrusive_ptr<OwnerRRef> fromRRefInterface(
const c10::intrusive_ptr<c10::RRefInterface>& rrefInterface) {
return c10::static_intrusive_pointer_cast<OwnerRRef>(rrefInterface);
}
// Helper function that casts from OwnerRRef to c10::RRefInterface
inline TORCH_API c10::intrusive_ptr<c10::RRefInterface> fromOwnerRRef(
const c10::intrusive_ptr<RRef>& ownerRRef) {
return c10::static_intrusive_pointer_cast<c10::RRefInterface>(ownerRRef);
}
} // namespace torch::distributed::rpc
```
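The `OWNER_IDX` through `RFD_TUPLE_SIZE` constants above pin down the position of each `RRefForkData` field inside the flat tuple sent over the wire. The sketch below illustrates that layout with a plain `std::array` standing in for the real tuple; the concrete values are made up.

```cpp
#include <array>
#include <iostream>
#include <string>

// Indices mirroring rref_impl.h; RFD_TUPLE_SIZE must be bumped whenever a
// field is added to RRefForkData.
constexpr int OWNER_IDX = 0;
constexpr int RREFID_ON_IDX = 1;
constexpr int RREFID_ID_IDX = 2;
constexpr int FORKID_ON_IDX = 3;
constexpr int FORKID_ID_IDX = 4;
constexpr int PARENT_IDX = 5;
constexpr int TYPE_IDX = 6;
constexpr int RFD_TUPLE_SIZE = 7;

int main() {
  // Stand-in for the serialized tuple: every field stored as a string slot.
  std::array<std::string, RFD_TUPLE_SIZE> rfd;
  rfd[OWNER_IDX] = "1";       // ownerId_
  rfd[RREFID_ON_IDX] = "0";   // rrefId_.createdOn_
  rfd[RREFID_ID_IDX] = "42";  // rrefId_.localId_
  rfd[FORKID_ON_IDX] = "2";   // forkId_.createdOn_
  rfd[FORKID_ID_IDX] = "7";   // forkId_.localId_
  rfd[PARENT_IDX] = "0";      // parent_
  rfd[TYPE_IDX] = "Tensor";   // typeStr_

  for (int i = 0; i < RFD_TUPLE_SIZE; ++i) {
    std::cout << i << ": " << rfd[i] << '\n';
  }
  return 0;
}
```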
|
=============================================================================================================================================
SOURCE CODE FILE: rref_proto.h
LINES: 1
SIZE: 5.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\rref_proto.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/types.h>
#include <torch/csrc/jit/runtime/operator.h>
#include <torch/csrc/jit/serialization/pickler.h>
#include <vector>
namespace torch::distributed::rpc {
// Temporary solution for RRef operations.
// TODO: Remove all these messages and use rpc + registered functions instead.
class TORCH_API RRefMessageBase : public RpcCommandBase {
public:
RRefMessageBase(const RRefId& rrefId, MessageType type)
: rrefId_(rrefId), type_(type) {}
const RRefId& rrefId();
protected:
// NOLINTNEXTLINE(cppcoreguidelines*)
const RRefId rrefId_;
// NOLINTNEXTLINE(cppcoreguidelines*)
const MessageType type_;
};
class TORCH_API ForkMessageBase : public RRefMessageBase {
public:
ForkMessageBase(const RRefId& rrefId, const ForkId& forkId, MessageType type)
: RRefMessageBase(rrefId, type), forkId_(forkId) {}
const ForkId& forkId();
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::pair<RRefId, ForkId> fromMessage(
const Message& message,
MessageType type);
protected:
// NOLINTNEXTLINE(cppcoreguidelines*)
const ForkId forkId_;
};
// UserRRef uses this message to fetch the remote RRef value from the owner.
class TORCH_API ScriptRRefFetchCall final : public RRefMessageBase {
public:
ScriptRRefFetchCall(worker_id_t fromWorkerId, const RRefId& rrefId)
: RRefMessageBase(rrefId, MessageType::SCRIPT_RREF_FETCH_CALL),
fromWorkerId_(fromWorkerId) {}
inline worker_id_t fromWorkerId() const {
return fromWorkerId_;
}
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::unique_ptr<ScriptRRefFetchCall> fromMessage(
const Message& message);
private:
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const worker_id_t fromWorkerId_;
};
class TORCH_API PythonRRefFetchCall final : public RRefMessageBase {
public:
PythonRRefFetchCall(worker_id_t fromWorkerId, const RRefId& rrefId)
: RRefMessageBase(rrefId, MessageType::PYTHON_RREF_FETCH_CALL),
fromWorkerId_(fromWorkerId) {}
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::unique_ptr<PythonRRefFetchCall> fromMessage(
const Message& message);
private:
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const worker_id_t fromWorkerId_;
};
// OwnerRRef uses this message to send the RRef value to a remote UserRRef
class TORCH_API RRefFetchRet : public RpcCommandBase {
public:
RRefFetchRet(std::vector<at::IValue> values, MessageType type)
: values_(std::move(values)), type_(type) {}
const std::vector<at::IValue>& values();
c10::intrusive_ptr<Message> toMessageImpl() && override;
private:
std::vector<at::IValue> values_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const MessageType type_;
};
class TORCH_API ScriptRRefFetchRet final : public RRefFetchRet {
public:
explicit ScriptRRefFetchRet(std::vector<at::IValue> values)
: RRefFetchRet(std::move(values), MessageType::SCRIPT_RREF_FETCH_RET) {}
static std::unique_ptr<ScriptRRefFetchRet> fromMessage(
const Message& message);
};
class TORCH_API PythonRRefFetchRet final : public RRefFetchRet {
public:
explicit PythonRRefFetchRet(std::vector<at::IValue> values)
: RRefFetchRet(std::move(values), MessageType::PYTHON_RREF_FETCH_RET) {}
static std::unique_ptr<PythonRRefFetchRet> fromMessage(
const Message& message);
};
// A UserRRef (regardless of whether it's the creator or not) uses this message
// to notify the OwnerRRef on delete.
class TORCH_API RRefUserDelete final : public ForkMessageBase {
public:
RRefUserDelete(const RRefId& rrefId, const ForkId& forkId)
: ForkMessageBase(rrefId, forkId, MessageType::RREF_USER_DELETE) {}
static std::unique_ptr<RRefUserDelete> fromMessage(const Message& message);
};
class TORCH_API RemoteRet final : public ForkMessageBase {
public:
RemoteRet(const RRefId& rrefId, const ForkId& forkId)
: ForkMessageBase(rrefId, forkId, MessageType::REMOTE_RET) {}
static std::unique_ptr<RemoteRet> fromMessage(const Message& message);
};
// A child RRef uses this message to notify its parent that the child has been
// confirmed by the owner.
class TORCH_API RRefChildAccept final : public RpcCommandBase {
public:
explicit RRefChildAccept(const ForkId& forkId) : forkId_(forkId) {}
const ForkId& forkId() const;
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::unique_ptr<RRefChildAccept> fromMessage(const Message& message);
private:
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const ForkId forkId_;
};
// A child RRef uses this message to send a fork request to the owner.
class TORCH_API RRefForkRequest final : public ForkMessageBase {
public:
RRefForkRequest(const RRefId& rrefId, const ForkId& forkId)
: ForkMessageBase(rrefId, forkId, MessageType::RREF_FORK_REQUEST) {}
static std::unique_ptr<RRefForkRequest> fromMessage(const Message& message);
};
class TORCH_API RRefAck final : public RpcCommandBase {
public:
RRefAck() = default;
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::unique_ptr<RRefAck> fromMessage(const Message& message);
};
} // namespace torch::distributed::rpc
```
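Each command class above declares `toMessageImpl() &&`, an rvalue-qualified member function, so converting a command into a `Message` can move its fields out instead of copying them. A minimal sketch of that calling convention with toy stand-in types (`FakeCommand`, `FakeMessage`), not the real `RpcCommandBase`/`Message`:

```cpp
#include <iostream>
#include <utility>
#include <vector>

// Toy stand-in for rpc::Message.
struct FakeMessage {
  std::vector<char> payload;
};

struct FakeCommand {
  std::vector<char> payload;

  // Rvalue-qualified: may only be called on a temporary or an explicitly
  // moved-from object, which makes the "consumes *this" contract explicit.
  FakeMessage toMessageImpl() && {
    return FakeMessage{std::move(payload)};
  }
};

int main() {
  FakeCommand cmd{{'h', 'i'}};
  // cmd.toMessageImpl();  // would not compile: cmd is an lvalue
  FakeMessage m = std::move(cmd).toMessageImpl();
  std::cout << "message payload size: " << m.payload.size() << '\n';
  return 0;
}
```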
|
==============================================================================================================================================
SOURCE CODE FILE: script_call.h
LINES: 1
SIZE: 2.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\script_call.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/jit/runtime/operator.h>
#include <torch/csrc/jit/serialization/pickler.h>
#include <optional>
#include <vector>
namespace torch::distributed::rpc {
using torch::jit::Operator;
// A ScriptCall instance represents an invocation of either a builtin operator
// or a TorchScript function. If it is a builtin operator, it
// contains a shared ptr to the `Operator` and a list of arguments.
// If it is a TorchScript function, it contains a non-empty qualifiedName string
// for the TorchScript function schema name and a list of arguments.
class TORCH_API ScriptCall : public RpcCommandBase {
public:
// Constructor for builtin operator call.
ScriptCall(std::shared_ptr<Operator> op, std::vector<at::IValue>&& stack);
// Constructor for TorchScript function call.
ScriptCall(
const c10::QualifiedName& qualifiedName,
std::vector<at::IValue>&& stack,
const bool isAsyncExecution = false);
bool hasOp() const;
std::shared_ptr<Operator> op() const;
bool hasQualifiedName() const;
const c10::QualifiedName& qualifiedName() const;
// return the argument stack of this builtin operator
const std::vector<at::IValue>& stack() const;
std::vector<at::IValue>& stackRef();
inline bool isAsyncExecution() const {
return isAsyncExecution_;
}
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::unique_ptr<ScriptCall> fromMessage(const Message& message);
~ScriptCall() override = default;
protected:
virtual void toIValues(std::vector<at::IValue>& ivalues) const;
static std::unique_ptr<ScriptCall> fromIValues(
std::vector<at::IValue>& ivalues);
private:
// Given an operator symbol and a string schema, return the matched operator.
static std::shared_ptr<Operator> matchOperator(const std::string& str_schema);
static const std::string BUILTIN_OP_NAMESPACE_;
static const std::string ATEN_PREFIX_;
// This field has value if this ScriptCall represents invocation of a builtin
// operator.
std::optional<std::shared_ptr<Operator>> op_;
// This field has a non-empty string if this ScriptCall represents an invocation
// of an annotated TorchScript function defined by users.
std::optional<const c10::QualifiedName> qualifiedName_;
std::vector<at::IValue> stack_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const bool isAsyncExecution_;
};
} // namespace torch::distributed::rpc
```
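A `ScriptCall` carries either a builtin `Operator` or a TorchScript qualified name, and callers branch on `hasOp()` / `hasQualifiedName()`. The sketch below shows that either-or dispatch with `std::optional` and toy types; `FakeScriptCall` and `dispatch` are illustrations, not the real class or API.

```cpp
#include <iostream>
#include <optional>
#include <string>

// Toy stand-in for ScriptCall's "builtin operator or named TorchScript
// function" choice; the real class stores an Operator ptr and a QualifiedName.
struct FakeScriptCall {
  std::optional<std::string> opSchema;       // set for builtin operator calls
  std::optional<std::string> qualifiedName;  // set for TorchScript functions

  bool hasOp() const { return opSchema.has_value(); }
  bool hasQualifiedName() const { return qualifiedName.has_value(); }
};

void dispatch(const FakeScriptCall& call) {
  if (call.hasOp()) {
    std::cout << "run builtin op: " << *call.opSchema << '\n';
  } else if (call.hasQualifiedName()) {
    std::cout << "run TorchScript function: " << *call.qualifiedName << '\n';
  }
}

int main() {
  dispatch(FakeScriptCall{std::string("aten::add"), std::nullopt});
  dispatch(FakeScriptCall{std::nullopt, std::string("my_module::my_fn")});
  return 0;
}
```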
|
=====================================================================================================================================================
SOURCE CODE FILE: script_remote_call.h
LINES: 1
SIZE: 1.76 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\script_remote_call.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/script_call.h>
#include <torch/csrc/distributed/rpc/types.h>
#include <torch/csrc/jit/runtime/operator.h>
#include <torch/csrc/jit/serialization/pickler.h>
#include <vector>
namespace torch::distributed::rpc {
using torch::jit::Operator;
// A ScriptRemoteCall instance represents an invocation of `dist.remote` on a
// builtin operator. Currently, it does not yet support using RRefs as arguments.
// Besides the operator and a vector of arguments, ScriptRemoteCall also
// contains the RRefId and the ForkId of the return value RRef.
class TORCH_API ScriptRemoteCall final : public ScriptCall {
public:
// Constructor for builtin operator call.
ScriptRemoteCall(
std::shared_ptr<Operator> op,
std::vector<at::IValue>&& stack,
const RRefId& retRRefId,
const ForkId& retForkId);
// Constructor for TorchScript function call.
ScriptRemoteCall(
const c10::QualifiedName& qualifiedName,
std::vector<at::IValue>&& stack,
const RRefId& retRRefId,
const ForkId& retForkId,
const bool isAsyncExecution);
inline const RRefId& retRRefId() const {
return retRRefId_;
}
inline const ForkId& retForkId() const {
return retForkId_;
}
static std::unique_ptr<ScriptRemoteCall> fromIValues(
std::vector<at::IValue>& ivalues);
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::unique_ptr<ScriptRemoteCall> fromMessage(const Message& message);
private:
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const RRefId retRRefId_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const ForkId retForkId_;
};
} // namespace torch::distributed::rpc
```
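As a rough sketch of how a derived call like `ScriptRemoteCall` can extend its base class's IValue serialization, the example below appends the two return-RRef IDs after the base fields and pops them off first when deserializing. This is an assumption about the general shape only; the real code uses `at::IValue` and `GloballyUniqueId`, and the exact wire layout may differ.

```cpp
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Toy stand-in: the real code serializes at::IValue objects.
using FakeIValue = int64_t;

// Serialize: the derived call appends its extra fields after the base fields.
std::vector<FakeIValue> toIValues(
    std::vector<FakeIValue> baseFields,
    FakeIValue retRRefId,
    FakeIValue retForkId) {
  baseFields.push_back(retRRefId);
  baseFields.push_back(retForkId);
  return baseFields;
}

// Deserialize: pop the derived fields off the back first, then hand the rest
// to the base class parser.
std::pair<FakeIValue, FakeIValue> popIds(std::vector<FakeIValue>& ivalues) {
  FakeIValue forkId = ivalues.back();
  ivalues.pop_back();
  FakeIValue rrefId = ivalues.back();
  ivalues.pop_back();
  return {rrefId, forkId};
}

int main() {
  auto wire = toIValues({/*arg0=*/3, /*arg1=*/4}, /*retRRefId=*/100, /*retForkId=*/200);
  auto [rrefId, forkId] = popIds(wire);
  std::cout << "rrefId=" << rrefId << " forkId=" << forkId
            << " remaining args=" << wire.size() << '\n';
  return 0;
}
```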
|
==============================================================================================================================================
SOURCE CODE FILE: script_resp.h
LINES: 1
SIZE: 0.71 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\script_resp.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/jit/serialization/pickler.h>
namespace torch::distributed::rpc {
// Return value of a builtin operator or a TorchScript function.
class TORCH_API ScriptResp final : public RpcCommandBase {
public:
explicit ScriptResp(at::IValue&& values);
const at::IValue& value();
c10::intrusive_ptr<Message> toMessageImpl() && override;
static std::unique_ptr<ScriptResp> fromMessage(const Message& message);
private:
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const at::IValue value_;
};
} // namespace torch::distributed::rpc
```
|
===================================================================================================================================================
SOURCE CODE FILE: tensorpipe_agent.h
LINES: 1
SIZE: 17.72 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\tensorpipe_agent.h
ENCODING: utf-8
```h
#pragma once
#ifdef USE_TENSORPIPE
#include <atomic>
#include <thread>
#include <c10/core/thread_pool.h>
#include <torch/csrc/distributed/c10d/PrefixStore.hpp>
#include <torch/csrc/distributed/c10d/Store.hpp>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
#include <utility>
// Forward-declare the TensorPipe classes we need, to avoid including its
// headers in PyTorch's own headers and thus making it a public dependency.
namespace tensorpipe {
class Context;
class Error;
class Listener;
class Message;
class Pipe;
namespace transport {
class Context;
} // namespace transport
namespace channel {
class Context;
} // namespace channel
} // namespace tensorpipe
namespace torch::distributed::rpc {
// These priorities instruct TensorPipe on which transport/channel to pick
// during handshake. Higher priorities will take precedence over lower ones.
// The transport with lowest priority will be the one used to bootstrap pipes.
constexpr int64_t kShmTransportPriority = 200;
constexpr int64_t kIbvTransportPriority = 100;
// The UV transport just uses TCP and should work everywhere, thus keep it last.
constexpr int64_t kUvTransportPriority = 0;
constexpr int64_t kCmaChannelPriority = 1200;
constexpr int64_t kMultiplexedUvChannelPriority = 1100;
// The basic channel reuses a transport as a channel, and is thus our fallback.
constexpr int64_t kBasicChannelPriority = 1000;
// CPU channels have higher priority than CUDA channels, since the latter might
// also handle CPU-to-CPU transfers, but will always be less efficient than their
// CPU-only counterparts.
constexpr int64_t kCudaIpcChannelPriority = 300;
constexpr int64_t kCudaGdrChannelPriority = 200;
constexpr int64_t kCudaXthChannelPriority = 400;
constexpr int64_t kCudaBasicChannelPriority = 0;
using steady_clock_time_point =
std::chrono::time_point<std::chrono::steady_clock>;
struct TORCH_API TransportRegistration {
std::shared_ptr<tensorpipe::transport::Context> transport;
int64_t priority;
std::string address;
};
TORCH_DECLARE_REGISTRY(TensorPipeTransportRegistry, TransportRegistration);
struct TORCH_API ChannelRegistration {
std::shared_ptr<tensorpipe::channel::Context> channel;
int64_t priority;
};
TORCH_DECLARE_REGISTRY(TensorPipeChannelRegistry, ChannelRegistration);
constexpr auto kDefaultNumWorkerThreads = 16;
struct TORCH_API TensorPipeRpcBackendOptions : public RpcBackendOptions {
TensorPipeRpcBackendOptions(
int numWorkerThreads,
std::optional<std::vector<std::string>> transports,
std::optional<std::vector<std::string>> channels,
float rpc_timeout,
std::string init_method,
std::unordered_map<std::string, DeviceMap> device_maps = {},
std::vector<c10::Device> devices = {})
: RpcBackendOptions(rpc_timeout, std::move(init_method)),
numWorkerThreads(numWorkerThreads),
transports(std::move(transports)),
channels(std::move(channels)),
deviceMaps(std::move(device_maps)),
devices(std::move(devices)) {
TORCH_CHECK(
numWorkerThreads > 0,
"num_worker_threads must be positive, got ",
numWorkerThreads);
if (this->transports.has_value()) {
for (const std::string& transportName : this->transports.value()) {
TORCH_CHECK(
TensorPipeTransportRegistry()->Has(transportName),
"Unknown transport: ",
transportName);
}
}
if (this->channels.has_value()) {
for (const std::string& channelName : this->channels.value()) {
TORCH_CHECK(
TensorPipeChannelRegistry()->Has(channelName),
"Unknown channel: ",
channelName);
}
}
}
void setDeviceMap(const std::string& workerName, const DeviceMap& deviceMap) {
auto iter = deviceMaps.find(workerName);
if (iter == deviceMaps.end()) {
deviceMaps[workerName] = deviceMap;
} else {
for (auto& entry : deviceMap) {
// c10::Device has no default constructor, hence map[device] doesn't work.
// In C++17 we can use insert_or_assign instead.
auto entryIter = iter->second.find(entry.first);
if (entryIter == iter->second.end()) {
iter->second.emplace(entry.first, entry.second);
} else {
entryIter->second = entry.second;
}
}
}
}
int numWorkerThreads;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::optional<std::vector<std::string>> transports;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::optional<std::vector<std::string>> channels;
std::unordered_map<std::string, DeviceMap> deviceMaps;
std::vector<c10::Device> devices;
};
// Struct to track the network source metrics
struct TORCH_API NetworkSourceInfo {
worker_id_t srcRank;
std::vector<uint8_t> srcMachineAddr;
};
// Struct to track aggregated network metrics
struct TORCH_API AggregatedNetworkData {
uint64_t numCalls{0};
uint64_t totalSentBytes{0};
uint64_t totalRecvBytes{0};
uint64_t totalErrors{0};
};
// TensorPipeAgent leverages TensorPipe (https://github.com/pytorch/tensorpipe)
// to transparently move tensors and payloads through the fastest available
// transport or channel. It acts like a hybrid RPC transport, providing shared
// memory (Linux) and TCP (Linux & macOS) support. CUDA support is in progress.
class TORCH_API TensorPipeAgent : public RpcAgent {
public:
TensorPipeAgent(
const c10::intrusive_ptr<::c10d::Store>& store,
std::string selfName,
worker_id_t selfId,
std::optional<int> worldSize,
TensorPipeRpcBackendOptions opts,
std::unordered_map<std::string, DeviceMap> reverseDeviceMaps,
std::vector<c10::Device> devices,
std::unique_ptr<RequestCallback> cb);
TensorPipeAgent(const TensorPipeAgent&) = delete;
TensorPipeAgent& operator=(const TensorPipeAgent&) = delete;
c10::intrusive_ptr<JitFuture> send(
const WorkerInfo& to,
c10::intrusive_ptr<Message> message,
const float rpcTimeoutSeconds = kUnsetRpcTimeout,
const DeviceMap& deviceMap = {}) override;
// join() and sync() would be deprecated -
// https://github.com/pytorch/pytorch/issues/27647
void join(bool shutdown = false, float timeout = 0) override;
void sync() override {}
void startImpl() override;
void shutdownImpl() override;
~TensorPipeAgent() override;
const WorkerInfo& getWorkerInfo(const std::string& workerName) const override;
const WorkerInfo& getWorkerInfo(worker_id_t workerId) const override;
std::vector<WorkerInfo> getWorkerInfos() const override;
void updateGroupMembership(
const WorkerInfo& workerInfo,
const std::vector<c10::Device>& devices,
const std::unordered_map<std::string, DeviceMap>& reverseDeviceMaps,
bool isJoin);
std::unordered_map<std::string, std::string> getMetrics() override;
void addGilWaitTime(const std::chrono::microseconds gilWaitTime) override;
TensorPipeRpcBackendOptions getBackendOptions() const;
const c10::intrusive_ptr<::c10d::Store> getStore() const;
DeviceMap getDeviceMap(const WorkerInfo& dest) const override;
const std::vector<c10::Device>& getDevices() const override;
using NetworkDataDict =
std::unordered_map<std::string, AggregatedNetworkData>;
// Returns metrics tracked by the NetworkDataDict
NetworkDataDict getNetworkData();
// Returns NetworkSourceInfo struct
NetworkSourceInfo getNetworkSourceInfo();
static const std::string& guessAddress();
// For testing purposes.
size_t timeoutMapSize();
size_t numPendingResponses();
size_t messageIdToTimeoutMapSize();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const bool isStaticGroup_;
protected:
// TensorPipe write function that could be used to write response
// messages by the server, and write request messages by the client. This
// is a protected method since it is overridden by FaultyTensorPipeAgent.
virtual void pipeWrite(
const std::shared_ptr<tensorpipe::Pipe>&,
const c10::intrusive_ptr<Message>& message,
std::vector<c10::Device>&& devices,
std::vector<c10::Stream> streams,
std::function<void(const tensorpipe::Error&)>) noexcept;
private:
// Removes the entry for the given messageId (and its expiration time) from the
// timeoutMap_.
void removeFromTimeoutMap(uint64_t messageId);
// Populates workerIdToInfo_ and workerNameToInfo_ using addressStore_
void prepareNames(bool isStaticGroup);
// Check the static group attribute with the value set in store
void checkAndSetStaticGroup(const c10::intrusive_ptr<::c10d::Store>& store);
const std::string& findWorkerURL(const WorkerInfo& worker) const;
// Only used for dynamic RPC groups; method to have the worker leave the group.
void leaveGroup();
// TensorPipe read function that could be used to read response messages
// by client, and read request messages by server.
void pipeRead(
const std::shared_ptr<tensorpipe::Pipe>&,
std::function<void(
const tensorpipe::Error&,
c10::intrusive_ptr<Message>,
std::vector<c10::Stream>)>) noexcept;
// Callback of listener accept()
void onListenerAccepted(
const tensorpipe::Error& error,
std::shared_ptr<tensorpipe::Pipe>& pipe);
// Respond to a call from a peer
void respond(std::shared_ptr<tensorpipe::Pipe>& pipe);
void sendCompletedResponseMessage(
std::shared_ptr<tensorpipe::Pipe>& pipe,
JitFuture& futureResponseMessage,
uint64_t messageId,
std::vector<c10::Stream> stream);
// Collects metrics from successful RPC calls
void trackNetworkData(
uint64_t requestSize,
uint64_t responseSize,
const std::string& destWorkerName);
// Collects metrics from failed RPC calls
void trackNetworkError(
uint64_t requestSize,
const std::string& destWorkerName);
inline std::vector<c10::Device> getDevicesForRemote(
const std::string& remoteName,
const Message& message) const;
// When a request+response completes, we need to mark the future message as
// complete. However, if its timeout has already expired, it already has an
// error set. There is no atomic "test-and-set" way to mark a future complete
// only if it isn't yet. It does exist for errors (setErrorIfNeeded) but, even
// then, it ends up printing a log message, which may worry the user. To solve
// both issues we use a separate atomic flag to know the status of the future.
struct AtomicJitFuture {
explicit AtomicJitFuture(const std::vector<c10::Device>& devices) {
jitFuture = c10::make_intrusive<at::ivalue::Future>(
at::AnyClassType::get(), devices);
}
std::atomic_flag isComplete = ATOMIC_FLAG_INIT;
c10::intrusive_ptr<JitFuture> jitFuture;
};
// Maintains state per client pipe to track pending response messages and
// error states. pendingResponseMessage_ should be protected by a mutex since
// it can be raced with the user's send() call.
// TODO: To achieve better performance we can have a pipe pool per
// client that can be configured using RpcBackendOptions.
struct ClientPipe {
explicit ClientPipe(std::shared_ptr<tensorpipe::Pipe> pipe)
: pipe_(std::move(pipe)) {}
std::shared_ptr<tensorpipe::Pipe> pipe_;
mutable std::mutex mutex_;
bool inError_{false};
// Map from Message Request ID's to corresponding futures.
std::unordered_map<uint64_t, std::shared_ptr<AtomicJitFuture>>
pendingResponseMessage_;
};
const c10::intrusive_ptr<::c10d::Store> store_;
const TensorPipeRpcBackendOptions opts_;
// For dynamic RPC, the reverse device maps are updated whenever a new rank
// joins or leaves the group
std::unordered_map<std::string, DeviceMap> reverseDeviceMaps_;
// Local devices used by this agent. If the application didn't specify this
// field, it will be initialized using the corresponding local devices in
// opts_.deviceMaps and reverseDeviceMaps_.
std::vector<c10::Device> devices_;
ThreadPool threadPool_;
std::shared_ptr<tensorpipe::Context> context_;
std::shared_ptr<tensorpipe::Listener> listener_;
mutable std::mutex connectedPipesMutex_;
std::unordered_map<worker_id_t, ClientPipe> connectedPipes_;
// Maps keyed on name and id for easy WorkerInfo lookup.
std::unordered_map<worker_id_t, WorkerInfo> workerIdToInfo_;
std::unordered_map<std::string, WorkerInfo> workerNameToInfo_;
std::unordered_map<std::string, std::string> workerNameToURL_;
::c10d::PrefixStore rankToNameStore_;
::c10d::PrefixStore nameToAddressStore_;
// Store keys that will be used to count joined processes and active calls during
// the shutdown process.
::c10d::PrefixStore shutdownStore_;
int worldSize_ = 0;
std::atomic<uint64_t> nextMessageID_{0};
// Metadata used for tracking of whether certain RPCs have timed out or not.
struct TimeoutMessageMetadata {
TimeoutMessageMetadata(
uint64_t messageId_,
std::shared_ptr<AtomicJitFuture> responseFuture_,
std::chrono::milliseconds timeout_)
: messageId(messageId_),
responseFuture(std::move(responseFuture_)),
timeout(timeout_) {}
uint64_t messageId;
std::shared_ptr<AtomicJitFuture> responseFuture;
std::chrono::milliseconds timeout;
};
// Map to store the expiration times for each message.
std::map<steady_clock_time_point, std::vector<TimeoutMessageMetadata>>
timeoutMap_;
// Map to store the messageId to expiry time.
std::unordered_map<uint64_t, steady_clock_time_point> messageIdToTimeout_;
// Thread that will poll the timeoutMap_ for timed out messages and mark them
// with an error accordingly
std::thread timeoutThread_;
// Function run by the timeoutThread_ to check for timed out RPCs
void pollTimeoutRpcs();
// Mutex to guard the timeoutMap_
std::mutex timeoutMapMutex_;
// Condition Variable to signal population of the timeoutMap_
std::condition_variable timeoutThreadCV_;
// Returns the expiration time for an RPC by adding the current time to the
// passed in timeout.
inline steady_clock_time_point computeRpcMessageExpiryTime(
std::chrono::milliseconds timeout) const {
return std::chrono::time_point_cast<std::chrono::milliseconds>(
std::chrono::steady_clock::now() + timeout);
}
// Handle error on an outgoing pipe
void handleClientError(
ClientPipe& clientPipe,
const tensorpipe::Error& error);
// This is a generic struct for capturing Time-Series Metrics. It keeps a
// running sum and count of data points (observations), and can return an
// average of the data points seen so far. This is currently only used for
// tracking the GIL Wait Time in RPC Agents, but can be used for other metrics
// as well.
struct TimeSeriesMetricsTracker {
// Running sum of the data points seen so far
uint64_t currentSum_;
// Running count of the data points seen so far
uint64_t currentCount_;
explicit TimeSeriesMetricsTracker(
uint64_t currentSum = 0,
uint64_t currentCount = 0);
// Adds a data point (which is basically one observation for the metric
// being tracked) to the running sum and count.
void addData(uint64_t dataPoint);
// Returns the average of all the data points seen so far.
float computeAverage() const;
};
// Map of Time-Series metrics tracked by the RPC Agent
std::unordered_map<std::string, TimeSeriesMetricsTracker> timeSeriesMetrics_;
// Mutex to guard timeSeriesMetrics_
std::mutex metricsMutex_;
// Custom lock guard used to check if the RPC group is dynamic and lock the
// mutex if so
struct GroupMembershipLockGuard {
GroupMembershipLockGuard(std::mutex& mutex, bool isStaticGroup)
: ref_(mutex), isStaticGroup_(isStaticGroup) {
if (isStaticGroup_) {
ref_.lock();
}
}
~GroupMembershipLockGuard() {
if (isStaticGroup_) {
ref_.unlock();
}
}
GroupMembershipLockGuard(const GroupMembershipLockGuard&) = delete;
private:
std::mutex& ref_;
bool isStaticGroup_;
};
// Mutex to guard access to group membership data
// e.g. updates to (workerIdToInfo_, workerNameToInfo_, workerNameToURL_)
mutable std::mutex groupMembershipMutex_;
// Map to Track Network Data
NetworkDataDict networkData_;
// Mutex to guard networkData_
std::mutex networkDataMutex_;
// A mutex and a cv to guard access to the call counts and watch for changes.
std::mutex callCountMutex_;
std::condition_variable callCountCV_;
// Running total of un-processed, un-errored RPC calls sent
int32_t clientActiveCalls_{0};
// Running total of un-processed RPC requests received
int32_t serverActiveCalls_{0};
// Running total of RPC requests that will be completed asynchronously
int32_t serverActiveAsyncCalls_{0};
// Whether a global graceful shutdown has begun, in which case we'll silence
// error messages due to remote workers closing their pipes.
std::atomic<bool> shuttingDown_{false};
// Helpers to modify the counts while correctly dealing with the mutex and cv.
void increaseCallCount(int32_t& count);
void decreaseCallCount(int32_t& count);
// Helpers to set the state of the requests.
void markFutureAsComplete(
std::shared_ptr<AtomicJitFuture> atomicFuture,
c10::intrusive_ptr<Message> message,
std::vector<c10::Stream> streams);
void markFutureWithError(
std::shared_ptr<AtomicJitFuture> atomicFuture,
std::string errorMsg);
};
} // namespace torch::distributed::rpc
#endif // USE_TENSORPIPE
```
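The `AtomicJitFuture` comment above explains why a separate atomic flag is needed: either the response path or the timeout path may complete the future, and only the first one should win. Below is a minimal sketch of that test-and-set pattern with `std::atomic_flag` and a toy `FakeAtomicFuture` type, not the real class.

```cpp
#include <atomic>
#include <iostream>
#include <string>

// Toy stand-in for AtomicJitFuture: a future that must be completed exactly
// once, either with a value (response arrived) or with an error (timeout).
struct FakeAtomicFuture {
  std::atomic_flag isComplete = ATOMIC_FLAG_INIT;
  std::string result;

  // Returns true only for the first caller; later callers see the flag already
  // set and back off, which provides the missing "test-and-set completion".
  bool tryComplete(const std::string& value) {
    if (isComplete.test_and_set()) {
      return false;  // someone else (e.g. the timeout thread) won the race
    }
    result = value;
    return true;
  }
};

int main() {
  FakeAtomicFuture fut;
  bool byTimeout = fut.tryComplete("error: RPC timed out");
  bool byResponse = fut.tryComplete("response payload");  // arrives too late
  std::cout << "timeout won: " << byTimeout
            << ", response won: " << byResponse
            << ", result: " << fut.result << '\n';
  return 0;
}
```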
|
===================================================================================================================================================
SOURCE CODE FILE: tensorpipe_utils.h
LINES: 1
SIZE: 4.73 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\tensorpipe_utils.h
ENCODING: utf-8
```h
#pragma once
#ifdef USE_TENSORPIPE
#include <torch/csrc/distributed/rpc/utils.h>
namespace tensorpipe {
class Message;
class Allocation;
class Descriptor;
} // namespace tensorpipe
namespace torch::distributed::rpc {
TORCH_API const c10::Stream& getStreamForDevice(
const std::vector<c10::Stream>& streams,
const c10::Device& device);
// Inspired by c10/core/impl/DeviceGuardImplInterface.h.
class TensorpipeDeviceTypeConverter {
public:
// Ideally we'd want this to also return a tensorpipe::Message::Tensor object
// but we cannot forward-declare that class (because it's nested), and we
// cannot include the TensorPipe headers because it's a private dependency.
// Thus we bend over backwards and entrust this method with appending that
// object to the `tensors` field of the tensorpipe::Message object we pass.
virtual std::optional<std::vector<char>> prepareTensorForSending(
const c10::Storage& storage,
const std::vector<c10::Stream>& streams,
tensorpipe::Message& message) const = 0;
// Same as above: this method cannot return a tensorpipe::Allocation::Tensor,
// thus it appends it to the `tensors` field of the tensorpipe::Allocation.
virtual at::DataPtr allocateTensorForReceiving(
c10::DeviceIndex deviceIndex,
size_t length,
const std::vector<c10::Stream>& streams,
tensorpipe::Allocation& allocation) const = 0;
virtual ~TensorpipeDeviceTypeConverter() = default;
};
extern TORCH_API std::array<
std::atomic<const TensorpipeDeviceTypeConverter*>,
static_cast<size_t>(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)>
device_type_converter_registry;
class TORCH_API TensorpipeDeviceTypeConverterRegistrar {
public:
TensorpipeDeviceTypeConverterRegistrar(
DeviceType,
const TensorpipeDeviceTypeConverter*);
};
#define C10_REGISTER_TENSORPIPE_DEVICE_TYPE_CONVERTER( \
DevType, TensorpipeDeviceTypeConverter) \
static ::torch::distributed::rpc::TensorpipeDeviceTypeConverterRegistrar \
C10_ANONYMOUS_VARIABLE(g_##DeviceType)( \
::c10::DeviceType::DevType, new TensorpipeDeviceTypeConverter());
inline const TensorpipeDeviceTypeConverter* getDeviceTypeConverter(
DeviceType type) {
return device_type_converter_registry[static_cast<size_t>(type)].load();
}
// A struct that holds pointers that keep alive all the memory that will be
// accessed by TensorPipe during a write operation.
struct TensorpipeWriteBuffers {
// Allocate on heap so pointers stay valid as we move the holder.
std::unique_ptr<MessageType> type;
std::unique_ptr<int64_t> id;
std::vector<char> payload;
std::vector<char> pickle;
// This contains the original tensors and the clones of the sparse tensors.
std::vector<torch::Tensor> tensors;
// This contains the copies of the data of the tensors that didn't own their
// memory, e.g., the ones created from torch::from_blob() with no deleter.
std::vector<std::vector<char>> copiedTensors;
};
// A struct that holds pointers that keep alive all the memory that will be
// accessed by TensorPipe during a read operation.
struct TensorpipeReadBuffers {
// Allocate on heap so pointers stay valid as we move the holder.
std::unique_ptr<MessageType> type;
std::unique_ptr<int64_t> id;
std::vector<char> payload;
std::vector<char> pickle;
std::vector<c10::DataPtr> tensors;
};
// Convert an RPC message into a TensorPipe message, plus a holder to all the
// data that must be kept alive while the write is performed asynchronously.
TORCH_API std::tuple<tensorpipe::Message, TensorpipeWriteBuffers>
tensorpipeSerialize(
const c10::intrusive_ptr<Message>& rpcMessage,
std::vector<c10::Device> devices,
const std::vector<c10::Stream>& streams);
// Allocate the buffers that will hold the incoming data. They will be managed
// by the returned holder, which must be kept alive until the asynchronous read
// has finished. Pointers to these buffers will be stored in the returned
// tensorpipe::Allocation struct.
TORCH_API std::pair<tensorpipe::Allocation, TensorpipeReadBuffers>
tensorpipeAllocate(
const tensorpipe::Descriptor& tpDescriptor,
const std::vector<c10::Stream>& streams);
// Convert a TensorPipe message back into an RPC message. This requires the data
// to be available and can thus only be performed once the asynchronous read has
// completed. The holder can be destroyed once this function returns.
TORCH_API c10::intrusive_ptr<Message> tensorpipeDeserialize(
const tensorpipe::Descriptor& tpDescriptor,
TensorpipeReadBuffers&& holder);
} // namespace torch::distributed::rpc
#endif // USE_TENSORPIPE
```
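The converter registry above is a fixed-size array of atomic pointers indexed by device type: registrar objects fill their slot at start-up and `getDeviceTypeConverter` reads it lock-free. The sketch below reproduces that pattern with toy types; `FakeConverter` and `kMaxDeviceTypes` are stand-ins for illustration.

```cpp
#include <array>
#include <atomic>
#include <cstddef>
#include <iostream>

// Toy converter interface; the real one is TensorpipeDeviceTypeConverter.
struct FakeConverter {
  const char* name;
};

constexpr size_t kMaxDeviceTypes = 4;  // stand-in for COMPILE_TIME_MAX_DEVICE_TYPES

// Registry: one atomic slot per device type. As a global it is zero-initialized,
// so unregistered slots read back as nullptr.
std::array<std::atomic<const FakeConverter*>, kMaxDeviceTypes> registry{};

void registerConverter(size_t deviceType, const FakeConverter* conv) {
  registry[deviceType].store(conv);
}

const FakeConverter* getConverter(size_t deviceType) {
  return registry[deviceType].load();
}

int main() {
  static const FakeConverter cpuConverter{"cpu"};
  registerConverter(/*deviceType=*/0, &cpuConverter);

  if (const FakeConverter* c = getConverter(0)) {
    std::cout << "converter for device type 0: " << c->name << '\n';
  }
  if (getConverter(1) == nullptr) {
    std::cout << "no converter registered for device type 1\n";
  }
  return 0;
}
```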
|
========================================================================================================================================================
SOURCE CODE FILE: torchscript_functions.h
LINES: 1
SIZE: 1.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\torchscript_functions.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/ivalue.h>
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/distributed/autograd/utils.h>
#include <torch/csrc/distributed/rpc/rref_context.h>
#include <torch/csrc/distributed/rpc/script_remote_call.h>
namespace torch::distributed::rpc {
// This function sends an RPC call to run a TorchScript function. Currently, the
// TorchScript function can only be a user-defined Python function with the
// "@torch.jit.script" annotation. The TorchScript function cannot be
// a class constructor, class method, instance method, or a script module.
// dst: destination worker name
// qualifiedName: torchscript function qualified name string like
// "moduleName::torchscriptFunctionName", e.g,
// "dist_autograd_test::my_py_add"
// stack: a bag of IValue args passed to torchscriptFunctionName
// It returns c10::intrusive_ptr<ivalue::Future>
c10::intrusive_ptr<c10::ivalue::Future> TORCH_API rpcTorchscript(
const std::string& dstWorkerName,
const c10::QualifiedName& qualifiedName,
const c10::FunctionSchema& functionSchema,
std::vector<c10::IValue> stack,
const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout,
const bool isAsyncExecution = false);
c10::intrusive_ptr<RRef> TORCH_API remoteTorchscript(
const std::string& dstWorkerName,
const c10::QualifiedName& qualifiedName,
const c10::FunctionSchema& functionSchema,
std::vector<c10::IValue>& stack,
const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout,
const bool isAsyncExecution = false);
} // namespace torch::distributed::rpc
```
|
========================================================================================================================================
SOURCE CODE FILE: types.h
LINES: 1
SIZE: 2.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\types.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/ivalue.h>
namespace torch::distributed::rpc {
using worker_id_t = int16_t;
using local_id_t = int64_t;
bool getAllowJitRRefPickle();
TORCH_API void enableJitRRefPickle();
TORCH_API void disableJitRRefPickle();
struct TORCH_API JitRRefPickleGuard {
JitRRefPickleGuard();
JitRRefPickleGuard(JitRRefPickleGuard&& other) = delete;
JitRRefPickleGuard(const JitRRefPickleGuard&) = delete;
JitRRefPickleGuard& operator=(const JitRRefPickleGuard&) = delete;
JitRRefPickleGuard& operator=(JitRRefPickleGuard&&) = delete;
~JitRRefPickleGuard();
};
struct TORCH_API GloballyUniqueId final {
GloballyUniqueId(worker_id_t createdOn, local_id_t localId);
GloballyUniqueId(const GloballyUniqueId& other) = default;
GloballyUniqueId& operator=(const GloballyUniqueId& other) = delete;
GloballyUniqueId(GloballyUniqueId&& other) = default;
GloballyUniqueId& operator=(GloballyUniqueId&& other) = delete;
~GloballyUniqueId() = default;
bool operator==(const GloballyUniqueId& other) const;
bool operator!=(const GloballyUniqueId& other) const;
at::IValue toIValue() const;
static GloballyUniqueId fromIValue(const at::IValue&);
struct Hash {
size_t operator()(const GloballyUniqueId& key) const {
return (uint64_t(key.createdOn_) << kLocalIdBits) | key.localId_;
}
};
static constexpr int kLocalIdBits = 48;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const worker_id_t createdOn_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const local_id_t localId_;
};
TORCH_API std::ostream& operator<<(
std::ostream& os,
const GloballyUniqueId& globalId);
using RRefId = GloballyUniqueId;
using ForkId = GloballyUniqueId;
using ProfilingId = GloballyUniqueId;
struct TORCH_API SerializedPyObj final {
SerializedPyObj(std::string&& payload, std::vector<at::Tensor>&& tensors)
: payload_(std::move(payload)), tensors_(std::move(tensors)) {}
std::vector<at::IValue> toIValues() &&;
static SerializedPyObj fromIValues(std::vector<at::IValue> value);
std::string payload_;
std::vector<at::Tensor> tensors_;
};
} // namespace torch::distributed::rpc
```
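`GloballyUniqueId::Hash` packs the 16-bit `createdOn_` worker id into the high bits and the 48-bit `localId_` into the low bits of a single 64-bit key. A small standalone sketch of that packing (assuming, as the layout implies, that the local id fits in 48 bits):

```cpp
#include <cstdint>
#include <iostream>

// Mirrors GloballyUniqueId::Hash: the worker id that created the id goes into
// the top bits, the local id into the low kLocalIdBits bits.
constexpr int kLocalIdBits = 48;

uint64_t packGlobalId(int16_t createdOn, int64_t localId) {
  return (uint64_t(createdOn) << kLocalIdBits) | uint64_t(localId);
}

int main() {
  // worker 3, local id 42 -> 3 * 2^48 + 42
  uint64_t key = packGlobalId(3, 42);
  std::cout << std::hex << "packed key: 0x" << key << '\n';
  std::cout << std::dec
            << "createdOn: " << (key >> kLocalIdBits)
            << ", localId: " << (key & ((uint64_t(1) << kLocalIdBits) - 1)) << '\n';
  return 0;
}
```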
|
========================================================================================================================================================
SOURCE CODE FILE: unpickled_python_call.h
LINES: 1
SIZE: 1.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\unpickled_python_call.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/types.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::distributed::rpc {
// This class converts the content in a PythonCall into py::object. This is a
// helper class to make sure that all argument deserialization is done before
// entering RequestCallbackImpl::processRpc(...), so that the deserialization
// related logic can be carried out in one spot instead of scattered in multiple
// places for different message types.
// NB: The reason for not consolidating this class into PythonCall is that
// PythonCall is a libtorch type which should not depend on Python types.
class TORCH_API UnpickledPythonCall : public RpcCommandBase {
public:
UnpickledPythonCall(
const SerializedPyObj& serializedPyObj,
bool isAsyncExecution);
~UnpickledPythonCall() override;
// toMessage() method is not implemented, as objects of this class should
// never be directly converted into a Message object.
c10::intrusive_ptr<Message> toMessageImpl() && override;
const py::object& pythonUdf() const;
inline bool isAsyncExecution() const {
return isAsyncExecution_;
}
private:
py::object pythonUdf_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const bool isAsyncExecution_;
};
} // namespace torch::distributed::rpc
```
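A hedged usage sketch of the class above: per the comment, the Python UDF is deserialized eagerly so that RequestCallbackImpl::processRpc only ever sees a ready-to-use py::object. The handler below is hypothetical and assumes an RPC-enabled libtorch build with the GIL held wherever the py::object is touched.
```cpp
// Hypothetical request handler; a sketch, not PyTorch's actual RPC callback.
#include <torch/csrc/distributed/rpc/unpickled_python_call.h>

using namespace torch::distributed::rpc;

void handle_python_call(const SerializedPyObj& serialized) {
  // All pickle deserialization happens here, up front.
  UnpickledPythonCall call(serialized, /*isAsyncExecution=*/false);

  // Downstream code only deals with the already-deserialized callable.
  const py::object& udf = call.pythonUdf();
  bool async_exec = call.isAsyncExecution();
  (void)udf;
  (void)async_exec;
}
```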
|
===============================================================================================================================================================
SOURCE CODE FILE: unpickled_python_remote_call.h
LINES: 1
SIZE: 1.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\unpickled_python_remote_call.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/types.h>
#include <torch/csrc/distributed/rpc/unpickled_python_call.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::distributed::rpc {
// This class converts the content of a PythonRemoteCall into a py::object. It
// is a helper class that ensures all argument deserialization is done before
// entering RequestCallbackImpl::processRpc(...), so that the deserialization
// related logic can be carried out in one spot instead of scattered in
// multiple places for different message types.
// NB: The reason for not consolidating this class into PythonRemoteCall is
// that PythonRemoteCall is a libtorch type which should not depend on Python
// types.
class TORCH_API UnpickledPythonRemoteCall final : public UnpickledPythonCall {
public:
explicit UnpickledPythonRemoteCall(
const SerializedPyObj& serializedPyObj,
const at::IValue& retRRefId,
const at::IValue& retForkId,
const bool isAsyncExecution);
const RRefId& rrefId() const;
const ForkId& forkId() const;
private:
RRefId rrefId_;
ForkId forkId_;
};
} // namespace torch::distributed::rpc
```
|
========================================================================================================================================
SOURCE CODE FILE: utils.h
LINES: 1
SIZE: 3.79 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\distributed\rpc\utils.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/Device.h>
#include <c10/core/Event.h>
#include <c10/core/Stream.h>
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/jit/serialization/pickle.h>
#include <torch/csrc/utils/byte_order.h>
namespace torch::distributed::rpc {
// Parse error message and return RPCErrorType based on the message.
TORCH_API RPCErrorType getRPCErrorType(const JitFuture& jitFuture);
// Create an error string given the error description and error type
TORCH_API std::string makeRPCError(
const std::string& rpcErrorStr,
RPCErrorType errorType);
// Given an RPC message received as a request over the wire, deserialize it into
// the appropriate 'RpcCommandBase' type.
TORCH_API std::unique_ptr<RpcCommandBase> deserializeRequest(
const Message& request);
// Given an RPC message received as a response over the wire, deserialize it
// into the appropriate 'RpcCommandBase' type, if the response is
// FORWARD_AUTOGRAD_RESP type, unwrap it, attach recvBackward() functions
// to received tensors and set the wrappedMsgType to its wrapped message type.
TORCH_API std::unique_ptr<RpcCommandBase> deserializeResponse(
const Message& response,
MessageType& wrappedMsgType);
// Given an RPC message received as a response over the wire, deserialize it
// into the valid IValue if the message is for a script rpc result,
// otherwise deserialize it into a dummy None IValue that will never be used.
// In this deserialization, we also attach recv rpc backward functions if
// needed.
IValue deserializeResptoIValueInternal(
RpcCommandBase& rpc,
MessageType messageType);
TORCH_API IValue deserializeRespToIValue(const Message& message);
// Note: format is subject to change and intended for RPCs.
// For saving persistently to disk, use torch::save().
TORCH_API std::string wireSerialize(
const std::vector<char>& payload,
const std::vector<at::Tensor>& tensors);
TORCH_API std::pair<std::vector<char>, std::vector<at::Tensor>> wireDeserialize(
const void* data,
size_t data_size);
// We use vector<char> as the type of blobs because it's what rpc::Message uses
// for its payload, even though it has the disadvantage that it cannot be
// allocated with uninitialized memory: it is always zeroed out.
// Some Tensors are effectively views of larger Tensors, where only a small
// subset of the Storage data is referenced. This normally is good and avoids
// copies when kept locally, but if we naively push the whole Storage over the
// wire, we'll end up with excess network traffic. This change clones tensors if
// we'd save at least half the data, and over a minimum hurdle.
TORCH_API c10::List<at::Tensor> cloneSparseTensors(
const std::vector<at::Tensor>& tensors);
// Combines an original payload and wrapped payload into the original payload.
// Used to generate the overall payload for the wrapped RPC.
TORCH_API void writeWrappedPayload(
std::vector<char>& originalPayload,
std::vector<char>& additionalPayload);
// Reads the additional, wrapped payload from a wrapped RPC off of the input
// payload. After this, payload will contain the payload of the original,
// un-wrapped RPC.
TORCH_API std::vector<at::IValue> readWrappedPayload(
std::vector<char>& payload,
const rpc::Message& message);
// Takes a list of events from autograd profiler and populates them into
// profiledEvents to be carried over RPC.
TORCH_API void populateRemoteProfiledEvents(
std::vector<torch::autograd::profiler::LegacyEvent>& profiledEvents,
const torch::autograd::profiler::ProfilerConfig& profilerConfig,
const std::vector<std::vector<torch::autograd::profiler::LegacyEvent>>&
eventLists);
} // namespace torch::distributed::rpc
```
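A sketch of a round trip through `wireSerialize`/`wireDeserialize` declared above, assuming a libtorch build with distributed RPC enabled. As the header itself notes, the wire format is not stable and is intended for RPC traffic, so this is illustrative rather than a persistence recipe; the function name is hypothetical.
```cpp
// Illustrative only: round-trips a payload and a tensor through the RPC wire
// format declared in utils.h.
#include <torch/csrc/distributed/rpc/utils.h>
#include <torch/torch.h>

void wire_roundtrip_example() {
  using namespace torch::distributed::rpc;

  std::vector<char> payload = {'h', 'i'};
  std::vector<at::Tensor> tensors = {torch::ones({2, 2})};

  // Serialize payload + tensors into a single blob.
  std::string blob = wireSerialize(payload, tensors);

  // Deserialize back into (payload, tensors).
  auto [payload_out, tensors_out] = wireDeserialize(blob.data(), blob.size());
  TORCH_CHECK(payload_out == payload);
  TORCH_CHECK(tensors_out.size() == 1);
}
```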
|
=====================================================================================================================================
SOURCE CODE FILE: cache_entry.h
LINES: 1
SIZE: 2.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\cache_entry.h
ENCODING: utf-8
```h
#pragma once
#include <Python.h>
#ifdef __cplusplus
#include <torch/csrc/dynamo/utils.h>
#include <torch/csrc/utils/pybind.h>
#include <list>
extern "C" {
#endif
/*
Our cache resides on the extra scratch space of the code object. The structure
of the cache is as follows:
-> ExtraState
-> CacheEntry (list)
-> guard_manager (a wrapper that contains the actual guard manager at its
attr named root)
-> code
-> FrameState
CacheEntry is a linked list node containing the guard_manager for guards
and the optimized code.
The FrameState is a PyDict that enables sharing between different frames. This
is used to detect dynamism in automatic dynamic shapes.
These two are encapsulated into a ExtraState.
*/
typedef struct CacheEntry CacheEntry;
typedef struct ExtraState ExtraState;
#ifdef __cplusplus
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(
"-Wdeprecated-copy-with-user-provided-dtor")
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wdeprecated-copy-dtor")
// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions)
typedef struct VISIBILITY_HIDDEN CacheEntry {
// check the guards: lambda: <locals of user function>: bool
py::object guard_manager;
// modified user bytecode (protected by guard_manager's guards)
py::object code;
// CompileId corresponding to this compilation
py::object compile_id;
// root guard manager if exists
void* root_mgr{nullptr};
// diff guard root guard manager if exists
void* diff_guard_root_mgr{nullptr};
// backend used to create this cache entry
PyObject* backend{nullptr};
// Reference to owning ExtraState
ExtraState* _owner{nullptr};
// Reference to this CacheEntry's location in owner's linked list
std::list<CacheEntry>::iterator _owner_loc;
// Reference to string representation of the CompileContext
std::string trace_annotation;
CacheEntry(const py::handle& guarded_code, PyObject* backend);
CacheEntry(const CacheEntry&) = default;
CacheEntry(CacheEntry&&) = default;
CacheEntry& operator=(const CacheEntry&) = default;
CacheEntry& operator=(CacheEntry&&) = default;
~CacheEntry();
// Warning: returns a reference whose lifetime is controlled by C++
py::object next();
void invalidate(py::object deleted_guard_manager);
// Called from the python side to update the diff guard root manager
void update_diff_guard_root_manager();
} CacheEntry;
C10_DIAGNOSTIC_POP()
C10_DIAGNOSTIC_POP()
#endif
// Returns borrowed reference
PyCodeObject* CacheEntry_get_code(CacheEntry* e);
// Returns borrowed string representation of CompileContext
const char* CacheEntry_get_trace_annotation(CacheEntry* e);
// Returns a borrowed reference to CacheEntry as a PyObject
// Warning: lifetime is controlled by C++
PyObject* CacheEntry_to_obj(CacheEntry* e);
#ifdef __cplusplus
} // extern "C"
#endif
```
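A small sketch of inspecting a CacheEntry through the C accessors declared above. Obtaining the `CacheEntry*` in the first place (from the code object's extra scratch space) is handled by other dynamo internals and is not shown; `dump_cache_entry` is hypothetical.
```cpp
// Hypothetical helper that prints the borrowed views exposed by the C API.
#include <torch/csrc/dynamo/cache_entry.h>
#include <stdio.h>

void dump_cache_entry(CacheEntry* e) {
  PyCodeObject* code = CacheEntry_get_code(e);             // borrowed reference
  const char* trace = CacheEntry_get_trace_annotation(e);  // borrowed string
  PyObject* as_obj = CacheEntry_to_obj(e);                 // borrowed; lifetime owned by C++
  printf("code=%p trace=%s obj=%p\n", (void*)code, trace, (void*)as_obj);
}
```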
|
===========================================================================================================================================
SOURCE CODE FILE: compiled_autograd.h
LINES: 1
SIZE: 49.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\compiled_autograd.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/TensorGeometry.h>
#include <ATen/core/ivalue.h>
#include <c10/core/impl/TorchDispatchModeTLS.h>
#include <c10/util/flat_hash_map.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/input_metadata.h>
#include <torch/csrc/autograd/saved_variable.h>
#include <torch/csrc/autograd/variable_info.h>
#include <torch/csrc/utils/python_stub.h>
#include <torch/csrc/utils/torch_dispatch_mode.h>
#include <typeindex>
#include <vector>
// see [Note: Compiled Autograd]
namespace torch::dynamo::autograd {
using namespace torch::autograd;
// This is a layer of indirection for calling methods on the Python
// AutogradCompilerInstance (referred to as the "py_compiler") from
// libtorch_cpu (where Python is not available).
// A PyCompilerInterfaceImpl in libtorch_python subclasses it and
// overrides the methods to do the actual calls back to Python.
struct TORCH_API PyCompilerInterface {
PyCompilerInterface() = default;
PyCompilerInterface(const PyCompilerInterface&) = delete;
PyCompilerInterface& operator=(const PyCompilerInterface&) = delete;
PyCompilerInterface(PyCompilerInterface&&) = delete;
PyCompilerInterface& operator=(PyCompilerInterface&&) = delete;
virtual ~PyCompilerInterface() = default;
// Invokes py_compiler.bind_function
virtual std::string bind_function(
PyObject* py_compiler,
const std::string& fn_name,
// NOLINTNEXTLINE(performance-unnecessary-value-param)
functional_apply_t fn,
// NOLINTNEXTLINE(performance-unnecessary-value-param)
std::vector<at::TypePtr> packed_args_schema,
bool is_custom_function = false,
bool is_traceable = true) {
TORCH_INTERNAL_ASSERT(false, "Needs to be overridden");
}
// Invokes py_compiler.method_name(fn_name, inputs, packed_args,
// output_metadata)
virtual variable_list call_function(
PyObject* py_compiler,
const char* method_name,
const std::string& fn_name,
const variable_list& inputs,
const ivalue_list& packed_args,
const c10::IValue& output_metadata) {
TORCH_INTERNAL_ASSERT(false, "Needs to be overridden");
}
virtual variable_list call_copy_slices_prologue(
PyObject* py_compiler,
const variable_list& inputs,
const at::TensorGeometry& base,
const at::TensorGeometry& view) {
TORCH_INTERNAL_ASSERT(false, "Needs to be overridden");
}
virtual variable_list call_copy_slices_epilogue(
PyObject* py_compiler,
const std::vector<bool>& needs_input_grad,
const at::Tensor& result,
const variable_list& res,
const at::Tensor& grad_slice) {
TORCH_INTERNAL_ASSERT(false, "Needs to be overridden");
}
virtual at::Tensor call_unpack(
PyObject* py_compiler,
std::optional<size_t> hook_id,
size_t hook_input_id) {
TORCH_INTERNAL_ASSERT(false, "Needs to be overridden");
}
};
TORCH_API const std::unique_ptr<PyCompilerInterface>& getPyCompilerInterface();
struct TORCH_API PyCompilerGuard {
explicit PyCompilerGuard(std::unique_ptr<PyCompilerInterface>&& impl);
PyCompilerGuard(const PyCompilerGuard&) = delete;
PyCompilerGuard& operator=(const PyCompilerGuard&) = delete;
PyCompilerGuard(PyCompilerGuard&&) = delete;
PyCompilerGuard& operator=(PyCompilerGuard&&) = delete;
~PyCompilerGuard();
};
// Including torch/csrc/autograd/engine.h breaks BC by somehow introducing
// symbol resolution issues. Instead of requiring downstream users to include
// engine.h to access collect_input_metadata, we provide it here (with a
// different name to avoid ambiguous symbols...)
TORCH_API std::vector<std::optional<InputMetadata>> get_input_metadata(
const edge_list& edges);
struct SizeInput {
// Note: int value is still needed when dynamic to pass as an arg
enum DynType : uint8_t { STATIC = 0, DYNAMIC = 1 };
SizeInput(DynType dt, int64_t v) : dyn_type(dt), value(v) {}
DynType dyn_type;
int64_t value;
};
struct CacheKeyBuffer {
CacheKeyBuffer(const uint8_t* key, uint16_t len) : data(new uint8_t[len]) {
std::memcpy(data.get(), key, len);
}
const uint8_t* get() const {
return data.get();
}
private:
// NOLINTNEXTLINE(*c-array*)
std::unique_ptr<uint8_t[]> data;
};
struct CacheKey {
// Key to find the next node in the shadow graph. We use C++ RTTI for the
// type of the node (ntype), then a key generated with a visitor pattern.
CacheKey(const std::type_index& ntype, const uint8_t* key, uint16_t len)
: node_type(ntype), key_size(len), key(key) {}
bool operator<(const CacheKey& other) const {
if (node_type != other.node_type) {
return node_type < other.node_type;
}
if (key_size != other.key_size) {
return key_size < other.key_size;
}
return std::memcmp(key, other.key, key_size) < 0;
}
bool operator==(const CacheKey& other) const {
return node_type == other.node_type && key_size == other.key_size &&
std::memcmp(key, other.key, key_size) == 0;
}
size_t hash() const {
// don't bother hashing the key data, common case 1 cache entry per node
return std::hash<std::type_index>()(node_type) ^ key_size;
}
std::type_index node_type;
uint16_t key_size;
const uint8_t* key;
};
struct NodeCall {
NodeCall(uint32_t id_, std::shared_ptr<Node> node_)
: id(id_), node(std::move(node_)) {}
void mark_output(int input_nr, int output_idx) {
graph_output.emplace_back(input_nr, output_idx);
}
uint32_t id;
std::shared_ptr<Node> node;
std::vector<std::pair<int, int>> tensor_pre_hooks;
std::vector<int> pre_hooks;
std::vector<int> post_hooks;
std::vector<int> post_acc_grad_hooks;
std::vector<std::pair<int, int>> graph_output;
bool needed = true;
};
struct NodeCalls : public std::unordered_map<Node*, NodeCall> {
NodeCall& lookup(const std::shared_ptr<Node>& function) {
auto it = find(function.get());
if (it == end()) {
it = emplace(function.get(), NodeCall(_next_id++, function)).first;
nodes.emplace_back(function.get());
}
return it->second;
}
const NodeCall& lookup(uint32_t id) const {
TORCH_INTERNAL_ASSERT(id < nodes.size());
auto it = find(nodes[id]);
TORCH_INTERNAL_ASSERT(it != end());
return it->second;
}
void clear() {
_next_id = 0;
std::unordered_map<Node*, NodeCall>::clear();
nodes.clear();
}
private:
uint32_t _next_id = 0;
std::vector<Node*> nodes;
};
struct TensorArg {
// Represents a de-duplicated tensor that will be passed into the graph
TensorArg(uint32_t i = 0) : id(i) {}
uint32_t index() const {
TORCH_INTERNAL_ASSERT(defined());
return id - 1;
}
bool defined() const {
return id != 0;
}
uint32_t id;
at::Tensor proxy_tensor;
};
struct TensorArgs {
// Manages a collection of TensorArgs and mappings from Tensors/SavedVariables
// to them. This also allows us to unpack SavedVariable exactly once and
// store the unpacked Tensor.
TensorArgs(const std::optional<size_t>& active_node_call_idx)
: active_node_call_idx(active_node_call_idx) {}
TensorArg& lookup(const at::Tensor& tensor, bool create = false) {
if (!tensor.defined()) {
return _undefined;
}
auto impl = tensor.unsafeGetTensorImpl();
auto it = _args.find(impl);
if (it == _args.end()) {
TORCH_INTERNAL_ASSERT(create && inputs.size() == _next_id - 1);
it = _args.emplace(impl, TensorArg(_next_id++)).first;
inputs.emplace_back(tensor);
if (active_node_call_idx.has_value()) {
input_origins.emplace_back(active_node_call_idx.value());
}
}
return it->second;
}
TensorArg& lookup(const SavedVariable& sv) {
if (auto it = _saved_variables.find(&sv); it != _saved_variables.end()) {
// unpacked before graph
return *it->second;
}
// unpacked in graph
auto it2 = _saved_variables_proxies.find(&sv);
TORCH_INTERNAL_ASSERT(it2 != _saved_variables_proxies.end());
return *it2->second;
}
TensorArg& add(const at::Tensor& tensor) {
return lookup(tensor, true);
}
TensorArg& add(const SavedVariable& sv, const std::shared_ptr<Node>& node) {
// no unpack hooks in this codepath
at::Tensor tensor = sv.unpack(node);
TensorArg& arg = add(tensor);
_saved_variables.emplace(&sv, &arg);
return arg;
}
// the concrete tensors that will get passed into the graph as inputs
std::vector<at::Tensor> inputs;
// NodeCall id of each input, only when verbose logging is enabled
std::vector<uint32_t> input_origins;
private:
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::optional<size_t>& active_node_call_idx;
std::unordered_map<const c10::TensorImpl*, TensorArg> _args;
// Every TensorArg from this is actually owned by _args (or _undefined) and
// that's why we have an un-owned pointer here.
std::unordered_map<const SavedVariable*, TensorArg*> _saved_variables;
std::unordered_map<const SavedVariable*, TensorArg*> _saved_variables_proxies;
TensorArg _undefined;
uint32_t _next_id = 1; // id=0 used by _undefined
};
struct LiftedIValueArg {
LiftedIValueArg() = delete;
LiftedIValueArg(const at::IValue* ptr)
: actual_ptr(ptr), proxy(at::IValue::uninitialized()) {}
const at::IValue* actual_ptr; // lifetime handled by autograd node
at::IValue proxy;
};
struct LiftedIValueArgs {
LiftedIValueArgs(const std::optional<size_t>& active_node_call_idx)
: active_node_call_idx(active_node_call_idx) {}
at::IValue& next_proxy(const at::IValue* actual_ptr) {
TORCH_INTERNAL_ASSERT(next < args.size());
auto& iv_arg = args.at(next++);
TORCH_INTERNAL_ASSERT(iv_arg.actual_ptr == actual_ptr);
return iv_arg.proxy;
}
void add(const at::IValue* iv) {
args.emplace_back(iv);
if (active_node_call_idx.has_value()) {
args_origins.emplace_back(active_node_call_idx.value());
}
}
std::vector<LiftedIValueArg> args;
size_t next = 0;
// NodeCall id of each arg, only when verbose logging is enabled
std::vector<uint32_t> args_origins;
private:
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::optional<size_t>& active_node_call_idx;
};
struct AutogradCompilerCall {
AutogradCompilerCall(SizeInput::DynType default_dyn_type)
: active_node_call_idx(std::nullopt),
tensor_args(active_node_call_idx),
lifted_ivalue_args(active_node_call_idx),
default_dyn_type(default_dyn_type) {}
void add_size_input(const c10::SymInt& s) {
all_size_inputs.emplace_back(
default_dyn_type, s.guard_int(__FILE__, __LINE__));
if (active_node_call_idx.has_value()) {
size_input_origins.emplace_back(active_node_call_idx.value());
}
}
size_t emplace_hook(c10::SafePyObject&& fn) {
hooks.emplace_back(std::move(fn));
return hooks.size() - 1;
}
size_t emplace_packed_input(c10::SafePyObject&& input) {
packed_inputs.emplace_back(std::move(input));
return packed_inputs.size() - 1;
}
void set_active_node_call_idx(size_t node_call_idx) {
active_node_call_idx = node_call_idx;
}
std::optional<size_t> active_node_call_idx;
TensorArgs tensor_args;
std::vector<SizeInput> all_size_inputs;
LiftedIValueArgs lifted_ivalue_args;
std::vector<int64_t> dyn_size_inputs;
std::vector<c10::SafePyObject> hooks;
std::vector<c10::SafePyObject> packed_inputs;
NodeCalls node_calls;
SizeInput::DynType default_dyn_type;
// NodeCall id of each size, only when verbose logging is enabled
std::vector<uint32_t> size_input_origins;
std::unordered_map<const SavedVariable*, std::pair<size_t, size_t>>
sv_to_hooks;
// pynode -> backward and backward state idx
std::unordered_map<const Node*, std::pair<size_t, std::optional<size_t>>>
pynode_objs;
};
class CompiledNodeArgs {
// CompiledNodeArgs builds a representation of the constant values found
// across all the nodes in the compiled graph, via 'collect' overloads. The
// collected constants are specialized on by concatenation into a cache key.
// Tensor, symint arguments (which are lifted to become graph inputs rather
// than specialized on) are forwarded to the compiler and not included in the
// key.
public:
void collect(const TensorArg& t) {
collect_size(t.id);
if (t.defined()) {
const at::Tensor& tensor = _compiler.tensor_args.inputs[t.index()];
// including these in the cache key means dynamo-level tensor guards can
// be skipped
collect(tensor.device());
collect(tensor.dtype());
collect(tensor.requires_grad());
}
}
void collect(const at::Tensor& t) {
collect(_compiler.tensor_args.add(t));
}
void collect(const SavedVariable& sv, bool is_output) {
if (auto hook_data = sv.retrieve_unpack_hook_data();
hook_data.has_value()) {
// hooks, unpack in graph
auto& [hook, packed_input] = hook_data.value();
size_t hook_id = _compiler.emplace_hook(std::move(hook));
// rely on dynamo to dedup packed tensors from unpacked tensors
size_t input_id = _compiler.emplace_packed_input(std::move(packed_input));
_compiler.sv_to_hooks.emplace(&sv, std::make_pair(hook_id, input_id));
} else {
// no hooks, unpack now
collect(
_compiler.tensor_args.add(sv, is_output ? _node_call.node : nullptr));
}
}
void collect(const c10::SymInt& t) {
_compiler.add_size_input(t);
}
void collect(const std::vector<SavedVariable>& t, bool is_output) {
collect_size(t.size());
for (const SavedVariable& i : t) {
collect(i, is_output);
}
}
template <typename T>
void collect(const std::vector<T>& t) {
collect_size(t.size());
for (const T& i : t) {
collect(i);
}
}
void collect(const c10::ArrayRef<SavedVariable>& t, bool is_output) {
collect_size(t.size());
for (const SavedVariable& i : t) {
collect(i, is_output);
}
}
template <typename T>
void collect(const c10::ArrayRef<T>& t) {
collect_size(t.size());
for (const T& i : t) {
collect(i);
}
}
template <typename T>
void collect(const c10::OptionalArray<T>& t) {
collect(t.list);
}
template <typename T>
void collect(const std::optional<T>& t) {
if (cond(t.has_value())) {
// NOLINTNEXTLINE(bugprone-unchecked-optional-access)
collect(*t);
}
}
template <typename A, typename B>
void collect(const std::pair<A, B>& t) {
collect(t.first);
collect(t.second);
}
template <typename V>
void collect(const ska::flat_hash_map<std::string, V>& m) {
collect_size(m.size());
std::vector<std::string> keys;
keys.reserve(m.size());
std::transform(
m.begin(), m.end(), std::back_inserter(keys), [](const auto& entry) {
return entry.first;
});
std::sort(keys.begin(), keys.end());
for (const auto& k : keys) {
collect(k);
collect(m.at(k));
}
}
void collect(const at::IValue& iv, bool nested = false) {
// used by AutogradContext::saved_data from CppNode
if (iv.isList()) {
c10::List<at::IValue> list = iv.toList();
collect_size(list.size());
for (auto&& value : list) {
collect(value, true);
}
} else if (iv.isGenericDict()) {
c10::Dict<at::IValue, at::IValue> ordered_dict = iv.toGenericDict();
collect_size(ordered_dict.size());
// NOLINTNEXTLINE(modernize-loop-convert)
for (auto it = ordered_dict.begin(); it != ordered_dict.end(); it++) {
collect(it->key());
collect(it->value(), true);
}
} else if (iv.isTensor()) {
collect(iv.toTensor());
} else if (
!nested &&
(iv.isInt() || iv.isSymInt() || iv.isDouble() || iv.isSymFloat())) {
// can't lift ivalues nested in collections
_compiler.lifted_ivalue_args.add(&iv);
} else {
try {
collect(static_cast<uint64_t>(at::IValue::hash(iv)));
} catch (const std::runtime_error& e) {
std::string msg =
"Compiled autograd can not trace unhashable IValues, error: " +
std::string(e.what());
TORCH_CHECK_NOT_IMPLEMENTED(false, msg);
}
}
}
void collect(const c10::Scalar& t) {
auto type = t.type();
specialize_on_bytes(type);
if (type == c10::ScalarType::Double) {
collect(t.toDouble());
} else if (type == c10::ScalarType::Long) {
collect(t.toLong());
} else if (type == c10::ScalarType::Bool) {
collect(t.toBool());
} else if (type == c10::ScalarType::ComplexDouble) {
auto c = t.toComplexDouble();
collect(c.real());
collect(c.imag());
} else {
TORCH_INTERNAL_ASSERT(false);
}
}
void collect(const c10::TensorOptions& t) {
collect(t.device());
collect(t.dtype());
collect(t.layout());
collect(t.requires_grad());
collect(t.pinned_memory());
collect(t.memory_format_opt());
}
void collect(const at::TensorGeometry& t) {
collect(t.sym_sizes());
collect(t.sym_strides());
collect(t.sym_storage_offset());
}
void collect(const torch::autograd::TypeAndSize& t) {
collect(t.sym_sizes);
collect(t.options);
}
void collect(const c10::Device& t) {
collect(t.type());
collect(t.index());
}
void collect(const std::string& t) {
collect_size(t.size());
for (char c : t) {
collect(c);
}
}
void collect(const caffe2::TypeMeta& t) {
specialize_on_bytes(t.id());
}
void collect(const std::shared_ptr<Node>& t) {
// Note: this only captures the ID of the node, not everything contained
// inside it. It is used for tracking connections between nodes; the actual
// details of the node itself must be handled by a separate call to
// `node->compiled_args()`.
if (cond((bool)t)) {
collect(_compiler.node_calls.lookup(t));
}
}
void collect(const NodeCall& t) {
collect_size(t.id);
collect(t.graph_output);
collect_hooks_from(t.node.get());
}
void collect(const Edge& t) {
if (cond(t.is_valid())) {
collect_size(_compiler.node_calls.lookup(t.function).id);
collect_size(t.input_nr);
collect(t.function->input_metadata(t.input_nr)); // for validate_outputs
}
}
void collect(const InputMetadata& t) {
TORCH_CHECK(!t.is_nested_tensor(), "NestedTensor not implemented");
collect(t.options());
collect(t.is_tensor_subclass());
collect(t.shape_as_dim_vector());
}
void collect(const VariableInfo& t) {
collect(t.layout);
collect(t.device);
collect(t.scalar_type);
collect(t.size);
collect(t.requires_grad);
collect(t.is_empty);
}
bool cond(bool cond) {
collect(cond);
return cond;
}
#define COLLECT_AS_BYTES(T) \
void collect(T t) { \
specialize_on_bytes(t); \
}
COLLECT_AS_BYTES(c10::ScalarType)
COLLECT_AS_BYTES(c10::DeviceType)
COLLECT_AS_BYTES(c10::Layout)
COLLECT_AS_BYTES(c10::MemoryFormat)
COLLECT_AS_BYTES(int8_t)
COLLECT_AS_BYTES(int16_t)
COLLECT_AS_BYTES(int32_t)
COLLECT_AS_BYTES(int64_t)
COLLECT_AS_BYTES(uint8_t)
COLLECT_AS_BYTES(uint16_t)
COLLECT_AS_BYTES(uint32_t)
COLLECT_AS_BYTES(uint64_t)
COLLECT_AS_BYTES(bool)
COLLECT_AS_BYTES(float)
COLLECT_AS_BYTES(double)
#undef COLLECT_AS_BYTES
void collect_hooks_from(Node* fn) {
TORCH_CHECK(
fn->retains_grad_hooks().empty(),
"retains_grad_hooks not implemented for compiled autograd");
for (auto& i : fn->tensor_pre_hooks()) {
i->compiled_args(*this);
}
for (auto& i : fn->pre_hooks()) {
i->compiled_args(*this);
}
for (auto& i : fn->post_hooks()) {
i->compiled_args(*this);
}
collect_size(_node_call.tensor_pre_hooks.size());
collect_size(_node_call.pre_hooks.size());
collect_size(_node_call.post_hooks.size());
for (const auto& h : _node_call.tensor_pre_hooks) {
collect_size(static_cast<size_t>(h.second));
}
}
CacheKey key() const {
Node* node = _node_call.node.get();
return CacheKey(
typeid(*node), _specialization_key, _specialization_key_size);
}
void collect_pynode_objs(
const Node* pynode,
c10::SafePyObject&& bwd,
std::optional<c10::SafePyObject>&& bwd_state) {
size_t bwd_idx = _compiler.emplace_hook(std::move(bwd));
std::optional<size_t> bwd_state_idx;
if (auto state = std::move(bwd_state); state.has_value()) {
bwd_state_idx = _compiler.emplace_hook(std::move(state.value()));
}
_compiler.pynode_objs.emplace(
pynode, std::make_pair(bwd_idx, bwd_state_idx));
}
void add_tensor_pre_hook(c10::SafePyObject&& obj, int index) {
auto fn_id = _compiler.emplace_hook(std::move(obj));
collect_size(fn_id);
_node_call.tensor_pre_hooks.emplace_back(fn_id, index);
}
void add_pre_hook(c10::SafePyObject&& obj) {
auto fn_id = _compiler.emplace_hook(std::move(obj));
collect_size(fn_id);
_node_call.pre_hooks.emplace_back(fn_id);
}
void add_post_hook(c10::SafePyObject&& obj) {
auto fn_id = _compiler.emplace_hook(std::move(obj));
collect_size(fn_id);
_node_call.post_hooks.emplace_back(fn_id);
}
void add_post_acc_grad_hook(c10::SafePyObject&& obj) {
auto fn_id = _compiler.emplace_hook(std::move(obj));
collect_size(fn_id);
_node_call.post_acc_grad_hooks.emplace_back(fn_id);
}
// Need to template the size_t to silence internal 32-bit build errors due to
// a mix of -Werror, -Wtautological-type-limit-compare and
// -Wunknown-pragmas
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, void> collect_size(T s) {
// we expect sizes to be small, so try to cram them into a single byte
constexpr uint8_t encode_as_u64 = std::numeric_limits<uint8_t>::max();
constexpr uint8_t encode_as_u32 = encode_as_u64 - 1;
constexpr uint8_t encode_as_u16 = encode_as_u64 - 2;
if (C10_UNLIKELY(s >= encode_as_u16)) {
// first write a byte indicating the path we followed, then the data
if (s <= std::numeric_limits<uint16_t>::max()) {
// 3 bytes
specialize_on_bytes(encode_as_u16);
specialize_on_bytes(static_cast<uint16_t>(s));
} else if (s <= std::numeric_limits<uint32_t>::max()) {
// 5 bytes
specialize_on_bytes(encode_as_u32);
specialize_on_bytes(static_cast<uint32_t>(s));
} else {
// 9 bytes
specialize_on_bytes(encode_as_u64);
specialize_on_bytes(s);
}
} else {
// happy case, 1 byte
specialize_on_bytes(static_cast<uint8_t>(s));
}
}
SizeInput::DynType set_default_dyn_type(SizeInput::DynType default_dyn_type) {
return std::exchange(_compiler.default_dyn_type, default_dyn_type);
}
CompiledNodeArgs(AutogradCompilerCall& compiler, NodeCall& node_call)
: _compiler(compiler),
_node_call(node_call),
_specialization_key(
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
(uint8_t*)std::malloc(_specialization_key_storage)) {}
CompiledNodeArgs(const CompiledNodeArgs&) = delete;
CompiledNodeArgs(CompiledNodeArgs&&) = delete;
CompiledNodeArgs& operator=(const CompiledNodeArgs&) = delete;
CompiledNodeArgs& operator=(CompiledNodeArgs&&) = delete;
~CompiledNodeArgs() {
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
std::free(_specialization_key);
}
private:
template <typename T>
void specialize_on_bytes(const T& t) {
while (C10_UNLIKELY(
_specialization_key_size + sizeof(T) > _specialization_key_storage)) {
_specialization_key_storage *= 2;
// NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
_specialization_key = (uint8_t*)std::realloc(
_specialization_key, _specialization_key_storage);
}
std::memcpy(_specialization_key + _specialization_key_size, &t, sizeof(T));
_specialization_key_size += sizeof(T);
}
AutogradCompilerCall& _compiler;
NodeCall& _node_call;
size_t _specialization_key_size{0};
size_t _specialization_key_storage{1024};
uint8_t* _specialization_key;
};
struct TraceState {
TraceState(std::vector<std::optional<c10::SymInt>>&& ss, size_t num_outputs)
: sym_sizes(std::move(ss)), outputs(num_outputs) {}
void debug_asserts() {
TORCH_INTERNAL_ASSERT(sym_sizes_index == sym_sizes.size());
}
std::optional<c10::SymInt> next_sym_size() {
TORCH_INTERNAL_ASSERT(sym_sizes_index < sym_sizes.size());
return sym_sizes[sym_sizes_index++];
}
size_t sym_sizes_index{0};
std::vector<std::optional<c10::SymInt>> sym_sizes;
variable_list outputs;
};
class SwapSavedVariables {
// SwapSavedVariables is used during the tracing/compilation phase after a
// cache-miss. It swaps any 'lifted' inputs (tensors, symints) to proxy nodes,
// allows tracing to happen, then swaps them back afterwards.
public:
std::pair<size_t, std::optional<size_t>> retrieve_pynode_objs(
Node* pynode) const {
auto it = compiler.pynode_objs.find(pynode);
TORCH_INTERNAL_ASSERT(it != compiler.pynode_objs.end());
return it->second;
}
void before(at::Tensor& t) {
TensorArg& arg = compiler.tensor_args.lookup(t);
stashed_tensors.save(&t, std::move(t));
if (arg.defined()) {
TORCH_INTERNAL_ASSERT(arg.proxy_tensor.defined());
t = arg.proxy_tensor;
}
}
void after(at::Tensor& t) {
stashed_tensors.restore(&t);
}
void before(SavedVariable& t) {
if (auto it = compiler.sv_to_hooks.find(&t);
it != compiler.sv_to_hooks.end()) {
const auto& pyinterface =
torch::dynamo::autograd::getPyCompilerInterface();
auto proxy_tensor = pyinterface->call_unpack(
get_py_compiler(), it->second.first, it->second.second);
stashed_variables.save(&t, std::move(t));
bool prior = at::SavedTensorDefaultHooks::set_tracing(true);
t = SavedVariable(proxy_tensor, false);
at::SavedTensorDefaultHooks::set_tracing(prior);
} else {
// no hooks, was already unpacked
TensorArg& arg = compiler.tensor_args.lookup(t);
stashed_variables.save(&t, std::move(t));
if (arg.defined()) {
bool prior = at::SavedTensorDefaultHooks::set_tracing(true);
TORCH_INTERNAL_ASSERT(arg.proxy_tensor.defined());
t = SavedVariable(arg.proxy_tensor, false);
at::SavedTensorDefaultHooks::set_tracing(prior);
}
}
}
void after(SavedVariable& t) {
stashed_variables.restore(&t);
}
void before(c10::SymInt& t) {
stashed_symints.save(&t, c10::SymInt(t));
auto opt_value = state.next_sym_size();
if (opt_value.has_value()) {
t = *opt_value; // dynamic shape
}
}
void after(c10::SymInt& t) {
stashed_symints.restore(&t);
}
void before(at::IValue& iv) {
if (iv.isTensor()) {
before(iv.toTensor());
} else {
stashed_ivalues.save(&iv, at::IValue(iv));
if (iv.isInt() || iv.isSymInt() || iv.isDouble() || iv.isSymFloat()) {
iv = compiler.lifted_ivalue_args.next_proxy(&iv);
}
}
}
void after(at::IValue& t) {
if (t.isTensor()) {
after(t.toTensor());
} else {
stashed_ivalues.restore(&t);
}
}
void before(Edge& t) {
if (t.is_valid()) {
// need for symints used by validate_outputs
before(t.function->mutable_input_metadata(t.input_nr));
}
}
void after(Edge& t) {
if (t.is_valid()) {
after(t.function->mutable_input_metadata(t.input_nr));
}
}
void before(InputMetadata& t) {
before(t.mutable_shape_as_dim_vector());
}
void after(InputMetadata& t) {
after(t.mutable_shape_as_dim_vector());
}
void before(at::TensorGeometry& t) {
before(t.mutable_sizes());
before(t.mutable_strides());
before(t.mutable_storage_offset());
t.recompute();
}
void after(at::TensorGeometry& t) {
after(t.mutable_sizes());
after(t.mutable_strides());
after(t.mutable_storage_offset());
t.recompute();
}
void before(torch::autograd::TypeAndSize& t) {
before(t.sym_sizes);
before(t.options);
}
void after(torch::autograd::TypeAndSize& t) {
after(t.sym_sizes);
after(t.options);
}
void before(VariableInfo& t) {
before(t.size);
}
void after(VariableInfo& t) {
after(t.size);
}
template <typename T>
void before(std::vector<T>& t) {
for (T& i : t) {
before(i);
}
}
template <typename T>
void after(std::vector<T>& t) {
for (T& i : t) {
after(i);
}
}
template <typename T, unsigned N>
void before(c10::SmallVector<T, N>& t) {
for (T& i : t) {
before(i);
}
}
template <typename T, unsigned N>
void after(c10::SmallVector<T, N>& t) {
for (T& i : t) {
after(i);
}
}
template <typename T>
void before(c10::OptionalArray<T>& t) {
before(t.list);
}
template <typename T>
void after(c10::OptionalArray<T>& t) {
after(t.list);
}
template <typename T>
void before(std::optional<T>& t) {
if (t.has_value()) {
before(*t);
}
}
template <typename T>
void after(std::optional<T>& t) {
if (t.has_value()) {
after(*t);
}
}
template <typename V>
void before(ska::flat_hash_map<std::string, V>& m) {
std::vector<std::string> keys;
keys.reserve(m.size());
std::transform(
m.begin(), m.end(), std::back_inserter(keys), [](const auto& entry) {
return entry.first;
});
std::sort(keys.begin(), keys.end());
for (auto& k : keys) {
before(m.at(k));
}
}
template <typename V>
void after(ska::flat_hash_map<std::string, V>& m) {
for (auto& [_, v] : m) {
after(v);
}
}
#define NO_OP_VISIT(T) \
void before(const T&) {} \
void after(const T&) {}
NO_OP_VISIT(caffe2::TypeMeta)
NO_OP_VISIT(c10::Device)
NO_OP_VISIT(c10::DeviceType)
NO_OP_VISIT(c10::Layout)
NO_OP_VISIT(c10::MemoryFormat)
NO_OP_VISIT(c10::ScalarType)
NO_OP_VISIT(c10::Scalar)
NO_OP_VISIT(c10::TensorOptions)
NO_OP_VISIT(std::string)
NO_OP_VISIT(int64_t)
NO_OP_VISIT(bool)
NO_OP_VISIT(double)
#undef NO_OP_VISIT
SwapSavedVariables(
AutogradCompilerCall& c,
TraceState& s,
PyObject* p,
const NodeCall& n)
: compiler(c), state(s), py_compiler(p), curr_node_call(n) {}
PyObject* get_py_compiler() const {
return py_compiler;
}
const NodeCall& get_curr_node_call() {
return curr_node_call;
}
void debug_asserts() {
stashed_variables.debug_assert();
stashed_tensors.debug_assert();
stashed_symints.debug_assert();
}
private:
template <typename T>
struct Stashed {
Stashed(T&& v) : prior_value(std::move(v)) {}
T prior_value;
// Note: we need count here to support duplicate calls to before()
// which happen when we have multiple autograd::Edge objects pointing
// to the same autograd::Node
int count = 1;
};
template <typename T>
struct StashedVars : public std::unordered_map<const T*, Stashed<T>> {
void save(const T* key, T&& value) {
auto [it, inserted] = this->try_emplace(key, std::move(value));
if (!inserted) {
// keep the value from the prior save()
it->second.count++;
}
}
void restore(T* var) {
auto it = this->find(var);
TORCH_INTERNAL_ASSERT(it != this->end(), "missing before())");
if (--it->second.count == 0) {
// restore the value on the last restore()
*var = std::move(it->second.prior_value);
this->erase(it);
}
}
void debug_assert() {
TORCH_INTERNAL_ASSERT(this->empty(), "missing call to after()");
}
};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
AutogradCompilerCall& compiler;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
TraceState& state;
// This is a borrowed reference: we do not increment or decrement its
// refcount, and its lifetime strictly outlives this object's.
PyObject* py_compiler;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const NodeCall& curr_node_call;
// These mappings are used to save the prior values when we overwrite things
// in before(). In after(), we use these to cleanup after ourselves.
StashedVars<SavedVariable> stashed_variables;
StashedVars<at::Tensor> stashed_tensors;
StashedVars<c10::SymInt> stashed_symints;
StashedVars<at::IValue> stashed_ivalues;
};
// NOTE: [Compiled Autograd and backward functions]
// Built-in autograd nodes have functional apply variants
// (e.g. MulBackward0_apply_functional). Compiled Autograd's initial graph
// capture wants to take a variant of this function and proxy it into the graph.
// Every autograd node defines an apply_with_saved function that, when invoked,
// proxies a call to a function into the Compiled Autograd graph.
//
// Some requirements that we have are:
// - The proxy'ed function must have inputs that are FX-graphable types.
// - Windows has a DLL symbol limit of 65536.
// - Node::apply_with_saved is in libtorch_cpu which does not have direct access
// to Python
//
// There were multiple ways to skin the cat, but what we end up doing is:
// - for e.g. MulBackward0_apply_functional, we create a new C++ function
// MulBackward0_apply_functional_ivalue that accepts vector<IValue>.
// - We define how to pack and unpack arbitrary C++ types into IValues.
// - apply_with_saved passes MulBackward0_apply_functional_ivalue and
// the IValue arguments to Python via an indirection.
// In Python, these get proxy'ed into a graph.
// Helper struct for packing/unpacking an arbitrary C++ type into a single
// IValue. There are various full and partial specializations for IValuePacker
// to handle packing specific types (like TensorOptions) into an IValue.
template <typename T>
struct IValuePacker {
// Defines how to pack T into an IValue.
static at::IValue pack(const T& t) {
return t;
}
// Defines how to unpack an IValue into T.
static T unpack(const at::IValue& t) {
return t.to<T>();
}
// Returns the TypePtr for the IValue (this is like the "type" of the IValue).
// We use this when passing the packed IValue from Python to C++.
// In Python, the IValue is just a PyObject* with the native type.
// For example, it may be a Python int, a Python List[int], etc.
// When passing this PyObject* into C++, we need to know how to parse it
// into a C++ type that then gets put into an IValue.
// That's what the TypePtr is for: it contains the information to do the
// parsing. See torch::jit::toIValue for more information.
static at::TypePtr packed_type() {
#ifdef _WIN32
// NB: the if-constexpr usage triggers compilation errors on Windows
// with certain compiler settings
// (see https://github.com/pytorch/pytorch/pull/144707 for examples).
// It's not clear what the problem is, so we're going to ignore it for now.
TORCH_INTERNAL_ASSERT(false, "torch.compile not supported on Windows");
#else
if constexpr (::std::is_same_v<T, at::Tensor>) {
return at::TensorType::get();
} else if constexpr (::std::is_same_v<T, int64_t>) {
return at::IntType::get();
} else if constexpr (::std::is_same_v<T, c10::SymInt>) {
return at::SymIntType::get();
} else if constexpr (::std::is_same_v<T, bool>) {
return at::BoolType::get();
} else if constexpr (::std::is_same_v<T, double>) {
return at::FloatType::get();
} else if constexpr (::std::is_same_v<T, c10::SymFloat>) {
return at::SymFloatType::get();
} else if constexpr (::std::is_same_v<T, c10::SymBool>) {
return at::SymBoolType::get();
} else if constexpr (::std::is_same_v<T, c10::Layout>) {
return at::LayoutType::get();
} else if constexpr (::std::is_same_v<T, ::std::string>) {
return at::StringType::get();
} else if constexpr (::std::is_same_v<T, at::Device>) {
return at::DeviceObjType::get();
} else if constexpr (::std::is_same_v<T, at::Scalar>) {
return at::NumberType::get();
} else if constexpr (::std::is_same_v<T, at::MemoryFormat>) {
return at::MemoryFormatType::get();
} else if constexpr (::std::is_same_v<T, at::ScalarType>) {
return at::ScalarTypeType::get();
} else {
// If you got here, you have probably added a member of a new type
// to a built-in C++ autograd node.
// Unfortunately, we don't know how to handle this type yet.
// To get this new type to work with Compiled Autograd, please
// either change it to be an IValue-constructible type, or
// define how to pack and unpack an object of this type into an IValue
// by creating a specialization of IValuePacker for this type.
// See NOTE: [Compiled Autograd and backward functions] for context.
TORCH_INTERNAL_ASSERT(false, "IValuePacker not implemented for type");
return at::NoneType::get();
}
#endif
}
};
template <>
struct IValuePacker<size_t> {
static at::IValue pack(const size_t& t) {
// We generally use size_t as the size of a list of Tensors or number of
// dimensions. The number of dimensions generally does not exceed 64
// (TensorIterator has that limitation), and lists of Tensors generally do
// not exceed the int64_t max (you'd probably run out of RAM or run into
// significant Tensor overhead). If you run into this limitation the fix is
// to figure out how to pack size_t into int64_t. Note that size_t has some
// weird behavior on Mac OS.
uint64_t maximum_value = std::numeric_limits<int64_t>::max();
TORCH_INTERNAL_ASSERT(
static_cast<uint64_t>(t) <= maximum_value,
"size_t too large to pack into IValue");
return static_cast<int64_t>(t); // pack as int64_t
}
static size_t unpack(const at::IValue& t) {
return static_cast<size_t>(t.toInt());
}
static at::TypePtr packed_type() {
return IValuePacker<int64_t>::packed_type();
}
};
template <>
struct IValuePacker<std::vector<at::SymInt>> {
static at::IValue pack(const std::vector<at::SymInt>& t) {
return t;
}
static std::vector<at::SymInt> unpack(const at::IValue& t) {
// We need this because there's no t.to<std::vector<at::SymInt>>() override?
return t.toSymIntVector();
}
static at::TypePtr packed_type() {
return at::ListType::create(at::SymIntType::get());
}
};
template <>
struct IValuePacker<VariableInfo> {
static at::IValue pack(const VariableInfo& t) {
auto tuple = std::make_tuple(
t.layout, t.device, t.scalar_type, t.size, t.requires_grad, t.is_empty);
return tuple;
}
static VariableInfo unpack(const at::IValue& t) {
auto tuple = t.toTuple();
const auto& tuple_elements = tuple->elements();
const auto elements = tuple_elements.asArrayRef();
TORCH_INTERNAL_ASSERT(elements.size() == 6);
VariableInfo v;
v.layout = elements[0].toLayout();
v.device = elements[1].toDevice();
v.scalar_type = elements[2].toScalarType();
v.size = elements[3].toSymIntVector();
v.requires_grad = elements[4].toBool();
v.is_empty = elements[5].toBool();
return v;
}
static at::TypePtr packed_type() {
return at::TupleType::create({
at::LayoutType::get(),
at::DeviceObjType::get(),
at::ScalarTypeType::get(),
at::ListType::create(at::SymIntType::get()),
at::BoolType::get(),
at::BoolType::get(),
});
}
};
template <>
struct IValuePacker<caffe2::TypeMeta> {
static at::IValue pack(const caffe2::TypeMeta& t) {
return at::typeMetaToScalarType(t); // pack as at::ScalarType
}
static caffe2::TypeMeta unpack(const at::IValue& t) {
return caffe2::TypeMeta::fromScalarType(t.to<at::ScalarType>());
}
static at::TypePtr packed_type() {
return IValuePacker<at::ScalarType>::packed_type();
}
};
inline std::optional<at::ScalarType> optTypeMetaToScalarType(
const std::optional<caffe2::TypeMeta>& t) {
if (t.has_value()) {
return at::typeMetaToScalarType(t.value());
} else {
return std::nullopt;
}
}
using packed_tensoroptions_t = std::tuple<
std::optional<bool>,
std::optional<at::MemoryFormat>,
std::optional<at::Device>,
std::optional<at::ScalarType>,
std::optional<at::Layout>,
std::optional<bool>>;
inline packed_tensoroptions_t pack_TensorOptions(const at::TensorOptions& t) {
auto tuple = std::make_tuple(
t.requires_grad_opt(),
t.memory_format_opt(),
t.device_opt(),
optTypeMetaToScalarType(t.dtype_opt()),
t.layout_opt(),
t.pinned_memory_opt());
return tuple;
}
inline at::TensorOptions unpack_TensorOptions(
const packed_tensoroptions_t& tuple) {
at::TensorOptions result;
auto maybe_requires_grad = std::get<0>(tuple);
if (maybe_requires_grad.has_value()) {
result = result.requires_grad(maybe_requires_grad.value());
}
auto maybe_memory_format = std::get<1>(tuple);
if (maybe_memory_format.has_value()) {
result = result.memory_format(maybe_memory_format.value());
}
auto maybe_device = std::get<2>(tuple);
if (maybe_device.has_value()) {
result = result.device(maybe_device.value());
}
auto maybe_dtype = std::get<3>(tuple);
if (maybe_dtype.has_value()) {
result =
result.dtype(caffe2::TypeMeta::fromScalarType(maybe_dtype.value()));
}
auto maybe_layout = std::get<4>(tuple);
if (maybe_layout.has_value()) {
result = result.layout(maybe_layout.value());
}
auto maybe_pinned_memory = std::get<5>(tuple);
if (maybe_pinned_memory.has_value()) {
result = result.pinned_memory(maybe_pinned_memory.value());
}
return result;
}
template <>
struct IValuePacker<at::TensorOptions> {
static at::IValue pack(const at::TensorOptions& t) {
return pack_TensorOptions(t);
}
static at::TensorOptions unpack(const at::IValue& t) {
auto tuple = t.to<packed_tensoroptions_t>();
return unpack_TensorOptions(tuple);
}
static at::TypePtr packed_type() {
return at::TupleType::create(
{at::OptionalType::create(at::BoolType::get()),
at::OptionalType::create(at::MemoryFormatType::get()),
at::OptionalType::create(at::DeviceObjType::get()),
at::OptionalType::create(at::ScalarTypeType::get()),
at::OptionalType::create(at::LayoutType::get()),
at::OptionalType::create(at::BoolType::get())});
}
};
template <>
struct IValuePacker<TypeAndSize> {
static at::IValue pack(const TypeAndSize& t) {
auto tuple = std::make_tuple(t.sym_sizes, pack_TensorOptions(t.options));
return tuple;
}
static TypeAndSize unpack(const at::IValue& t) {
auto tuple =
t.to<std::tuple<std::vector<at::SymInt>, packed_tensoroptions_t>>();
TypeAndSize result;
result.sym_sizes = std::get<0>(tuple);
result.options = unpack_TensorOptions(std::get<1>(tuple));
return result;
}
static at::TypePtr packed_type() {
return at::TupleType::create(
{IValuePacker<std::vector<at::SymInt>>::packed_type(),
IValuePacker<at::TensorOptions>::packed_type()});
}
};
template <typename T>
struct IValuePacker<std::optional<T>> {
static at::IValue pack(const std::optional<T>& t) {
if (t.has_value()) {
return IValuePacker<T>::pack(t.value());
} else {
return std::nullopt;
}
}
static std::optional<T> unpack(const at::IValue& t) {
if (t.isNone()) {
return std::nullopt;
} else {
return IValuePacker<T>::unpack(t);
}
}
static at::TypePtr packed_type() {
return at::OptionalType::create(IValuePacker<T>::packed_type());
}
};
template <typename T>
struct IValuePacker<std::vector<T>> {
static at::IValue pack(const std::vector<T>& t) {
if constexpr (::std::is_constructible_v<at::IValue, T>) {
return t;
}
if (t.empty()) {
auto lst = c10::impl::GenericList(at::AnyType::get());
return lst;
}
auto type_ptr = IValuePacker<T>::pack(t[0]).type();
auto lst = c10::impl::GenericList(type_ptr);
for (const auto& elt : t) {
lst.emplace_back(IValuePacker<T>::pack(elt));
}
return lst;
}
static std::vector<T> unpack(const at::IValue& t) {
if constexpr (::std::is_constructible_v<at::IValue, T>) {
return t.to<::std::vector<T>>();
}
std::vector<T> result;
auto lst = t.toList();
for (const at::IValue& elt : lst) {
result.emplace_back(IValuePacker<T>::unpack(elt));
}
return result;
}
static at::TypePtr packed_type() {
return at::ListType::create(IValuePacker<T>::packed_type());
}
};
template <typename T>
struct IValuePacker<c10::List<T>> {
static at::IValue pack(const c10::List<T>& t) {
return IValuePacker<std::vector<T>>::pack(t.vec());
}
static c10::List<T> unpack(const at::IValue& t) {
return c10::List<T>(IValuePacker<std::vector<T>>::unpack(t));
}
static at::TypePtr packed_type() {
return IValuePacker<std::vector<T>>::packed_type();
}
};
template <size_t N>
struct IValuePacker<std::array<bool, N>> {
static at::IValue pack(const std::array<bool, N>& t) {
std::vector<bool> result(t.begin(), t.end());
return IValuePacker<std::vector<bool>>::pack(result);
}
static std::array<bool, N> unpack(const at::IValue& t) {
std::array<bool, N> result;
auto packed = IValuePacker<std::vector<bool>>::unpack(t);
for (size_t i = 0; i < packed.size(); i++) {
result[i] = packed[i];
}
return result;
}
static at::TypePtr packed_type() {
return IValuePacker<std::vector<bool>>::packed_type();
}
};
template <>
struct IValuePacker<at::TensorGeometry> {
static at::IValue pack(const at::TensorGeometry& t) {
auto tuple = std::make_tuple(
t.sym_sizes().vec(), t.sym_strides().vec(), t.sym_storage_offset());
return tuple;
}
static at::TensorGeometry unpack(const at::IValue& t) {
auto tuple = t.to<std::tuple<
std::vector<at::SymInt>,
std::vector<at::SymInt>,
at::SymInt>>();
return at::TensorGeometry(
std::get<0>(tuple), std::get<1>(tuple), std::get<2>(tuple));
}
static at::TypePtr packed_type() {
return at::TupleType::create(
{IValuePacker<std::vector<at::SymInt>>::packed_type(),
IValuePacker<std::vector<at::SymInt>>::packed_type(),
at::SymIntType::get()});
}
};
template <>
struct IValuePacker<InputMetadata> {
static at::IValue pack(const InputMetadata& t) {
TORCH_INTERNAL_ASSERT(!t.is_nested_tensor());
auto tuple = std::make_tuple(
pack_TensorOptions(t.options()),
t.shape_as_dim_vector().vec(),
t.is_tensor_subclass());
return tuple;
}
static InputMetadata unpack(const at::IValue& t) {
auto tuple = t.to<
std::tuple<packed_tensoroptions_t, std::vector<at::SymInt>, bool>>();
return InputMetadata(
unpack_TensorOptions(std::get<0>(tuple)),
SymIntSmallVec(std::get<1>(tuple)),
std::get<2>(tuple),
false);
}
static at::TypePtr packed_type() {
return at::TupleType::create(
{IValuePacker<at::TensorOptions>::packed_type(),
IValuePacker<std::vector<at::SymInt>>::packed_type(),
at::BoolType::get()});
}
};
template <typename T>
struct IValuePacker<at::OptionalArray<T>> {
static at::IValue pack(const at::OptionalArray<T>& t) {
return IValuePacker<std::optional<std::vector<T>>>::pack(t.list);
}
static at::OptionalArray<T> unpack(const at::IValue& t) {
auto result = IValuePacker<std::optional<std::vector<T>>>::unpack(t);
if (result.has_value()) {
return {result.value()};
} else {
return {};
}
}
static at::TypePtr packed_type() {
return IValuePacker<std::optional<std::vector<T>>>::packed_type();
}
};
// This is a helper struct for packing and unpacking multiple arguments into
// an ivalue_list. It leverages IValuePacker<T>.
struct PackedArgs {
PackedArgs() = default;
explicit PackedArgs(std::vector<at::IValue> stack_)
: stack(std::move(stack_)) {}
const std::vector<at::IValue>& vec() const {
return stack;
}
template <typename T>
void pack(const T& t) {
stack.emplace_back(IValuePacker<T>::pack(t));
}
template <typename T>
T unpack() {
return IValuePacker<T>::unpack(std::move(stack[idx++]));
}
void pack_saved_data(const ska::flat_hash_map<std::string, at::IValue>& dct) {
std::vector<std::string> keys;
std::vector<at::IValue> values;
for (const auto& [key, value] : dct) {
keys.emplace_back(key);
values.emplace_back(value);
}
pack(keys);
for (const auto& value : values) {
pack(value);
}
}
ska::flat_hash_map<std::string, at::IValue> unpack_saved_data() {
ska::flat_hash_map<std::string, at::IValue> dct;
auto keys = unpack<std::vector<std::string>>();
for (const auto& key : keys) {
dct.insert({key, std::move(stack[idx++])});
}
return dct;
}
private:
std::vector<at::IValue> stack;
int64_t idx = 0;
};
} // namespace torch::dynamo::autograd
template <>
struct std::hash<torch::dynamo::autograd::CacheKey> {
size_t operator()(const torch::dynamo::autograd::CacheKey& k) const {
return k.hash();
}
};
```
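A sketch of the `PackedArgs` round trip described in NOTE: [Compiled Autograd and backward functions] above: arbitrary C++ arguments are packed into IValues via `IValuePacker<T>` and unpacked in the same order on the far side of the Python indirection. The function below is hypothetical and assumes a build where this header is usable.
```cpp
// Hypothetical example of packing/unpacking arguments with PackedArgs.
#include <torch/csrc/dynamo/compiled_autograd.h>
#include <optional>

void packed_args_example(const at::Tensor& t) {
  using torch::dynamo::autograd::PackedArgs;

  PackedArgs args;
  args.pack(int64_t(7));                  // via IValuePacker<int64_t>
  args.pack(t);                           // via IValuePacker<at::Tensor>
  args.pack(std::optional<double>(1.5));  // via IValuePacker<std::optional<double>>

  // Unpack in the same order on the other side.
  auto i = args.unpack<int64_t>();
  auto t2 = args.unpack<at::Tensor>();
  auto d = args.unpack<std::optional<double>>();
  TORCH_INTERNAL_ASSERT(i == 7 && t2.defined() == t.defined() && d.has_value());
}
```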
|
==================================================================================================================================
SOURCE CODE FILE: cpp_shim.h
LINES: 1
SIZE: 0.36 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\cpp_shim.h
ENCODING: utf-8
```h
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
struct _PytorchRecordFunctionState;
typedef struct _PytorchRecordFunctionState _PytorchRecordFunctionState;
_PytorchRecordFunctionState* _pytorch_record_function_enter(const char* name);
void _pytorch_record_function_exit(_PytorchRecordFunctionState* state);
#ifdef __cplusplus
} // extern "C"
#endif
```
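A minimal sketch of the enter/exit pair declared above: it brackets a region so the work is attributed to a named record function in profiler traces, and the two calls must be balanced. `profiled_region` is hypothetical.
```cpp
// Hypothetical usage of the record-function shim.
#include <torch/csrc/dynamo/cpp_shim.h>

void profiled_region() {
  _PytorchRecordFunctionState* state =
      _pytorch_record_function_enter("my_region");
  // ... work to be attributed to "my_region" in profiler traces ...
  _pytorch_record_function_exit(state);
}
```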
|
======================================================================================================================================
SOURCE CODE FILE: cpython_defs.h
LINES: 1
SIZE: 0.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\cpython_defs.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/python_compat.h>
// Functions that need to be copied from the CPython source
// should go in cpython_defs.c. Copying is required when, e.g.,
// we need to call internal CPython functions that are not exposed.
#if IS_PYTHON_3_11_PLUS
typedef struct _PyInterpreterFrame _PyInterpreterFrame;
PyFunctionObject* _PyFunction_CopyWithNewCode(
PyFunctionObject* o,
PyCodeObject* code);
void THP_PyFrame_Clear(_PyInterpreterFrame* frame);
_PyInterpreterFrame* THP_PyThreadState_BumpFramePointerSlow(
PyThreadState* tstate,
size_t size);
void THP_PyThreadState_PopFrame(
PyThreadState* tstate,
_PyInterpreterFrame* frame);
#endif
// pointers to _PyOpcode_Caches for C++
#ifdef __cplusplus
extern "C" const uint8_t* THP_PyOpcode_Caches;
extern "C" const int THP_PyOpcode_Caches_size;
#else
extern const uint8_t* THP_PyOpcode_Caches;
extern const int THP_PyOpcode_Caches_size;
#endif
```
|
==========================================================================================================================================
SOURCE CODE FILE: cpython_includes.h
LINES: 1
SIZE: 1.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\cpython_includes.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/python_compat.h>
// There is a problem in CPython's includes when mixing core and non-core
// builds. The fix was not backported to 3.12, so this is still needed here:
// https://github.com/python/cpython/issues/105268
#if IS_PYTHON_3_12_PLUS
#undef _PyGC_FINALIZED
#endif
// see https://bugs.python.org/issue35886
#if PY_VERSION_HEX >= 0x03080000
#define Py_BUILD_CORE
#ifndef __cplusplus
// C-only headers
#include <internal/pycore_pystate.h>
#endif // __cplusplus
#if IS_PYTHON_3_11_PLUS
#include <internal/pycore_frame.h>
#endif
#undef Py_BUILD_CORE
#endif // PY_VERSION_HEX >= 0x03080000
#ifdef __cplusplus
extern "C" {
#endif
#if IS_PYTHON_3_13_PLUS
#define F_CODE(x) ((PyCodeObject*)(x)->f_executable)
#define PREV_INSTR(x) (x)->instr_ptr
#else
#define F_CODE(x) ((PyCodeObject*)(x)->f_code)
#define PREV_INSTR(x) (x)->prev_instr
#endif
#if IS_PYTHON_3_12_PLUS
#define FUNC(x) ((x)->f_funcobj)
#else
#define FUNC(x) ((x)->f_func)
#endif
#ifdef __cplusplus
} // extern "C"
#endif
```
|
======================================================================================================================================
SOURCE CODE FILE: debug_macros.h
LINES: 5
SIZE: 3.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\debug_macros.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/python_compat.h>
#ifdef __cplusplus
#include <cstdio>
#else
#include <stdio.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _WIN32
#define unlikely(x) (x)
#else
#define unlikely(x) __builtin_expect((x), 0)
#endif
#define NULL_CHECK(val) \
if (unlikely((val) == NULL)) { \
fprintf(stderr, "NULL ERROR: %s:%d\n", __FILE__, __LINE__); \
PyErr_Print(); \
abort(); \
} else { \
}
// CHECK might be previously declared
#undef CHECK
#define CHECK(cond) \
if (unlikely(!(cond))) { \
fprintf(stderr, "DEBUG CHECK FAILED: %s:%d\n", __FILE__, __LINE__); \
abort(); \
} else { \
}
// Uncomment next line to print debug message
// #define TORCHDYNAMO_DEBUG 1
#ifdef TORCHDYNAMO_DEBUG
#define DEBUG_CHECK(cond) CHECK(cond)
#define DEBUG_NULL_CHECK(val) NULL_CHECK(val)
#define DEBUG_TRACE(msg, ...) \
fprintf(stderr, "TRACE[%s:%d] " msg "\n", __func__, __LINE__, __VA_ARGS__)
#define DEBUG_TRACE0(msg) \
fprintf(stderr, "TRACE[%s:%d] " msg "\n", __func__, __LINE__)
#else
#define DEBUG_CHECK(cond)
#define DEBUG_NULL_CHECK(val)
#define DEBUG_TRACE(msg, ...)
#define DEBUG_TRACE0(msg)
#endif
inline _PyFrameEvalFunction _debug_set_eval_frame(
PyThreadState* tstate,
_PyFrameEvalFunction eval_frame) {
_PyFrameEvalFunction prev =
_PyInterpreterState_GetEvalFrameFunc(tstate->interp);
_PyInterpreterState_SetEvalFrameFunc(tstate->interp, eval_frame);
return prev;
}
// Inspect PyObject*'s from C/C++ at the Python level, in pdb.
// e.g.
//
// PyObject* obj1 = PyList_New(...);
// PyObject* obj2 = PyObject_CallFunction(...);
// INSPECT(obj1, obj2);
// (pdb) p args[0]
// # list
// (pdb) p args[1]
// # some object
// (pdb) p args[1].some_attr
// # etc.
//
// Implementation: set eval frame callback to default, call
// torch._dynamo.utils._breakpoint_for_c_dynamo, reset eval frame callback.
#define INSPECT(...) \
{ \
PyThreadState* cur_tstate = PyThreadState_Get(); \
_PyFrameEvalFunction prev_eval_frame = \
_debug_set_eval_frame(cur_tstate, &_PyEval_EvalFrameDefault); \
PyObject* torch__dynamo_utils_module = \
PyImport_ImportModule("torch._dynamo.utils"); \
NULL_CHECK(torch__dynamo_utils_module); \
PyObject* breakpoint_for_c_dynamo_fn = PyObject_GetAttrString( \
torch__dynamo_utils_module, "_breakpoint_for_c_dynamo"); \
NULL_CHECK(breakpoint_for_c_dynamo_fn); \
PyObject_CallFunctionObjArgs( \
breakpoint_for_c_dynamo_fn, __VA_ARGS__, NULL); \
_debug_set_eval_frame(cur_tstate, prev_eval_frame); \
Py_DECREF(breakpoint_for_c_dynamo_fn); \
Py_DECREF(torch__dynamo_utils_module); \
}
#ifdef __cplusplus
} // extern "C"
#endif
```
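A brief sketch of how the check macros are intended to be used from the C/C++ pieces of dynamo; the function below is illustrative, not part of the header.
```cpp
#include <torch/csrc/dynamo/debug_macros.h>

// Illustrative use of the macros: abort with a file/line message if an
// unexpected NULL is hit while talking to the CPython API.
static PyObject* get_dynamo_utils_attr(const char* attr_name) {
  PyObject* mod = PyImport_ImportModule("torch._dynamo.utils");
  NULL_CHECK(mod); // prints the pending Python error and aborts on failure
  PyObject* attr = PyObject_GetAttrString(mod, attr_name);
  NULL_CHECK(attr);
  DEBUG_TRACE("fetched %s", attr_name); // no-op unless TORCHDYNAMO_DEBUG is set
  Py_DECREF(mod);
  return attr; // new reference
}
```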
|
====================================================================================================================================
SOURCE CODE FILE: eval_frame.h
LINES: 1
SIZE: 1.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\eval_frame.h
ENCODING: utf-8
```h
#pragma once
#include <stdbool.h>
#include <torch/csrc/dynamo/extra_state.h>
#include <torch/csrc/utils/python_compat.h>
#ifdef __cplusplus
extern "C" {
PyObject* torch_c_dynamo_eval_frame_init(void);
#endif
// All the eval APIs change in 3.11 so we need to decide which one to use on the
// fly https://docs.python.org/3/c-api/init.html#c._PyFrameEvalFunction
#if IS_PYTHON_3_11_PLUS
#define THP_EVAL_API_FRAME_OBJECT _PyInterpreterFrame
#else
#define THP_EVAL_API_FRAME_OBJECT PyFrameObject
#endif // IS_PYTHON_3_11_PLUS
// We need to be able to return the _PyInterpreterFrame to Python, so we
// create a Python binding for it
typedef struct THPPyInterpreterFrame {
PyObject_HEAD
THP_EVAL_API_FRAME_OBJECT* frame; // Borrowed reference
PyObject* locals;
} THPPyInterpreterFrame;
THPPyInterpreterFrame* THPPyInterpreterFrame_New(
THP_EVAL_API_FRAME_OBJECT* frame);
extern bool is_skip_guard_eval_unsafe;
void clear_old_frame_if_python_312_plus(
PyThreadState* tstate,
THP_EVAL_API_FRAME_OBJECT* frame);
void eval_frame_callback_set(PyObject* obj);
const char* get_frame_name(THP_EVAL_API_FRAME_OBJECT* frame);
PyObject* dynamo_eval_frame_default(
PyThreadState* tstate,
THP_EVAL_API_FRAME_OBJECT* frame,
int throw_flag);
PyObject* dynamo_eval_custom_code(
PyThreadState* tstate,
THP_EVAL_API_FRAME_OBJECT* frame,
PyCodeObject* code,
const char* trace_annotation,
int throw_flag);
#ifdef __cplusplus
} // extern "C"
#endif
```
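A hedged sketch of how these entry points fit together: a custom frame-evaluation hook typically inspects the frame and then either delegates to dynamo_eval_frame_default or swaps in a compiled code object via dynamo_eval_custom_code. The callback body below is illustrative only; a real dynamo callback consults the cache in extra_state.h first.
```cpp
#include <cstdio>
#include <torch/csrc/dynamo/eval_frame.h>

// Illustrative callback: log every frame name, then run it unmodified via the
// default evaluator.
static PyObject* logging_eval_frame(
    PyThreadState* tstate,
    THP_EVAL_API_FRAME_OBJECT* frame,
    int throw_flag) {
  std::fprintf(stderr, "evaluating frame: %s\n", get_frame_name(frame));
  return dynamo_eval_frame_default(tstate, frame, throw_flag);
}
```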
|
========================================================================================================================================
SOURCE CODE FILE: eval_frame_cpp.h
LINES: 1
SIZE: 0.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\eval_frame_cpp.h
ENCODING: utf-8
```h
#pragma once
#include <Python.h>
#include <torch/csrc/dynamo/eval_frame.h>
#include <torch/csrc/dynamo/extra_state.h>
#include <torch/csrc/dynamo/framelocals_mapping.h>
#ifdef __cplusplus
extern "C" {
#endif
PyObject* dynamo__custom_eval_frame(
PyThreadState* tstate,
THP_EVAL_API_FRAME_OBJECT* frame,
int throw_flag,
PyObject* callback);
PyObject* set_code_exec_strategy(PyObject* dummy, PyObject* obj);
#ifdef __cplusplus
} // extern "C"
#endif
```
|
=====================================================================================================================================
SOURCE CODE FILE: extra_state.h
LINES: 1
SIZE: 5.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\dynamo\extra_state.h
ENCODING: utf-8
```h
#pragma once
#include <Python.h>
#include <torch/csrc/dynamo/framelocals_mapping.h>
#ifdef __cplusplus
#include <torch/csrc/dynamo/utils.h>
#include <torch/csrc/utils/pybind.h>
#include <list>
namespace py = pybind11;
extern "C" {
#else
#include <stdbool.h>
#endif
enum FrameAction {
DEFAULT, // look through the cache, compile if not found
SKIP, // eager
RUN_ONLY, // look through the cache, run eager if not found
};
typedef struct FrameExecStrategy {
enum FrameAction cur_action; // action to take for current frame
enum FrameAction recursive_action; // action to take for recursive frames
} FrameExecStrategy;
// Points to the extra scratch space on the code object
extern Py_ssize_t extra_index;
// function to call when cache lookup errors
extern PyObject* guard_error_hook;
typedef PyObject FrameState;
typedef struct CacheEntry CacheEntry;
// ExtraState encapsulates CacheEntry and FrameState. ExtraState is the highest
// level of abstraction of what is stored on the extra code object. Previously,
// we saved different parts on different extra indexes. We prefer this way
// because of cleaner abstraction and faster SetExtra access.
#ifdef __cplusplus
typedef struct VISIBILITY_HIDDEN ExtraState {
// A pointer to the orig_code object to prevent race conditions in invalidate
// function.
PyCodeObject* orig_code;
// List of cache entries for compiled code objects
std::list<CacheEntry> cache_entry_list;
// Frame state to detect dynamic shape dims
py::dict frame_state;
// Actions to apply to all frames with this code object
FrameExecStrategy strategy{DEFAULT, DEFAULT};
ExtraState(PyCodeObject* orig_code_arg);
CacheEntry* get_first_entry();
void move_to_front(CacheEntry* cache_entry);
void move_to_back(CacheEntry* cache_entry);
void invalidate(CacheEntry* cache_entry, py::object deleted_guard_manager);
} ExtraState;
#else
typedef struct ExtraState ExtraState;
#endif
// Helper to extract the cache_entry from the extra state.
// Ownership contract
// args
// - extra_state: Borrowed
// return
// - CacheEntry: Borrowed.
CacheEntry* extract_cache_entry(ExtraState* extra_state);
// Returns either the previously stored frame state or an empty dict.
// Ownership contract
// args
// - extra_state: Borrowed
// return
// - extra_state->frame_state: Borrowed.
FrameState* extract_frame_state(ExtraState* extra_state);
// Returns the FrameExecStrategy stored in extra_state.
// Ownership contract
// args
// - extra_state: Borrowed
FrameExecStrategy extra_state_get_exec_strategy(ExtraState* extra_state);
// Set the FrameExecStrategy to be applied to all frames whose code object
// corresponds to this extra_state. Ownership contract
// - extra_state: Borrowed
void extra_state_set_exec_strategy(
ExtraState* extra_state,
FrameExecStrategy strategy);
// Ownership contract
// args
// - code: Borrowed
// return
// - extra_state: Borrowed.
ExtraState* get_extra_state(PyCodeObject* code);
// This is passed as freefunc to _PyEval_RequestCodeExtraIndex. This acts as a
// deleter for the object on extra scratch space. This function is called
// internally in _PyCode_SetExtra and also during the code deallocation.
// Destroys the extra state by deleting cache_entry, frame state and finally
// freeing the constructed extra state.
// Developer note - You should not call this function directly. This is called
// directly inside set_extra_state. If you find yourself needing to call this
// function, consider whether set_extra_state should be called instead.
void destroy_extra_state(void* obj);
// Clears the existing object sitting on the extra scratch space and sets it
// up with the new state. Note that _PyCode_SetExtra calls the
// destroy_extra_state deleter internally, and therefore we don't call it
// explicitly here.
// Ownership contract
// args
// - extra_state: Stolen
// return
// - there is no return, but the extra_state is stolen, so it becomes
// set_extra_state responsibility to clean it up. It will be deleted during
// the reset_code, when the set_extra_state is called with NULL.
// Invariant - Don't set an extra state that is already present on the code
// object. Otherwise, we will first free up the old extra state
// (which is also the new extra state) and write something invalid on the
// scratch space.
void set_extra_state(PyCodeObject* code, ExtraState* extra_state);
// Creates a new extra state and puts it on the extra scratch space of the code
// object.
// Ownership contract
// args
// - code: Borrowed
// return:
// - extra_state: New reference.
// These references are then further passed to set_extra_state which becomes
// the final owner of these references.
ExtraState* init_and_set_extra_state(PyCodeObject* code);
// Lookup the cache held by extra_state.
// Ownership contract
// args
// - extra_state: Borrowed
// return:
// - Py_None or PyCodeObject: Borrowed reference.
// - Py_None or PyObject: Trace id of the compiled code.
void lookup(
ExtraState* extra_state,
FrameLocalsMapping* f_locals,
PyObject* backend,
PyObject** maybe_cached_code,
const char** trace_annotation,
bool is_skip_guard_eval_unsafe);
// Create a new cache entry at extra_state holding on to guarded_code.
// Ownership contract
// args
// - extra_state: Borrowed
// - guarded_code: Borrowed
// return:
// - cache_entry: Borrowed reference
CacheEntry* create_cache_entry(
ExtraState* extra_state,
    PyObject* guarded_code,
PyObject* callback);
// Extracts the backend fn from the callback.
PyObject* get_backend(PyObject* callback);
#ifdef __cplusplus
} // extern "C"
// Returns the list of CacheEntry corresponding to code_obj.
// Warning: returns references whose lifetimes are controlled by C++
py::list _debug_get_cache_entry_list(const py::handle& code_obj);
#endif
```
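A sketch of the expected control flow around this API, following the ownership comments above. The function and local names are illustrative, error handling is elided, and the compile-on-miss step is only indicated in a comment.
```cpp
#include <torch/csrc/dynamo/extra_state.h>

// Illustrative cache-lookup path for a frame's code object: fetch or create
// the ExtraState, probe the cache, and fall back to compilation on a miss.
PyObject* lookup_or_compile(
    PyCodeObject* code,
    FrameLocalsMapping* f_locals,
    PyObject* backend,
    PyObject* callback) {
  (void)callback; // used only in the commented-out miss path below

  ExtraState* extra = get_extra_state(code); // borrowed
  if (extra == nullptr) {
    // Ownership of the new state moves to the code object's scratch space.
    extra = init_and_set_extra_state(code);
  }

  PyObject* cached_code = nullptr;
  const char* trace_annotation = nullptr;
  lookup(
      extra,
      f_locals,
      backend,
      &cached_code,
      &trace_annotation,
      /*is_skip_guard_eval_unsafe=*/false);
  if (cached_code != nullptr && cached_code != Py_None) {
    return cached_code; // borrowed: a previously compiled code object
  }

  // Cache miss: suppose `guarded_code` came back from the Python-side compiler;
  // create_cache_entry would store it so the next lookup can hit:
  //   CacheEntry* entry = create_cache_entry(extra, guarded_code, callback);
  return nullptr; // caller falls back to compiling or running eager
}
```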
|