diff --git a/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..386d61b7ec34b92621736b2435168817bba12bdc
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b28cb3c47f16f26d39b8b1931c84679f7b5100d761e584ea38feea3b44067e5
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b661e784ed92ae46fd5cdc01509c8b19c4a0d9b5
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/10.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a6a73bc2742185ff89ba05ca969551caafef8ccd476d9ca04c80370b0357417
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..0b2568971043b14b73f2e4e0245872ea504467ef
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64550c1d89888c4a2c4c5bf42a0ad72827bf98c7fa4b3cc7335809a3bfcc757e
+size 9387
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1164daf58526c797f3b16f9646130512c1107659
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool2d_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _adaptive_avg_pool2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor & _adaptive_avg_pool2d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor & _adaptive_avg_pool2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size);
+TORCH_API at::Tensor & _adaptive_avg_pool2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e1380282531122ab0d813d32e5dd5bd8ec1fc01
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor _adaptive_avg_pool3d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_forward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_forward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..455db6babd2eedecebb732f6043e03b8a5b5f625
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_forward_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _cdist_forward(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional compute_mode); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..71d249f4780d7c969a3d40d4365d205b3dd1a768 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _cslt_sparse_mm_search { + using schema = int64_t (const at::Tensor &, const at::Tensor &, const c10::optional &, const c10::optional &, c10::optional, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cslt_sparse_mm_search") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cslt_sparse_mm_search(Tensor compressed_A, Tensor dense_B, Tensor? bias=None, Tensor? alpha=None, ScalarType? out_dtype=None, bool transpose_result=False) -> int") + static int64_t call(const at::Tensor & compressed_A, const at::Tensor & dense_B, const c10::optional & bias, const c10::optional & alpha, c10::optional out_dtype, bool transpose_result); + static int64_t redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_A, const at::Tensor & dense_B, const c10::optional & bias, const c10::optional & alpha, c10::optional out_dtype, bool transpose_result); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_atan.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_atan.h new file mode 100644 index 0000000000000000000000000000000000000000..7bf898f357700ef80e299ebe55f261a8f8cabe91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_atan.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_foreach_atan(Tensor[] self) -> Tensor[] +inline ::std::vector _foreach_atan(at::TensorList self) { + return at::_ops::_foreach_atan::call(self); +} + +// aten::_foreach_atan_(Tensor(a!)[] self) -> () +inline void _foreach_atan_(at::TensorList self) { + return at::_ops::_foreach_atan_::call(self); +} + +// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_atan_out(at::TensorList out, at::TensorList self) { + return at::_ops::_foreach_atan_out::call(self, out); +} +// aten::_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> () +inline void _foreach_atan_outf(at::TensorList self, at::TensorList out) { + return at::_ops::_foreach_atan_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_solve_ex_native.h 
b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_solve_ex_native.h new file mode 100644 index 0000000000000000000000000000000000000000..2e09372e526c6669216be7b02373dbebe560bdb5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_solve_ex_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured__linalg_solve_ex_out : public at::meta::structured__linalg_solve_ex { +void impl(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors, const at::Tensor & result, const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & info); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_ragged_idx.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_ragged_idx.h new file mode 100644 index 0000000000000000000000000000000000000000..cfa686f9521be5c1c23412b4bd34642ca858845f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_ragged_idx.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_nested_get_ragged_idx(Tensor self) -> int +inline int64_t _nested_get_ragged_idx(const at::Tensor & self) { + return at::_ops::_nested_get_ragged_idx::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_left_aligned_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_left_aligned_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..90183b9489485b0bce55d37725bc643761218da9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_left_aligned_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API bool _nested_tensor_from_mask_left_aligned(const at::Tensor & t, const at::Tensor & mask); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f2a02ec4c81260ef7afa307797315c883d6ea9f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_size_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & _nested_tensor_size_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & _nested_tensor_size_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f8c5f3c7bf5acc5350c211ba6e4db6f4d33cdf63 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _scaled_dot_product_flash_attention_backward { + using schema = ::std::tuple (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymInt, c10::SymInt, double, bool, const at::Tensor &, const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_scaled_dot_product_flash_attention_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_scaled_dot_product_flash_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? 
scale=None) -> (Tensor grad_query, Tensor grad_key, Tensor grad_value)") + static ::std::tuple call(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional scale); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cc79427f0abda6abe19c957c3917ff44d8b0d29e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_backward_data_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor _softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype); +TORCH_API at::Tensor & _softmax_backward_data_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype); +TORCH_API at::Tensor & _softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsr_tensor_unsafe_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsr_tensor_unsafe_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..123c81fbaf9c09f38489931d87e9b2c8364037e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsr_tensor_unsafe_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}); +TORCH_API at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_intlist_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_intlist_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9c392cdc5ee69cfee1f1fe17ce30918b21e27903 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_intlist_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & _test_optional_intlist_out(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out); +TORCH_API at::Tensor _test_optional_intlist(const at::Tensor & values, at::OptionalIntArrayRef addends); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..97d6f8e304d423c9728fe445951e1b99f5755871 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _test_string_default { + using schema = at::Tensor (const at::Tensor &, c10::string_view, c10::string_view); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_string_default") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_string_default(Tensor dummy, str a=\"\\\"'\\\\\", str b='\"\\'\\\\') -> Tensor") + static at::Tensor call(const at::Tensor & dummy, c10::string_view a, c10::string_view b); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, c10::string_view a, c10::string_view b); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_scaled_dot_attention_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_scaled_dot_attention_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f1e0bf682b115555fa201a6aac43ef3ca4bf523f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_scaled_dot_attention_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & _triton_scaled_dot_attention_out(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out); +TORCH_API at::Tensor triton_scaled_dot_attention(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p=0.0); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a23e14c220812cc36380793aa54380251ebbd0d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _unsafe_index_put(const at::Tensor & self, const c10::List> & indices, const at::Tensor & values, bool accumulate=false); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d3173437e0f812c48aa404bb6e2686a58adaed6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size); +} // namespace native +} // namespace at diff --git 
a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a8bd8b68cdc44cfe9e07653eb56cbba81c3f0021 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/add_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor add(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & add_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); +TORCH_API at::Tensor & add_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & add_(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..5e8f5aa249cfa55264ff19bfc543854af53ba706 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_meta.h @@ -0,0 +1,114 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_avg_pool2d : public at::impl::MetaBase { + + template + struct TORCH_API precompute_out { + + precompute_out set_kH(int64_t value) { + static_assert(KH == false, "kH already set"); + precompute_out ret; +ret.kH = value; +ret.kW = this->kW; +ret.dH = this->dH; +ret.dW = this->dW; +ret.padH = this->padH; +ret.padW = this->padW; +return ret; + } + + + precompute_out set_kW(int64_t value) { + static_assert(KW == false, "kW already set"); + precompute_out ret; +ret.kH = this->kH; +ret.kW = value; +ret.dH = this->dH; +ret.dW = this->dW; +ret.padH = this->padH; +ret.padW = this->padW; +return ret; + } + + + precompute_out set_dH(int64_t value) { + static_assert(DH == false, "dH already set"); + precompute_out ret; +ret.kH = this->kH; +ret.kW = this->kW; +ret.dH = value; +ret.dW = this->dW; +ret.padH = this->padH; +ret.padW = this->padW; +return ret; + } + + + precompute_out set_dW(int64_t value) { + static_assert(DW == false, "dW already set"); + precompute_out ret; +ret.kH = this->kH; +ret.kW = this->kW; +ret.dH = this->dH; +ret.dW = value; +ret.padH = this->padH; +ret.padW = this->padW; +return ret; + } + + + precompute_out set_padH(int64_t value) { + static_assert(PADH == false, "padH already set"); + precompute_out ret; +ret.kH = this->kH; +ret.kW = this->kW; +ret.dH = this->dH; +ret.dW = this->dW; +ret.padH = value; +ret.padW = this->padW; +return ret; + } + + + precompute_out set_padW(int64_t value) { + static_assert(PADW == false, "padW already 
set"); + precompute_out ret; +ret.kH = this->kH; +ret.kW = this->kW; +ret.dH = this->dH; +ret.dW = this->dW; +ret.padH = this->padH; +ret.padW = value; +return ret; + } + + int64_t kH; +int64_t kW; +int64_t dH; +int64_t dW; +int64_t padH; +int64_t padW; + }; + using meta_return_ty = precompute_out ; + meta_return_ty meta(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..c55426964617252aa4cca174b1615fd4e38accdb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_avg_pool3d : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bucketize_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bucketize_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..42e164390f65dd1d129c0abca008bd525456f2f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bucketize_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor bucketize(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false); +TORCH_API at::Tensor & bucketize_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false); +TORCH_API at::Tensor & bucketize_outf(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right, at::Tensor & out); +TORCH_API at::Tensor bucketize(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32=false, bool right=false); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..4cdf87ce9bafb2f679216cbe14dbe050ad2a0db7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_meta.h @@ -0,0 +1,32 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_clamp_min : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Scalar & min); +}; +struct TORCH_API structured_clamp_min_Tensor : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & min); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6956d2842605bd681dbc7fbaff08f5a2ed8d8461 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor cos(const at::Tensor & self); +TORCH_API at::Tensor & cos_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & cos_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & cos_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b3f7144b52f5240c70a0041c2de941099c7e31bf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API cudnn_grid_sampler { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_grid_sampler") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output") + static at::Tensor call(const at::Tensor & self, const at::Tensor & grid); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid); +}; + +struct TORCH_API cudnn_grid_sampler_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cudnn_grid_sampler") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cudnn_grid_sampler.out(Tensor self, Tensor grid, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & grid, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/data.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/data.h new file mode 100644 index 0000000000000000000000000000000000000000..31d5b8067e5d0630f0b62cc9c048995fd70721a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/data.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..66216294b4a2b8ae7c43534fe54d2070853c8565 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor erfc(const at::Tensor & self); +TORCH_API at::Tensor & erfc_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_native.h new file mode 100644 index 0000000000000000000000000000000000000000..10f03568167865601d192efb5774e50882d27652 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_erfc_out : public at::meta::structured_erfc { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftn_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftn_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a6ba9276caea8a9b5b050420f59bd6f8761314c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftn_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fft_ifftn { + using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, at::OptionalIntArrayRef, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_ifftn") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm); +}; + +struct TORCH_API fft_ifftn_out { + using schema = at::Tensor & (const at::Tensor &, at::OptionalSymIntArrayRef, at::OptionalIntArrayRef, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_ifftn") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional norm, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft.h new file mode 100644 index 0000000000000000000000000000000000000000..1812c96c0553d9c096d1f71e166f4637a5d4597e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ihfft.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +inline at::Tensor fft_ihfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_ihfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm); + } +} + +// aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor +inline at::Tensor fft_ihfft_symint(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft::call(self, n, dim, norm); +} +namespace symint { + template ::value>> + at::Tensor fft_ihfft(const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft::call(self, n, dim, norm); + } +} + +// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_ihfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_ihfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_ihfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ihfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_ihfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ihfft_out::call(self, n.has_value() ? 
c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out); + } +} + +// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_ihfft_symint_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft_out::call(self, n, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_ihfft_out(at::Tensor & out, const at::Tensor & self, c10::optional n=c10::nullopt, int64_t dim=-1, c10::optional norm=c10::nullopt) { + return at::_ops::fft_ihfft_out::call(self, n, dim, norm, out); + } +} + +// aten::fft_ihfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fft_ihfft_symint_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ihfft_out::call(self, n, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_ihfft_outf(const at::Tensor & self, c10::optional n, int64_t dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_ihfft_out::call(self, n, dim, norm, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bf806fb6079a1472bb4fa891b9e0c052b13ef305 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/flatten_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor flatten(const at::Tensor & self, int64_t start_dim=0, int64_t end_dim=-1); +TORCH_API at::Tensor flatten(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim); +TORCH_API at::Tensor flatten(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim); +TORCH_API at::Tensor flatten(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..058fdbb796fc12e1238b3a76ade4575504b0f0f0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor floor(const at::Tensor & self); +TORCH_API at::Tensor & floor_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & floor_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & floor_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_native.h new file mode 100644 index 0000000000000000000000000000000000000000..400a0c43b0e2438bf15859fafb6bf07003da32e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_gather_out : public at::meta::structured_gather { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad, const at::Tensor & out); +}; +TORCH_API at::Tensor gather(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad=false); +TORCH_API at::Tensor & gather_out(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gradient.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gradient.h new file mode 100644 index 0000000000000000000000000000000000000000..9ace2ce50654dae7ba757622cb766946f2d96d39 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gradient.h @@ -0,0 +1,60 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gradient.scalarint(Tensor self, *, Scalar? spacing=None, int? dim=None, int edge_order=1) -> Tensor[] +inline ::std::vector gradient(const at::Tensor & self, const c10::optional & spacing=c10::nullopt, c10::optional dim=c10::nullopt, int64_t edge_order=1) { + return at::_ops::gradient_scalarint::call(self, spacing, dim, edge_order); +} + +// aten::gradient.scalararray(Tensor self, *, Scalar spacing, int[] dim, int edge_order=1) -> Tensor[] +inline ::std::vector gradient(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_scalararray::call(self, spacing, dim, edge_order); +} + +// aten::gradient.array(Tensor self, *, int[] dim, int edge_order=1) -> Tensor[] +inline ::std::vector gradient(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_array::call(self, dim, edge_order); +} + +// aten::gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? 
dim=None, int edge_order=1) -> Tensor[] +inline ::std::vector gradient(const at::Tensor & self, at::ArrayRef spacing, c10::optional dim=c10::nullopt, int64_t edge_order=1) { + return at::_ops::gradient_scalarrayint::call(self, spacing, dim, edge_order); +} + +// aten::gradient.scalarrayarray(Tensor self, *, Scalar[] spacing, int[] dim, int edge_order=1) -> Tensor[] +inline ::std::vector gradient(const at::Tensor & self, at::ArrayRef spacing, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_scalarrayarray::call(self, spacing, dim, edge_order); +} + +// aten::gradient.tensorarrayint(Tensor self, *, Tensor[] spacing, int? dim=None, int edge_order=1) -> Tensor[] +inline ::std::vector gradient(const at::Tensor & self, at::TensorList spacing, c10::optional dim=c10::nullopt, int64_t edge_order=1) { + return at::_ops::gradient_tensorarrayint::call(self, spacing, dim, edge_order); +} + +// aten::gradient.tensorarray(Tensor self, *, Tensor[] spacing, int[] dim, int edge_order=1) -> Tensor[] +inline ::std::vector gradient(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order=1) { + return at::_ops::gradient_tensorarray::call(self, spacing, dim, edge_order); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..77db0b79ae45859727929e1c65b36d2a56155830 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor im2col(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky.h new file mode 100644 index 0000000000000000000000000000000000000000..17ab0613fe86d6eae4302da2dc524b4b5ab6d38d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_cholesky(Tensor self, *, bool upper=False) -> Tensor +inline at::Tensor linalg_cholesky(const at::Tensor & self, bool upper=false) { + return at::_ops::linalg_cholesky::call(self, upper); +} + +// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cholesky_out(at::Tensor & out, const at::Tensor & self, bool upper=false) { + return at::_ops::linalg_cholesky_out::call(self, upper, out); +} +// aten::linalg_cholesky.out(Tensor self, *, bool upper=False, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_cholesky_outf(const at::Tensor & self, bool upper, at::Tensor & out) { + return at::_ops::linalg_cholesky_out::call(self, upper, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..faa5515cd50444f2c47a711dfb55796886bc85ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor log1p(const at::Tensor & self); +TORCH_API at::Tensor & log1p_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2e018b18ad2ab1d2aa9bd13148584a7304142df0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lshift_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & __lshift___outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +TORCH_API at::Tensor & __lshift___out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & __lshift___outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bc26d25fb1ba658adf0ad9aad3d5c2f5a0c1f349 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eb0ea998728cff73afd785f8cdcdfcff5d835d53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mps_convolution_transpose_backward_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API ::std::tuple mps_convolution_transpose_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask); +TORCH_API ::std::tuple mps_convolution_transpose_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1); +TORCH_API ::std::tuple mps_convolution_transpose_backward_symint_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask); +TORCH_API ::std::tuple mps_convolution_transpose_backward_symint_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array output_mask, at::Tensor & out0, at::Tensor & out1); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_except_dim_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_except_dim_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..46a5bc81ed1e945c0c7d4f4ce0a0b011b897d323 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_except_dim_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API 
+#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor norm_except_dim(const at::Tensor & v, int64_t pow=2, int64_t dim=0); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ee92d8a790a1291c8da8edf14fdeac7871896bd7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API reflection_pad3d_out { + using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reflection_pad3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); +}; + +struct TORCH_API reflection_pad3d { + using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reflection_pad3d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef padding); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef padding); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5b535456263c611a858bd8bca7a4c29a5756eea1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sign_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor sign(const at::Tensor & self); +TORCH_API at::Tensor & sign_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & sign_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & sign_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated3d_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated3d_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a09090c1ef08593fd90507f8cace3a9909b2ed57 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_dilated3d_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & slow_conv_dilated3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1); +TORCH_API at::Tensor & slow_conv_dilated3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out); +TORCH_API at::Tensor & slow_conv_dilated3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1)); +TORCH_API at::Tensor & slow_conv_dilated3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..404b5a485c15dcd972c9271caf6e4e0871f4d15e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_slow_conv_transpose2d : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, const 
at::Tensor & weight, at::ArrayRef kernel_size, at::OptionalTensorRef bias, at::ArrayRef stride, at::ArrayRef padding, at::ArrayRef output_padding, at::ArrayRef dilation); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..446a4de530b1c65fabcc5f03c88dcbbbfbcfb387 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out); +TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit.h new file mode 100644 index 0000000000000000000000000000000000000000..b74eae968bd015bbb548b2b34bdf8544561fce8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::special_expit(Tensor self) -> Tensor +inline at::Tensor special_expit(const at::Tensor & self) { + return at::_ops::special_expit::call(self); +} + +// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & special_expit_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::special_expit_out::call(self, out); +} +// aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & special_expit_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::special_expit_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_native.h new file mode 100644 index 0000000000000000000000000000000000000000..72d499b31f20da5f6041e01423801b680a8bd6b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_xlog1py_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_special_xlog1py_out : public at::meta::structured_special_xlog1py { +void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out); +}; +TORCH_API at::Tensor special_xlog1py(const at::Scalar & self, const at::Tensor & other); +TORCH_API at::Tensor & special_xlog1py_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor special_xlog1py(const at::Tensor & self, const at::Scalar & other); +TORCH_API at::Tensor & special_xlog1py_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_native.h new file mode 100644 index 0000000000000000000000000000000000000000..4268533ec9c555880e4c62603aacef4ab6b7d5d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_indices_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & tril_indices_out(int64_t row, int64_t col, int64_t offset, at::Tensor & out); +TORCH_API at::Tensor tril_indices_cpu(int64_t row, int64_t col, int64_t offset=0, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}); +TORCH_API at::Tensor tril_indices_cuda(int64_t row, int64_t col, int64_t offset=0, c10::optional dtype={}, c10::optional layout={}, c10::optional device={}, c10::optional pin_memory={}); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unbind.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unbind.h new file mode 100644 index 0000000000000000000000000000000000000000..b540322d4810c3c7859de27c9ed710ac610a895a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unbind.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[] +inline ::std::vector unbind(const at::Tensor & self, int64_t dim=0) { + return at::_ops::unbind_int::call(self, dim); +} + +// aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[] +inline ::std::vector unbind(const at::Tensor & self, at::Dimname dim) { + return at::_ops::unbind_Dimname::call(self, dim); +} + +} diff --git 
a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..e49e02330fe45680e654c4c6e28bf84d65e2294e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_native.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +TORCH_API at::Tensor upsample_linear1d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional> scale_factors); +struct TORCH_API structured_upsample_linear1d_out_cpu : public at::meta::structured_upsample_linear1d { +void impl(const at::Tensor & self, at::ArrayRef output_size, bool align_corners, c10::optional scales, const at::Tensor & out); +}; +struct TORCH_API structured_upsample_linear1d_out_cuda : public at::meta::structured_upsample_linear1d { +void impl(const at::Tensor & self, at::ArrayRef output_size, bool align_corners, c10::optional scales, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..22a61e48e4a24b31ad55ff21b199750512dafe95 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/CUDAPluggableAllocator.h @@ -0,0 +1,152 @@ +#pragma once + +#include +#include +#include +#include + +#include + +#include + +namespace torch::cuda::CUDAPluggableAllocator { + +#if defined(TORCH_HIP_VERSION) +using streamType = c10::hip::HIPStream; +#else +using streamType = c10::cuda::CUDAStream; +#endif + +std::shared_ptr +getCurrentAllocator(); +std::shared_ptr +createCustomAllocator( + std::function alloc_fn, + std::function free_fn); +void changeCurrentAllocator( + const std::shared_ptr& + allocator); + +struct _AllocationMetadata { + _AllocationMetadata(); + _AllocationMetadata( + size_t size, + c10::DeviceIndex device_idx, + cudaStream_t stream); + size_t size; + c10::DeviceIndex device_idx; + cudaStream_t stream; +}; + +struct CUDAPluggableAllocator + : public c10::cuda::CUDACachingAllocator::CUDAAllocator { + CUDAPluggableAllocator( + std::function alloc_fn, + std::function free_fn); + + CUDAPluggableAllocator(CUDAPluggableAllocator& other); + + void set_init_fn(std::function init_fn); + + void set_reset_fn(std::function reset_fn); + + void set_memory_fraction_fn( + std::function memory_fraction_fn); + + void set_base_alloc_fn(std::function base_alloc_fn); + + void set_record_stream_fn( + std::function record_stream_fn); + + void set_begin_allocate_to_pool( + std::function< + void(int, c10::cuda::MempoolId_t, std::function)> + capture_begin_fn); + + void set_end_allocate_to_pool_fn( + std::function capture_about_to_end_fn); + + void set_release_pool( + std::function capture_destroy_fn); + + void* malloc(size_t size, c10::DeviceIndex device, cudaStream_t stream); + + c10::DataPtr allocate(size_t size) override; + c10::DeleterFnPtr raw_deleter() const override; + + void* raw_alloc(size_t nbytes) override; + void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) override; + void raw_delete(void* ptr) override; + void 
init(int device_count) override; + bool initialized() override; + void setMemoryFraction(double fraction, c10::DeviceIndex device) override; + void emptyCache() override; + void cacheInfo(c10::DeviceIndex device, size_t* largestBlock) override; + void* getBaseAllocation(void* ptr, size_t* size) override; + + void recordStream(const c10::DataPtr&, streamType stream) override; + + c10::cuda::CUDACachingAllocator::DeviceStats getDeviceStats( + c10::DeviceIndex device) override; + void resetAccumulatedStats(c10::DeviceIndex device) override; + void resetPeakStats(c10::DeviceIndex device) override; + c10::cuda::CUDACachingAllocator::SnapshotInfo snapshot() override; + void beginAllocateToPool( + c10::DeviceIndex device, + c10::cuda::MempoolId_t mempool_id, + std::function) override; + void endAllocateToPool( + c10::DeviceIndex device, + c10::cuda::MempoolId_t mempool_id) override; + void releasePool(c10::DeviceIndex device, c10::cuda::MempoolId_t mempool_id) + override; + std::shared_ptr getIpcDevPtr(std::string handle) override; + void recordHistory( + bool enabled, + c10::cuda::CUDACachingAllocator::CreateContextFn context_recorder, + size_t alloc_trace_max_entries, + c10::cuda::CUDACachingAllocator::RecordContext when) override; + void attachOutOfMemoryObserver( + c10::cuda::CUDACachingAllocator::OutOfMemoryObserver observer) override; + void attachAllocatorTraceTracker( + c10::cuda::CUDACachingAllocator::AllocatorTraceTracker tracker) override; + std::shared_ptr + getCheckpointState(c10::DeviceIndex device, at::cuda::MempoolId_t id) + override; + c10::cuda::CUDACachingAllocator::CheckpointDelta setCheckpointPoolState( + c10::DeviceIndex device, + std::shared_ptr pps) + override; + void enablePeerAccess(c10::DeviceIndex dev, c10::DeviceIndex dev_to_access) + override; + cudaError_t memcpyAsync( + void* dst, + int dstDevice, + const void* src, + int srcDevice, + size_t count, + cudaStream_t stream, + bool p2p_enabled) override; + std::string name() override; + void copy_data(void* dest, const void* src, std::size_t count) const final; + + protected: + std::function alloc_fn_; + std::function free_fn_; + std::function init_fn_; + std::function reset_fn_; + std::function memory_fraction_fn_; + std::function base_alloc_fn_; + std::function record_stream_fn_; + std::function< + void(int, c10::cuda::MempoolId_t, std::function)> + begin_allocate_to_pool_fn_; + std::function end_allocate_to_pool_fn_; + std::function relase_pool_fn_; + std::mutex allocator_mutex_; + // We do the bookeeping here in order to simplify custom allocators + std::unordered_map allocation_metadata_; + + bool initialized_ = false; +}; +} // namespace torch::cuda::CUDAPluggableAllocator diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h new file mode 100644 index 0000000000000000000000000000000000000000..5c4d95b285997fa4ddfce57423e0b264dde5898a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Event.h @@ -0,0 +1,18 @@ +#ifndef THCP_EVENT_INC +#define THCP_EVENT_INC + +#include +#include + +struct THCPEvent { + PyObject_HEAD at::cuda::CUDAEvent cuda_event; +}; +extern PyObject* THCPEventClass; + +void THCPEvent_init(PyObject* module); + +inline bool THCPEvent_Check(PyObject* obj) { + return THCPEventClass && PyObject_IsInstance(obj, THCPEventClass); +} + +#endif // THCP_EVENT_INC diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h 
b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h new file mode 100644 index 0000000000000000000000000000000000000000..0c89e4bc65f2591c074083064e64eca421ee5760 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Module.h @@ -0,0 +1,11 @@ +#ifndef THCP_CUDA_MODULE_INC +#define THCP_CUDA_MODULE_INC + +PyObject* THCPModule_getDevice_wrap(PyObject* self); +PyObject* THCPModule_setDevice_wrap(PyObject* self, PyObject* arg); +PyObject* THCPModule_getDeviceName_wrap(PyObject* self, PyObject* arg); +PyObject* THCPModule_getDriverVersion(PyObject* self); +PyObject* THCPModule_isDriverSufficient(PyObject* self); +PyObject* THCPModule_getCurrentBlasHandle_wrap(PyObject* self); + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h new file mode 100644 index 0000000000000000000000000000000000000000..9b7197d74390c142744ec6d64df967b6c7f25903 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/Stream.h @@ -0,0 +1,20 @@ +#ifndef THCP_STREAM_INC +#define THCP_STREAM_INC + +#include +#include +#include + +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct THCPStream : THPStream { + at::cuda::CUDAStream cuda_stream; +}; +extern PyObject* THCPStreamClass; + +void THCPStream_init(PyObject* module); + +inline bool THCPStream_Check(PyObject* obj) { + return THCPStreamClass && PyObject_IsInstance(obj, THCPStreamClass); +} + +#endif // THCP_STREAM_INC diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h new file mode 100644 index 0000000000000000000000000000000000000000..697a66dc3ee91a22ae2503852db04dbba2fc74d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/THCP.h @@ -0,0 +1,10 @@ +#ifndef THCP_H +#define THCP_H + +#include +#include +#include +#include +#include + +#endif diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h new file mode 100644 index 0000000000000000000000000000000000000000..cf89b365d0ce4b6af51550825b5a45387eb281f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/comm.h @@ -0,0 +1,52 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +namespace torch::cuda { + +using tensor_list2d = std::vector>; + +TORCH_CUDA_CU_API std::vector& broadcast_out( + const at::Tensor& tensor, + std::vector& out_tensors); +TORCH_CUDA_CU_API std::vector broadcast( + const at::Tensor& tensor, + at::IntArrayRef devices); +TORCH_CUDA_CU_API tensor_list2d broadcast_coalesced( + at::TensorList tensors, + at::IntArrayRef devices, + size_t buffer_size); + +TORCH_CUDA_CU_API std::vector& scatter_out( + const at::Tensor& tensor, + std::vector& out_tensors, + int64_t dim = 0, + const c10::optional>>& + streams = c10::nullopt); + +TORCH_CUDA_CU_API std::vector scatter( + const at::Tensor& tensor, + at::IntArrayRef devices, + const c10::optional>& chunk_sizes = c10::nullopt, + int64_t dim = 0, + const c10::optional>>& + streams = c10::nullopt); + +TORCH_CUDA_CU_API at::Tensor& gather_out( + at::TensorList tensors, + at::Tensor& out_tensor, + int64_t dim); + +TORCH_CUDA_CU_API at::Tensor gather( + at::TensorList tensors, + int64_t dim, + c10::optional destination_index); + +} // namespace torch::cuda 
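Aside (not part of the diff above): a minimal usage sketch of the torch::cuda comm helpers declared in the comm.h header added just above. It assumes a CUDA-enabled libtorch build with at least two visible devices; the device ids, tensor shape, and the surrounding main() are illustrative assumptions, not anything taken from this repository.

// Scatter a batch across two GPUs, then gather the chunks back onto device 0.
#include <ATen/ATen.h>
#include <torch/csrc/cuda/comm.h>

int main() {
  at::Tensor batch = at::randn({8, 128}, at::kCUDA);  // resident on device 0
  // Split along dim 0 (the default) and place one chunk on each listed device.
  std::vector<at::Tensor> chunks =
      torch::cuda::scatter(batch, /*devices=*/{0, 1});
  // ... per-device work would happen here ...
  // Concatenate the chunks back along dim 0 onto device index 0.
  at::Tensor merged = torch::cuda::gather(chunks, /*dim=*/0,
                                          /*destination_index=*/0);
  return merged.equal(batch) ? 0 : 1;  // round trip should reproduce the input
}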
diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h new file mode 100644 index 0000000000000000000000000000000000000000..c533dae3baad36a42e0f97f55b9eb7a747191dc3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/device_set.h @@ -0,0 +1,11 @@ +#pragma once + +#include +#include +#include + +namespace torch { + +using device_set = std::bitset; + +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h new file mode 100644 index 0000000000000000000000000000000000000000..f5f9bdbed1620e9777ade18fe70690fd0cc01892 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/memory_snapshot.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch::cuda { + +// C++-only versions of these, for python use +// those defined in cuda/Module.cpp which also record python state. +TORCH_CUDA_CU_API void _record_memory_history( + bool enabled, + bool record_context = true, + int64_t trace_alloc_max_entries = 1, + bool trace_alloc_record_context = false, + bool record_cpp_context = false); + +TORCH_CUDA_CU_API void _record_memory_history( + c10::optional enabled = "all", + c10::optional context = "all", + const std::string& stacks = "all", + size_t max_entries = SIZE_MAX); + +TORCH_CUDA_CU_API std::string _memory_snapshot_pickled(); + +} // namespace torch::cuda diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h new file mode 100644 index 0000000000000000000000000000000000000000..ebf51b7633abbb393061258430f1d8a486cd3198 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/nccl.h @@ -0,0 +1,218 @@ +#pragma once + +#include +#include +#include + +#include +#include + +// NCCL BFloat16 is enabled only for CUDA 11+ and NCCL versions 2.10+, or for +// HIP 3.1+ +#if defined(__CUDA_BF16_TYPES_EXIST__) +#define HAS_NCCL_BF16_DATATYPE \ + ((NCCL_MAJOR > 2) || (NCCL_MAJOR == 2) && (NCCL_MINOR >= 10)) +#elif defined(USE_ROCM) && (TORCH_HIP_VERSION >= 301) +#define HAS_NCCL_BF16_DATATYPE 1 +#else +#define HAS_NCCL_BF16_DATATYPE 0 +#endif + +namespace torch::cuda::nccl { + +/* The following are copied from and redefined in torch::cuda::nccl + * namespace */ +/* pytorch should only use the following definition within pytorch scope */ + +/* Opaque handle to communicator to ncclComm*, this will reinterpret as ncclComm + * in nccl.cpp */ +typedef void* ncclComm_t; + +/** redefine nccl unique ID in torch scope. this should be identical to native + * nccl impp. 
*/ +#define NCCL_UNIQUE_ID_BYTES 128 +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) +typedef struct { + char internal[NCCL_UNIQUE_ID_BYTES]; +} ncclUniqueId; + +/* Error type */ +enum class ncclResult { + Success = 0, + UnhandledCudaError = 1, + SystemError = 2, + InternalError = 3, + InvalidArgument = 4, + InvalidUsage = 5, + NumResults = 6, + InProgress = 7 +}; + +/* Reduction operation selector */ +enum class ncclRedOp { Sum = 0, Prod = 1, Max = 2, Min = 3, NumOps = 4 }; + +/* Data types */ +enum class ncclDataType { + Int8 = 0, + Char = 0, + Uint8 = 1, + Int32 = 2, + Int = 2, + Uint32 = 3, + Int64 = 4, + Uint64 = 5, + Float16 = 6, + Half = 6, + Float32 = 7, + Float = 7, + Float64 = 8, + Double = 8, + Bfloat16 = 9, + NumTypes = 10 +}; + +// RAII helper class to manage NCCL group API and CUDA free mutex. +// The destructor is allowed to throw since this helper class only +// manages group and lock lifetimes. +struct AutoNcclGroup { + AutoNcclGroup(); + AutoNcclGroup(ncclComm_t comm, bool comm_nonblocking); + ~AutoNcclGroup() noexcept(false); + ncclComm_t comm_; + bool comm_nonblocking_; +}; + +// NOTE: this is exposed only so that python_nccl.cpp can some of these helpers. +// Don't use them outside of these files. +namespace detail { + +TORCH_CUDA_CPP_API void throw_nccl_error(ncclResult status); + +static inline void NCCL_CHECK(ncclResult status) { + if (status != ncclResult::Success) { + throw_nccl_error(status); + } +} + +TORCH_CUDA_CPP_API at::ArrayRef get_communicators( + at::TensorList inputs); +TORCH_CUDA_CPP_API void check_inputs( + at::TensorList inputs, + at::TensorList outputs, + int input_multiplier, + int output_multiplier); +TORCH_CUDA_CPP_API void check_inputs( + at::TensorList inputs, + const at::Tensor& output, + int root, + int input_multiplier, + int output_multiplier); + +} // namespace detail + +using comm_list = std::vector; +using stream_list = std::vector>; + +TORCH_CUDA_CPP_API std::uint64_t version(); +TORCH_CUDA_CPP_API const char* version_suffix(); + +bool is_available(at::TensorList tensors); + +TORCH_CUDA_CPP_API void get_unique_id(ncclUniqueId& id); +TORCH_CUDA_CPP_API ncclComm_t +comm_init_rank(int nranks, const ncclUniqueId& comm_id, int rank); +TORCH_CUDA_CPP_API void comm_destroy(ncclComm_t comm); + +TORCH_CUDA_CPP_API void broadcast( + at::TensorList tensors, + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +size_t get_max_count(); + +TORCH_CUDA_CPP_API void reduce( + const std::vector& inputs, + at::Tensor& output, + int32_t root = 0, + int32_t op = static_cast(ncclRedOp::Sum), + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +TORCH_CUDA_CPP_API void reduce( + std::vector& inputs, + int32_t root = 0, + int32_t op = static_cast(ncclRedOp::Sum), + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +TORCH_CUDA_CPP_API void all_reduce( + const std::vector& inputs, + std::vector& outputs, + int32_t op = static_cast(ncclRedOp::Sum), + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +TORCH_CUDA_CPP_API void reduce_scatter( + const std::vector& inputs, + std::vector& outputs, + int32_t op = static_cast(ncclRedOp::Sum), + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +TORCH_CUDA_CPP_API void scatter( + const std::vector& inputs, + at::Tensor& outputs, + ncclComm_t comm, + at::cuda::CUDAStream& stream, + int32_t root = 0); + +TORCH_CUDA_CPP_API void all_gather( + const std::vector& inputs, + std::vector& 
outputs, + const stream_list& streams = {}, + const comm_list& user_comms = {}); + +TORCH_CUDA_CPP_API void gather( + const at::Tensor& inputs, + std::vector& outputs, + ncclComm_t comm, + at::cuda::CUDAStream& stream, + int32_t root = 0); + +TORCH_CUDA_CPP_API void all2all_single_equal_split( + at::Tensor& input, + at::Tensor& output, + int size, + ncclComm_t comm, + at::cuda::CUDAStream& stream); + +TORCH_CUDA_CPP_API void all2all_single_unequal_split( + void* sendbuff, + const size_t* sendcounts, + const size_t* senddispls, + void* recvbuff, + const size_t* recvcounts, + const size_t* recvdispls, + size_t size, + c10::ScalarType type, + ncclComm_t comm, + at::cuda::CUDAStream& stream); + +TORCH_CUDA_CPP_API void all2all( + std::vector& outputTensors, + std::vector& inputTensors, + ncclComm_t _comm, + at::cuda::CUDAStream& stream); + +TORCH_CUDA_CPP_API void send( + const at::Tensor& input, + ncclComm_t comm, + at::cuda::CUDAStream stream, + int dst); + +TORCH_CUDA_CPP_API void recv( + at::Tensor& output, + ncclComm_t comm, + at::cuda::CUDAStream stream, + int src); +} // namespace torch::cuda::nccl diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h new file mode 100644 index 0000000000000000000000000000000000000000..e87ae053fbe7fc59d1713cf8b148dfe52cbd01dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_comm.h @@ -0,0 +1,7 @@ +#pragma once + +namespace torch::cuda::python { + +void initCommMethods(PyObject* module); + +} // namespace torch::cuda::python diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h new file mode 100644 index 0000000000000000000000000000000000000000..ebaa666a22d2cff60e2ef2a2701003d0ca61a8e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/cuda/python_nccl.h @@ -0,0 +1,13 @@ +#pragma once + +#include + +PyObject* THCPModule_nccl_version(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_version_suffix(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_unique_id(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_init_rank(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_reduce(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_all_reduce(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_broadcast(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_all_gather(PyObject* self, PyObject* args); +PyObject* THCPModule_nccl_reduce_scatter(PyObject* self, PyObject* args); diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h new file mode 100644 index 0000000000000000000000000000000000000000..3f3864077e2106650ca4ea126740f92d4ba3f824 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/autograd/context/container.h @@ -0,0 +1,167 @@ +#pragma once + +#include +#include + +#include + +namespace torch { +namespace distributed { +namespace autograd { + +// Singleton class per worker which is responsible for storing the distributed +// autograd context for each autograd pass and also cleans up data for an +// autograd pass once its done. 
+// +// Each autograd pass is assigned a unique autograd_context_id and all data for +// that pass (DistAutogradContext) is stored in this container indexed by the +// autograd_context_id. The autograd_context_id itself is a 64 bit globally +// unique id. The first 16 bits is the worker_id and the next 48 bits is an +// auto-incrementing id for each worker. +// +// This container is also responsible for maintaining a globally unique message +// id, which is used to associate send/recv autograd function pairs. The format +// is similar to the autograd_context_id where we have a 64 bit integer with +// first 16 bits being the worker id and next 48 bits are auto-incrementing. +class TORCH_API DistAutogradContainer { + public: + explicit DistAutogradContainer(uint32_t num_shards); + + // One time initialization of the container. + static DistAutogradContainer& init(int64_t worker_id); + + // Retrieve the singleton instance of the container, ensures we have + // initialized the container. + static DistAutogradContainer& getInstance(); + + // Create a new context for a distributed autograd pass. + const ContextPtr newContext(); + + // Clean up resources for a given context_id once the autograd pass is done. + // Sends RPC to other workers this worker knows about, telling them to clean + // up their context as well. Throws an exception if the context_id does not + // exist. + void releaseContext(int64_t context_id); + + // Releases an autograd context if it is present on this node. Also sends RPC + // to other workers this worker knows about, telling them to clean up their + // context. Does nothing if it is not present. + void releaseContextIfPresent(int64_t context_id); + + // Checks if the passed in context_id is valid. + void isValidContext(int64_t context_id); + + // Retrieve the autograd context for a given context_id. + ContextPtr retrieveContext(int64_t context_id); + + // Retrieves the currently active autograd context for the current thread. + ContextPtr currentContext(); + + // Checks whether or not the current thread has a valid autograd context. + bool hasValidContext() const; + + // Generate a new autograd_message_id for send/recv autograd functions. + int64_t newAutogradMessageId(); + + // Creates a new autograd context with the provided context_id. If a context + // already exists with the provided context_id, we just return it. + // This does not set the current context for the current thread. + ContextPtr getOrCreateContext(int64_t context_id); + + // Retrieves the maximum possible autograd_context_id/autograd_message_id that + // can be generated by this worker. + int64_t getMaxId(); + + // Retrieves the worker ID for this node + rpc::worker_id_t getWorkerId() const; + + // Can set current context id if there is no valid context yet + static void setCurrentContextId(int64_t contextId); + + // Forcibly sets the thread local current context id. Should only be used in + // cases where you know what you're doing and need to override the thread + // local. Otherwise, use setCurrentContextId instead. + static void forceCurrentContextId(int64_t contextId); + + // Clear current context id + void clearCurrentContext(); + + // Returns the number of autograd contexts in the container. + size_t numAutogradContexts() const; + + // Returns the current thread local context id for this thread. 
+ static int64_t currentContextId(); + + DistAutogradContainer(const DistAutogradContainer&) = delete; + DistAutogradContainer& operator=(const DistAutogradContainer&) = delete; + DistAutogradContainer(DistAutogradContainer&&) = delete; + DistAutogradContainer& operator=(DistAutogradContainer&&) = delete; + + private: + // Number of shards for the map storing autograd contexts. We'd like this + // to be a power of 2 and we don't expect a value much higher than the + // number of cores would provide much benefit. + static constexpr uint32_t kNumDefaultShards = 128; + + // Use cache line size for alignment. + static constexpr int kCacheLineSize = 64; + + // Structure holding one shard of the sharded autograd context map with its + // associated lock. Align to cache line size to avoid contention between + // adjacent entries. + struct alignas(kCacheLineSize) ContextsShard { + // Lock for this shard. + mutable std::mutex lock; + + // Map storing autograd contexts for this shard. + std::unordered_map contexts; + }; + + DistAutogradContainer() = delete; + ~DistAutogradContainer() = default; + + static DistAutogradContainer& getInstanceInternal(); + + // Retrieve the shard for given context_id. + ContextsShard& getShard(int64_t context_id); + + // Sends an RPC to the workers that have a context corresponding to passed in + // context_id. This function should be called with the lock. + void sendReleaseContextRpc( + const std::unordered_set& workerIds, + int64_t context_id); + + // Erase context_id from the autograd context map, and reset the thread local + // current context id if it corresponds to the passed in context id. This + // function should be called with the lock. + void eraseContextIdAndReset(ContextsShard& shard, int64_t context_id); + + // Compute the number of shards for the autograd_contexts_ map. + static uint32_t computeNumShards(); + + // Auto incrementing context id used to identify unique autograd passes. + // Initialized with the first 16 bits being the worker_id. + std::atomic next_context_id_; + + // Unique id to identify a worker in the distributed setting. + int16_t worker_id_; + + // Whether or not the container has been initialized appropriately. + bool initialized_; + + // Sharded autograd context map. + std::vector autograd_contexts_; + + // Number of shards for the sharded autograd_contexts_ map. + uint32_t num_shards_; + + // Autograd message id to identify unique send/recv autograd function pairs. + std::atomic next_autograd_message_id_; + + // Maximum allowed value for autograd_context_id or autograd_message_id. 
+ int64_t max_id_; +}; + +} // namespace autograd +} // namespace distributed +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h new file mode 100644 index 0000000000000000000000000000000000000000..6203905732667776ed9646d4ff3b4fa0ea2458de --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h @@ -0,0 +1,351 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +struct Def; +struct Property; +struct ClassDef; +struct SugaredValue; +struct Resolver; + +using ResolverPtr = std::shared_ptr; +struct Self { + virtual ~Self() = default; + virtual std::shared_ptr makeSugared(Value* v) const = 0; + virtual ClassTypePtr getClassType() const = 0; +}; + +// A CompilationUnit is a list of named Functions +// with helper methods to iterate the list or invoke the function. +// Classes have a CompilationUnit holding the class methods, +// and Modules have a CompilationUnit holding the Functions that +// are used to implement their Methods + +struct TORCH_API CompilationUnit { + enum class FunctionType { Method, Hook, PreHook }; + // constructor that takes a set of functions to compile using the native + // resolver + explicit CompilationUnit(const std::string& source); + CompilationUnit() = default; + + CompilationUnit& operator=(CompilationUnit&&) = default; + CompilationUnit(CompilationUnit&&) = default; + CompilationUnit& operator=(const CompilationUnit&) = delete; + CompilationUnit(const CompilationUnit&) = delete; + + Function* find_function(const c10::QualifiedName& name) const { + auto it = dict_.find(name); + if (it == dict_.end()) { + return nullptr; + } + return functions_[it->second].get(); + } + + Function& get_function(const c10::QualifiedName& name) const { + if (auto r = find_function(name)) { + return *r; + } + TORCH_CHECK(false, "attempted to get undefined function ", name.name()); + } + + void set_optimized(bool o) { + TORCH_WARN( + "CompilationUnit::set_optimized() is deprecated and has no effect. " + "Please use setGraphExecutorOptimize()"); + } + + bool is_optimized() const { + TORCH_WARN( + "CompilationUnit::is_optimized() is deprecated and always returns true. " + "Please use getGraphExecutorOptimize()"); + return true; + } + + // for historic reasons, these are defined in ir_emitter.cpp + // Returns the list of Functions just defined. + std::vector define( + const c10::optional& prefix, + const std::vector& properties, + const std::vector& propResolvers, + const std::vector& definitions, + const std::vector& + defResolvers, /* determines how we handle free + variables in each definition*/ + // if non-null, the first argument to each def, is bound to this value + const Self* self, + // see [name mangling] + bool shouldMangle = false, + c10::optional operator_set_version = c10::nullopt); + + void define_hooks( + const c10::optional& prefix, + const std::vector& hookDefs, + const std::vector& hookResolvers, + const std::vector& preHookDefs, + const std::vector& preHookResolvers, + const Self* self, + bool shouldMangle = false); + + // same as above but parse the definitions from source + // Returns the list of Functions just defined. 
+ std::vector define( + // prefix namespace to put all the defined functions into + const c10::optional& prefix, + const std::string& source, + const ResolverPtr& resolver, + const Self* self); + + void define_interface( + const c10::QualifiedName& qualifiedName, + const ClassDef& classDef, + ResolverPtr rcb, + bool is_module = false); + + Function* create_function( + c10::QualifiedName name, + std::shared_ptr graph, + bool shouldMangle = false) { + if (shouldMangle) { + name = mangle(name); + } + auto fn = std::make_unique( + std::move(name), std::move(graph), nullptr); + auto ret = fn.get(); + register_function(std::move(fn)); + return ret; + } + + std::vector get_functions() const { + return fmap(functions_, [](const std::unique_ptr& fn) { + return fn.get(); + }); + } + + /// Run a method from this compilation. + /// + /// For example: + /// @code + /// IValue output = module->run("relu_script", a, b); + /// @endcode + /// + /// To get a compile a module from a source string, see torch::jit::compile + /// + /// @param method_name The name of the method to run + /// @param args Arguments to be passed to the method + /// @return An IValue containing the return value (or values if it is a tuple) + /// from the method + template + IValue run_method(const c10::QualifiedName& method_name, Types&&... args) { + return get_function(method_name)({IValue(std::forward(args))...}); + } + + void drop_all_functions() { + dict_.clear(); + functions_.clear(); + } + + /** + * Register a class as being owned by this compilation unit. + */ + void register_type(c10::NamedTypePtr namedType) { + // TODO: class types cannot be redefined because we have no way right now + // of invalidating their methods. NamedTuples are fine though, since they + // don't have methods. + TORCH_CHECK( + 0 == classDict_.count(*namedType->name()), + "class '", + namedType->name()->qualifiedName(), + "' already defined."); + classes_.push_back(std::move(namedType)); + classDict_[*classes_.back()->name()] = classes_.size() - 1; + }; + + c10::ClassTypePtr get_class(const c10::QualifiedName& name) const { + auto type = get_type(name); + if (!type) { + return nullptr; + } + return type->cast(); + } + + c10::InterfaceTypePtr get_interface(const c10::QualifiedName& name) const { + auto type = get_type(name); + if (!type) { + return nullptr; + } + return type->cast(); + } + + c10::TupleTypePtr get_named_tuple(const c10::QualifiedName& name) const { + for (const auto& cls : classes_) { + if (cls->name()->qualifiedName() == name.qualifiedName()) { + return cls->expect(); + } + } + return nullptr; + } + + c10::NamedTypePtr get_type(const c10::QualifiedName& name) const { + auto it = classDict_.find(name); + if (it == classDict_.end()) { + return nullptr; + } + return classes_[it->second]; + } + + // For testing: clear all Python-defined classes to ensure that unit tests + // have isolation. + void _clear_python_cu() { + // Delete all the associated class methods + for (const auto& type : classes_) { + if (auto cls = type->cast()) { + for (auto method : cls->methods()) { + // Tombstone the method in the compilation unit. 
+ // Don't erase because the dict_ + auto it = dict_.find(method->qualname()); + if (it != dict_.end()) { + functions_[it->second] = nullptr; + // Erase in our big lookup table + dict_.erase(it); + } + } + // Classes can have multiple pointers to the same hook, + // need to make sure to not delete it twice + std::unordered_set hooks_to_delete; + for (const auto& hook : cls->getForwardHooks()) { + hooks_to_delete.insert(hook); + } + for (const auto& pre_hook : cls->getForwardPreHooks()) { + hooks_to_delete.insert(pre_hook); + } + for (const auto& hook : hooks_to_delete) { + // Tombstone the hook in the compilation unit. + auto it = dict_.find(hook->qualname()); + if (it != dict_.end()) { + functions_[it->second] = nullptr; + // Erase in our big lookup table + dict_.erase(it); + } + } + } + } + classes_.clear(); + classDict_.clear(); + } + + // [Internal Only] Remove method. + // Note Used for freezing. + void unsafeRemoveMethod(const c10::QualifiedName& method_name) { + auto it = dict_.find(method_name); + TORCH_CHECK( + it != dict_.end(), + "method '", + method_name.qualifiedName(), + "' does not exist."); + functions_[it->second] = nullptr; + dict_.erase(it); + } + + // [name mangling] All code objects must have a unique qualified name in a + // CompilationUnit. In Python, sometimes functions won't have unique qualified + // name (for example, nested functions). So we mangle Python functions to + // ensure that they are uniquely named. + // + // We also use mangling to distinguish different Module instances. Since each + // Module is a singleton class instance, different instances of the same + // Python Module will have different types but the same qualified name. + c10::QualifiedName mangle(const c10::QualifiedName& name) const { + auto mangled = name; + while (get_type(mangled) || find_function(mangled)) { + mangled = mangler_.mangle(mangled); + } + return mangled; + } + + private: + std::unique_ptr define( + const c10::optional& prefix, + const Def& def, + const ResolverPtr& resolver, + const Self* self, + const std::unordered_map& function_table, + bool shouldMangle = false, + FunctionType type = FunctionType::Method, + c10::optional version = c10::nullopt) const; + + // Define a property on \p self. + struct PropertyPair; + PropertyPair define_property( + const c10::optional& prefix, + const Property& prop, + const ResolverPtr& resolver, + const Self* self, + const std::unordered_map& function_table, + bool shouldMangle = false) const; + + Function& register_function(std::unique_ptr fn) { + TORCH_CHECK( + 0 == dict_.count(fn->qualname().qualifiedName()), + "method '", + fn->qualname().qualifiedName(), + "' already defined."); + functions_.emplace_back(std::move(fn)); + dict_[functions_.back()->qualname()] = functions_.size() - 1; + return *functions_.back(); + } + std::vector> functions_; + // for fast lookup + std::unordered_map dict_; + std::unordered_map classDict_; + + // [class ownership] Right now there are two relationships between classes + // and compilation units: + // 1. Classes have compilation units internally that hold their methods. + // 2. On load, the TypePtrs of any imported classes are owned by the main + // module's compilation unit. + std::vector classes_; + + mutable NameMangler mangler_; +}; + +// An owning pointer to a Function. Just a pair of a raw Function ptr and it's +// owning CU. We need this because pybind requires a ref-counted way to refer to +// Functions. 
+struct StrongFunctionPtr { + StrongFunctionPtr(std::shared_ptr cu, Function* function) + : cu_(std::move(cu)), function_(function) { + TORCH_INTERNAL_ASSERT(cu_); + TORCH_INTERNAL_ASSERT(function_); + } + std::shared_ptr cu_; + Function* function_; +}; + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. +using CompilationUnit = ::torch::jit::CompilationUnit; +} // namespace script +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..74663cfb41ce717d7bd6668a86d22e09471efb82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h @@ -0,0 +1,181 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +struct TORCH_API GraphFunction : public Function { + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + GraphFunction( + c10::QualifiedName name, + std::shared_ptr graph, + std::function function_creator, + c10::optional executor_execution_mode = + c10::nullopt) + : name_(std::move(name)), + graph_(std::move(graph)), + executor_execution_mode_(executor_execution_mode), + function_creator_(std::move(function_creator)) {} + + bool isGraphFunction() const override { + return true; + } + + void run(Stack& stack) override; + + std::function function_creator() const { + return function_creator_; + } + + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher taskLauncher = at::launch) override; + + std::shared_ptr graph() const { + return graph_; + } + + std::shared_ptr optimized_graph() const; + + const c10::QualifiedName& qualname() const override { + return name_; + } + + // private/unstable api. sets the initial execution mode + // will not affect executor if there is an existing executor + // created for this function + void _set_initial_executor_execution_mode(ExecutorExecutionMode mode) { + executor_execution_mode_ = mode; + } + // private/unstable api. sets flag of whether or not to ignore amp. + // will not affect executor if there is an existing executor + // created for this function + void _set_ignore_amp(bool ignore_amp) { + force_no_amp_ = ignore_amp; + } + + // if this isn't yet defined, run its method_creator function + void ensure_defined() override; + + size_t num_inputs() const override { + return graph()->inputs().size(); + } + + Function& setSchema(FunctionSchema schema) override { + schema_ = std::make_unique(std::move(schema)); + return *this; + } + + const FunctionSchema& getSchema() const override; + + GraphExecutorState getDebugState() { + return get_executor().getDebugState(); + } + + bool is_optimized() const { + TORCH_WARN( + "GraphFunction::is_optimized() is deprecated and always returns true. " + "Please use getGraphExecutorOptimize()"); + return true; + } + + void check_single_output() { + TORCH_CHECK( + graph()->outputs().size() == 1, + "Method (but not graphs in general) require a single output. 
Use None/Tuple for 0 or 2+ outputs"); + } + + GraphExecutor& get_executor() { + ensure_defined(); + std::lock_guard lock(compile_mutex); + auto& executor = executors_[currentSpecialization()]; + if (executor) { + return *executor; + } + check_single_output(); + const std::string& name = name_.name(); + std::shared_ptr opt_graph = optimized_graph(); + if (!executor_execution_mode_) { + executor = GraphExecutor(opt_graph, name); + } else { + executor = GraphExecutor(opt_graph, name, *executor_execution_mode_); + } + return *executor; + } + + using Function::call; + bool call( + Stack& stack, + c10::optional bailOut, + c10::function_ref f) override { + f(get_executor().getPlanFor(stack, bailOut).code); + return true; + } + + void clear_optimized_graphs() { + optimized_graphs_.fill(nullptr); + } + + private: + enum SpecializationKey { + AutocastOff, + CpuAutocastOn, + GpuAutocastOn, + CpuGpuAutocastOn, + + // This provides the number of specializations + // (Must be last entry) + TotalCount + }; + + SpecializationKey currentSpecialization() const; + + private: + c10::QualifiedName name_; + // The original, non-optimized graph + std::shared_ptr graph_; // for debugging and for inlining + + // allows users to specify Simple/Profiling Executor for function + // TODO: add more executors + mutable c10::optional executor_execution_mode_; + + // if invoked on a graph that has already traced through amp + // don't invoke amp pass + mutable bool force_no_amp_ = false; + // Optimized graph, computed lazily. Used for inlining. + mutable std::array, SpecializationKey::TotalCount> + optimized_graphs_; + + // GraphFunctions are invokable from multiple threads, so this lock needs to + // be held when we're initializing graph executor for the first time or + // computing the optimized graph. We're using reentrant mutex so that we don't + // need to worry about causing a deadlock by calling one method from another + // (e.g. optimized_graph() from get_executor()). + mutable std::recursive_mutex compile_mutex; + + // executor_[0] - autocast off + // executor_[1] - autocast cpu on + // executor_[2] - autocast gpu on + // executor_[3] - autocast cpu & gpu on + std::array, SpecializationKey::TotalCount> + executors_; + + // an optional function that actually creates the method when + // ensure_defined() is called. This is used by the compiler so + // that it can construct methods out of order + std::function function_creator_; + + // if absent, then we generate a default schema based on the graph + // mutable because getSchema caches the default schema if one is requested + // before a call to setSchema + mutable std::unique_ptr schema_; +}; + +// Short hands for dynamic_cast. +TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept; +TORCH_API GraphFunction& toGraphFunction(Function&); +TORCH_API const GraphFunction& toGraphFunction(const Function&); + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h new file mode 100644 index 0000000000000000000000000000000000000000..28675e5bd059f5e876e1b55c94b2c0a705aca28c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch::jit { + +using ObjectPtr = c10::intrusive_ptr; + +// A method in a module, e.g. 
f in: +// +// class M(ScriptModule): +// @script_method +// def f(self, x): +// ... +// Note: because Method/Module are exposed to python these +// classes use python method naming conventions +struct TORCH_API Method : public torch::IMethod { + Method(ObjectPtr owner, Function* function); + + // the module that contains this method. + Module owner() const; + // the raw objectptr that owns this method, for when the method is owned by a + // torchbind object. + ObjectPtr raw_owner() const; + void run(Stack& stack); + void run(Stack&& stack) { + run(stack); + } + + c10::IValue operator()( + std::vector stack, + const Kwargs& kwargs = Kwargs()) const override; + + // Run method async. Invocation on this function would invokes a JIT + // interpreter that executes ops inline, one by one, on caller's thread. A + // model can utilize async op, i.e. `fork`, to launch an asynchronous task + // which will be launched on provided `taskLauncher`. + c10::intrusive_ptr run_async( + std::vector stack, + const Kwargs& kwargs = Kwargs(), + TaskLauncher taskLauncher = at::launch); + + std::shared_ptr graph() const { + return toGraphFunction(*function_).graph(); + } + + const std::string& name() const override { + return function_->name(); + } + + size_t num_inputs() const { + return function_->num_inputs(); + } + + GraphExecutor& get_executor() { + return toGraphFunction(*function_).get_executor(); + } + + Function& function() const { + return *function_; + } + + private: + void setArgumentNames(std::vector&) const override; + + // Methods are uniqued onwed by a single module. This raw pointer allows + // looking up the module. + ObjectPtr owner_; + + // Underlying unbound function + Function* function_; +}; + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. +using Method = ::torch::jit::Method; +} // namespace script + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h new file mode 100644 index 0000000000000000000000000000000000000000..6c49b695cb6b5dec57e45f851f5db5b82533e4af --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h @@ -0,0 +1,685 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// This file contains classes which assist in desugaring Python style +// modules and their methods into flattened graphs which don't have any +// function calls. + +namespace torch::jit { + +using ::c10::Argument; +using ::c10::FunctionSchema; +using ::c10::QualifiedName; +// Map which stores filename to content. 
+using ExtraFilesMap = std::unordered_map; + +using ModulePtr = c10::intrusive_ptr; + +struct Module; + +template +struct slot_list_impl; + +template +struct Named { + std::string name; + T value; +}; + +using NameModule = Named; +using NameValue = Named; +using NameTensor = Named; + +namespace detail { +struct TORCH_API ModulePolicy; +struct TORCH_API ParameterPolicy; +struct TORCH_API AttributePolicy; +struct TORCH_API BufferPolicy; +template +struct NamedPolicy; +} // namespace detail + +using module_list = slot_list_impl; +using named_module_list = + slot_list_impl>; + +using parameter_list = slot_list_impl; +using named_parameter_list = + slot_list_impl>; + +using attribute_list = slot_list_impl; +using named_attribute_list = + slot_list_impl>; + +using buffer_list = slot_list_impl; +using named_buffer_list = + slot_list_impl>; + +using ModuleLookup = std::function&)>; + +struct TORCH_API Module : public Object { + explicit Module(c10::QualifiedName class_name); + Module(std::shared_ptr cu, const c10::ClassTypePtr& type); + Module() = default; + Module(const Module&) = default; + Module& operator=(const Module&) = default; + Module(Module&&) noexcept = default; + Module& operator=(Module&&) noexcept = default; + Module( + c10::QualifiedName, + std::shared_ptr cu, + bool shouldMangle = false); + Module(ModulePtr module_value) : Object(std::move(module_value)) {} + ~Module() = default; + + void set_optimized(bool o) { + TORCH_WARN( + "Module::set_optimized() is deprecated and has no effect. " + "Please use setGraphExecutorOptimize()"); + } + + bool is_optimized() const { + TORCH_WARN( + "Module::is_optimized() is deprecated and always returns true. " + "Please use getGraphExecutorOptimize()"); + return true; + } + + IValue forward(std::vector inputs, const Kwargs& kwargs = Kwargs()) { + return get_method("forward")(std::move(inputs), kwargs); + } + + // In script modules, buffers are Tensors attribute that are _not_ registered + // as parameters. This is different than in nn.Module where there is a special + // register_buffer method. With this simplification, we only need to track + // whether a slot is a parameter to be able to classify it. 
+ void register_buffer(const std::string& name, at::Tensor v) { + bool is_param = false; + bool is_buffer = true; + std::lock_guard lock(*register_mutex_); + type()->addOrCheckAttribute(name, TensorType::get(), is_param, is_buffer); + _ivalue()->setAttr(name, std::move(v)); + } + + void register_parameter( + const std::string& name, + at::Tensor v, + bool is_buffer) { + std::lock_guard lock(*register_mutex_); + type()->addOrCheckAttribute(name, TensorType::get(), !is_buffer, is_buffer); + _ivalue()->setAttr(name, std::move(v)); + } + + void register_attribute( + const std::string& name, + const TypePtr& t, + IValue v, + bool is_param = false, + bool is_buffer = false) { + type()->addOrCheckAttribute(name, t, is_param, is_buffer); + _ivalue()->setAttr(name, std::move(v)); + } + + void register_module(const std::string& name, const Module& module) { + type()->addOrCheckAttribute(name, module.type()); + _ivalue()->setAttr(name, module._ivalue()); + } + + void apply(const std::function& fn); + + buffer_list buffers(bool recurse = true) const; + named_buffer_list named_buffers(bool recurse = true) const; + + module_list children() const; // direct modules + named_module_list named_children() const; + module_list modules() const; // all modules, including this one, recursively + named_module_list named_modules() const; + + // all tensors involved in gradient optimization + parameter_list parameters(bool recurse = true) const; + named_parameter_list named_parameters(bool recurse = true) const; + + // all members of the object, similar to iterating over dir(obj) in python + attribute_list attributes(bool recurse = true) const; + named_attribute_list named_attributes(bool recurse = true) const; + + void dump( + bool print_method_bodies, + bool print_attr_values, + bool print_param_values) const; + + std::string dump_to_str( + bool print_method_bodies, + bool print_attr_values, + bool print_param_values) const; + + /// Enables "training" mode. + void train(bool on = true); + /// Calls train(false) to enable "eval" mode. + /// Do not override this method, override `train()` instead. + void eval() { + train(/*on=*/false); + } + /// True if the module is in training mode. + bool is_training() const { + return attr("training", true).toBool(); + } + + /// Recursively casts all parameters to the given `dtype` and `device`. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + void to(at::Device device, at::ScalarType dtype, bool non_blocking = false); + + /// Recursively casts all parameters to the given dtype. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + void to(at::ScalarType dtype, bool non_blocking = false); + + /// Recursively moves all parameters to the given device. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. 
+ void to(at::Device device, bool non_blocking = false); + + void save( + std::ostream& out, + const ExtraFilesMap& extra_files = ExtraFilesMap()) const; + + void save( + const std::string& filename, + const ExtraFilesMap& extra_files = ExtraFilesMap()) const; + + void _save_for_mobile( + std::ostream& out, + const ExtraFilesMap& extra_files = ExtraFilesMap(), + bool save_mobile_debug_info = false, + bool use_flatbuffer = false) const; + + void _save_for_mobile( + const std::string& filename, + const ExtraFilesMap& extra_files = ExtraFilesMap(), + bool save_mobile_debug_info = false, + bool use_flatbuffer = false) const; + + Module copy() const; + + Module deepcopy(c10::optional device = c10::nullopt) const; + + // Clones both the underlying `ClassType` and the module instance(data), this + // function creates a new `ClassType` and returns a new instance that has the + // same data as the current instance but with the new type, shared ClassType + // will be preserved as well + Module clone(bool inplace = false) const; + + // Clones both the underlying `ClassType` and the module instance(data), this + // function creates a new `ClassType` and returns a new instance that has the + // same data as the current instance but with the new type, shared ClassType + // will be preserved as well. Also allows the caller to specify a set of + // method and attribute names to not clone. + Module clone( + bool inplace, + const std::unordered_set& ignored_method, + const std::unordered_set& ignored_attributes) const; + + void clone_method(const Module& orig, const std::string& name); + + IValue operator()(std::vector inputs); + + template + IValue create_class(const c10::QualifiedName& name, Types&&... args) const { + return create_class(name, {IValue(std::forward(args))...}); + } + + IValue create_class(const c10::QualifiedName& name, Stack stack) const; + + inline bool operator==(const Module& y) const noexcept { + return _ivalue() == y._ivalue(); + } + + void set_delete_memory(std::shared_ptr delete_mem) { + mem_to_delete_ = std::move(delete_mem); + } + + // A set of functions to maintain input shapes through torch.jit.save and + // torch.jit.load. It only works on tensors and lists/dicts of tensors + // because tracing is only supported by these types. + void store_traced_inputs(std::string func_name, std::vector inputs) { + if (inputs.size() == 0) { + return; + } + auto c10_inputs = c10::impl::GenericList(AnyType::get()); + for (IValue& value : inputs) { + // Not checking whether this is traceable type as that is already checked + // higher up in the stack and changing that would require a larger + // restructuring. 
+ c10_inputs.emplace_back(std::move(value)); + } + traced_inputs_.insert_or_assign(func_name, c10_inputs); + } + + c10::Dict retrieve_traced_inputs() + const { + return traced_inputs_; + } + + private: + Module clone_impl( + std::unordered_map& type_remap, + bool inplace, + IValue::HashAliasedIValueMap memo, + const std::unordered_set& ignored_methods, + const std::unordered_set& ignored_attributes) const; + + void clone_method( + const Module& orig, + const Function& method, + const std::unordered_map& type_remap); + + c10::QualifiedName getNameForMethod(std::string basename) const { + return QualifiedName(*type()->name(), std::move(basename)); + } + + void to_impl( + const c10::optional& device, + const c10::optional& dtype, + bool non_blocking); + + // Extra handle for the module to delete when itself is deleted + std::shared_ptr mem_to_delete_; + + // Map of function names to the traced inputs that they have been traced with + c10::Dict traced_inputs_; + + // Mutex to keep registring buffer or parameter thread safe. + std::shared_ptr register_mutex_ = std::make_shared(); +}; + +// C++ equivalent api of `torch.jit.freeze`. See documentation there for +// details. +TORCH_API Module freeze( + const Module& module, + const c10::optional>& preserved_attrs = + c10::nullopt, + bool optimize_numerics = true); + +// C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation +// there for details. +TORCH_API Module optimize_for_inference( + Module& module, + const std::vector& other_methods = {}); + +enum class FusionBehavior { STATIC, DYNAMIC }; + +using FusionStrategy = std::vector>; +// clang-format off +/* +Sets the type and number of specializations that can occur during fusion. + +Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC +and depth is an integer. + +Behavior - static vs dynamic: + In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined + based on some initial profiling runs. + In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple + shapes are possible. + +In both cases, we also recompile on new striding behavior, device, or dtype. + +Behavior - fallback functions & depth: + When an input doesn't match the format required by the specialized compiled op, it will run + a fallback function. Fallback functions are recursively be compiled and specialized based + on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to + limit the number of specializations that can be compiled, before giving up on recompiling and + falling back to a completely un-fused, un-specialized implementation. + +The list of (type, depth) pairs controls the type of specializations and the number of +specializations. For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first +two specializations will use static fusions, the following two specializations will use +dynamic fusion, and any inputs that satisfy none of the 4 options will run an +unfused implementation. + +NB: in the future, if more as more fusion backends are added there may be more granular +apis for specific fusers. 
+*/ +// clang-format on +TORCH_API FusionStrategy getFusionStrategy(); +// returns previous strategy +TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy); + +namespace detail { + +struct TORCH_API SlotCursor { + Module module_; + int64_t i_; // slot offset, -1 indicates the module itself +}; + +} // namespace detail + +// This iterator allows the (optionally recursive) enumeration of +// the members of a Module. It performs a depth-first pre-order +// traversal of the module. The Policy template parameter determines +// which slots of the object should be included. For instance, +// when iterating parameters, we return the parameter tensors, +// but skip modules, buffers, and other attributes. +// See ModulePolicy for comments about Policy object's API. +template +struct slot_iterator_impl { + using SlotCursor = detail::SlotCursor; + using value_type = typename Policy::value_type; + slot_iterator_impl( + Module root, + bool recurse, // if true, do a depth-first search, otherwise, just look at + // slots of root + bool return_module) // if true include root itself as the first thing + // visited (used in modules()) + : cursors_({SlotCursor{std::move(root), return_module ? -1 : 0}}), + recurse_(recurse) { + // advance iterator to first valid element (or the end, if empty) + while_not_valid_next(); + } + // empty cursors_, represents end of iteration + slot_iterator_impl() : recurse_(false) {} + value_type operator*() const { + return Policy::create(cursors_, cur()); + } + value_type operator->() const { + return **this; + } + slot_iterator_impl& operator++() { + next_valid(); + return *this; + } + slot_iterator_impl operator++(int) { + // this is really expensive, should we delete it so people don't use it + // instead of prefix? + slot_iterator_impl old = *this; + ++(*this); + return old; + } + + private: + // return_module() is a corner case where instead of returning a submodule + // of root, we are returning root itself, because we are iterating modules(), + // which contains the root module itself. + // It is represented with a single SlotCursor whose index is -1. + bool return_module() const { + return top().i_ == -1; + } + const SlotCursor& top() const { + return cursors_.back(); + } + SlotCursor& top() { + return cursors_.back(); + } + IValue cur() const { + return return_module() ? top().module_._ivalue() + : top().module_._ivalue()->getSlot(top().i_); + } + + // advance to the next slot in a depth first pre-order traversal of the + // modules slots. This function does not guarantee the next slot is a + // valid element of the iteration. That is done by valid(). + // invariant: !cursors_.empty() + void next() { + // we just returned the module itself, advance i_ to 0 so we are now + // at the first slot of the module. + if (return_module()) { + ++top().i_; + return; + } + // the last traversal action advanced beyond the number of slots in the + // module so continue the iteration in the parent. + if (top().i_ >= int64_t(top().module_._ivalue()->type()->numAttributes())) { + cursors_.pop_back(); + if (!cursors_.empty()) { + ++top().i_; + } + return; + } + // if the current thing is a module, we have to scan it for recursive + // traversals. We do this by adding a new SlotCursor to track the traversal. + if (recurse_ && + top().module_._ivalue()->type()->getAttribute(top().i_)->is_module()) { + cursors_.emplace_back(SlotCursor{cur().toModule(), 0}); + return; + } + // common case: advance to the next slot. 
+ ++top().i_; + } + // is the current position of the iterator a valid one? + // otherwise, we have to continue advancing. + bool valid() const { + return top().i_ < + int64_t(top().module_._ivalue()->type()->numAttributes()) && + Policy::valid( + top().module_._ivalue()->type(), + top().i_, + top().module_._ivalue()->getSlot(top().i_)); + } + void while_not_valid_next() { + // advance iteration until we are either at the end (cursors_.empty()) + // or in a valid state. return_module() is a special case, + // and is always considered valid, regardless of Policy, because it is + // it is only true when we are iterating modules. + while (!cursors_.empty() && !return_module() && !valid()) { + next(); + } + } + void next_valid() { + // avoid crashing if this is empty + if (cursors_.empty()) { + return; + } + // advance to next element, which is maybe not valid + next(); + while_not_valid_next(); + } + + std::vector cursors_; + bool recurse_; + + friend inline bool operator!=( + const slot_iterator_impl& a, + const slot_iterator_impl& b) { + // we are finished iteration when we have no more iteration SlotCursors. + // end is always an empty iterator with no cursors. + return (a.cursors_.empty() != b.cursors_.empty()); + } +}; + +// This type represents lists of parameters, attributes, and +// submodules contained in the module. It is abstract because +// they are not stored directly in std::vectors but inside the +// module's IValue object itself. +template +struct slot_list_impl { + using iterator = slot_iterator_impl; + using const_iterator = slot_iterator_impl; + using value_type = typename iterator::value_type; + slot_iterator_impl begin() const { + return slot_iterator_impl(module_, recurse_, return_module_); + } + slot_iterator_impl end() const { + return slot_iterator_impl(); + } + size_t size() const { + if (!size_) { + size_ = size_t(0); + // NOLINTNEXTLINE(clang-diagnostic-unused-variable) + for (const value_type& s : *(this)) { + (void)s; // Suppress unused variable warning + ++*size_; + } + } + return *size_; + } + + slot_list_impl(Module module, bool recurse, bool return_module) + : module_(std::move(module)), + recurse_(recurse), + return_module_(return_module), + size_(c10::nullopt) { + if (!recurse && !return_module && Policy::all_slots) { + size_ = module_.num_slots(); + } + } + + private: + Module module_; + bool recurse_; + bool return_module_; + // size of this list, cached on first request + // when we need to filter the slot list + mutable c10::optional size_; + friend struct Module; +}; + +namespace detail { + +// slot_iterator_impl always iterate over all the slots in a module, +// the Policy template argument determines slots should be returned and their +// types +struct TORCH_API ModulePolicy { + // the type of the value being returned + using value_type = Module; + + // the logic for creating the type being returned, given the raw IValue + // of that object. + static value_type create( + const std::vector& cursors, + IValue v) { + return Module(std::move(v).toObject()); + } + // is slot i in typ something that this iterator should return, otherwise, + // we skip it. + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return typ->getAttribute(i)->is_module(); + } + // are we going to return everything? If so, we can optimize the calculate + // of the size of the list. 
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false; +}; + +struct TORCH_API ParameterPolicy { + using value_type = at::Tensor; + static value_type create( + const std::vector& cursors, + IValue v) { + return std::move(v).toTensor(); + } + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return typ->is_parameter(i) && v.isTensor(); + } + static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false; +}; + +struct TORCH_API BufferPolicy { + using value_type = at::Tensor; + static value_type create( + const std::vector& cursors, + IValue v) { + return std::move(v).toTensor(); + } + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) && + typ->is_buffer(i); + } + static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false; +}; + +struct TORCH_API AttributePolicy { + using value_type = IValue; + static value_type create( + const std::vector& cursors, + IValue v) { + return v; + } + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return true; + } + static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = true; +}; + +// take a Policy object, and make a version of it that returns the slot. +// along with the fully qualified name of that slot. This is used for the named_ +// variants like named_parameters(). +template +struct NamedPolicy { + using value_type = Named; + static value_type create( + const std::vector& cursors, + IValue v) { + std::string name; + if (cursors.size() == 1) { + name = (cursors.back().i_ == -1) ? "" : nameFragment(cursors.back()); + } else { + std::ostringstream ss; + for (const auto i : c10::irange(cursors.size())) { + if (i > 0) { + ss << "."; + } + ss << nameFragment(cursors[i]); + } + name = ss.str(); + } + return value_type{std::move(name), Policy::create(cursors, std::move(v))}; + } + static bool valid(const ClassTypePtr& t, size_t i, const IValue& v) { + return Policy::valid(t, i, v); + } + static constexpr bool all_slots = Policy::all_slots; + + private: + static std::string nameFragment(const detail::SlotCursor& f) { + return f.module_.type()->getAttributeName(f.i_); + } +}; + +} // namespace detail + +TORCH_API bool& getInlineEverythingMode(); + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. +using Module = ::torch::jit::Module; +using ExtraFilesMap = ::torch::jit::ExtraFilesMap; +} // namespace script + +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h new file mode 100644 index 0000000000000000000000000000000000000000..7ccacf385be538f8f8e2ad738745e6874ce9ea62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h @@ -0,0 +1,200 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch::jit { + +struct Resolver; +using ResolverPtr = std::shared_ptr; + +using ObjectPtr = c10::intrusive_ptr; + +// Throw this in C++ land if `attr` fails. 
This will be converted to a Python +// AttributeError by the Python binding code +class ObjectAttributeError : public std::runtime_error { + public: + ObjectAttributeError(const std::string& what) : std::runtime_error(what) {} +}; + +struct TORCH_API Object { + Object() = default; + Object(const Object&) = default; + Object& operator=(const Object&) = default; + Object(Object&&) noexcept = default; + Object& operator=(Object&&) noexcept = default; + Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {} + Object(std::shared_ptr cu, const c10::ClassTypePtr& type); + Object( + c10::QualifiedName, + std::shared_ptr cu, + bool shouldMangle = false); + + ObjectPtr _ivalue() const { + TORCH_INTERNAL_ASSERT(_ivalue_); + return _ivalue_; + } + + c10::ClassTypePtr type() const { + return _ivalue()->type(); + } + + struct Property { + std::string name; + Method getter_func; + c10::optional setter_func; + }; + + void setattr(const std::string& name, c10::IValue v) { + if (_ivalue()->type()->hasConstant(name)) { + TORCH_CHECK( + false, + "Can't set constant '", + name, + "' which has value:", + _ivalue()->type()->getConstant(name)); + } else if (auto slot = _ivalue()->type()->findAttributeSlot(name)) { + const c10::TypePtr& expected = _ivalue()->type()->getAttribute(*slot); + TORCH_CHECK( + v.type()->isSubtypeOf(*expected), + "Expected a value of type '", + expected->repr_str(), + "' for field '", + name, + "', but found '", + v.type()->repr_str(), + "'"); + _ivalue()->setSlot(*slot, std::move(v)); + } else { + TORCH_CHECK(false, "Module has no attribute '", name, "'"); + } + } + + c10::IValue attr(const std::string& name) const { + if (auto r = _ivalue()->type()->findAttributeSlot(name)) { + return _ivalue()->getSlot(*r); + } + if (auto r = _ivalue()->type()->findConstantSlot(name)) { + return _ivalue()->type()->getConstant(*r); + } + std::stringstream err; + err << _ivalue()->type()->repr_str() << " does not have a field with name '" + << name.c_str() << "'"; + throw ObjectAttributeError(err.str()); + } + + c10::IValue attr(const std::string& name, c10::IValue or_else) const { + if (auto r = _ivalue()->type()->findAttributeSlot(name)) { + return _ivalue()->getSlot(*r); + } + if (auto r = _ivalue()->type()->findConstantSlot(name)) { + return _ivalue()->type()->getConstant(*r); + } + return or_else; + } + + bool hasattr(const std::string& name) const { + return _ivalue()->type()->hasAttribute(name) || + _ivalue()->type()->hasConstant(name); + } + + // each object owns its methods. 
The reference returned here + // is guaranteed to stay valid until this module has been destroyed + Method get_method(const std::string& name) const { + if (auto method = find_method(name)) { + return *method; + } + AT_ERROR("Method '", name, "' is not defined."); + } + + const std::vector get_methods() const { + return c10::fmap(type()->methods(), [&](Function* func) { + return Method(_ivalue(), func); + }); + } + + bool has_property(const std::string& name) const { + for (const auto& prop : type()->properties()) { + if (prop.name == name) { + return true; + } + } + return false; + } + + const Property get_property(const std::string& name) const { + for (const auto& prop : type()->properties()) { + if (prop.name == name) { + c10::optional setter = c10::nullopt; + if (prop.setter) { + setter = Method(_ivalue(), prop.setter); + } + return Property{ + prop.name, Method(_ivalue(), prop.getter), std::move(setter)}; + } + } + AT_ERROR("Property '", name, "' is not defined."); + } + + const std::vector get_properties() const { + return c10::fmap(type()->properties(), [&](ClassType::Property prop) { + c10::optional setter = c10::nullopt; + if (prop.setter) { + setter = Method(_ivalue(), prop.setter); + } + return Property{ + std::move(prop.name), + Method(_ivalue(), prop.getter), + std::move(setter)}; + }); + } + + c10::optional find_method(const std::string& basename) const; + + /// Run a method from this module. + /// + /// For example: + /// @code + /// IValue output = module->run("relu_script", a, b); + /// @endcode + /// + /// To get a compile a module from a source string, see torch::jit::compile + /// + /// @param method_name The name of the method to run + /// @param args Arguments to be passed to the method + /// @return An IValue containing the return value (or values if it is a tuple) + /// from the method + template + IValue run_method(const std::string& method_name, Types&&... args) { + return get_method(method_name)({IValue(std::forward(args))...}); + } + + // so that C++ users can easily add methods + void define(const std::string& src, const ResolverPtr& resolver = nullptr); + + size_t num_slots() const { + return _ivalue()->slots().size(); + } + + // shallow copy the object + Object copy() const; + + // Copies all the attributes of the object recursively without creating new + // `ClassType`, including deepcopy of Tensors + Object deepcopy() const; + + private: + // mutable be we lazily initialize in module_object. + mutable ObjectPtr _ivalue_; +}; + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. 
+using Object = ::torch::jit::Object; +} // namespace script +} // namespace torch::jit diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h new file mode 100644 index 0000000000000000000000000000000000000000..5aae642fa5517b8dd518117682734f24404c4ee7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h @@ -0,0 +1,119 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { +namespace jit { +namespace { +// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration) +inline c10::FunctionSchema getIsAvailableSchema() { + c10::Argument self("self", c10::AnyType::get()); + c10::Argument available("available", c10::BoolType::get()); + c10::FunctionSchema preprocessor_schema( + "is_available", + /*overload_name=*/"", + /*arguments=*/{self}, + /*returns=*/{available}); + return preprocessor_schema; +} + +constexpr static auto kBackendsNamespace = "__backends__"; + +// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration) +inline c10::FunctionSchema getCompileSchema() { + c10::Argument self("self", c10::AnyType::get()); + c10::Argument mod("processed", c10::AnyType::get()); + auto any_dict_ty = + c10::DictType::create(c10::StringType::get(), c10::AnyType::get()); + c10::Argument method_compile_spec("method_compile_spec", any_dict_ty); + c10::Argument handles("handles", any_dict_ty); + + c10::FunctionSchema compile_schema( + "compile", + /*overload_name=*/"", + /*arguments=*/{self, mod, method_compile_spec}, + /*returns=*/{handles}); + return compile_schema; +} + +// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration) +inline c10::FunctionSchema getExecuteSchema() { + auto any_list_ty = c10::ListType::create(c10::AnyType::get()); + c10::Argument self("self", c10::AnyType::get()); + c10::Argument handle("handle", c10::AnyType::get()); + c10::Argument input("input", any_list_ty); + c10::Argument output("output", any_list_ty); + return c10::FunctionSchema( + "execute", + /*overload_name=*/"", + /*arguments=*/{self, handle, input}, + /*returns=*/{output}); +} + +template +std::function getIsAvailableFunc() { + return [](Stack& stack) { + auto self = pop(stack).toCustomClass(); + auto ret = self->is_available(); + push(stack, ret); + }; +} + +template +std::function getCompileFunc() { + return [](Stack& stack) { + auto method_compile_spec = pop(stack).toGenericDict(); + auto processed = pop(stack); + auto self = pop(stack).toCustomClass(); + auto ret = self->compile(processed, method_compile_spec); + push(stack, ret); + }; +} + +template +std::function getExecuteFunc() { + return [](Stack& stack) { + auto args = pop(stack); + auto handle = pop(stack); + auto self = pop(stack); + auto backend = self.toCustomClass(); + auto res = backend->execute(handle, args.toList()); + push(stack, res); + }; +} +} // namespace + +// Static registration API for backends. +template +class backend { + static_assert( + std::is_base_of::value, + "torch::jit::backend requires T to inherit from PyTorchBackendInterface"); + std::string backend_name_; + + public: + // Registers a new backend with /p name, and the given /p preprocess + // function. 
+ backend(const std::string& name) : backend_name_(name) { + static auto cls = torch::class_(kBackendsNamespace, name) + .def(torch::init<>()) + ._def_unboxed( + "is_available", + getIsAvailableFunc(), + getIsAvailableSchema()) + ._def_unboxed( + "compile", + getCompileFunc(), + getCompileSchema()) + ._def_unboxed( + "execute", + getExecuteFunc(), + getExecuteSchema()); + } +}; + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h new file mode 100644 index 0000000000000000000000000000000000000000..0e100a60bdae150d0913819a8e343abe91f5d8b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_exception.h @@ -0,0 +1,54 @@ +#pragma once +#include + +namespace c10 { +class TORCH_API BackendRuntimeException : public c10::Error { + public: + // Use debug_handle to throw exception + BackendRuntimeException( + SourceLocation loc, + std::string msg, + int64_t debug_handle) + : c10::Error(loc, msg) { + debug_handles.push_back(debug_handle); + } + // If rethrowing, can push another debug_handle + // This is useful in couple of scenarios. + // 1. A submodule is lowered and lite interperter has CallMethod + // to lowered module's method. In this case lowered module will throw with + // a handle, plus there will be another debug handle corresponding + // to the CallMethod node in lite interpreter. Both together give complete + // trace. This function allows lite interpreter to rethrow with debug + // handle it has for CallMethod. + // 2. Another scenarios is when lite interperter can make function calls or + // the lowered backend also has function call ability. Thus we have + // multiple function frames. Now we need a stack of handles to symbolicate + // entire stack trace. + void pushDebugHandle(int64_t debug_handle) { + debug_handles.push_back(debug_handle); + } + const std::vector& getDebugHandles() { + return debug_handles; + } + + private: + // Stores stack of debug handles. + std::vector debug_handles; +}; + +} // namespace c10 +#define TORCH_DELEGATED_BACKEND_THROW(cond, msg, debug_handle) \ + if (C10_UNLIKELY_OR_CONST(!(cond))) { \ + throw ::c10::BackendRuntimeException( \ + {__func__, __FILE__, static_cast(__LINE__)}, \ + msg, \ + debug_handle); \ + } + +#define TORCH_DELEGATED_BACKEND_RETHROW(e, debug_handle) \ + do { \ + e.pushDebugHandle(debug_handle); \ + throw; \ + } while (false) + +#define DEBUG_HANDLE_UNKNOWN -1 diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h new file mode 100644 index 0000000000000000000000000000000000000000..0ccdfe2c9ebd992fa7c0c50c01159ce06b7f53d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include +#include +#include + +/* + * This file contains APIs for cuda fuser; + * + * We use an empty static struct to hold the function pointers, which are + * registered separately. This is to support cpu-only compilation. 
+ * Registration is done in torch/csrc/jit/codegen/cuda/register_interface.cpp + */ + +namespace torch { +namespace jit { +namespace fuser { +namespace cuda { + +TORCH_API std::atomic& getCudaFusionGuardMode(); + +TORCH_API bool getSingletonFusion(); +TORCH_API bool setSingletonFusion(bool value); +TORCH_API bool getHorizontalFusion(); +TORCH_API bool setHorizontalFusion(bool value); + +// dummy struct to allow API registration +struct CudaFuserInterface { + void (*fn_compile_n)(Node*) = nullptr; + void (*fn_run_n_s)(const Node*, Stack&) = nullptr; + void (*fn_fuse_graph)(std::shared_ptr&) = nullptr; + bool (*fn_can_fuse_n)(const Node*) = nullptr; + void (*fn_insert_profile_inodes)(ProfilingRecord* pr) = nullptr; + bool (*fn_profile_n)(const Node*) = nullptr; + bool (*fn_skip_n)(const std::string&, bool flip) = nullptr; +}; + +// Get interface, this is used by registration and user facing API internally +TORCH_API CudaFuserInterface* getFuserInterface(); + +TORCH_API void compileFusionGroup(Node* fusion_node); +TORCH_API void runFusionGroup(const Node* fusion_node, Stack& stack); +TORCH_API void fuseGraph(std::shared_ptr&); +TORCH_API bool canFuseNode(const Node* node); +TORCH_API void InsertProfileNodesForCUDAFuser(ProfilingRecord* pr); +TORCH_API bool profileNode(const Node* node); + +TORCH_API bool skipNode(const std::string& symbol_str, bool flip = true); + +TORCH_API bool isEnabled(); +TORCH_API bool setEnabled(bool is_enabled); +TORCH_API bool canBeEnabled(); + +} // namespace cuda +} // namespace fuser +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h new file mode 100644 index 0000000000000000000000000000000000000000..c3a0fa303c7c566d72a45306d4f063bd52accadd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_log.h @@ -0,0 +1,128 @@ +#pragma once +#include +#include +#include +#include +#include + +// `TorchScript` offers a simple logging facility that can enabled by setting an +// environment variable `PYTORCH_JIT_LOG_LEVEL`. + +// Logging is enabled on a per file basis. To enable logging in +// `dead_code_elimination.cpp`, `PYTORCH_JIT_LOG_LEVEL` should be +// set to `dead_code_elimination.cpp` or, simply, to `dead_code_elimination` +// (i.e. `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination`). + +// Multiple files can be logged by separating each file name with a colon `:` as +// in the following example, +// `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination:guard_elimination` + +// There are 3 logging levels available for your use ordered by the detail level +// from lowest to highest. + +// * `GRAPH_DUMP` should be used for printing entire graphs after optimization +// passes +// * `GRAPH_UPDATE` should be used for reporting graph transformations (i.e. +// node deletion, constant folding, etc) +// * `GRAPH_DEBUG` should be used for providing information useful for debugging +// the internals of a particular optimization pass or analysis + +// The default logging level is `GRAPH_DUMP` meaning that only `GRAPH_DUMP` +// statements will be enabled when one specifies a file(s) in +// `PYTORCH_JIT_LOG_LEVEL`. + +// `GRAPH_UPDATE` can be enabled by prefixing a file name with an `>` as in +// `>alias_analysis`. +// `GRAPH_DEBUG` can be enabled by prefixing a file name with an `>>` as in +// `>>alias_analysis`. 
+// `>>>` is also valid and **currently** is equivalent to `GRAPH_DEBUG` as there +// is no logging level that is higher than `GRAPH_DEBUG`. + +namespace torch { +namespace jit { + +struct Node; +struct Graph; + +enum class JitLoggingLevels { + GRAPH_DUMP = 0, + GRAPH_UPDATE, + GRAPH_DEBUG, +}; + +TORCH_API std::string get_jit_logging_levels(); + +TORCH_API void set_jit_logging_levels(std::string level); + +TORCH_API void set_jit_logging_output_stream(std::ostream& out_stream); + +TORCH_API std::ostream& get_jit_logging_output_stream(); + +TORCH_API std::string getHeader(const Node* node); + +TORCH_API std::string log_function(const std::shared_ptr& graph); + +TORCH_API ::torch::jit::JitLoggingLevels jit_log_level(); + +// Prefix every line in a multiline string \p IN_STR with \p PREFIX. +TORCH_API std::string jit_log_prefix( + const std::string& prefix, + const std::string& in_str); + +TORCH_API std::string jit_log_prefix( + ::torch::jit::JitLoggingLevels level, + const char* fn, + int l, + const std::string& in_str); + +TORCH_API bool is_enabled( + const char* cfname, + ::torch::jit::JitLoggingLevels level); + +TORCH_API std::ostream& operator<<( + std::ostream& out, + ::torch::jit::JitLoggingLevels level); + +#define JIT_LOG(level, ...) \ + if (is_enabled(__FILE__, level)) { \ + ::torch::jit::get_jit_logging_output_stream() \ + << ::torch::jit::jit_log_prefix( \ + level, __FILE__, __LINE__, ::c10::str(__VA_ARGS__)); \ + } + +// tries to reconstruct original python source +#define SOURCE_DUMP(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DUMP, \ + MSG, \ + "\n", \ + ::torch::jit::log_function(G)); +// use GRAPH_DUMP for dumping graphs after optimization passes +#define GRAPH_DUMP(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DUMP, MSG, "\n", (G)->toString()); +// use GRAPH_UPDATE for reporting graph transformations (i.e. node deletion, +// constant folding, CSE) +#define GRAPH_UPDATE(...) \ + JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_UPDATE, __VA_ARGS__); +// use GRAPH_DEBUG to provide information useful for debugging a particular opt +// pass +#define GRAPH_DEBUG(...) \ + JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_DEBUG, __VA_ARGS__); +// use GRAPH_EXPORT to export a graph so that the IR can be loaded by a script +#define GRAPH_EXPORT(MSG, G) \ + JIT_LOG( \ + ::torch::jit::JitLoggingLevels::GRAPH_DEBUG, \ + MSG, \ + "\n\n", \ + (G)->toString(), \ + ""); + +#define GRAPH_DUMP_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DUMP)) +#define GRAPH_UPDATE_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_UPDATE)) +#define GRAPH_DEBUG_ENABLED \ + (is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DEBUG)) +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h new file mode 100644 index 0000000000000000000000000000000000000000..a5bb535c9c6fe708bbbf51625182a725425f1dc8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/jit_opt_limit.h @@ -0,0 +1,39 @@ +#pragma once +#include +#include +#include + +// `TorchScript` offers a simple optimization limit checker +// that can be configured through environment variable `PYTORCH_JIT_OPT_LIMIT`. +// The purpose is to limit how many optimization you can make per pass. +// This is useful for debugging any passes. 
+ +// Opt limit checker is enabled on a per file basis (hence per pass). For +// example, in `constant_propagation.cpp`, `PYTORCH_JIT_OPT_LIMIT` should be set +// to `constant_propagation=` or, simply, to +// `constant_propagation=` where is the number of +// optimizations you want to make for the pass. (i.e. +// `PYTORCH_JIT_OPT_LIMIT="constant_propagation="`). + +// Multiple files can be configured by separating each file name with a colon +// `:` as in the following example, +// `PYTORCH_JIT_OPT_LIMIT="constant_propagation=:dead_code_elimination="` + +// You can call opt limiter by calling JIT_OPT_ALLOWED. It will return true if +// we haven't reached the optimization limit yet. Otherwise, it will return +// false. Typical usage: + +// if (!JIT_OPT_ALLOWED) { +// GRAPH_DUMP(...); //supplied from jit_log +// return; +// } + +namespace torch { +namespace jit { + +TORCH_API bool opt_limit(const char* pass_name); + +#define JIT_OPT_ALLOWED opt_limit(__FILE__) + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h new file mode 100644 index 0000000000000000000000000000000000000000..6c2a2fa64b46a8405793b54a81082cfcb1a1f321 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/resource_guard.h @@ -0,0 +1,27 @@ +#pragma once +#include + +namespace torch { +namespace jit { + +class ResourceGuard { + std::function _destructor; + bool _released{false}; + + public: + ResourceGuard(std::function destructor) + : _destructor(std::move(destructor)) {} + + // NOLINTNEXTLINE(bugprone-exception-escape) + ~ResourceGuard() { + if (!_released) + _destructor(); + } + + void release() { + _released = true; + } +}; + +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/file_check.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/file_check.h new file mode 100644 index 0000000000000000000000000000000000000000..6e9290f5130baffe4c1fbbadb61e80b6c88d46d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/file_check.h @@ -0,0 +1,81 @@ +#pragma once + +#include +#include +#include + +namespace torch { +namespace jit { + +struct Graph; + +namespace testing { + +struct FileCheckImpl; + +struct FileCheck { + public: + TORCH_API explicit FileCheck(); + TORCH_API ~FileCheck(); + + // Run FileCheck against test string + TORCH_API void run(const std::string& test_string); + + // Run FileCheck against dump of graph IR + TORCH_API void run(const Graph& graph); + + // Parsing input checks string and run against test string / dump of graph IR + TORCH_API void run( + const std::string& input_checks_string, + const std::string& test_string); + TORCH_API void run( + const std::string& input_checks_string, + const Graph& graph); + + // Checks that the string occurs, starting at the end of the most recent match + TORCH_API FileCheck* check(const std::string& str); + + // Checks that the string does not occur between the previous match and next + // match. 
Consecutive check_nots test against the same previous match and next + // match + TORCH_API FileCheck* check_not(const std::string& str); + + // Checks that the string occurs on the same line as the previous match + TORCH_API FileCheck* check_same(const std::string& str); + + // Checks that the string occurs on the line immediately following the + // previous match + TORCH_API FileCheck* check_next(const std::string& str); + + // Checks that the string occurs count number of times, starting at the end + // of the previous match. If exactly is true, checks that there are exactly + // count many matches + TORCH_API FileCheck* check_count( + const std::string& str, + size_t count, + bool exactly = false); + + // A series of consecutive check_dags get turned into a group of checks + // which can appear in any order relative to each other. The checks begin + // at the end of the previous match, and the match for the check_dag group + // is the minimum match of all individual checks to the maximum match of all + // individual checks. + TORCH_API FileCheck* check_dag(const std::string& str); + + // Checks that source token is highlighted in str (usually an error message). + TORCH_API FileCheck* check_source_highlighted(const std::string& str); + + // Checks that the regex matched string occurs, starting at the end of the + // most recent match + TORCH_API FileCheck* check_regex(const std::string& str); + + // reset checks + TORCH_API void reset(); + + private: + bool has_run = false; + std::unique_ptr fcImpl; +}; +} // namespace testing +} // namespace jit +} // namespace torch diff --git a/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/hooks_for_testing.h b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/hooks_for_testing.h new file mode 100644 index 0000000000000000000000000000000000000000..108dea3f1f72d79433faf1b9ddb56f54727ac6e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/testing/hooks_for_testing.h @@ -0,0 +1,21 @@ +#pragma once +#include +#include +#include +#include + +namespace torch { +namespace jit { +struct Module; + +using ModuleHook = std::function; +using FunctionHook = std::function; + +TORCH_API void didFinishEmitModule(Module module); +TORCH_API void didFinishEmitFunction(StrongFunctionPtr defined); +TORCH_API void setEmitHooks(ModuleHook for_module, FunctionHook for_fn); + +TORCH_API std::pair getEmitHooks(); + +} // namespace jit +} // namespace torch
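A minimal sketch of the Module API declared in torch/csrc/jit/api/module.h above, assuming a scripted model saved at the placeholder path "model.pt" and a placeholder buffer name; torch::jit::load comes from <torch/script.h> rather than from these headers, and the forward() return type is assumed to be a single tensor.

// sketch_module_usage.cpp (hypothetical file)
#include <torch/script.h>
#include <iostream>
#include <vector>

int main() {
  // Load a scripted module; "model.pt" is a placeholder path.
  torch::jit::Module m = torch::jit::load("model.pt");

  // Buffers are tensor attributes that are not parameters (see register_buffer above).
  m.register_buffer("running_mean", torch::zeros({16}));

  // named_parameters() yields Named<at::Tensor> values with .name and .value.
  for (const auto& p : m.named_parameters(/*recurse=*/true)) {
    std::cout << p.name << ": " << p.value.sizes() << "\n";
  }

  m.to(torch::kCPU, /*non_blocking=*/false);
  m.eval(); // same as train(false)

  std::vector<c10::IValue> inputs;
  inputs.emplace_back(torch::ones({1, 3}));
  c10::IValue out = m.forward(std::move(inputs));
  // Assumes the scripted forward() returns a single tensor.
  std::cout << out.toTensor() << "\n";
  return 0;
}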
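The attribute and method accessors on Object (torch/csrc/jit/api/object.h above) follow the pattern in this sketch; the attribute name "weight" and the method name "forward" are illustrative assumptions.

#include <torch/script.h>

// Sketch only: obj is any torch::jit::Object (Module derives from Object).
void inspect_object(torch::jit::Object& obj, const at::Tensor& x) {
  if (obj.hasattr("weight")) {
    at::Tensor w = obj.attr("weight").toTensor();
    (void)w;
  }

  // The two-argument attr() returns the fallback instead of throwing
  // ObjectAttributeError when the field is missing.
  bool training = obj.attr("training", /*or_else=*/true).toBool();
  (void)training;

  // setattr() type-checks the value against the declared attribute type.
  obj.setattr("training", false);

  // find_method() returns c10::nullopt where get_method() would throw.
  if (auto method = obj.find_method("forward")) {
    c10::IValue out = (*method)({x});
    (void)out;
  }

  // run_method() forwards positional arguments to the named method.
  c10::IValue out2 = obj.run_method("forward", x);
  (void)out2;
}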
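The FusionStrategy comment above describes "a list of pairs (type, depth)"; the sketch below assumes the element layout (FusionBehavior, size_t) and mirrors the [(STATIC, 2), (DYNAMIC, 2)] example from that comment.

#include <torch/csrc/jit/api/module.h>

void configure_fusion() {
  using torch::jit::FusionBehavior;
  // First two specializations use static shapes, the next two dynamic shapes.
  torch::jit::FusionStrategy strategy = {
      {FusionBehavior::STATIC, 2},
      {FusionBehavior::DYNAMIC, 2}};
  // setFusionStrategy returns the strategy that was previously in effect.
  torch::jit::FusionStrategy previous = torch::jit::setFusionStrategy(strategy);
  (void)previous;
}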
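The CUDA fuser toggles in torch/csrc/jit/codegen/cuda/interface.h above are plain setters; a guarded enable might look like the sketch below. The meaning of the singleton/horizontal flags is only inferred from the declarations, not confirmed here.

#include <torch/csrc/jit/codegen/cuda/interface.h>

void enable_cuda_fuser_if_possible() {
  namespace cf = torch::jit::fuser::cuda;
  // canBeEnabled() covers cpu-only builds where no fuser implementation was registered.
  if (cf::canBeEnabled()) {
    cf::setEnabled(true);
    cf::setSingletonFusion(true);
    cf::setHorizontalFusion(true);
  }
}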
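The logging macros documented in jit_log.h above are invoked from inside pass files; my_pass.cpp, MyPass, and the messages below are hypothetical. With this file name, PYTORCH_JIT_LOG_LEVEL=my_pass enables GRAPH_DUMP, >my_pass adds GRAPH_UPDATE, and >>my_pass adds GRAPH_DEBUG.

// my_pass.cpp (hypothetical optimization pass)
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/jit_log.h>

namespace torch::jit {

void MyPass(std::shared_ptr<Graph>& graph) {
  GRAPH_DUMP("Graph before MyPass:\n", graph);
  for (Node* n : graph->nodes()) {
    GRAPH_DEBUG("visiting ", getHeader(n));
    // ... decide whether to rewrite n ...
    GRAPH_UPDATE("MyPass touched ", getHeader(n));
  }
  GRAPH_DUMP("Graph after MyPass:\n", graph);
}

} // namespace torch::jit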
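The opt-limit counter in jit_opt_limit.h above is meant to guard each individual rewrite a pass makes; the sketch below assumes a budget set with something like PYTORCH_JIT_OPT_LIMIT="my_pass=10" (names hypothetical).

#include <torch/csrc/jit/jit_opt_limit.h>

// Returns false once the per-file optimization budget is exhausted.
bool try_one_rewrite(/* pass-specific state */) {
  if (!JIT_OPT_ALLOWED) {
    return false;
  }
  // ... perform exactly one optimization here ...
  return true;
}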
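ResourceGuard (resource_guard.h above) is a scope guard whose callback runs on destruction unless release() was called; the cleanup action below is a stand-in.

#include <torch/csrc/jit/resource_guard.h>
#include <cstdio>

void guarded_work(bool commit) {
  torch::jit::ResourceGuard guard([] { std::puts("cleaning up"); });
  // ... do work that may bail out early ...
  if (commit) {
    guard.release(); // success path: skip the cleanup callback
  }
} // cleanup runs here unless release() was called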
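FileCheck (testing/file_check.h above) chains individual checks and then runs them against a test string such as a graph dump; the operator names and counts in this sketch are illustrative.

#include <torch/csrc/jit/testing/file_check.h>
#include <string>

void check_ir_dump(const std::string& ir) {
  // check() returns FileCheck*, so subsequent checks chain with ->.
  torch::jit::testing::FileCheck()
      .check("graph(")
      ->check_count("aten::add", 2, /*exactly=*/true)
      ->check_not("aten::mul")
      ->run(ir);
}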