diff --git a/ckpts/universal/global_step20/zero/12.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/12.input_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8000b4a6d2fce8a3a0d35b293626ae6e1c96d8c3
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/12.input_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0abac3af3bd5e0d3bf7e569ca87f6b1d84f601bb3d64343ca748a231fb1f76e9
+size 9372
diff --git a/ckpts/universal/global_step20/zero/12.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/12.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dbfa3d19a15e23dd35ced019936f410a60836d3c
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/12.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29e20260912a2ea7d110c70e69f20390553feca9c8eb6090ac3be46bf9ab9bbd
+size 9387
diff --git a/ckpts/universal/global_step20/zero/12.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step20/zero/12.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..76582c2380d03600dd39474bef1b2ef634883a13
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/12.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38682e5e3bc76ffe85a0419dfb1a3b8c4e254d5f06e4a4b2ea9fb622f49d4ba5
+size 9293
diff --git a/ckpts/universal/global_step20/zero/22.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/22.attention.dense.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..4d14b9d7feaaf14cc68070898e90e299c9e921d6
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/22.attention.dense.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d51daa950fd70487fbd4cdd10b03c07c64f4fec158460ad74120e2f34ba0c3c2
+size 16778396
diff --git a/ckpts/universal/global_step20/zero/3.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/3.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1fd9bdf54aea64d48fe99b1e0859fdbcd3d45a7d
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/3.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0c25f26f5b4a5911186d11f947b1718d458097486f455c2c0a63bb8f44e541f
+size 50332828
diff --git a/ckpts/universal/global_step20/zero/3.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/3.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..7a4b0d34e42d9aceed6d3d067e09678bae9e2e92
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/3.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee153a4ab4599f1b89b1bf5f5a1177d634d2233d22cbef1dadbd431c0a460fe5
+size 50332843
diff --git a/ckpts/universal/global_step20/zero/3.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/3.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c50f84f29d2c7ded1431316e6f8eb665ed2a5e23
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/3.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:025324e63c46fe1da086d38f34b28cd0616d732ba7ea8defd309b2c28b0b2133
+size 50332749
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8a192cb8630f67a4120da9443dccc94c583b924b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API void _backward(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double.h
new file mode 100644
index 0000000000000000000000000000000000000000..d80d2043e36bb2d2bf3743b4f5cacf5ff91a263a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cast_Double_ops.h>
+
+namespace at {
+
+
+// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
+inline at::Tensor _cast_Double(const at::Tensor & self, bool non_blocking=false) {
+    return at::_ops::_cast_Double::call(self, non_blocking);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Half_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Half_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..132bd5e78096e4554637fac3d224db9cc8543ce2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Half_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cast_Half {
+  using schema = at::Tensor (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cast_Half")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cast_Half(Tensor self, bool non_blocking=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, bool non_blocking);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1484d3c8994ae797f5d4f08837aea71c6acb3e4a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cast_Short {
+  using schema = at::Tensor (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cast_Short")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cast_Short(Tensor self, bool non_blocking=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, bool non_blocking);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9493e246b43e591bcb67d6e08f70f8b65c3b1d4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_conj_physical_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _conj_physical(const at::Tensor & self);
+TORCH_API at::Tensor & _conj_physical_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & _conj_physical_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..09afc16ce0538670c04ecd57cfb09b08860be11a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_tensor_affine_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _fake_quantize_learnable_per_tensor_affine_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor=1.0);
+TORCH_API at::Tensor & _fake_quantize_learnable_per_tensor_affine_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f71997c9aef457fa9e10092503bef07a71794f6b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fill_mem_eff_dropout_mask_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & _fill_mem_eff_dropout_mask_(at::Tensor & self, double dropout_p, int64_t seed, int64_t offset);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f85c7d11c55a65cccaf4da98a4e7b102f1aaaa4c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_pow_out(at::TensorList out, at::TensorList self, at::TensorList exponent);
+TORCH_API void _foreach_pow_outf(at::TensorList self, at::TensorList exponent, at::TensorList out);
+TORCH_API void _foreach_pow_out(at::TensorList out, at::TensorList self, const at::Scalar & exponent);
+TORCH_API void _foreach_pow_outf(at::TensorList self, const at::Scalar & exponent, at::TensorList out);
+TORCH_API void _foreach_pow_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API void _foreach_pow_outf(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0fa8eea8b7dd3291a53486cffbce29b8e651c56b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_round {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_round")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_round(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_round_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_round_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_round_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_round_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_round")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_round.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b86b9b93db45e8d9b69718734ad2e9316a01ea35
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_functional_sym_constrain_range_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _functional_sym_constrain_range(const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max, const at::Tensor & dep_token);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..432ed1ae5599fd1890ec139ee8e5a3960da38781
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_gather_sparse_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _gather_sparse_backward(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lazy_clone_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lazy_clone_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..02dd3d04941a3aec22c40cb05ac0ebfdad6d7bd3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_lazy_clone_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _lazy_clone(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigvals_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigvals_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d42b44f20020efaf4a15c9c86bf855425bcecda9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigvals_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _linalg_eigvals(const at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..9ee2859035558ec0c0ea349e0704c3323650fe81
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/_linalg_svd_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured__linalg_svd_out : public at::meta::structured__linalg_svd {
+void impl(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver, const at::Tensor & U, const at::Tensor & S, const at::Tensor & Vh);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..efa99b6c547cc1e76a759b9b33c0f9af5ab6a5eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _make_per_tensor_quantized_tensor_out(at::Tensor & out, const at::Tensor & self, double scale, int64_t zero_point);
+TORCH_API at::Tensor & _make_per_tensor_quantized_tensor_outf(const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..41a7ab61a19e3f7ca833b8cc82f9a29a7b1d7d8c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_masked_softmax_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _masked_softmax(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> mask_type=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b98006c864ae7010ba91a6335bd02fab78a6c208
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _standard_gamma_grad(const at::Tensor & self, const at::Tensor & output);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..653ad92194c947f3a25a11c27292ef3f2d74fa95
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_check_tensor.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_test_check_tensor_ops.h>
+
+namespace at {
+
+
+// aten::_test_check_tensor(Tensor self) -> Tensor
+inline at::Tensor _test_check_tensor(const at::Tensor & self) {
+    return at::_ops::_test_check_tensor::call(self);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_intlist_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_intlist_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f3ed4f5b973c1456d314a72d2ef2e758d8cad526
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_intlist_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _test_optional_intlist {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_optional_intlist")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_optional_intlist(Tensor values, int[]? addends) -> Tensor")
addends) -> Tensor") + static at::Tensor call(const at::Tensor & values, at::OptionalIntArrayRef addends); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends); +}; + +struct TORCH_API _test_optional_intlist_out { + using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_optional_intlist") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_optional_intlist.out(Tensor values, int[]? addends, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & values, at::OptionalIntArrayRef addends, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4762e40a77d0ed5490ed08d5f0cee02944f318cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API _transformer_encoder_layer_fwd { + using schema = at::Tensor (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_transformer_encoder_layer_fwd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? 
mask_type=None) -> Tensor") + static at::Tensor call(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type); +}; + +struct TORCH_API _transformer_encoder_layer_fwd_out { + using schema = at::Tensor & (const at::Tensor &, int64_t, int64_t, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, bool, double, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional &, c10::optional, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_transformer_encoder_layer_fwd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional & mask, c10::optional mask_type, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..55fab16fb10461c6962cc1f107f790d2ffb20002 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_bilinear2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arcsinh_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arcsinh_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e2987cc5ce81e8d6764dfb6fa3951b257eaeed9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arcsinh_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor arcsinh(const at::Tensor & self);
+TORCH_API at::Tensor & arcsinh_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & arcsinh_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & arcsinh_(at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..250445f32634ea6c8fe06505f34896dd927f307b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & argsort_out(at::Tensor & out, const at::Tensor & self, bool stable, int64_t dim=-1, bool descending=false);
+TORCH_API at::Tensor & argsort_outf(const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asin_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asin_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..be29d19a4bc4d0a7966a9a160e69121d88ee8c80
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asin_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor asin(const at::Tensor & self);
+TORCH_API at::Tensor & asin_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & asin_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & asin_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bd96863997d2b24756dbb309809e0d503347229a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor asinh(const at::Tensor & self);
+TORCH_API at::Tensor & asinh_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6332bfdc50e0112e2184f7f02da88f1b15b2f284
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor atan(const at::Tensor & self);
+TORCH_API at::Tensor & atan_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & atan_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & atan_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..15980402c596e8e08b9e62380958c4ed42c6397b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,29 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h

+// NB: The implementing C++ file is RegisterDispatchKey.cpp

+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor bitwise_and(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & bitwise_and_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & bitwise_and_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_and_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor bitwise_and(const at::Scalar & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_and_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_and_outf(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..573ecde04db05f34f4b683bbc70652239a636ebf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ccol_indices_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor ccol_indices_default(const at::Tensor & self);
+TORCH_API at::Tensor ccol_indices_sparse_csr(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm.h
new file mode 100644
index 0000000000000000000000000000000000000000..6344507093cc8f2b70a9b8df9be939f5951b9abb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cudnn_batch_norm_ops.h>
+
+namespace at {
+
+
+// aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
+    return at::_ops::cudnn_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
+}
+
+// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
+    return at::_ops::cudnn_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
+}
+// aten::cudnn_batch_norm.out(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!))
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3) {
+    return at::_ops::cudnn_batch_norm_out::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, out0, out1, out2, out3);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2270c63e5d028429a7bb03fb3db8717d129ebae8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/cumprod_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_cumprod_out : public at::meta::structured_cumprod {
+void impl(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, const at::Tensor & out);
+};
+TORCH_API at::Tensor cumprod(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & cumprod_out(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor & cumprod_(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum.h
new file mode 100644
index 0000000000000000000000000000000000000000..9844db25d7eba89e6c2d160bd5b297b2e8f14582
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cumsum.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cumsum_ops.h>
+
+namespace at {
+
+
+// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor cumsum(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::cumsum::call(self, dim, dtype);
+}
+
+// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::cumsum_out::call(self, dim, dtype, out);
+}
+// aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cumsum_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::cumsum_out::call(self, dim, dtype, out);
+}
+
+// aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+inline at::Tensor cumsum(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::cumsum_dimname::call(self, dim, dtype);
+}
+
+// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cumsum_out(at::Tensor & out, const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype=c10::nullopt) {
+    return at::_ops::cumsum_dimname_out::call(self, dim, dtype, out);
+}
+// aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cumsum_outf(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype, at::Tensor & out) {
+    return at::_ops::cumsum_dimname_out::call(self, dim, dtype, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout.h
new file mode 100644
index 0000000000000000000000000000000000000000..d19cade9aa98321e927921dcdfe796a9d8633b77
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/feature_dropout.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/feature_dropout_ops.h>
+
+namespace at {
+
+
+// aten::feature_dropout(Tensor input, float p, bool train) -> Tensor
+inline at::Tensor feature_dropout(const at::Tensor & input, double p, bool train) {
+    return at::_ops::feature_dropout::call(input, p, train);
+}
+
+// aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)
+inline at::Tensor & feature_dropout_(at::Tensor & self, double p, bool train) {
+    return at::_ops::feature_dropout_::call(self, p, train);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft2.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft2.h
new file mode 100644
index 0000000000000000000000000000000000000000..79355ce82797f98045df7f70d9e3ff5c4d15d2d4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft2.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fft_fft2_ops.h>
+
+namespace at {
+
+
+// aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+inline at::Tensor fft_fft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_fft2::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor fft_fft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_fft2::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm);
+  }
+}
+
+// aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+inline at::Tensor fft_fft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_fft2::call(self, s, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor fft_fft2(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_fft2::call(self, s, dim, norm);
+  }
+}
+
+// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_fft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_fft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_fft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_fft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_fft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_fft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_fft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_fft2_out::call(self, s.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*s)) : c10::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_fft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_fft2_out::call(self, s, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & fft_fft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_fft2_out::call(self, s, dim, norm, out);
+  }
+}
+
+// aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_fft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_fft2_out::call(self, s, dim, norm, out); +} +namespace symint { + template ::value>> + at::Tensor & fft_fft2_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional norm, at::Tensor & out) { + return at::_ops::fft_fft2_out::call(self, s, dim, norm, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftshift_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftshift_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8f11e05a9703ae95351c0465a1e084c405986e8a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftshift_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor fft_ifftshift(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9332df138892de6340f2da918ec6683dae7eef08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & fill_(at::Tensor & self, const at::Scalar & value); +TORCH_API at::Tensor & fill_(at::Tensor & self, const at::Tensor & value); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..863d512967a0892eae10b78674052b3d856c32ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
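+
+// Editor's note: dispatch-key headers such as fill_cuda_dispatch.h above expose
+// one backend's kernel directly; at::cuda::fill_ assumes the tensor already
+// lives on a CUDA device, whereas Tensor::fill_ dispatches on the tensor's
+// device. Hedged sketch:
+//
+//   at::Tensor t = at::zeros({3}, at::TensorOptions().device(at::kCUDA));
+//   at::cuda::fill_(t, 1);   // backend-pinned entry point
+//   t.fill_(2);              // ordinary dispatched call
+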
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fliplr(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide.h new file mode 100644 index 0000000000000000000000000000000000000000..ae6b9f994a1355ad3e3989e4d2427235ed8fb665 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::floor_divide(Tensor self, Tensor other) -> Tensor +inline at::Tensor floor_divide(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::floor_divide::call(self, other); +} + +// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::floor_divide_out::call(self, other, out); +} +// aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::floor_divide_out::call(self, other, out); +} + +// aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor floor_divide(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::floor_divide_Scalar::call(self, other); +} + +// aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & floor_divide_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::floor_divide_Scalar_out::call(self, other, out); +} +// aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & floor_divide_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::floor_divide_Scalar_out::call(self, other, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod.h new file mode 100644 index 0000000000000000000000000000000000000000..1c240eb517ba14d169acef3b2916cc62c4e816ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::fmod_Scalar_out::call(self, other, out); +} +// aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
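+
+// Editor's note: throughout these headers the `_out` wrapper takes `out` first
+// (with defaulted trailing arguments) while `_outf` keeps the schema order with
+// `out` last and no defaults; both forward to the same at::_ops::...::call.
+// Illustrative, with made-up operands, using the floor_divide.h overloads above:
+//
+//   at::Tensor a   = at::arange(10);
+//   at::Tensor b   = at::full({10}, 3);
+//   at::Tensor out = at::empty_like(a);
+//   at::floor_divide_out(out, a, b);    // out-first form
+//   at::floor_divide_outf(a, b, out);   // schema-order form, same kernel
+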
+inline at::Tensor & fmod_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::fmod_Scalar_out::call(self, other, out); +} + +// aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor fmod(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::fmod_Scalar::call(self, other); +} + +// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmod_Tensor_out::call(self, other, out); +} +// aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & fmod_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::fmod_Tensor_out::call(self, other, out); +} + +// aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor fmod(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::fmod_Tensor::call(self, other); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_jvp_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_jvp_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ca9787cfdd0220309bfbe2d4ea679a6803440a94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/glu_jvp_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a07f99cb9739013998d3b32bd32c8a6429375006 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gru_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
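+
+// Editor's note: fmod (fmod.h above) has Scalar and Tensor right-hand-side
+// overloads; the result keeps the dividend's sign. Illustrative sketch:
+//
+//   at::Tensor x  = at::arange(-5, 6);                  // -5 .. 5
+//   at::Tensor r1 = at::fmod(x, 3);                     // fmod.Scalar
+//   at::Tensor r2 = at::fmod(x, at::full_like(x, 3));   // fmod.Tensor
+//   // e.g. fmod(-5, 3) == -2, unlike Python's -5 % 3 == 1
+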
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> gru(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5b243a7a623184634d1aad1da0af8b690c70521a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor hardsigmoid(const at::Tensor & self);
+TORCH_API at::Tensor & hardsigmoid_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & hardsigmoid_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & hardsigmoid_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d4793bce95907be7bab651ba176c568b5ed02e1e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
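+
+// Editor's note: two gru overloads are generated above — one taking a padded
+// input plus a batch_first flag, and a PackedSequence-style one taking (data,
+// batch_sizes) with no batch_first. Both return (output, hidden). Hedged
+// sketch; shapes and parameter values are illustrative:
+//
+//   // input: (seq_len, batch, input_size), hx: (num_layers, batch, hidden)
+//   // auto [output, h_n] = at::gru(input, hx, params, /*has_biases=*/true,
+//   //     /*num_layers=*/1, /*dropout=*/0.0, /*train=*/false,
+//   //     /*bidirectional=*/false, /*batch_first=*/false);
+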
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor hardswish(const at::Tensor & self); +TORCH_API at::Tensor & hardswish_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & hardswish_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & hardswish_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..817635950ce5369895f887983a91bf17e933e453 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_reduce_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor index_reduce(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); +TORCH_API at::Tensor & index_reduce_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self, at::Tensor & out); +TORCH_API at::Tensor & index_reduce_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self=true); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..cdf22795b74ea5f92f7e7537359496892c31350d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_complex_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
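+
+// Editor's note: a small worked example for the index_reduce overloads above;
+// values are illustrative. With include_self=true (the default) the existing
+// self values take part in the reduction:
+//
+//   at::Tensor self  = at::ones({4});
+//   at::Tensor index = at::zeros({2}, at::kLong);   // both writes hit row 0
+//   at::Tensor src   = at::full({2}, 3.0);
+//   at::Tensor out   = at::index_reduce(self, 0, index, src, "prod");
+//   // out[0] == 1 * 3 * 3 == 9; the other rows stay 1
+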
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API is_complex {
+  using schema = bool (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::is_complex")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "is_complex(Tensor self) -> bool")
+  static bool call(const at::Tensor & self);
+  static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu.h
new file mode 100644
index 0000000000000000000000000000000000000000..37d9dcb61c05256cd3f3b7c47221432ece056606
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/linalg_lu_ops.h>
+
+namespace at {
+
+
+// aten::linalg_lu(Tensor A, *, bool pivot=True) -> (Tensor P, Tensor L, Tensor U)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu(const at::Tensor & A, bool pivot=true) {
+    return at::_ops::linalg_lu::call(A, pivot);
+}
+
+// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_out(at::Tensor & P, at::Tensor & L, at::Tensor & U, const at::Tensor & A, bool pivot=true) {
+    return at::_ops::linalg_lu_out::call(A, pivot, P, L, U);
+}
+// aten::linalg_lu.out(Tensor A, *, bool pivot=True, Tensor(a!) P, Tensor(b!) L, Tensor(c!) U) -> (Tensor(a!) P, Tensor(b!) L, Tensor(c!) U)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> linalg_lu_outf(const at::Tensor & A, bool pivot, at::Tensor & P, at::Tensor & L, at::Tensor & U) {
+    return at::_ops::linalg_lu_out::call(A, pivot, P, L, U);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/msort.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/msort.h
new file mode 100644
index 0000000000000000000000000000000000000000..03d3ac87a03f1c5d317f02a676c4878dbff2dfb0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/msort.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/msort_ops.h>
+
+namespace at {
+
+
+// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & msort_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::msort_out::call(self, out);
+}
+// aten::msort.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
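+
+// Editor's note: usage sketch for the linalg_lu overloads above (P·L·U
+// factorization with partial pivoting); A is an illustrative operand:
+//
+//   at::Tensor A = at::randn({3, 3});
+//   auto [P, L, U] = at::linalg_lu(A);   // pivot=true by default
+//   // A ≈ P.matmul(L).matmul(U)
+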
+inline at::Tensor & msort_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::msort_out::call(self, out); +} + +// aten::msort(Tensor self) -> Tensor +inline at::Tensor msort(const at::Tensor & self) { + return at::_ops::msort::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_nd_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_nd_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fc9f643c96938040defbb326562e0a7eb8896297 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss_nd_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor nll_loss_nd(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, int64_t ignore_index=-100); +TORCH_API at::Tensor nll_loss_nd_symint(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight={}, int64_t reduction=at::Reduction::Mean, c10::SymInt ignore_index=-100); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..4f4ca49085d84f749d7f355dbecae0e5c0445500 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
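+
+// Editor's note: nll_loss_nd above defaults to weight={}, mean reduction and
+// ignore_index=-100; the _symint variant differs only in taking ignore_index
+// as c10::SymInt. Illustrative call:
+//
+//   at::Tensor logp   = at::log_softmax(at::randn({2, 5}), 1);
+//   at::Tensor target = at::zeros({2}, at::kLong);
+//   at::Tensor loss   = at::nll_loss_nd(logp, target);
+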
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API orgqr { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::orgqr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "orgqr(Tensor self, Tensor input2) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & input2); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2); +}; + +struct TORCH_API orgqr_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::orgqr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input2, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..dc512560868370538efd0470ddc264d57999d94c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
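+
+// Editor's note: each *_ops.h struct (orgqr above is typical) pins down one
+// overload: its schema string plus two entry points — call() goes through the
+// full dispatcher, while redispatch() takes an explicit DispatchKeySet and is
+// used when a kernel re-invokes the operator. So the public wrapper reduces to:
+//
+//   at::Tensor q = at::_ops::orgqr::call(self, input2);  // == at::orgqr(self, input2)
+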
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor permute_copy(const at::Tensor & self, at::IntArrayRef dims); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a50988f59dd2ad87153f3fc790b75643bbceb80d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/poisson_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & poisson_out(const at::Tensor & self, c10::optional generator, at::Tensor & out); +TORCH_API at::Tensor _s_poisson_cpu(const at::Tensor & self, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor _s_poisson_cuda(const at::Tensor & self, c10::optional generator=c10::nullopt); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..50530de9e91b79bd05386ab4d3a73756a75fbf9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor prod(const at::Tensor & self, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & prod_outf(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional dtype, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2a4f1bf5d6200bc681a9812a4b05ef314b998052 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/prod_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor prod(const at::Tensor & self, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor prod(const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & prod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool keepdim=false, c10::optional dtype=c10::nullopt); +TORCH_API at::Tensor & prod_outf(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional dtype, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn.h new file mode 100644 index 0000000000000000000000000000000000000000..022b3ab13c5d49086d1c20521712f8ddf650ef84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn.h @@ -0,0 +1,377 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}) { + return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory); + } +} + +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::randn::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, at::TensorOptions options={}) { + return at::_ops::randn::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn::call(size, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn::call(size, dtype, layout, device, pin_memory); + } +} + +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator::call(c10::fromIntArrayRefSlow(size), generator, dtype, layout, device, pin_memory); + } +} + +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::randn_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options={}) { + return at::_ops::randn_generator::call(size, generator, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator::call(size, generator, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator::call(size, generator, dtype, layout, device, pin_memory); + } +} + +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_names::call(c10::fromIntArrayRefSlow(size), names, dtype, layout, device, pin_memory); + } +} + +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_names::call(size, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_names::call(size, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_names::call(size, names, dtype, layout, device, pin_memory); + } +} + +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn(at::IntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(at::IntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator_with_names::call(c10::fromIntArrayRefSlow(size), generator, names, dtype, layout, device, pin_memory); + } +} + +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}) { + return at::_ops::randn_generator_with_names::call(size, generator, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); + } +} + +// aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor +inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory); +} +namespace symint { + template ::value>> + at::Tensor randn(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::randn_generator_with_names::call(size, generator, names, dtype, layout, device, pin_memory); + } +} + +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) { + return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size) { + return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out); + } +} + +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) { + return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out) { + return at::_ops::randn_out::call(c10::fromIntArrayRefSlow(size), out); + } +} + +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::randn_out::call(size, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size) { + return at::_ops::randn_out::call(size, out); + } +} + +// aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::randn_out::call(size, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(c10::SymIntArrayRef size, at::Tensor & out) { + return at::_ops::randn_out::call(size, out); + } +} + +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional generator) { + return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional generator) { + return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); + } +} + +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_outf(at::IntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(at::IntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::randn_generator_out::call(c10::fromIntArrayRefSlow(size), generator, out); + } +} + +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) 
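+
+// Editor's note: every randn overload above comes in a packed TensorOptions
+// form and an unpacked (dtype, layout, device, pin_memory) form that the
+// dispatcher actually sees; the wrapper only splits the options. Sketch:
+//
+//   at::Tensor a = at::randn({2, 3}, at::TensorOptions().dtype(at::kFloat));
+//   at::Tensor b = at::randn({2, 3}, at::kFloat, c10::nullopt, c10::nullopt,
+//                            c10::nullopt);   // same call, unpacked
+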
+inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator) { + return at::_ops::randn_generator_out::call(size, generator, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator) { + return at::_ops::randn_generator_out::call(size, generator, out); + } +} + +// aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::randn_generator_out::call(size, generator, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out) { + return at::_ops::randn_generator_out::call(size, generator, out); + } +} + +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional names) { + return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional names) { + return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out); + } +} + +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_outf(at::IntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(at::IntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::randn_names_out::call(c10::fromIntArrayRefSlow(size), names, out); + } +} + +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional names) { + return at::_ops::randn_names_out::call(size, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional names) { + return at::_ops::randn_names_out::call(size, names, out); + } +} + +// aten::randn.names_out(SymInt[] size, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::randn_names_out::call(size, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(c10::SymIntArrayRef size, c10::optional names, at::Tensor & out) { + return at::_ops::randn_names_out::call(size, names, out); + } +} + +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); + } +} + +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? 
names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_outf(at::IntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(at::IntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::randn_generator_with_names_out::call(c10::fromIntArrayRefSlow(size), generator, names, out); + } +} + +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::randn_generator_with_names_out::call(size, generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator, c10::optional names) { + return at::_ops::randn_generator_with_names_out::call(size, generator, names, out); + } +} + +// aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, Dimname[]? names, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::randn_generator_with_names_out::call(size, generator, names, out); +} +namespace symint { + template ::value>> + at::Tensor & randn_outf(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out) { + return at::_ops::randn_generator_with_names_out::call(size, generator, names, out); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/resize.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/resize.h new file mode 100644 index 0000000000000000000000000000000000000000..2121dc2824a1674c7ed70e9c007d093c0ac59f3f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/resize.h @@ -0,0 +1,105 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +namespace symint { + template ::value>> + const at::Tensor & resize_(const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_::call(self, c10::fromIntArrayRefSlow(size), memory_format); + } +} + +namespace symint { + template ::value>> + const at::Tensor & resize_(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_::call(self, size, memory_format); + } +} + +// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) 
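+
+// Editor's note: the *_symint variants in resize.h (and randn.h above) accept
+// c10::SymIntArrayRef so sizes can stay symbolic under tracing/compilation;
+// the plain overloads convert concrete ints via c10::fromIntArrayRefSlow, and
+// the at::symint:: templates select between the two by type. Sketch:
+//
+//   std::vector<c10::SymInt> s = {c10::SymInt(4), c10::SymInt(4)};
+//   at::Tensor r = at::randn_symint(c10::SymIntArrayRef(s));
+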
+inline const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out); +} +namespace symint { + template ::value>> + const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out); + } +} + +// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline const at::Tensor & resize_outf(const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format, const at::Tensor & out) { + return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out); +} +namespace symint { + template ::value>> + const at::Tensor & resize_outf(const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format, const at::Tensor & out) { + return at::_ops::resize_out::call(self, c10::fromIntArrayRefSlow(size), memory_format, out); + } +} + +// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline const at::Tensor & resize_symint_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_out::call(self, size, memory_format, out); +} +namespace symint { + template ::value>> + const at::Tensor & resize_out(const at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize_out::call(self, size, memory_format, out); + } +} + +// aten::resize.out(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) +inline const at::Tensor & resize_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format, const at::Tensor & out) { + return at::_ops::resize_out::call(self, size, memory_format, out); +} +namespace symint { + template ::value>> + const at::Tensor & resize_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format, const at::Tensor & out) { + return at::_ops::resize_out::call(self, size, memory_format, out); + } +} + +// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor +inline at::Tensor resize(const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize::call(self, c10::fromIntArrayRefSlow(size), memory_format); +} +namespace symint { + template ::value>> + at::Tensor resize(const at::Tensor & self, at::IntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize::call(self, c10::fromIntArrayRefSlow(size), memory_format); + } +} + +// aten::resize(Tensor self, SymInt[] size, *, MemoryFormat? 
memory_format=None) -> Tensor +inline at::Tensor resize_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize::call(self, size, memory_format); +} +namespace symint { + template ::value>> + at::Tensor resize(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional memory_format=c10::nullopt) { + return at::_ops::resize::call(self, size, memory_format); + } +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1a063d7c0c068973c71c079ebc9d2d12a3fe8638 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu_with_noise_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor rrelu_with_noise(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & rrelu_with_noise_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & rrelu_with_noise_outf(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional generator, at::Tensor & out); +TORCH_API at::Tensor & rrelu_with_noise_(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional generator=c10::nullopt); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/selu_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/selu_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d532d93326c135de5ea32552a65fe9c7df859f1a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/selu_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor selu(const at::Tensor & self); +TORCH_API at::Tensor & selu_(at::Tensor & self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_native.h new file mode 100644 index 0000000000000000000000000000000000000000..dfa6cfdf7cadb90008629d9087f4b29bc9901072 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by 
torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_sigmoid_out : public at::meta::structured_sigmoid { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +TORCH_API at::Tensor mkldnn_sigmoid(const at::Tensor & self); +TORCH_API at::Tensor & mkldnn_sigmoid_(at::Tensor & self); +TORCH_API at::Tensor sigmoid_quantized_cpu(const at::Tensor & self); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu.h new file mode 100644 index 0000000000000000000000000000000000000000..6aa8859693a9f6f9ac9dd89754ee19abffddae6a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/silu.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::silu(Tensor self) -> Tensor +inline at::Tensor silu(const at::Tensor & self) { + return at::_ops::silu::call(self); +} + +// aten::silu_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & silu_(at::Tensor & self) { + return at::_ops::silu_::call(self); +} + +// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & silu_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::silu_out::call(self, out); +} +// aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & silu_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::silu_out::call(self, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c72ef4a9af53948eed675b5b770724c65b3c9326 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
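+
+// Editor's note: silu.h above wraps x * sigmoid(x); a quick sketch of the
+// functional, in-place, and out-variant forms with an illustrative tensor:
+//
+//   at::Tensor x = at::randn({4});
+//   at::Tensor y = at::silu(x);          // functional
+//   at::silu_(x);                        // in-place
+//   at::Tensor out = at::empty_like(x);
+//   at::silu_out(out, x);                // out-variant
+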
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c72ef4a9af53948eed675b5b770724c65b3c9326
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv_transpose2d_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor slow_conv_transpose2d(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1);
+TORCH_API at::Tensor slow_conv_transpose2d_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1));
+TORCH_API at::Tensor & slow_conv_transpose2d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef output_padding=0, at::IntArrayRef dilation=1);
+TORCH_API at::Tensor & slow_conv_transpose2d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef output_padding, at::IntArrayRef dilation, at::Tensor & out);
+TORCH_API at::Tensor & slow_conv_transpose2d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1));
+TORCH_API at::Tensor & slow_conv_transpose2d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
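The meta:: declarations above carry only shape inference, which is exactly what meta tensors exercise. A hedged sketch of that use, illustrative only; the expected sizes follow the standard transposed-convolution formula:

#include <ATen/ATen.h>

int main() {
  // Meta tensors carry shape/dtype but no data, so only the meta kernel runs.
  at::Tensor x = at::empty({1, 2, 8, 8}, at::TensorOptions().device(at::kMeta));
  at::Tensor w = at::empty({2, 3, 3, 3}, at::TensorOptions().device(at::kMeta));
  at::Tensor y = at::slow_conv_transpose2d(x, w, {3, 3});
  // y.sizes() == [1, 3, 10, 10]: (8 - 1)*1 - 2*0 + 1*(3 - 1) + 0 + 1 = 10
  return 0;
}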
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_bsr_tensor.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_bsr_tensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..aeb30ab20dfe00435f163da972cf36985f99df99
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_bsr_tensor.h
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/sparse_bsr_tensor_ops.h>
+
+namespace at {
+
+
+// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
+    return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::sparse_bsr_tensor.crow_col_value_size(Tensor crow_indices, Tensor col_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
+}
+
+// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
+    return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::sparse_bsr_tensor.crow_col_value(Tensor crow_indices, Tensor col_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
+}
+
+}
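sparse_bsr_tensor.h exposes both a packed at::TensorOptions overload and an unpacked dtype/layout/device/pin_memory overload of the same factory. A small sketch of the options form, illustrative only: a 2x2 matrix stored as 1x1 blocks, with one block per block row.

#include <ATen/ATen.h>

int main() {
  at::Tensor crow = at::arange(3, at::kLong);  // [0, 1, 2] block-row pointers
  at::Tensor col  = at::arange(2, at::kLong);  // [0, 1] block-column indices
  at::Tensor vals = at::ones({2, 1, 1});       // (nnz, blockH, blockW)
  at::Tensor bsr  = at::sparse_bsr_tensor(crow, col, vals, {2, 2},
                                          at::TensorOptions().dtype(at::kFloat));
  return 0;
}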
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..312e2cc9753e03daf126579019b7173ca6ce4fee
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_bessel_j0 {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_bessel_j0")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_bessel_j0(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API special_bessel_j0_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_bessel_j0")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_bessel_j0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e.h
new file mode 100644
index 0000000000000000000000000000000000000000..aa99d55256c416c3f830bb784ada351a3e0e9008
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_i0e.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/special_i0e_ops.h>
+
+namespace at {
+
+
+// aten::special_i0e(Tensor self) -> Tensor
+inline at::Tensor special_i0e(const at::Tensor & self) {
+    return at::_ops::special_i0e::call(self);
+}
+
+// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_i0e_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::special_i0e_out::call(self, out);
+}
+// aten::special_i0e.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_i0e_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::special_i0e_out::call(self, out);
+}
+
+}
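Each _ops struct above pins down an operator's name, overload, schema string, and call/redispatch entry points; the user-facing wrapper in special_i0e.h is a thin forwarder onto them. A sketch of that equivalence, illustrative only:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({3});
  at::Tensor a = at::special_i0e(x);              // convenience wrapper
  at::Tensor b = at::_ops::special_i0e::call(x);  // the call() it forwards to
  return 0;
}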
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..72d601723e994852ecb2c272013519ae272b1ff5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log_ndtr_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_log_ndtr(const at::Tensor & self);
+TORCH_API at::Tensor & special_log_ndtr_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_log_ndtr_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..11f2845cc2cc8a7c627d0af04691fe4411068660
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/split_with_sizes_copy.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/split_with_sizes_copy_ops.h>
+
+namespace at {
+
+
+// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
+inline ::std::vector<at::Tensor> split_with_sizes_copy(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes_copy::call(self, c10::fromIntArrayRefSlow(split_sizes), dim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  ::std::vector<at::Tensor> split_with_sizes_copy(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes_copy::call(self, c10::fromIntArrayRefSlow(split_sizes), dim);
+  }
+}
+
+// aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]
+inline ::std::vector<at::Tensor> split_with_sizes_copy_symint(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  ::std::vector<at::Tensor> split_with_sizes_copy(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim);
+  }
+}
+
+// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+inline void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
+  }
+}
+
+// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+inline void split_with_sizes_copy_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
+    return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  void split_with_sizes_copy_outf(const at::Tensor & self, at::IntArrayRef split_sizes, int64_t dim, at::TensorList out) {
+    return at::_ops::split_with_sizes_copy_out::call(self, c10::fromIntArrayRefSlow(split_sizes), dim, out);
+  }
+}
+
+// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+inline void split_with_sizes_copy_symint_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  void split_with_sizes_copy_out(at::TensorList out, const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim=0) {
+    return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
+  }
+}
+
+// aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()
+inline void split_with_sizes_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
+    return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  void split_with_sizes_copy_outf(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
+    return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
+  }
+}
+
+}
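split_with_sizes_copy.h declares parallel at::IntArrayRef and c10::SymIntArrayRef entry points, plus symint:: templates so generic code can select one by index type. Plain usage, as an illustrative sketch:

#include <ATen/ATen.h>
#include <vector>

int main() {
  at::Tensor x = at::arange(10);
  // The IntArrayRef overload; split sizes must sum to the length of dim 0.
  std::vector<at::Tensor> parts = at::split_with_sizes_copy(x, {4, 6}, 0);
  // parts[0].numel() == 4, parts[1].numel() == 6, each an independent copy
  return 0;
}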
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sub.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sub.h
new file mode 100644
index 0000000000000000000000000000000000000000..d3486e8c826ec995bdce6b7586e1e2e426061b03
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sub.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/sub_ops.h>
+
+namespace at {
+
+
+// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::sub_out::call(self, other, alpha, out);
+}
+// aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sub_outf(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha, at::Tensor & out) {
+    return at::_ops::sub_out::call(self, other, alpha, out);
+}
+
+// aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
+inline at::Tensor sub(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1) {
+    return at::_ops::sub_Tensor::call(self, other, alpha);
+}
+
+// aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor
+inline at::Tensor sub(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
+    return at::_ops::sub_Scalar::call(self, other, alpha);
+}
+
+// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sub_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha=1) {
+    return at::_ops::sub_Scalar_out::call(self, other, alpha, out);
+}
+// aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & sub_outf(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha, at::Tensor & out) {
+    return at::_ops::sub_Scalar_out::call(self, other, alpha, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_for_size_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_for_size_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9be6584303dc243d3b8be4ccc1acb654e587fae9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_for_size_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API sym_constrain_range_for_size {
+  using schema = void (const at::Scalar &, c10::optional<int64_t>, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sym_constrain_range_for_size")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sym_constrain_range_for_size(Scalar size, *, int? min=None, int? max=None) -> ()")
+  static void call(const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max);
+};
+
+}} // namespace at::_ops
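The Tensor and Scalar overloads of sub share the alpha scaling from the schema: the result is self - alpha * other, with alpha defaulting to 1. A one-line check, illustrative only:

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::ones({2, 2});
  at::Tensor b = at::ones({2, 2});
  at::Tensor c = at::sub(a, b, /*alpha=*/2);  // a - 2*b, so every entry is -1
  return 0;
}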
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_along_dim_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_along_dim_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b5107a9fc75e7cef1e6ce375ababa3644fdfabbe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/take_along_dim_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API take_along_dim_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::take_along_dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim, at::Tensor & out);
+};
+
+struct TORCH_API take_along_dim {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::take_along_dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6753baba53931e77faee19f7a7f197229656b1c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor tanh_backward(const at::Tensor & grad_output, const at::Tensor & output);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
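take_along_dim pairs naturally with index-producing reductions, since the indices broadcast along the chosen dim. Illustrative sketch:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({3, 4});
  at::Tensor idx = x.argmax(/*dim=*/1, /*keepdim=*/true);
  at::Tensor best = at::take_along_dim(x, idx, 1);  // one max per row, shape [3, 1]
  return 0;
}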
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1960732c734823ac8fe6115993fb25d783b3711
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_dense_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API to_dense {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<at::ScalarType>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::to_dense")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<bool> masked_grad);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<bool> masked_grad);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..ad72485e710329f64dcfd61a548e7daf47c5edc5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_copy.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/transpose_copy_ops.h>
+
+namespace at {
+
+
+// aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor
+inline at::Tensor transpose_copy(const at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::transpose_copy_int::call(self, dim0, dim1);
+}
+
+// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & transpose_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim0, int64_t dim1) {
+    return at::_ops::transpose_copy_int_out::call(self, dim0, dim1, out);
+}
+// aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & transpose_copy_outf(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out) {
+    return at::_ops::transpose_copy_int_out::call(self, dim0, dim1, out);
+}
+
+}
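transpose_copy mirrors transpose but always materializes a new tensor rather than returning a view. Illustrative sketch:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::arange(6).reshape({2, 3});
  at::Tensor t = at::transpose_copy(x, 0, 1);  // shape [3, 2], independent storage
  t.add_(1);                                   // does not touch x, unlike a view
  return 0;
}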
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3dee6f46b9ab4f6ebaa7afcc0d22a98cd66d42d4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tril_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor tril(const at::Tensor & self, int64_t diagonal=0);
+TORCH_API at::Tensor & tril_(at::Tensor & self, int64_t diagonal=0);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1792b2da9fcb5c12b812b19eba1a0ec119781413
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/unsqueeze_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & unsqueeze_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor & unsqueeze_copy_outf(const at::Tensor & self, int64_t dim, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5b486ca6a64aee9ce4fc725e99f77571160f7011
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bilinear2d_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_bilinear2d_meta.h>
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor upsample_bilinear2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, bool align_corners, c10::optional<c10::ArrayRef<double>> scale_factors);
+struct TORCH_API structured_upsample_bilinear2d_out_cpu : public at::meta::structured_upsample_bilinear2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & out);
+};
+struct TORCH_API structured_upsample_bilinear2d_out_cuda : public at::meta::structured_upsample_bilinear2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & out);
+};
+TORCH_API at::Tensor upsample_bilinear2d_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+} // namespace native
+} // namespace at
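upsample_bilinear2d_native.h declares the functional entry point plus the structured CPU/CUDA impls behind it. Typical functional usage, as an illustrative sketch:

#include <ATen/ATen.h>

int main() {
  at::Tensor img = at::rand({1, 3, 4, 4});  // NCHW
  at::Tensor up = at::upsample_bilinear2d(img, {8, 8}, /*align_corners=*/false);
  return 0;
}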
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..41c640beae7115d2be8ae105f9bd18c457284d75
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest1d_backward_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor upsample_nearest1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor upsample_nearest1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input);
+TORCH_API at::Tensor & upsample_nearest1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & upsample_nearest1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/vdot_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/vdot_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1049b7b4874e7f50311bef4b819d0f79a3bf15fe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/vdot_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/Tensor.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & vdot_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & vdot_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
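vdot's out variants above follow the same out/outf pairing as the rest of the generated API. A closing sketch, illustrative only:

#include <ATen/ATen.h>

int main() {
  at::Tensor u = at::rand({5});
  at::Tensor v = at::rand({5});
  at::Tensor out = at::empty({}, u.options());  // vdot returns a 0-dim tensor
  at::vdot_out(out, u, v);
  return 0;
}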