diff --git a/ckpts/universal/global_step120/zero/16.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step120/zero/16.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..acbe115d55fb851fea4e0c2a677c1770511992b6
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/16.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:213965983251c27a5b88327b326f9496e68c875ce3f985a4cf2c5a5eda18d38d
+size 50332749
diff --git a/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5ca6adf84469c75634bea34d75a00117118026f9
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/23.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:013a69a8716a8153b3a775ff5522e46480b871fcae3215751cfbe29cc8439775
+size 33555612
diff --git a/ckpts/universal/global_step120/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..3b401c60441f3cfdd3c2db1d2cb7eb7fbe314afd
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8cc8fcaee4cd514c52f0e86e2e03242f6af04d291bf6a82fb2330781311f282c
+size 33555627
diff --git a/ckpts/universal/global_step120/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2a1069b1757444bdd7d5b0c0ab956c07f90f6842
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9052230958a7c412e2c2514519a5eeeae3b1f3e32fe96ae0a5650f3f04751a1e
+size 33555533
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale.h
new file mode 100644
index 0000000000000000000000000000000000000000..85228b5bfebb7a6345f2c0759c1486948cd310dc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_amp_update_scale_ops.h>
+
+namespace at {
+
+
+// aten::_amp_update_scale_(Tensor(a!) self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor(a!)
+inline at::Tensor & _amp_update_scale_(at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
+    return at::_ops::_amp_update_scale_::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
+}
+
+// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _amp_update_scale_out(at::Tensor & out, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
+    return at::_ops::_amp_update_scale_out::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
+}
+// aten::_amp_update_scale.out(Tensor self, Tensor(b!) growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _amp_update_scale_outf(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out) {
+    return at::_ops::_amp_update_scale_out::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval, out);
+}
+
+// aten::_amp_update_scale(Tensor self, Tensor growth_tracker, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> (Tensor, Tensor growth_tracker_out)
+inline ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
+    return at::_ops::_amp_update_scale::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
+}
+
+}
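[Annotation — not part of the diff] _amp_update_scale_ is the in-place op behind torch.cuda.amp.GradScaler's loss-scale update. A minimal C++ sketch of a call, assuming a linked libtorch; the op name and signature come from the header above, while the wrapper function and the constants (GradScaler's documented defaults) are illustrative:

#include <ATen/ATen.h>

// Hypothetical helper: grow or back off the loss scale in place after a step.
// scale and growth_tracker are mutated (Tensor(a!) / Tensor(b!) in the schema).
void update_loss_scale(at::Tensor & scale, at::Tensor & growth_tracker,
                       const at::Tensor & found_inf) {
  at::_amp_update_scale_(scale, growth_tracker, found_inf,
                         /*scale_growth_factor=*/2.0,
                         /*scale_backoff_factor=*/0.5,
                         /*growth_interval=*/2000);
}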
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f14662f72bb0939558a2d9f76d8b799d686451fb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _convert_indices_from_coo_to_csr(const at::Tensor & self, int64_t size, bool out_int32=false);
+TORCH_API at::Tensor & _convert_indices_from_coo_to_csr_out(at::Tensor & out, const at::Tensor & self, int64_t size, bool out_int32=false);
+TORCH_API at::Tensor & _convert_indices_from_coo_to_csr_outf(const at::Tensor & self, int64_t size, bool out_int32, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed1a0ebdf20ea9deb348f24e2b1fbc1d1f5a29bc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_backward_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false);
+TORCH_API at::Tensor _ctc_loss_backward(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity=false);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummax_helper_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummax_helper_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..64db1f76c08e21e67a590c1f3c93cc73a524a2ac
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cummax_helper_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API void _cummax_helper(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2c1748429c0f7c4cc8170f972983080cc903680
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _empty_affine_quantized_out_symint(c10::SymIntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+TORCH_API at::Tensor empty_affine_quantized_other_backends_stub(at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous);
+TORCH_API at::Tensor empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ffcef31e54815605a54bec452b624c799c59e19
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_max.h
@@ -0,0 +1,82 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_clamp_max_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_clamp_max.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar);
+}
+
+// aten::_foreach_clamp_max_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+inline void _foreach_clamp_max_(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar);
+}
+
+// aten::_foreach_clamp_max.List(Tensor[] self, Tensor[] other) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_max_List::call(self, other);
+}
+
+// aten::_foreach_clamp_max_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+inline void _foreach_clamp_max_(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_max__List::call(self, other);
+}
+
+// aten::_foreach_clamp_max.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_clamp_max(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_clamp_max_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+inline void _foreach_clamp_max_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
+}
+// aten::_foreach_clamp_max.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+    return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
+}
+
+// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
+}
+// aten::_foreach_clamp_max.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
+    return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
+}
+
+// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
+}
+// aten::_foreach_clamp_max.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_clamp_max_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+    return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
+}
+
+}
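[Annotation — not part of the diff] The _foreach_* family applies one operation across a whole list of tensors in a single dispatcher call, which is how fused optimizers avoid per-parameter overhead. A sketch under the same libtorch assumption; clamp_all is a hypothetical helper name:

#include <ATen/ATen.h>
#include <vector>

// Hypothetical helper: clamp every tensor in params to at most 1.0, in place.
// at::TensorList is a non-owning view (c10::ArrayRef<at::Tensor>) over the vector.
void clamp_all(std::vector<at::Tensor> & params) {
  at::_foreach_clamp_max_(at::TensorList(params), at::Scalar(1.0));
}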
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5f34405328ec6b3f720e1eeda0bd36beb7235e8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_prelu_kernel_ops.h>
+
+namespace at {
+
+
+// aten::_prelu_kernel(Tensor self, Tensor weight) -> Tensor
+inline at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight) {
+    return at::_ops::_prelu_kernel::call(self, weight);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..344a66e825511e04f66b7317c14f15007b026a49
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_scramble_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor & _sobol_engine_scramble_(at::Tensor & self, const at::Tensor & ltm, int64_t dimension);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc8b4cc24ed9da03fd11fd708a7bf14625044649
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _softmax(const at::Tensor & self, int64_t dim, bool half_to_float);
+TORCH_API at::Tensor & _softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float);
+TORCH_API at::Tensor & _softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2d64fcf47accf1ca5222c1156a039c953a4275ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _test_string_default(const at::Tensor & dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\");
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd.h
new file mode 100644
index 0000000000000000000000000000000000000000..c521c493fcfe03f648b51a3c5212ef01fa81ebd1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transformer_encoder_layer_fwd.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_transformer_encoder_layer_fwd_ops.h>
+
+namespace at {
+
+
+// aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor
+inline at::Tensor _transformer_encoder_layer_fwd(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, c10::optional<int64_t> mask_type=c10::nullopt) {
+    return at::_ops::_transformer_encoder_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
+}
+
+// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _transformer_encoder_layer_fwd_out(at::Tensor & out, const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask={}, c10::optional<int64_t> mask_type=c10::nullopt) {
+    return at::_ops::_transformer_encoder_layer_fwd_out::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
+}
+// aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _transformer_encoder_layer_fwd_outf(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type, at::Tensor & out) {
+    return at::_ops::_transformer_encoder_layer_fwd_out::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type, out);
+}
+
+}
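[Annotation — not part of the diff] The headers in this stretch show torchgen's three calling conventions: op(args) allocates its result, op_out(out, args) takes the destination first, and op_outf(args, out) appends it last; both out spellings route to the same _ops::*_out::call. A sketch using the _softmax declarations above; the wrapper name is hypothetical:

#include <ATen/ATen.h>

// Hypothetical wrapper: write softmax(logits) into a preallocated tensor.
// The outf spelling keeps the functional argument order and appends `out`.
void softmax_into(const at::Tensor & logits, at::Tensor & out) {
  at::_softmax_outf(logits, /*dim=*/-1, /*half_to_float=*/false, out);
}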
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..22da564686ca13008f7f4d0f2ae520e14ffa2e2c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _upsample_nearest_exact1d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact1d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_outf(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales, at::Tensor & out);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cee42b42fa084388703541118678b77b41919bea
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact3d_backward_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _upsample_nearest_exact3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9d2454dbe676fda88b707c7a3975a2364d6359cd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_coo_tensor_args_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API void _validate_sparse_coo_tensor_args(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<bool> is_coalesced=c10::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..37f7c864c2ae13e0fe3b5d63e3b021b5a2af823f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..f4d33d7dc783c565acaae1e314176299d0b2f381
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/asinh_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_asinh : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & self);
+};
+
+} // namespace meta
+} // namespace at
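[Annotation — not part of the diff] *_meta.h headers like asinh_meta.h above declare the meta() hook of a structured kernel: it runs shape and dtype inference through TensorIteratorBase before a device-specific impl() executes. User code never touches the struct directly; it goes through the ordinary entry points. A trivial sketch, with a hypothetical wrapper name:

#include <ATen/ATen.h>

// Hypothetical wrapper: the functional entry point; at::asinh_ (in-place) and
// at::asinh_out (preallocated out) share the same structured_asinh meta().
at::Tensor asinh_demo(const at::Tensor & x) {
  return at::asinh(x);
}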
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool1d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool1d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..296bea9a2405dda5109861ede57bd6ba067756ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool1d_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API avg_pool1d {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..4460ac549c03a9cc1cba0fa7dddb8d8a38dd0863
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor bernoulli(const at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3cdb8cb81a724dc6f5666220c803506533cc00b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bilinear_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor bilinear(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={});
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_solve_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_solve_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..936c1181fab195df909a88ae703431ebde9436a1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_solve_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor cholesky_solve(const at::Tensor & self, const at::Tensor & input2, bool upper=false);
+TORCH_API at::Tensor & cholesky_solve_out(const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8f175dda35f6f5e5b698632e691d955249d6f23a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv2d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..fefd7585455b7b650042f55f3709f3c1b3ac742d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv2d_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor conv2d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
+TORCH_API at::Tensor conv2d_padding_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::string_view padding="valid", c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d.h
new file mode 100644
index 0000000000000000000000000000000000000000..a3c313b8c1a6d57062d0bb71327057fc592d76c3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv3d.h
@@ -0,0 +1,69 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/conv3d_ops.h>
+
+namespace at {
+
+
+// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor
+inline at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) {
+    return at::_ops::conv3d::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) {
+    return at::_ops::conv3d::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups);
+  }
+}
+
+// aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor
+inline at::Tensor conv3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) {
+    return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) {
+    return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+}
+
+// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor
+inline at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) {
+    return at::_ops::conv3d_padding::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) {
+    return at::_ops::conv3d_padding::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups);
+  }
+}
+
+// aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding="valid", SymInt[3] dilation=1, SymInt groups=1) -> Tensor
+inline at::Tensor conv3d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) {
+    return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor conv3d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) {
+    return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups);
+  }
+}
+
+}
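[Annotation — not part of the diff] A sketch of the conv3d entry point declared above, assuming a linked libtorch; shapes are illustrative (NCDHW input, 8 filters of 3x3x3) and the function name is hypothetical:

#include <ATen/ATen.h>

// Hypothetical example: batch 1, 4 input channels, a 16^3 volume.
at::Tensor conv3d_demo() {
  at::Tensor input  = at::randn({1, 4, 16, 16, 16});
  at::Tensor weight = at::randn({8, 4, 3, 3, 3});
  return at::conv3d(input, weight, /*bias=*/{}, /*stride=*/1,
                    /*padding=*/1, /*dilation=*/1, /*groups=*/1);
}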
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..91e7af05536b4f7c764d9edabc36ce768d69b0ac
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API conv_tbc_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conv_tbc_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose2d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..da649fac0e15daac467a12908ca91f4824bfc6d0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose2d_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor conv_transpose2d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef output_padding=c10::SymInt(0), c10::SymInt groups=1, c10::SymIntArrayRef dilation=c10::SymInt(1));
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/copysign.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/copysign.h
new file mode 100644
index 0000000000000000000000000000000000000000..123bf6536931902a4be16fdb4e081b2eb3777e02
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/copysign.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/copysign_ops.h>
+
+namespace at {
+
+
+// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & copysign_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::copysign_out::call(self, other, out);
+}
+// aten::copysign.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & copysign_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::copysign_out::call(self, other, out);
+}
+
+// aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor copysign(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::copysign_Tensor::call(self, other);
+}
+
+// aten::copysign.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor copysign(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::copysign_Scalar::call(self, other);
+}
+
+// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & copysign_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::copysign_Scalar_out::call(self, other, out);
+}
+// aten::copysign.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & copysign_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::copysign_Scalar_out::call(self, other, out);
+}
+
+}
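[Annotation — not part of the diff] copysign keeps self's magnitude and takes other's sign; the header above declares both a Tensor overload (elementwise signs) and a Scalar overload. A brief sketch, with a hypothetical function name:

#include <ATen/ATen.h>

// Hypothetical example of the two overloads declared above.
at::Tensor copysign_demo(const at::Tensor & x, const at::Tensor & signs) {
  at::Tensor a = at::copysign(x, signs);  // Tensor overload: elementwise sign
  at::Tensor b = at::copysign(x, -1.0);   // Scalar overload: all negative
  return a + b;
}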
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..13af32a057d0b6bdcecf521b57314692e7ee313d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & crow_indices_copy_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor crow_indices_copy(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ba8000d5154050959c0bfa3a44ed9bf00ec09e2d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/data_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/data_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..02c8430efe7bc377cf72c3ebff92d7dcb3456929
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/data_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor data(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_permuted_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_permuted_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c74274c06d8ff6a2205523986de1082f9117c85f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_permuted_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API empty_permuted {
+  using schema = at::Tensor (c10::SymIntArrayRef, at::IntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::empty_permuted")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "empty_permuted(SymInt[] size, int[] physical_layout, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::IntArrayRef physical_layout, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API empty_permuted_out {
+  using schema = at::Tensor & (c10::SymIntArrayRef, at::IntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::empty_permuted")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "empty_permuted.out(SymInt[] size, int[] physical_layout, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::Tensor & out);
+};
+
+}} // namespace at::_ops
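[Annotation — not part of the diff] The *_ops.h structs are the lowest layer of the generated C++ API: name, overload_name, and schema_str identify the operator to the dispatcher, and call()/redispatch() are what the at:: wrappers bottom out in. Calling a struct directly is possible, though rarely useful outside dispatcher-level work. A sketch with a hypothetical function name:

#include <ATen/ATen.h>

// Hypothetical example: allocate a {2, 3, 4, 5} float tensor whose physical
// layout is channels-last-like, by invoking the op struct directly.
at::Tensor empty_permuted_demo() {
  return at::_ops::empty_permuted::call(
      {2, 3, 4, 5},                 // size (SymInt[])
      {0, 2, 3, 1},                 // physical_layout
      at::kFloat, c10::nullopt, c10::nullopt, c10::nullopt);
}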
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ce42287295874dc61f70c1466b7e8f06edaacef8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API fbgemm_linear_int8_weight_fp32_activation { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Scalar &, const at::Scalar &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fbgemm_linear_int8_weight_fp32_activation") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor") + static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..5123627879e168151a8b9a22440101658c9ac0e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5123627879e168151a8b9a22440101658c9ac0e4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor fft_fftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+TORCH_API at::Tensor & fft_fftn_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+TORCH_API at::Tensor & fft_fftn_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
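Usage sketch for the declarations above (editor's illustration; assumes a linked libtorch): an n-dimensional FFT over the last two dimensions with orthonormal scaling.

#include <ATen/ATen.h>
#include <vector>

at::Tensor fftn_example() {
  at::Tensor x = at::randn({4, 8, 8}, at::kComplexFloat);
  std::vector<int64_t> dims = {-2, -1};
  // s=nullopt keeps each transformed dim at its input size.
  return at::fft_fftn(x, /*s=*/c10::nullopt, dims, /*norm=*/"ortho");
}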
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f7930cf8f1730d785d335f5e4ca2bd9aec9c56dd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fill_diagonal_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fill_diagonal_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Scalar &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fill_diagonal_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Scalar & fill_value, bool wrap);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & fill_value, bool wrap);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..8d24995abbbbd737fab026a9f4b3e4f86130e4cc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardswish_backward.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/hardswish_backward_ops.h>
+
+namespace at {
+
+
+// aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
+inline at::Tensor hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self) {
+    return at::_ops::hardswish_backward::call(grad_output, self);
+}
+
+// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & hardswish_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self) {
+    return at::_ops::hardswish_backward_out::call(grad_output, self, out);
+}
+// aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & hardswish_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::hardswish_backward_out::call(grad_output, self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c1fbab04d81ecfc8374798d790f8f897df3e0fb4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> histogram(const at::Tensor & self, int64_t bins=100, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> histogram_out(at::Tensor & hist, at::Tensor & bin_edges, const at::Tensor & self, int64_t bins=100, c10::optional<at::ArrayRef<double>> range=c10::nullopt, const c10::optional<at::Tensor> & weight={}, bool density=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> histogram_outf(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges);
+
+} // namespace cpu
+} // namespace at
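Usage sketch for the uniform-bin overload above (editor's illustration): a 100-bin density histogram over [0, 1]; it returns (hist, bin_edges).

#include <ATen/ATen.h>
#include <array>
#include <tuple>

std::tuple<at::Tensor, at::Tensor> histogram_example() {
  at::Tensor samples = at::rand({1000});
  std::array<double, 2> range = {0.0, 1.0};
  return at::histogram(samples, /*bins=*/100, at::ArrayRef<double>(range),
                       /*weight=*/{}, /*density=*/true);
}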
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c19936a33610a166a0e56aae14ccd357b9afd95c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor huber_loss(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0);
+TORCH_API at::Tensor & huber_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, double delta=1.0);
+TORCH_API at::Tensor & huber_loss_outf(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag.h
new file mode 100644
index 0000000000000000000000000000000000000000..fae0b11655042bad2fa5f9af5848dd931ad881eb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/imag.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/imag_ops.h>
+
+namespace at {
+
+
+// aten::imag(Tensor(a) self) -> Tensor(a)
+inline at::Tensor imag(const at::Tensor & self) {
+    return at::_ops::imag::call(self);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc89c9748a555a17c709f36e660fcd213c04b2ee
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_native.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor & index_fill_int_Scalar_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out);
+TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value);
+TORCH_API at::Tensor & index_fill_int_Tensor_out(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out);
+TORCH_API at::Tensor & index_fill_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value);
+TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value);
+TORCH_API at::Tensor & index_fill_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value);
+TORCH_API at::Tensor index_fill(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value);
+} // namespace native
+} // namespace at
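Usage sketch for index_fill (editor's illustration): fill rows 0 and 2 of a matrix in place; index_fill_ mutates, while index_fill returns a new tensor.

#include <ATen/ATen.h>

at::Tensor index_fill_example() {
  at::Tensor t = at::zeros({4, 3});
  at::Tensor idx = at::tensor({0, 2}, at::kLong);
  return t.index_fill_(/*dim=*/0, idx, /*value=*/-1.0);
}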
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to.h
new file mode 100644
index 0000000000000000000000000000000000000000..a179cdd64ae896c2bbb3d37861cdb83326f19b46
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_set_to.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/is_set_to_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq.h
new file mode 100644
index 0000000000000000000000000000000000000000..075ac5dcbdf891c582ab88ad4536d9ed181c4d18
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/linalg_lstsq_ops.h>
+
+namespace at {
+
+
+// aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond=c10::nullopt, c10::optional<c10::string_view> driver=c10::nullopt) {
+    return at::_ops::linalg_lstsq::call(self, b, rcond, driver);
+}
+
+// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond=c10::nullopt, c10::optional<c10::string_view> driver=c10::nullopt) {
+    return at::_ops::linalg_lstsq_out::call(self, b, rcond, driver, solution, residuals, rank, singular_values);
+}
+// aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_outf(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values) {
+    return at::_ops::linalg_lstsq_out::call(self, b, rcond, driver, solution, residuals, rank, singular_values);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_pinv_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_pinv_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..346dd90b8d52afcaf6dba6061ad7255b87c780db
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_pinv_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & linalg_pinv_out(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, const c10::optional<at::Tensor> & atol={}, const c10::optional<at::Tensor> & rtol={}, bool hermitian=false);
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, c10::optional<double> atol=c10::nullopt, c10::optional<double> rtol=c10::nullopt, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_out(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, double rcond, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_out(const at::Tensor & self, double rcond, bool hermitian, at::Tensor & out);
+TORCH_API at::Tensor linalg_pinv(const at::Tensor & self, const at::Tensor & rcond, bool hermitian=false);
+TORCH_API at::Tensor & linalg_pinv_out(const at::Tensor & self, const at::Tensor & rcond, bool hermitian, at::Tensor & out);
+} // namespace native
+} // namespace at
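Usage sketch for the atol/rtol overload above (editor's illustration): a pseudo-inverse that drops singular values below an absolute tolerance.

#include <ATen/ATen.h>

at::Tensor pinv_example() {
  at::Tensor a = at::randn({5, 3});
  return at::linalg_pinv(a, /*atol=*/c10::optional<double>(1e-6),
                         /*rtol=*/c10::nullopt, /*hermitian=*/false);
}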
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell.h
new file mode 100644
index 0000000000000000000000000000000000000000..14ae857ce4c98b46c147ba95333ae13fca71cd33
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/lstm_cell_ops.h>
+
+namespace at {
+
+
+// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) {
+    return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..70cd5af6a0d02dddaf1155bae037fb60fe6153a1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool3d_with_indices_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_out(at::Tensor & out, at::Tensor & indices, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> max_pool3d_with_indices_outf(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out, at::Tensor & indices);
+
+} // namespace cpu
+} // namespace at
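Usage sketch for max_pool3d_with_indices (editor's illustration): pooling an (N, C, D, H, W) input while keeping the argmax indices for a later unpooling step.

#include <ATen/ATen.h>
#include <tuple>

std::tuple<at::Tensor, at::Tensor> pool3d_example() {
  at::Tensor x = at::randn({2, 3, 8, 8, 8});
  return at::max_pool3d_with_indices(x, /*kernel_size=*/{2, 2, 2},
                                     /*stride=*/{2, 2, 2});
}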
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..627aba6e5da11928a6622157d73b39a0594eec75
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mean_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_mean_dim : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..af7bb9dde5b148e01e75cba9afd41094b2d7ed3a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void miopen_rnn_backward_out(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_copy_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_copy_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..fcfdf70b972b0d1c5517881e6bbb4cad5173b5f8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/narrow_copy_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor narrow_copy_dense_cpu(const at::Tensor & self, int64_t dim, int64_t start, int64_t length);
+TORCH_API at::Tensor & narrow_copy_dense_cpu_out(const at::Tensor & self, int64_t dim, int64_t start, int64_t length, at::Tensor & out);
+TORCH_API at::Tensor narrow_copy_sparse(const at::Tensor & self, int64_t dim, int64_t start, int64_t length);
+TORCH_API at::Tensor narrow_copy_dense_symint(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_group_norm_backward_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_group_norm_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd60ff0d2088aacf5eaa912a98420427d5c45fba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_group_norm_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, int64_t N, int64_t C, int64_t HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_symint_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> native_group_norm_backward_symint_outf(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/normal_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/normal_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cdc0aa4b1cb3afa0e32d0e4660961d2885a89dcb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/normal_cpu_dispatch.h
@@ -0,0 +1,32 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & normal_(at::Tensor & self, double mean=0, double std=1, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor normal(const at::Tensor & mean, double std=1, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, double std=1, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & normal_outf(const at::Tensor & mean, double std, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor normal(double mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & normal_out(at::Tensor & out, double mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & normal_outf(double mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor normal(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & normal_outf(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ormqr_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ormqr_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1f9498221406fe9e29bb8ebd9fef9b2fc8a0d64b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ormqr_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor ormqr(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false);
+TORCH_API at::Tensor & ormqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left=true, bool transpose=false);
+TORCH_API at::Tensor & ormqr_outf(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
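Usage sketch covering the three normal overload families above (editor's illustration): in-place fill, tensor mean with scalar std, and tensor mean with tensor std.

#include <ATen/ATen.h>

void normal_example() {
  at::Tensor t = at::empty({4});
  t.normal_(/*mean=*/0.0, /*std=*/1.0);                      // in-place
  at::Tensor a = at::normal(at::zeros({4}), /*std=*/1.0);    // Tensor mean
  at::Tensor b = at::normal(at::zeros({4}), at::ones({4}));  // Tensor mean and std
}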
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..28319faaeb7cd0e2abebe1df5bf8f7e51d17b869
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API pixel_shuffle {
+  using schema = at::Tensor (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pixel_shuffle")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pixel_shuffle(Tensor self, int upscale_factor) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t upscale_factor);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor);
+};
+
+struct TORCH_API pixel_shuffle_out {
+  using schema = at::Tensor & (const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pixel_shuffle")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t upscale_factor, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/put_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/put_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b94ab7225958dd622e2b2e41bf8e04ff232c0a0e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/put_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & put_(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate=false);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f119abb196da38acff5fc332ea2f26fd58040f6f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API reflection_pad3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reflection_pad3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reflection_pad3d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
+};
+
+struct TORCH_API reflection_pad3d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::reflection_pad3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "reflection_pad3d_backward(Tensor grad_output, Tensor self, SymInt[6] padding) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..06e2c830aa815b246884db9e72568afbacbfb000
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/repeat_interleave_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor repeat_interleave(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt);
+TORCH_API at::Tensor repeat_interleave_symint(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<c10::SymInt> output_size=c10::nullopt);
+TORCH_API at::Tensor repeat_interleave(const at::Tensor & self, int64_t repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<int64_t> output_size=c10::nullopt);
+TORCH_API at::Tensor repeat_interleave_symint(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim=c10::nullopt, c10::optional<c10::SymInt> output_size=c10::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
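Usage sketch for the Tensor-repeats overload above (editor's illustration): each row's repeat count along dim 0 comes from `repeats`.

#include <ATen/ATen.h>

at::Tensor repeat_example() {
  at::Tensor x = at::arange(6).reshape({2, 3});
  at::Tensor repeats = at::tensor({1, 3}, at::kLong);
  return at::repeat_interleave(x, repeats, /*dim=*/0);  // 4 rows out
}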
self) -> ()") + static void call(at::Tensor & self); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_native.h new file mode 100644 index 0000000000000000000000000000000000000000..348ab009e45d2bf4aff592cb35543c23d09a1010 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_reduce_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_scatter_reduce_two : public at::meta::structured_scatter_reduce_two { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1c6f50dca331976acaf38491fe88e4db71ef52b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor slice_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t start, int64_t end, int64_t step); +TORCH_API at::Tensor & slice_backward_out_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..76674116588fd5dd0d9488cbe83fb08199b479e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor softplus(const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20);
+TORCH_API at::Tensor & softplus_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20);
+TORCH_API at::Tensor & softplus_outf(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1.h
new file mode 100644
index 0000000000000000000000000000000000000000..7e873bcccf5b3bc3a6c730dbb0be8c8987560bbd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y1.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/special_bessel_y1_ops.h>
+
+namespace at {
+
+
+// aten::special_bessel_y1(Tensor self) -> Tensor
+inline at::Tensor special_bessel_y1(const at::Tensor & self) {
+    return at::_ops::special_bessel_y1::call(self);
+}
+
+// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_bessel_y1_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::special_bessel_y1_out::call(self, out);
+}
+// aten::special_bessel_y1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_bessel_y1_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::special_bessel_y1_out::call(self, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..351fbcb23eb9fda00d0118cf5bfac96c74617901
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/special_chebyshev_polynomial_t_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_special_chebyshev_polynomial_t_out : public at::meta::structured_special_chebyshev_polynomial_t {
+void impl(const at::Tensor & x, const at::Tensor & n, const at::Tensor & out);
+};
+TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Scalar & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_chebyshev_polynomial_t_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Scalar & n);
+TORCH_API at::Tensor & special_chebyshev_polynomial_t_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+} // namespace native
+} // namespace at
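Usage sketch for the Chebyshev declarations above (editor's illustration): T_n(x) of the first kind evaluated elementwise, with n given as a tensor.

#include <ATen/ATen.h>

at::Tensor chebyshev_example() {
  at::Tensor x = at::linspace(-1.0, 1.0, 5);
  at::Tensor n = at::full({5}, 3.0);
  return at::special_chebyshev_polynomial_t(x, n);  // T_3 at each x
}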
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_v_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_v_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..1d04ed1894b5c697d3afcd4bc5f50287075c48d1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_v_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_special_chebyshev_polynomial_v : public TensorIteratorBase {
+
+
+    void meta(const at::Tensor & x, const at::Tensor & n);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfcx_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfcx_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..501d95e4db8c5300f68e444d9e8939dcb78eebee
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_erfcx_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_erfcx(const at::Tensor & self);
+TORCH_API at::Tensor & special_erfcx_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_erfcx_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..34814ebc4a250a89cd7a3798c94e5891cf3d5c6d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_hermite_polynomial_he_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_hermite_polynomial_he_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6f57a5360904b20817e617360fb7c38ffba83001
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_modified_bessel_i0(const at::Tensor & self);
+TORCH_API at::Tensor & special_modified_bessel_i0_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_modified_bessel_i0_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cbc198c7a3b6ddc4972871569fedc01eee9ae1be
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_scaled_modified_bessel_k0 {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_scaled_modified_bessel_k0")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_scaled_modified_bessel_k0(Tensor x) -> Tensor")
+  static at::Tensor call(const at::Tensor & x);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x);
+};
+
+struct TORCH_API special_scaled_modified_bessel_k0_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_scaled_modified_bessel_k0")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_scaled_modified_bessel_k0.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & x, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tan_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tan_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f9b5e6278988dd1e97c94729d1d27974986eba1f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/tan_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor tan(const at::Tensor & self);
+TORCH_API at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & tan_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4220f1c6ce32a081539c42754ae1e69c4c2176ac
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/threshold_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_threshold_backward_out : public at::meta::structured_threshold_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, const at::Tensor & grad_input);
+};
+TORCH_API at::Tensor threshold_backwards_nested(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold);
+TORCH_API at::Tensor threshold_backward_sparse(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold);
+TORCH_API at::Tensor & threshold_backward_sparse_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input);
+TORCH_API at::Tensor threshold_backward_sparse_compressed(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold);
+TORCH_API at::Tensor & threshold_backward_sparse_compressed_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold, at::Tensor & grad_input);
+TORCH_API at::Tensor mkldnn_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold);
+} // namespace native
+} // namespace at
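Usage sketch for threshold_backward (editor's illustration): it zeroes the incoming gradient wherever the forward input did not exceed the threshold, which is exactly how ReLU's backward is expressed.

#include <ATen/ATen.h>

at::Tensor relu_backward_example() {
  at::Tensor x = at::randn({8});
  at::Tensor grad_out = at::ones({8});
  return at::threshold_backward(grad_out, x, /*threshold=*/0);
}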
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b9bbb4f54dbaafa52f8961c4d36611b9f7a37ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_trilinear3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_trilinear3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, c10::optional<double>, c10::optional<double>, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_trilinear3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+};
+
+struct TORCH_API upsample_trilinear3d_backward {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, c10::SymIntArrayRef, bool, c10::optional<double>, c10::optional<double>, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_trilinear3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/var_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/var_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1694bffe1c5f9f6aab2e83fc023ac7671f2abe3f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/var_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & var_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/var_mean_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/var_mean_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f061f9a90548001c084be4a578524544e7e6558a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/var_mean_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> var_mean_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> var_mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/var_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/var_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf8e64dfe690e1394c15ef6037f61ad8b740908b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/var_ops.h
@@ -0,0 +1,116 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API var {
+  using schema = at::Tensor (const at::Tensor &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::var")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "var(Tensor self, bool unbiased=True) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, bool unbiased);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool unbiased);
+};
+
+struct TORCH_API var_dim {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::var")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim);
+};
+
+struct TORCH_API var_correction {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalIntArrayRef, const c10::optional<at::Scalar> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::var")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim);
+};
+
+struct TORCH_API var_out {
+  using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::var")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim, at::Tensor & out);
+};
+
+struct TORCH_API var_correction_out {
+  using schema = at::Tensor & (const at::Tensor &, at::OptionalIntArrayRef, const c10::optional<at::Scalar> &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::var")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+};
+
+struct TORCH_API var_names_dim {
+  using schema = at::Tensor (const at::Tensor &, at::DimnameList, bool, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::var")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_dim")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim);
+};
+
+struct TORCH_API var_names_out {
+  using schema = at::Tensor & (const at::Tensor &, at::DimnameList, bool, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::var")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim, at::Tensor & out);
+};
+
+struct TORCH_API var_correction_names {
+  using schema = at::Tensor (const at::Tensor &, at::DimnameList, const c10::optional<at::Scalar> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::var")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction_names")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim);
+};
+
+struct TORCH_API var_correction_names_out {
+  using schema = at::Tensor & (const at::Tensor &, at::DimnameList, const c10::optional<at::Scalar> &, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::var")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "correction_names_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..ecc673f8b0f560c43aa1a53a79f91aa9ad26b4fc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_copy.h
@@ -0,0 +1,105 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/view_copy_ops.h>
+
+namespace at {
+
+
+// aten::view_copy(Tensor self, SymInt[] size) -> Tensor
+inline at::Tensor view_copy(const at::Tensor & self, at::IntArrayRef size) {
+    return at::_ops::view_copy::call(self, c10::fromIntArrayRefSlow(size));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor view_copy(const at::Tensor & self, at::IntArrayRef size) {
+    return at::_ops::view_copy::call(self, c10::fromIntArrayRefSlow(size));
+  }
+}
+
+// aten::view_copy(Tensor self, SymInt[] size) -> Tensor
+inline at::Tensor view_copy_symint(const at::Tensor & self, c10::SymIntArrayRef size) {
+    return at::_ops::view_copy::call(self, size);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor view_copy(const at::Tensor & self, c10::SymIntArrayRef size) {
+    return at::_ops::view_copy::call(self, size);
+  }
+}
+
+// aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor
+inline at::Tensor view_copy(const at::Tensor & self, at::ScalarType dtype) {
+    return at::_ops::view_copy_dtype::call(self, dtype);
+}
+
+// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
+    return at::_ops::view_copy_out::call(self, c10::fromIntArrayRefSlow(size), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
+    return at::_ops::view_copy_out::call(self, c10::fromIntArrayRefSlow(size), out);
+  }
+}
+
+// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & view_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::view_copy_out::call(self, c10::fromIntArrayRefSlow(size), out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & view_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
+    return at::_ops::view_copy_out::call(self, c10::fromIntArrayRefSlow(size), out);
+  }
+}
+
+// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & view_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
+    return at::_ops::view_copy_out::call(self, size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
+    return at::_ops::view_copy_out::call(self, size, out);
+  }
+}
+
+// aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & view_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
+    return at::_ops::view_copy_out::call(self, size, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & view_copy_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
+    return at::_ops::view_copy_out::call(self, size, out);
+  }
+}
+
+// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & view_copy_out(at::Tensor & out, const at::Tensor & self, at::ScalarType dtype) {
+    return at::_ops::view_copy_dtype_out::call(self, dtype, out);
+}
+// aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & view_copy_outf(const at::Tensor & self, at::ScalarType dtype, at::Tensor & out) {
+    return at::_ops::view_copy_dtype_out::call(self, dtype, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f8f829aed908d8f009cc4c69672c9b621e80d7f7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/vstack_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor vstack(at::TensorList tensors);
+TORCH_API at::Tensor & vstack_out(at::Tensor & out, at::TensorList tensors);
+TORCH_API at::Tensor & vstack_outf(at::TensorList tensors, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
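
The headers above only declare generated entry points; they are not called directly. A minimal sketch of how a few of them surface through the public ATen API, assuming a standard libtorch build (the file name example.cpp and the chosen operators are illustrative only and not part of this diff):

// example.cpp -- illustrative only, not part of the checked-in tree.
// Exercises operators declared in the headers above via the stable at::
// wrappers, which forward to the at::_ops::*::call entry points.
#include <ATen/ATen.h>
#include <iostream>
#include <vector>

int main() {
  at::Tensor x = at::rand({4, 5});

  // aten::var -- whole-tensor variance, matching the `var` struct in var_ops.h.
  at::Tensor v_all = at::var(x, /*unbiased=*/true);

  // aten::var.dim -- reduce over dim 1; the bool third argument selects the
  // var.dim overload rather than var.correction.
  std::vector<int64_t> dims = {1};
  at::Tensor v_dim = at::var(x, dims, /*unbiased=*/true, /*keepdim=*/false);

  // aten::view_copy -- like view(), but materializes a fresh tensor instead
  // of returning an alias of x's storage.
  at::Tensor flat = at::view_copy(x, {20});

  // aten::vstack -- registered as CompositeImplicitAutograd, so it decomposes
  // into other ops rather than having per-backend kernels.
  at::Tensor stacked = at::vstack({x, x});

  std::cout << v_all.item<double>() << "\n"
            << v_dim.sizes() << "\n"
            << flat.sizes() << "\n"
            << stacked.sizes() << std::endl;
  return 0;
}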