diff --git a/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..dbc0e52d01dc131987f8b1cd0a641e03a5df9ba1
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d97ce005a8b3d8c5fdedcafa90c5521aa036cb65f28a36a30f7efd2de663280
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..94ddc6308a724681c53848f1a16a84e9dd8e245a
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/11.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70479c0d6375b34aad122bd14a0771f355b75351991a0904d8af6b42f38b5217
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/14.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/14.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b79bd4d9d3e58f62d79330d6c3c18372354fcb06
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/14.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25606717d1f2de6d4cff86d997de5a76088a20546261b14fba1303d8ec0a4406
+size 50332828
diff --git a/ckpts/universal/global_step20/zero/14.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/14.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a4e264d2a574299dc253dbd1daf0774b736eea24
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/14.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edad39ab721aefa4c60ee6d9adceb4d57a19a4b419c4200a694cf541db0028f1
+size 50332843
diff --git a/ckpts/universal/global_step20/zero/14.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step20/zero/14.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5c429076dc4cb79ef0bab37047093bb2d3138c85
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/14.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03e68bf5428c61396dc61758f37868c888c88498248f0143a36be7b2feaf5480
+size 50332749
diff --git a/ckpts/universal/global_step20/zero/6.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step20/zero/6.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..f07172d5b0fef2a455fde56a360b82096f01cdc4
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/6.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4d03512624592754cbf0c3d026657121487fc0d900b0ed453f127512e7aad95
+size 33555533
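Note: the six hunks above are Git LFS pointer files, not the tensors themselves. Each checked-in file records only the LFS spec version, the SHA-256 of the real payload, and its byte size (32-50 MB per shard here, consistent with fp32 weights and Adam exp_avg/exp_avg_sq state in a ZeRO "universal" checkpoint). A sketch of how the real payloads would be fetched after cloning, assuming git-lfs is installed:

git lfs install
git lfs pull --include="ckpts/universal/global_step20/**"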
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_full_precision_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_full_precision_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e3f8da6eb7f517370d0369ba08252cda6f332dde
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_full_precision_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _autocast_to_full_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled);
+
+} // namespace compositeimplicitautograd
+} // namespace at
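A note on the file above: torchgen emits one such header per (operator, dispatch key) pair. The at::compositeimplicitautograd:: symbol binds directly to the kernel registered for that key, while ordinary code calls the public at:: entry point and lets the dispatcher route to it. A minimal sketch of the public call, assuming a LibTorch build (demo() is just a stand-in wrapper):

#include <ATen/ATen.h>

void demo() {
  at::Tensor half = at::ones({2, 2}, at::kHalf);
  // Dispatcher-routed public entry point for the op declared above:
  at::Tensor full = at::_autocast_to_full_precision(half, /*cuda_enabled=*/true, /*cpu_enabled=*/true);
}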
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b1e0baf8848a1131849fd72235e01efcdbe29580
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _cast_Short(const at::Tensor & self, bool non_blocking=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..92a0d6e0d6f8b58fb5c23f230ec1d2c21d6b18de
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_convert_indices_from_coo_to_csr_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured__convert_indices_from_coo_to_csr : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & self, int64_t size, bool out_int32);
+};
+
+} // namespace meta
+} // namespace at
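The *_meta.h header above declares the meta() half of a structured kernel: it validates arguments and fixes the output's shape and dtype without touching data, so the same declaration serves both eager allocation and shape inference. A sketch of the public op it backs (CSR crow_indices from sorted COO row indices); the values follow from the standard COO-to-CSR definition, and at::tensor from an index vector is assumed available in the build:

#include <ATen/ATen.h>
#include <vector>

void demo() {
  std::vector<int64_t> rows_v = {0, 0, 2};               // COO row indices
  at::Tensor rows = at::tensor(rows_v, at::kLong);
  at::Tensor crow = at::_convert_indices_from_coo_to_csr(rows, /*size=*/4, /*out_int32=*/false);
  // crow is [0, 2, 2, 3, 3]: prefix counts of entries per row
}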
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9ceb296f2bb7b878d768780460d43e5f31bbf2a6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_div_cpu_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_div_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_div_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_div_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API ::std::vector<at::Tensor> _foreach_div(at::TensorList self, const at::Tensor & other);
+TORCH_API void _foreach_div_(at::TensorList self, const at::Tensor & other);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2.h
new file mode 100644
index 0000000000000000000000000000000000000000..113685a9a88a01206dae1adbc0ffbb3669fb2466
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_log2_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_log2(Tensor[] self) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_log2(at::TensorList self) {
+    return at::_ops::_foreach_log2::call(self);
+}
+
+// aten::_foreach_log2_(Tensor(a!)[] self) -> ()
+inline void _foreach_log2_(at::TensorList self) {
+    return at::_ops::_foreach_log2_::call(self);
+}
+
+// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_log2_out(at::TensorList out, at::TensorList self) {
+    return at::_ops::_foreach_log2_out::call(self, out);
+}
+// aten::_foreach_log2.out(Tensor[] self, *, Tensor(a!)[] out) -> ()
+inline void _foreach_log2_outf(at::TensorList self, at::TensorList out) {
+    return at::_ops::_foreach_log2_out::call(self, out);
+}
+
+}
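The _foreach_* ops above are the horizontally fused building blocks the optimizers use: one call applies the operation across a whole TensorList with far fewer kernel launches than a loop over tensors. A minimal sketch, assuming a LibTorch build:

#include <ATen/ATen.h>
#include <vector>

void demo() {
  std::vector<at::Tensor> params = {at::rand({8}), at::rand({8, 8})};
  at::_foreach_log2_(params);                    // in-place, one op over the list
  auto halves = at::_foreach_div(params, 2.0);   // functional, returns new tensors
}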
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_maximum.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_maximum.h
new file mode 100644
index 0000000000000000000000000000000000000000..52a2670f31a5cadd12d16a63439186bebde8c52e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_maximum.h
@@ -0,0 +1,82 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_foreach_maximum_ops.h>
+
+namespace at {
+
+
+// aten::_foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_maximum_Scalar::call(self, scalar);
+}
+
+// aten::_foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+inline void _foreach_maximum_(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_maximum__Scalar::call(self, scalar);
+}
+
+// aten::_foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_maximum_List::call(self, other);
+}
+
+// aten::_foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+inline void _foreach_maximum_(at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_maximum__List::call(self, other);
+}
+
+// aten::_foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_maximum(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_maximum_ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+inline void _foreach_maximum_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_maximum__ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_maximum_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out);
+}
+// aten::_foreach_maximum.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_maximum_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+    return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out);
+}
+
+// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_maximum_out(at::TensorList out, at::TensorList self, at::TensorList other) {
+    return at::_ops::_foreach_maximum_List_out::call(self, other, out);
+}
+// aten::_foreach_maximum.List_out(Tensor[] self, Tensor[] other, *, Tensor(a!)[] out) -> ()
+inline void _foreach_maximum_outf(at::TensorList self, at::TensorList other, at::TensorList out) {
+    return at::_ops::_foreach_maximum_List_out::call(self, other, out);
+}
+
+// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_maximum_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out);
+}
+// aten::_foreach_maximum.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_maximum_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+    return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out);
+}
+
+}
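As the header above shows, torchgen emits every out= overload twice: *_out takes the output tensor(s) first (C++ convenience order) and *_outf keeps the schema order with out last; both forward to the same at::_ops record. A short sketch:

#include <ATen/ATen.h>
#include <vector>

void demo() {
  std::vector<at::Tensor> xs = {at::rand({4})}, ys = {at::rand({4})};
  std::vector<at::Tensor> outs = {at::empty({4})};
  at::_foreach_maximum_out(outs, xs, ys);   // out-first spelling
  at::_foreach_maximum_outf(xs, ys, outs);  // schema-order spelling, same call
}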
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2d2b73f82dc7a1ac13c58e9ed6a617810fd0ed1f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow_cuda_dispatch.h
@@ -0,0 +1,29 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, at::TensorList exponent);
+TORCH_API void _foreach_pow_(at::TensorList self, at::TensorList exponent);
+TORCH_API ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, const at::Scalar & exponent);
+TORCH_API void _foreach_pow_(at::TensorList self, const at::Scalar & exponent);
+TORCH_API ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API void _foreach_pow_(at::TensorList self, at::ArrayRef<at::Scalar> exponent);
+TORCH_API ::std::vector<at::Tensor> _foreach_pow(const at::Scalar & self, at::TensorList exponent);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce64320bcddacf77e9c5a4a4ea2f69f0e4a9589f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_tensor_quantized_tensor_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _make_per_tensor_quantized_tensor_out(const at::Tensor & self, double scale, int64_t zero_point, at::Tensor & out);
+TORCH_API at::Tensor make_per_tensor_quantized_tensor_cpu(const at::Tensor & self, double scale, int64_t zero_point);
+TORCH_API at::Tensor make_per_tensor_quantized_tensor_cuda(const at::Tensor & self, double scale, int64_t zero_point);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_storage_offsets_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_storage_offsets_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..306bc172714467940349b1571a585903f5513f99
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_storage_offsets_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _nested_tensor_storage_offsets_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor _nested_tensor_storage_offsets(const at::Tensor & self);
+} // namespace native
+} // namespace at
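The two *_native.h headers above declare the concrete at::native kernels that native_functions.yaml maps each backend to (here, an out kernel plus separate CPU/CUDA implementations of the same op). A sketch of the public entry point they back, assuming a LibTorch build:

#include <ATen/ATen.h>

void demo() {
  at::Tensor ints = at::zeros({4}, at::kChar);  // int8 payload
  // On a CPU tensor this dispatches to make_per_tensor_quantized_tensor_cpu:
  at::Tensor q = at::_make_per_tensor_quantized_tensor(ints, /*scale=*/0.1, /*zero_point=*/0);
}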
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2816f51117ca47bfe421c7e7d89fda27b742bf7e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor _nested_view_from_buffer_copy(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ad6d9cefa6eb230f2a0a508693e61fd9236add4d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pdist_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _pdist_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist);
+TORCH_API at::Tensor & _pdist_backward_outf(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm.h
new file mode 100644
index 0000000000000000000000000000000000000000..ed618ecdc69b69d8259999a08cbad17d3b30c99d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_scaled_mm_ops.h>
+
+namespace at {
+
+
+// aten::_scaled_mm(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False) -> (Tensor, Tensor)
+inline ::std::tuple<at::Tensor,at::Tensor> _scaled_mm(const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias={}, c10::optional<at::ScalarType> out_dtype=c10::nullopt, const c10::optional<at::Tensor> & scale_a={}, const c10::optional<at::Tensor> & scale_b={}, const c10::optional<at::Tensor> & scale_result={}, bool use_fast_accum=false) {
+    return at::_ops::_scaled_mm::call(self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum);
+}
+
+// aten::_scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) out_amax) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _scaled_mm_out(at::Tensor & out, at::Tensor & out_amax, const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias={}, c10::optional<at::ScalarType> out_dtype=c10::nullopt, const c10::optional<at::Tensor> & scale_a={}, const c10::optional<at::Tensor> & scale_b={}, const c10::optional<at::Tensor> & scale_result={}, bool use_fast_accum=false) {
+    return at::_ops::_scaled_mm_out::call(self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum, out, out_amax);
+}
+// aten::_scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) out_amax) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> _scaled_mm_outf(const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias, c10::optional<at::ScalarType> out_dtype, const c10::optional<at::Tensor> & scale_a, const c10::optional<at::Tensor> & scale_b, const c10::optional<at::Tensor> & scale_result, bool use_fast_accum, at::Tensor & out, at::Tensor & out_amax) {
+    return at::_ops::_scaled_mm_out::call(self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum, out, out_amax);
+}
+
+}
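In this PyTorch snapshot _scaled_mm is the FP8 matmul primitive and still returns a (result, amax) pair; later releases drop the amax output. A rough usage sketch, assuming a CUDA build with FP8 support (e.g. an H100); the transpose gives mat2 the column-major layout the kernel expects:

#include <ATen/ATen.h>

void demo() {
  at::Tensor a = at::rand({16, 32}, at::kCUDA).to(at::kFloat8_e4m3fn);
  at::Tensor b = at::rand({16, 32}, at::kCUDA).to(at::kFloat8_e4m3fn).t();  // column-major mat2
  auto [out, amax] = at::_scaled_mm(a, b, /*bias=*/{}, /*out_dtype=*/at::kHalf);
}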
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..e41497535d4a14c232c192824c1f6caddc875537
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_mm_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _scaled_mm {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, c10::optional<at::ScalarType>, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_scaled_mm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_scaled_mm(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias, c10::optional<at::ScalarType> out_dtype, const c10::optional<at::Tensor> & scale_a, const c10::optional<at::Tensor> & scale_b, const c10::optional<at::Tensor> & scale_result, bool use_fast_accum);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias, c10::optional<at::ScalarType> out_dtype, const c10::optional<at::Tensor> & scale_a, const c10::optional<at::Tensor> & scale_b, const c10::optional<at::Tensor> & scale_result, bool use_fast_accum);
+};
+
+struct TORCH_API _scaled_mm_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, c10::optional<at::ScalarType>, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, bool, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_scaled_mm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_scaled_mm.out(Tensor self, Tensor mat2, *, Tensor? bias=None, ScalarType? out_dtype=None, Tensor? scale_a=None, Tensor? scale_b=None, Tensor? scale_result=None, bool use_fast_accum=False, Tensor(a!) out, Tensor(b!) out_amax) -> (Tensor(a!), Tensor(b!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &> call(const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias, c10::optional<at::ScalarType> out_dtype, const c10::optional<at::Tensor> & scale_a, const c10::optional<at::Tensor> & scale_b, const c10::optional<at::Tensor> & scale_result, bool use_fast_accum, at::Tensor & out, at::Tensor & out_amax);
+  static ::std::tuple<at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias, c10::optional<at::ScalarType> out_dtype, const c10::optional<at::Tensor> & scale_a, const c10::optional<at::Tensor> & scale_b, const c10::optional<at::Tensor> & scale_result, bool use_fast_accum, at::Tensor & out, at::Tensor & out_amax);
+};
+
+}} // namespace at::_ops
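Each operator's _ops.h record, like the one above, carries the registered schema string plus two typed entry points: call() runs the full dispatcher, while redispatch() re-enters it below an explicit DispatchKeySet (this is how autograd and functionalization layers hand work down the stack). A sketch using another op from this diff:

#include <ATen/ATen.h>

void demo() {
  at::Tensor x = at::rand({3, 5});
  at::Tensor r = at::_ops::corrcoef::call(x);  // equivalent to at::corrcoef(x)
}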
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_floatlist_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_floatlist_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a9451757b6446001affc1d56e5cf57aec9342d6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_optional_floatlist_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _test_optional_floatlist_out(at::Tensor & out, const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends);
+TORCH_API at::Tensor & _test_optional_floatlist_outf(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_serialization_subcmul_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_serialization_subcmul_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..eafd2b794b8163ea6f1199cbd74ad781edf7436d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_serialization_subcmul_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _test_serialization_subcmul(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha=1);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy.h
new file mode 100644
index 0000000000000000000000000000000000000000..588a99c20135f3893239e4f879e1e080e1ad3c25
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_copy.h
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_to_copy_ops.h>
+
+namespace at {
+
+
+// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options={}, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::_to_copy::call(self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), non_blocking, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+// aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor _to_copy(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
+}
+
+// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _to_copy_out(at::Tensor & out, const at::Tensor & self, bool non_blocking=false, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::_to_copy_out::call(self, non_blocking, memory_format, out);
+}
+// aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _to_copy_outf(const at::Tensor & self, bool non_blocking, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_to_copy_out::call(self, non_blocking, memory_format, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse.h
new file mode 100644
index 0000000000000000000000000000000000000000..5d5d36736f6574ea2e677a24869680e032e3ed43
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse.h
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_to_sparse_ops.h>
+
+namespace at {
+
+
+// aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _to_sparse_out(at::Tensor & out, const at::Tensor & self, int64_t sparse_dim) {
+    return at::_ops::_to_sparse_sparse_dim_out::call(self, sparse_dim, out);
+}
+// aten::_to_sparse.sparse_dim_out(Tensor self, int sparse_dim, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _to_sparse_outf(const at::Tensor & self, int64_t sparse_dim, at::Tensor & out) {
+    return at::_ops::_to_sparse_sparse_dim_out::call(self, sparse_dim, out);
+}
+
+// aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _to_sparse_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Layout> layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional<int64_t> dense_dim=c10::nullopt) {
+    return at::_ops::_to_sparse_out::call(self, layout, blocksize, dense_dim, out);
+}
+// aten::_to_sparse.out(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _to_sparse_outf(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out) {
+    return at::_ops::_to_sparse_out::call(self, layout, blocksize, dense_dim, out);
+}
+
+}
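The _to_copy header above is the functional primitive behind Tensor::to(): its TensorOptions overload simply unpacks dtype/layout/device/pin_memory before hitting the schema-accurate overload. A short sketch:

#include <ATen/ATen.h>

void demo() {
  at::Tensor src = at::rand({2, 3});
  at::Tensor dst = at::_to_copy(src, at::TensorOptions().dtype(at::kHalf));
  // equivalent: src.to(at::kHalf)
}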
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8a72a6838131ebd0fc0d8faea990555e211a9d6e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales=c10::nullopt);
+TORCH_API at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
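The *_symint variants above mirror the plain declarations with c10::SymInt sizes so the op can trace through symbolic shapes (dynamic-shape torch.compile); concrete IntArrayRef callers keep the non-symbolic path. A sketch of the concrete path, assuming a CUDA build:

#include <ATen/ATen.h>

void demo() {
  at::Tensor grad_out = at::rand({1, 1, 8}, at::kCUDA);
  at::Tensor grad_in = at::_upsample_nearest_exact1d_backward(
      grad_out, /*output_size=*/{8}, /*input_size=*/{1, 1, 4});
}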
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_rnn_flatten_weight_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_rnn_flatten_weight_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..866f32f7a9365bdfbfb816d3fdbaa92f7390254c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_use_cudnn_rnn_flatten_weight_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API bool _use_cudnn_rnn_flatten_weight();
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_values_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_values_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e3b7094768fb48f5ac44b7fd904f248b7205fafd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_values_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _values_sparse(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool1d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool1d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..dfb45a94ccf8386df4288e91340f73aa67ff29ba
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool1d_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d(const at::Tensor & self, at::IntArrayRef output_size);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmv_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmv_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0868238aa41a2b2d6d4b71beb24ec110387547c3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmv_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor addmv(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & addmv_outf(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & addmv_(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+
+} // namespace cpu
+} // namespace at
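addmv, declared above, computes out = beta * self + alpha * (mat @ vec); with the default beta = alpha = 1 it is a fused bias-plus-matrix-vector product, and addmv_ / addmv_out are the usual in-place and out= spellings. A short sketch:

#include <ATen/ATen.h>

void demo() {
  at::Tensor bias = at::rand({3}), mat = at::rand({3, 4}), vec = at::rand({4});
  at::Tensor y = at::addmv(bias, mat, vec);  // shape [3]
}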
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8c3ae0f1fe0e22bd8489fcc88e3d731367520ce4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor avg_pool3d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binomial_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binomial_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e7987619f6a55cb2f1714a5ca08d0735d35e815
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/binomial_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & binomial_out(at::Tensor & out, const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & binomial_outf(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_tensors_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_tensors_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..212cceb4ef32143576fe49577c73fba10ab918cd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_tensors_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API broadcast_tensors {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::broadcast_tensors")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "broadcast_tensors(Tensor[] tensors) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList tensors);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList tensors);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clone_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clone_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..56e5113c4d100dc33d607c483cc5bf8e1fbaae15
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/clone_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+TORCH_API at::Tensor & clone_out(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out);
+TORCH_API at::Tensor clone_nested(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+TORCH_API at::Tensor clone_sparse(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+TORCH_API at::Tensor clone_sparse_compressed(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+TORCH_API at::Tensor mkldnn_clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+TORCH_API at::Tensor quantized_clone(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/corrcoef_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/corrcoef_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5a90b54e35d3d489cb64a5d99acfd48842d0324
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/corrcoef_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API corrcoef {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::corrcoef")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "corrcoef(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0919d8fc8d7929b3a3b30986f8c133eb67cf541d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_outf(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_is_acceptable.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_is_acceptable.h
new file mode 100644
index 0000000000000000000000000000000000000000..752f0e7e7d2c79733801ff89d20ee862b96eac09
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_is_acceptable.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cudnn_is_acceptable_ops.h>
+
+namespace at {
+
+
+// aten::cudnn_is_acceptable(Tensor self) -> bool
+inline bool cudnn_is_acceptable(const at::Tensor & self) {
+    return at::_ops::cudnn_is_acceptable::call(self);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a5524deafac2050bb073a931fa1d0b945415b97
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/erfc_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor erfc(const at::Tensor & self);
+TORCH_API at::Tensor & erfc_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & erfc_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & erfc_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2722bafb9c59cc5ac21be8e7b4e96a6bd89d4aa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_tensor_affine_cachemask_out(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_pack_gemm_matrix_fp16_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_pack_gemm_matrix_fp16_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c19a1f167253eff6b67729a41e39ee7da68fecab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_pack_gemm_matrix_fp16_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fbgemm_pack_gemm_matrix_fp16(const at::Tensor & input);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b93319997f60df27f2340daf90e4513865cec33
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fft_irfft_ops.h>
+
+namespace at {
+
+
+// aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+inline at::Tensor fft_irfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_irfft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor fft_irfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_irfft::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm);
+  }
+}
+
+// aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor
+inline at::Tensor fft_irfft_symint(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_irfft::call(self, n, dim, norm);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor fft_irfft(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_irfft::call(self, n, dim, norm);
+  }
+}
+
+// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_irfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_irfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_irfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_irfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_irfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_irfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & fft_irfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_irfft_out::call(self, n.has_value() ? c10::make_optional(c10::SymInt(*n)) : c10::nullopt, dim, norm, out);
+  }
+}
+
+// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_irfft_symint_out(at::Tensor & out, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_irfft_out::call(self, n, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & fft_irfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt) {
+    return at::_ops::fft_irfft_out::call(self, n, dim, norm, out);
+  }
+}
+
+// aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_irfft_symint_outf(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_irfft_out::call(self, n, dim, norm, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & fft_irfft_outf(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out) {
+    return at::_ops::fft_irfft_out::call(self, n, dim, norm, out);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e8da1ff26a5339ee7b6f6436385cde4f41eaa6e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft2_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API fft_rfft2 {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, c10::optional<c10::string_view>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_rfft2")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm);
+};
+
+struct TORCH_API fft_rfft2_out {
+  using schema = at::Tensor & (const at::Tensor &, at::OptionalSymIntArrayRef, at::IntArrayRef, c10::optional<c10::string_view>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_rfft2")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+};
+
+}} // namespace at::_ops
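fft_irfft, declared two hunks above, inverts fft_rfft; because even- and odd-length signals can rfft to the same number of bins, n must be passed to recover an odd original length exactly, and norm takes the usual "backward"/"forward"/"ortho" strings. A short sketch:

#include <ATen/ATen.h>

void demo() {
  at::Tensor sig = at::rand({9});
  at::Tensor back = at::fft_irfft(at::fft_rfft(sig), /*n=*/9);  // ~= sig
}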
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..30398ac0dba80eb712353f8da920901ca2a735cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/floor_divide_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API floor_divide { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API floor_divide__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API floor_divide_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API floor_divide_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide.Scalar(Tensor self, Scalar other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API floor_divide__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API floor_divide_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +}} // namespace at::_ops
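For reference, a sketch exercising the Scalar overload registered above (editor's illustration, not part of the vendored file):

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::arange(-3, 4);        // {-3, -2, -1, 0, 1, 2, 3}
  at::Tensor q = at::floor_divide(a, 2);   // true floor: rounds toward -inf, so -3 -> -2
  return 0;
}

Unlike C++ integer division, which truncates toward zero, floor_divide(-3, 2) yields -2.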
self, Scalar other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other); +}; + +struct TORCH_API floor_divide_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::floor_divide") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e257e0bcbdd5ebb32a2692394de5c899a9d1b8c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..fa16a552675b499c3c2295b09f8f085a807e4803 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_igammac : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8ecff044f5ef1dd8b7e23f2a67ca067d4e6d1b2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_put_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor index_put(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); +TORCH_API at::Tensor & index_put_out(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, at::Tensor & out); +TORCH_API at::Tensor & index_put_(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false); +} // namespace native +} // namespace at
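A sketch of how the optional-tensor index list above is built at a call site (editor's illustration, not part of the vendored header; the variable names are my own):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::zeros({4});
  at::Tensor idx = at::arange(2);                     // write at positions {0, 1}
  c10::List<c10::optional<at::Tensor>> indices;       // one optional entry per indexed dim
  indices.push_back(c10::optional<at::Tensor>(idx));
  at::index_put_(t, indices, at::ones({2}), /*accumulate=*/false);
  return 0;
}

A c10::nullopt entry in the list acts like a full slice (Python's `:`) for that dimension.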
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..61898ab7b8b36a7538060fb3fcc252cc680909fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API lift_fresh_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lift_fresh_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lift_fresh_copy(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API lift_fresh_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lift_fresh_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h new file mode 100644 index 0000000000000000000000000000000000000000..2ea30657402a13f9ed574062fc3fb1df01494b16 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cholesky_ex.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/linalg_cholesky_ex_ops.h> + +namespace at { + + +// aten::linalg_cholesky_ex(Tensor self, *, bool upper=False, bool check_errors=False) -> (Tensor L, Tensor info) +inline ::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex(const at::Tensor & self, bool upper=false, bool check_errors=false) { + return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors); +} + +// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) +inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_out(at::Tensor & L, at::Tensor & info, const at::Tensor & self, bool upper=false, bool check_errors=false) { + return at::_ops::linalg_cholesky_ex_L::call(self, upper, check_errors, L, info); +} +// aten::linalg_cholesky_ex.L(Tensor self, *, bool upper=False, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info) +inline ::std::tuple<at::Tensor &,at::Tensor &> linalg_cholesky_ex_outf(const at::Tensor & self, bool upper, bool check_errors, at::Tensor & L, at::Tensor & info) { + return at::_ops::linalg_cholesky_ex_L::call(self, upper, check_errors, L, info); +} + +}
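A usage sketch for the `_ex` variant above, which reports failure through `info` instead of throwing (editor's illustration, not part of the vendored file):

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor a = at::eye(3) * 4.0;               // symmetric positive definite
  auto result = at::linalg_cholesky_ex(a);
  at::Tensor L = std::get<0>(result);            // lower-triangular factor
  at::Tensor info = std::get<1>(result);         // 0 on success, >0: order of the failing minor
  return 0;
}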
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c5683dfe3dcc4a3b5a729aa5ff53bda38aabbf84 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor(const at::Tensor & self, bool hermitian=false); +TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..a207b2ae4ce52ee1af745a9419debf6ac529df7e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_solve_triangular_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_solve_triangular_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, bool, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_solve_triangular") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); +}; + +struct TORCH_API linalg_solve_triangular { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_solve_triangular") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular); +}; + +}} // namespace at::_ops
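A short sketch of the triangular solve declared above; note `upper` has no default in the schema, so it must be passed explicitly (editor's illustration, not part of the vendored file):

#include <ATen/ATen.h>

int main() {
  // Deterministic upper-triangular A with nonzero diagonal.
  at::Tensor A = at::eye(3) * 2 + at::triu(at::ones({3, 3}), /*diagonal=*/1);
  at::Tensor B = at::ones({3, 2});
  at::Tensor X = at::linalg_solve_triangular(A, B, /*upper=*/true); // solves A X = B
  return 0;
}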
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular, at::Tensor & out); +}; + +struct TORCH_API linalg_solve_triangular { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, bool, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_solve_triangular") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..2b6e917eb0f5b7d1732057be27b08c03e7aff98a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_unpack_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_lu_unpack : public at::impl::MetaBase { + + + void meta(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..235acd95a149e73d0b8ea771b66f27f15c9cc0d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool1d_with_indices_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..90cb462e80eb3c6fb0f53cd3da058af12e413546 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1f4110abbfd40b7a3adf704f3e49a2ed6b1c1f5c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/meshgrid_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::vector<at::Tensor> meshgrid(at::TensorList tensors); +TORCH_API ::std::vector<at::Tensor> meshgrid(at::TensorList tensors, c10::string_view indexing); + +} // namespace compositeimplicitautograd +} // namespace at
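A sketch of the indexing-aware overload above (editor's illustration, not part of the vendored file):

#include <ATen/ATen.h>
#include <vector>

int main() {
  at::Tensor x = at::arange(3);
  at::Tensor y = at::arange(2);
  // "ij" gives matrix indexing: grids[0] and grids[1] both have shape {3, 2}.
  std::vector<at::Tensor> grids = at::meshgrid({x, y}, "ij");
  return 0;
}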
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_group_norm_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_group_norm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..10aa0a63b38604ecb3adbbbed22f39e80f1a4f2b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_group_norm_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API native_group_norm { + using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, c10::SymInt, c10::SymInt, c10::SymInt, int64_t, double); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_group_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)") + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps); + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps); +}; + +struct TORCH_API native_group_norm_out { + using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, c10::SymInt, c10::SymInt, c10::SymInt, int64_t, double, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::native_group_norm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "native_group_norm.out(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))") + static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); + static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2); +}; + +}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_numpy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_numpy.h new file mode 100644 index 0000000000000000000000000000000000000000..cf4caed2b56ba2942760b77f60bc1dfc3a3b8f9f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_numpy.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/nonzero_numpy_ops.h> + +namespace at { + + +// aten::nonzero_numpy(Tensor self) -> Tensor[] +inline ::std::vector<at::Tensor> nonzero_numpy(const at::Tensor & self) { + return at::_ops::nonzero_numpy::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2a144c8454ef80f54b0d9d1fb1e315c461ca76b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & permute_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims); +TORCH_API at::Tensor & permute_copy_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_axis.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_axis.h new file mode 100644 index 0000000000000000000000000000000000000000..3bdde935ee880f79dbccd59d9b0669b526b23ddd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_per_channel_axis.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/q_per_channel_axis_ops.h> + +namespace at { + + +// aten::q_per_channel_axis(Tensor self) -> int
+inline int64_t q_per_channel_axis(const at::Tensor & self) { + return at::_ops::q_per_channel_axis::call(self); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..29e84eacb48864b14007685ce56ecdafeaa08d7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> +#include <ATen/ops/reflection_pad1d_backward_meta.h> + +namespace at { +namespace native { +struct TORCH_API structured_reflection_pad1d_backward_out_cpu : public at::meta::structured_reflection_pad1d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & grad_input); +}; +struct TORCH_API structured_reflection_pad1d_backward_out_cuda : public at::meta::structured_reflection_pad1d_backward { +void impl(const at::Tensor & grad_output, const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & grad_input); +}; +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c1e82cf2c091e69f25139f93360f63b36c911bd3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor reflection_pad1d(const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor reflection_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & reflection_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & reflection_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + +} // namespace meta +} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c3d499b34ec0aa7c0b5a77cc7d4743a8b877b369 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/renorm_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor renorm(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm); +TORCH_API at::Tensor & renorm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm); +TORCH_API at::Tensor & renorm_outf(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm, at::Tensor & out); +TORCH_API at::Tensor & renorm_(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm); + +} // namespace cuda +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0fce1cb2483ef81ad59e4beb8e1a1575c3e640e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad1d_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor replication_pad1d(const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor replication_pad1d_symint(const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad1d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef padding); +TORCH_API at::Tensor & replication_pad1d_outf(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out); +TORCH_API at::Tensor & replication_pad1d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef padding); +TORCH_API at::Tensor & replication_pad1d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & out); + +} // namespace cuda +} // namespace at
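A sketch of the renorm API whose CUDA dispatch entries appear above, shown via the device-generic at:: entry point (editor's illustration, not part of the vendored file):

#include <ATen/ATen.h>

int main() {
  at::Tensor t = at::randn({3, 4});
  // Clamp the 2-norm of each sub-tensor along dim 0 (each row) to at most 1.0.
  at::Tensor r = at::renorm(t, /*p=*/2, /*dim=*/0, /*maxnorm=*/1.0);
  return 0;
}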
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/requires_grad.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/requires_grad.h new file mode 100644 index 0000000000000000000000000000000000000000..b7ed7777aeddf556b373d49b5842f27a1ff3176e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/requires_grad.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/requires_grad_ops.h> + +namespace at { + + + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/select_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/select_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b0b86a0b83348bbe186cdbad1017e8364ce32c24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/select_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API select_Dimname { + using schema = at::Tensor (const at::Tensor &, at::Dimname, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::select") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Dimname") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, at::Dimname dim, int64_t index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, int64_t index); +}; + +struct TORCH_API select_int { + using schema = at::Tensor (const at::Tensor &, int64_t, c10::SymInt); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::select") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "int") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)") + static at::Tensor call(const at::Tensor & self, int64_t dim, c10::SymInt index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, c10::SymInt index); +}; + +}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/set_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/set_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..99d64cf43a97b227e2ae01815943a188e75e3b6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/set_cpu_dispatch.h @@ -0,0 +1,27 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & set_(at::Tensor & self, at::Storage source); +TORCH_API at::Tensor & set_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}); +TORCH_API at::Tensor & set__symint(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}); +TORCH_API at::Tensor & set_(at::Tensor & self, const at::Tensor & source); +TORCH_API at::Tensor & set_(at::Tensor & self); + +} // namespace cpu +} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/set_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/set_native.h new file mode 100644 index 0000000000000000000000000000000000000000..27397dbc79c167314d29b5705f766dd91a4a0639 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/set_native.h @@ -0,0 +1,38 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor set(const at::Tensor & self, at::Storage source); +TORCH_API at::Tensor & set_source_Storage_out(const at::Tensor & self, at::Storage source, at::Tensor & out); +TORCH_API at::Tensor & set_(at::Tensor & self, at::Storage source); +TORCH_API at::Tensor set_symint(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}); +TORCH_API at::Tensor & set_source_Storage_storage_offset_out_symint(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out); +TORCH_API at::Tensor & set_storage_cpu_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}); +TORCH_API at::Tensor & set_storage_cuda_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}); +TORCH_API at::Tensor & set_storage_meta__symint(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}); +TORCH_API at::Tensor & set_storage_quantized_(at::Tensor & self, at::Storage source, int64_t storage_offset, at::IntArrayRef size, at::IntArrayRef stride={}); +TORCH_API at::Tensor & set__symint(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride={}); +TORCH_API at::Tensor set(const at::Tensor & self, const at::Tensor & source); +TORCH_API at::Tensor & set_source_Tensor_out(const at::Tensor & self, const at::Tensor & source, at::Tensor & out); +TORCH_API at::Tensor & set_tensor_(at::Tensor & self, const at::Tensor & source); +TORCH_API at::Tensor set(const at::Tensor & self); +TORCH_API at::Tensor & set_out(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & set_cpu_(at::Tensor & self); +TORCH_API at::Tensor & set_cuda_(at::Tensor & self); +TORCH_API at::Tensor & set_meta_(at::Tensor & self); +} // namespace native +} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sin.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sin.h new file mode 100644 index 0000000000000000000000000000000000000000..00e278e736416ebae10dbf0bccaf96e2c685af3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sin.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/sin_ops.h> + +namespace at { + + +// aten::sin(Tensor self) -> Tensor +inline at::Tensor sin(const at::Tensor & self) { + return at::_ops::sin::call(self); +} + +// aten::sin_(Tensor(a!) self) -> Tensor(a!) +inline at::Tensor & sin_(at::Tensor & self) { + return at::_ops::sin_::call(self); +} + +// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & sin_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::sin_out::call(self, out); +} +// aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & sin_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::sin_out::call(self, out); +}

+}
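The sin.h header above shows the standard functional / in-place / out-variant triple that torchgen emits for unary ops; a minimal sketch of all three (editor's illustration, not part of the vendored file):

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::linspace(0, 3.14, 5);
  at::Tensor y = at::sin(x);        // functional: allocates a new tensor
  at::Tensor out = at::empty_like(x);
  at::sin_out(out, x);              // out-variant: writes into preallocated `out`
  x.sin_();                         // in-place: mutates x
  return 0;
}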
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ad4380868908401b3764570aefeb27d1bf7e8cee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include <c10/core/MemoryFormat.h> +#include <c10/core/Scalar.h> +#include <ATen/core/Reduction.h> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { + +namespace meta { + +TORCH_API at::Tensor softplus(const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20); +TORCH_API at::Tensor & softplus_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & beta=1, const at::Scalar & threshold=20); +TORCH_API at::Tensor & softplus_outf(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..397cb0c9909cb71c96443c5a336cf93bba029142 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softshrink_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/softshrink_backward_ops.h> + +namespace at { + + +// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & softshrink_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::softshrink_backward_grad_input::call(grad_output, self, lambd, grad_input); +} +// aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & softshrink_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input) { + return at::_ops::softshrink_backward_grad_input::call(grad_output, self, lambd, grad_input); +} + +// aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor +inline at::Tensor softshrink_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) { + return at::_ops::softshrink_backward::call(grad_output, self, lambd); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_compressed_tensor_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_compressed_tensor_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d54364cc643ad2e37043e3af10a53d58b275d47b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_compressed_tensor_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API sparse_compressed_tensor_comp_plain_value_size { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_compressed_tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "comp_plain_value_size") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_compressed_tensor.comp_plain_value_size(Tensor compressed_indices, Tensor plain_indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor") + static at::Tensor call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +}; + +struct TORCH_API sparse_compressed_tensor_comp_plain_value { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::sparse_compressed_tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "comp_plain_value") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "sparse_compressed_tensor.comp_plain_value(Tensor compressed_indices, Tensor plain_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor") + static at::Tensor call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory); +}; + +}} // namespace at::_ops
pin_memory=False) -> Tensor") + static at::Tensor call(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..0cc31f797435a5750487a6820a84833e8da951cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j0_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_bessel_j0 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y0_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y0_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c33a8653d8a561ac43aef1e16ca2d5eb4cbce154 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_y0_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p_native.h new file mode 100644 index 0000000000000000000000000000000000000000..af28e043d4a1af25b46d2fcc92aa8b5fb19522ad --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_log1p_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <tuple> +#include <vector> + + +namespace at { +namespace native { +TORCH_API at::Tensor special_log1p(const at::Tensor & self); +TORCH_API at::Tensor & special_log1p_out(const at::Tensor & self, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..af21cf041599ed097e913a70ffc37ce6781e4209 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_k0_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_special_modified_bessel_k0 : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..92e91ceca225a1c01c84cb15678ff83361b3b9d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k1_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API special_scaled_modified_bessel_k1 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_scaled_modified_bessel_k1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_scaled_modified_bessel_k1(Tensor x) -> Tensor") + static at::Tensor call(const at::Tensor & x); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x); +}; + +struct TORCH_API special_scaled_modified_bessel_k1_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_scaled_modified_bessel_k1") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_scaled_modified_bessel_k1.out(Tensor x, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & x, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & x, at::Tensor & out); +}; + +}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..e8a8f4da4d913955dfc86e650ebe81121b40ba83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_sum_dim_IntList : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype); +}; + +} // namespace meta +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/svd_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/svd_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..f92a4b7fd10eb9a9f85f9ba0f55dddb0831fe436 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/svd_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include <tuple> +#include <vector> + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include <ATen/core/ATen_fwd.h> + +namespace at { +namespace _ops { + + +struct TORCH_API svd_U { + using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::svd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "U") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)") + static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V); + static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V); +}; + +struct TORCH_API svd { + using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::svd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)") + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, bool some, bool compute_uv); + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv); +}; + +}} // namespace at::_ops
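A sketch of the functional svd overload above (editor's illustration, not part of the vendored file; note this legacy op returns V, not the conjugate-transposed Vh of linalg_svd):

#include <ATen/ATen.h>
#include <tuple>

int main() {
  at::Tensor a = at::randn({4, 3});
  auto usv = at::svd(a);            // some=true, compute_uv=true by default
  at::Tensor U = std::get<0>(usv);
  at::Tensor S = std::get<1>(usv);  // singular values, descending
  at::Tensor V = std::get<2>(usv);
  return 0;
}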
V)") + static ::std::tuple call(const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv, at::Tensor & U, at::Tensor & S, at::Tensor & V); +}; + +struct TORCH_API svd { + using schema = ::std::tuple (const at::Tensor &, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::svd") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)") + static ::std::tuple call(const at::Tensor & self, bool some, bool compute_uv); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool some, bool compute_uv); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c02d3f30732d2397e2fb246e07e3dc0ce0f2ddda --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csc_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API to_sparse_csc { + using schema = at::Tensor (const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::to_sparse_csc") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::optional dense_dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dense_dim); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/topk_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/topk_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..612f1d4117bbbcc2e2be4f78daac52878d33b992 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/topk_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2dbc1290e9ef045f39404f2f229bdf360e565af0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_bicubic2d_meta_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor upsample_bicubic2d(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor upsample_bicubic2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+TORCH_API at::Tensor & upsample_bicubic2d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+TORCH_API at::Tensor & upsample_bicubic2d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
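The at::meta:: declarations above are the shape-and-dtype-only kernels used under the Meta dispatch key (shape inference, tracing); eager code goes through the ordinary at:: entry point. A hedged sketch with made-up sizes:

#include <ATen/ATen.h>

int main() {
  // NCHW input upsampled to 8x8; scales_h/scales_w stay at c10::nullopt.
  at::Tensor img = at::randn({1, 3, 4, 4});
  at::Tensor up = at::upsample_bicubic2d(img, /*output_size=*/{8, 8}, /*align_corners=*/false);
}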
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..417afaa75d05afedd61cf46aa8b78beed7b6797b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API upsample_linear1d_vec {
+  using schema = at::Tensor (const at::Tensor &, at::OptionalSymIntArrayRef, bool, c10::optional<at::ArrayRef<double>>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_linear1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "vec")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors);
+};
+
+struct TORCH_API upsample_linear1d_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, bool, c10::optional<double>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_linear1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales, at::Tensor & out);
+};
+
+struct TORCH_API upsample_linear1d {
+  using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::upsample_linear1d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales);
+};
+
+}} // namespace at::_ops
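upsample_linear1d follows the usual three-overload pattern (vec, out=, functional). A minimal sketch of the functional form, with illustrative sizes only:

#include <ATen/ATen.h>

int main() {
  // NCW input; linear interpolation along the last dimension, 16 -> 32.
  at::Tensor wav = at::randn({1, 2, 16});
  at::Tensor up = at::upsample_linear1d(wav, /*output_size=*/{32}, /*align_corners=*/false);
}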
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..afb9c68961ef28ce9c6ddfbffe38792940598c80
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_upsample_nearest3d : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & self, at::ArrayRef<int64_t> output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w);
+};
+
+} // namespace meta
+} // namespace at
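structured_upsample_nearest3d::meta only validates arguments and fixes the NCDHW output shape; a corresponding eager call might look like this (sketch, sizes invented):

#include <ATen/ATen.h>

int main() {
  // NCDHW volume; nearest-neighbour upsampling in all three spatial dims.
  at::Tensor vol = at::randn({1, 1, 4, 4, 4});
  at::Tensor up = at::upsample_nearest3d(vol, /*output_size=*/{8, 8, 8});
}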
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/zeros_like.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/zeros_like.h
new file mode 100644
index 0000000000000000000000000000000000000000..513209ac202c7216bf535b488e8136b710e89138
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/zeros_like.h
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/zeros_like_ops.h>
+
+namespace at {
+
+
+// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor zeros_like(const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::zeros_like::call(self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+// aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor zeros_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::zeros_like::call(self, dtype, layout, device, pin_memory, memory_format);
+}
+
+// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & zeros_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::zeros_like_out::call(self, memory_format, out);
+}
+// aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & zeros_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::zeros_like_out::call(self, memory_format, out);
+}
+
+}
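Unlike the *_ops.h headers, zeros_like.h contains the inline user-facing wrappers themselves. A short sketch of the two main entry points (shapes illustrative, assuming a standard libtorch build):

#include <ATen/ATen.h>

int main() {
  at::Tensor src = at::randn({2, 3});

  // TensorOptions overload: same shape as src, dtype overridden to double.
  at::Tensor z = at::zeros_like(src, src.options().dtype(at::kDouble));

  // out= variant: zeros are written into a pre-allocated tensor.
  at::Tensor out = at::empty({2, 3});
  at::zeros_like_out(out, src);
}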