diff --git a/ckpts/universal/global_step120/zero/15.attention.dense.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/15.attention.dense.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fafa0dc17e6555da652fdaedf8c1067b0003555e
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/15.attention.dense.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e033aa4f0606319c3fdbf13f505063e1b0a3eeede14356c0b1c8db0fc45e0012
+size 16778411
diff --git a/ckpts/universal/global_step120/zero/22.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/22.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b752ce41af88adee723ee7709916963af69f86fc
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/22.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:985248e1f81df6854da480e0d936c582e43ff4a1d60fc97f1faadb3077b60973
+size 9387
diff --git a/ckpts/universal/global_step120/zero/22.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/22.input_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..555c1bd713358abb8bac07aca7e351391fb39da2
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/22.input_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cca14ba7fc60ac9498c109c5ce80479406e1edc1bfd1404a6dc9b2bff01bbd9f
+size 9293
diff --git a/ckpts/universal/global_step120/zero/25.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/25.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..101909e2313a4ed8336d6719ab1f3cbf4cb385ff
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/25.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d437cd3be24233801580f155fed5cce42779a6a723b1ffd24e33c52e3a2d1ca
+size 50332843
diff --git a/ckpts/universal/global_step120/zero/5.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/5.input_layernorm.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..8d2699440c6808489c7842a697612983f3ba7c37
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/5.input_layernorm.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:943b17a18cce3507a636f3d4caee34dbaf3cf4285c6cf41fef12b33b23f297ec
+size 9387
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a592ee91b159c80f4464061ef188c37ab18a83db
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _adaptive_avg_pool3d_backward_out(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor adaptive_avg_pool3d_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor adaptive_avg_pool3d_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short.h
new file mode 100644
index 0000000000000000000000000000000000000000..f338095b878c1d118d1b7fb8b363ad366922f3cf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Short.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cast_Short_ops.h>
+
+namespace at {
+
+
+// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
+inline at::Tensor _cast_Short(const at::Tensor & self, bool non_blocking=false) {
+    return at::_ops::_cast_Short::call(self, non_blocking);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_set_plan_cache_max_size_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_set_plan_cache_max_size_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5cf498bc3dc6fe0fd727f69eaa4e8f830b7a84a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_set_plan_cache_max_size_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _cufft_set_plan_cache_max_size(at::DeviceIndex device_index, int64_t max_size);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f707679384af9f938a656ad83823cf9a6e60d4e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_dim_arange_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _dim_arange {
+  using schema = at::Tensor (const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_dim_arange")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_dim_arange(Tensor like, int dim) -> Tensor")
+  static at::Tensor call(const at::Tensor & like, int64_t dim);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & like, int64_t dim);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized.h
new file mode 100644
index 0000000000000000000000000000000000000000..e4d2f2d518acf5834691de46e1bf1b8e8c46e68f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized.h
@@ -0,0 +1,113 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_empty_affine_quantized_ops.h>
+
+namespace at {
+
+
+// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized::call(c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+}
+
+// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_affine_quantized::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, scale, zero_point, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _empty_affine_quantized(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_affine_quantized::call(c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, scale, zero_point, memory_format);
+  }
+}
+
+// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _empty_affine_quantized(c10::SymIntArrayRef size, at::TensorOptions options={}, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized::call(size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), scale, zero_point, c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+  }
+}
+
+// aten::_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor
+inline at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_affine_quantized::call(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _empty_affine_quantized(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::_empty_affine_quantized::call(size, dtype, layout, device, pin_memory, scale, zero_point, memory_format);
+  }
+}
+
+// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scale, zero_point, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _empty_affine_quantized_out(at::Tensor & out, at::IntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scale, zero_point, memory_format, out);
+  }
+}
+
+// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_affine_quantized_outf(at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scale, zero_point, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _empty_affine_quantized_outf(at::IntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_affine_quantized_out::call(c10::fromIntArrayRefSlow(size), scale, zero_point, memory_format, out);
+  }
+}
+
+// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_affine_quantized_symint_out(at::Tensor & out, c10::SymIntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _empty_affine_quantized_out(at::Tensor & out, c10::SymIntArrayRef size, double scale=1, int64_t zero_point=0, c10::optional<at::MemoryFormat> memory_format=MemoryFormat::Contiguous) {
+    return at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out);
+  }
+}
+
+// aten::_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _empty_affine_quantized_symint_outf(c10::SymIntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _empty_affine_quantized_outf(c10::SymIntArrayRef size, double scale, int64_t zero_point, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::_empty_affine_quantized_out::call(size, scale, zero_point, memory_format, out);
  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_channel_affine_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_channel_affine_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..658a304f63cdbbd5925c5c13d0078ec4c74e4889
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_channel_affine_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _fake_quantize_learnable_per_channel_affine {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t, double);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fake_quantize_learnable_per_channel_affine")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor);
+};
+
+struct TORCH_API _fake_quantize_learnable_per_channel_affine_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t, double, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_fake_quantize_learnable_per_channel_affine")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_fake_quantize_learnable_per_channel_affine.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.0, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_forward_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_forward_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ad92d3675d34000f756447bd03662fb23059a6ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_flash_attention_forward_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cum_seq_q, const c10::optional<at::Tensor> & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale=c10::nullopt);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward_symint(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cum_seq_q, const c10::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale=c10::nullopt);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..09c6cc795f7594dd36006b24012700ca691fedf9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_acos_out(at::TensorList self, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_acos_slow(at::TensorList self);
+TORCH_API void foreach_tensor_acos_slow_(at::TensorList self);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_acos_cuda(at::TensorList self);
+TORCH_API void foreach_tensor_acos_cuda_(at::TensorList self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ea4e1131c7b8058aafe8a9d3c7a6e054bfc0e6a1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_floor_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_floor {
+  using schema = ::std::vector<at::Tensor> (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_floor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_floor(Tensor[] self) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_floor_ {
+  using schema = void (at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_floor_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_floor_(Tensor(a!)[] self) -> ()")
+  static void call(at::TensorList self);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
+};
+
+struct TORCH_API _foreach_floor_out {
+  using schema = void (at::TensorList, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_floor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_floor.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb5de3828be11e3c959ec24e9d1738ab7da69c15
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void _foreach_sinh_out(at::TensorList out, at::TensorList self);
+TORCH_API void _foreach_sinh_outf(at::TensorList self, at::TensorList out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..242aa56705a2fd2da0051f9a5d2c06e2f2c0f425
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API void _foreach_zero_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd6f7dff24dd975ce998644384792371d073074b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _indices_sparse(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..42ad8d517f041f609a5ff671b69024e8cd29dc94
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Scalar _local_scalar_dense(const at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution.h
new file mode 100644
index 0000000000000000000000000000000000000000..8da065897796de72743b9262787f4e0691231a44
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mps_convolution.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_mps_convolution_ops.h>
+
+namespace at {
+
+
+// aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
+inline at::Tensor _mps_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_mps_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor _mps_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_mps_convolution::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups);
+  }
+}
+
+// aten::_mps_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor
+inline at::Tensor _mps_convolution_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor _mps_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups);
+  }
+}
+
+// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mps_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_mps_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _mps_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups) {
+    return at::_ops::_mps_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+  }
+}
+
+// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mps_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+    return at::_ops::_mps_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & _mps_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, at::Tensor & out) {
+    return at::_ops::_mps_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(dilation), groups, out);
+  }
+}
+
+// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mps_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_mps_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _mps_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
+    return at::_ops::_mps_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
+  }
+}
+
+// aten::_mps_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _mps_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
+    return at::_ops::_mps_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & _mps_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out) {
+    return at::_ops::_mps_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, out);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc262cfd9ce0e0cfefcfb63388fe2e6b2dbeedc6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_mask_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _nested_tensor_from_mask(const at::Tensor & t, const at::Tensor & mask, bool mask_check=true);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f0c89167fe0c3cd02a213ae544a4a9b749e6290
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nnpack_spatial_convolution {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nnpack_spatial_convolution")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride);
+};
+
+struct TORCH_API _nnpack_spatial_convolution_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nnpack_spatial_convolution")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax.h
new file mode 100644
index 0000000000000000000000000000000000000000..a5882896b416d06569fd63724c5333536180344a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_softmax_ops.h>
+
+namespace at {
+
+
+// aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor
+inline at::Tensor _softmax(const at::Tensor & self, int64_t dim, bool half_to_float) {
+    return at::_ops::_softmax::call(self, dim, half_to_float);
+}
+
+// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _softmax_out(at::Tensor & out, const at::Tensor & self, int64_t dim, bool half_to_float) {
+    return at::_ops::_softmax_out::call(self, dim, half_to_float, out);
+}
+// aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _softmax_outf(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out) {
+    return at::_ops::_softmax_out::call(self, dim, half_to_float, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_addmm.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_addmm.h
new file mode 100644
index 0000000000000000000000000000000000000000..1cf6d860a3ae1a558bd0916e609636dd2b14a7a7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_addmm.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_sparse_addmm_ops.h>
+
+namespace at {
+
+
+// aten::_sparse_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
+inline at::Tensor _sparse_addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+    return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha);
+}
+
+// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
+    return at::_ops::_sparse_addmm_out::call(self, mat1, mat2, beta, alpha, out);
+}
+// aten::_sparse_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _sparse_addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
+    return at::_ops::_sparse_addmm_out::call(self, mat1, mat2, beta, alpha, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..27b7130935178eb24039f8f31bf75da661614d3b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sparse_coo_tensor_unsafe {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_coo_tensor_unsafe")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_backward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce39c759f24919f49c6250d923664e6c9b586ebe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_weight_norm_interface_backward_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..74438d94d66c2f5675528ccb2bec006caec8100c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_aminmax : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim);
+};
+
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5646fa0d8641bd5abc6e9ff49b1ad333ca64228d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arccosh_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor arccosh(const at::Tensor & self);
+TORCH_API at::Tensor & arccosh_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & arccosh_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & arccosh_(at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..346c2b788c2fb4b7a590f79e08faa64c4cdde128
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API avg_pool3d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
+};
+
+struct TORCH_API avg_pool3d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cebf70cd2425c5194d9d55fe244e859e31e4045d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & baddbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+TORCH_API at::Tensor & baddbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
+TORCH_API at::Tensor & baddbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bartlett_window_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bartlett_window_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..05f8e3fb21f21825abad1674ea2aebef79db4c43
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/bartlett_window_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API bartlett_window {
+  using schema = at::Tensor (int64_t, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bartlett_window")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API bartlett_window_periodic {
+  using schema = at::Tensor (int64_t, bool, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bartlett_window")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API bartlett_window_out {
+  using schema = at::Tensor & (int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bartlett_window")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bartlett_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(int64_t window_length, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, at::Tensor & out);
+};
+
+struct TORCH_API bartlett_window_periodic_out {
+  using schema = at::Tensor & (int64_t, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bartlett_window")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "periodic_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bartlett_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(int64_t window_length, bool periodic, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t window_length, bool periodic, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..75b5611a17a25a95b08673568ad2e9ec36fbb4b9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_stats_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_stats(const at::Tensor & input, double eps);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..16f983a8db9ffcd41209e972cd2749aeae73d134
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor ceil(const at::Tensor & self);
+TORCH_API at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & ceil_(at::Tensor & self);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/concatenate_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/concatenate_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5bf7b7b8e829d39a4d534dcf88629ac06cadf580
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/concatenate_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor concatenate(at::TensorList tensors, int64_t dim=0);
+TORCH_API at::Tensor & concatenate_out(at::TensorList tensors, int64_t dim, at::Tensor & out);
+TORCH_API at::Tensor concatenate(at::TensorList tensors, at::Dimname dim);
+TORCH_API at::Tensor & concatenate_out(at::TensorList tensors, at::Dimname dim, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/dot_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/dot_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e447c990cd7a3d8b37a2d8a0fca5f04ab54f083c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/dot_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/dot_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/dot_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e447c990cd7a3d8b37a2d8a0fca5f04ab54f083c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/dot_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor dot(const at::Tensor & self, const at::Tensor & tensor);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..0108501038e51dc9e0cd35c74c5d3f2e2d43a302
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fbgemm_linear_int8_weight(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..e011fe80c204dbe15109ae566539ecc9f34f68b1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gather_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor gather_backward(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5615ab89f3eaf2e83c745e344afeb74c8bd4ffd4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & geometric_(at::Tensor & self, double p, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..359ba036e171128210e518de814c615484bca24d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/gt_meta_dispatch.h
@@ -0,0 +1,30 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor gt(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor gt(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gt_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & gt_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & gt_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace meta
+} // namespace at
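Reviewer note: the at::meta:: declarations above are the shape/dtype-inference entry points used with "meta" tensors (metadata but no storage). A minimal sketch (illustrative only, standard libtorch assumed):

#include <ATen/ATen.h>

void gt_meta_demo() {
  // Meta tensors let structured ops like gt run shape inference without data.
  at::Tensor a = at::empty({8, 1}, at::TensorOptions().device(at::kMeta));
  at::Tensor b = at::empty({1, 4}, at::TensorOptions().device(at::kMeta));
  at::Tensor c = at::gt(a, b);  // meta result: broadcast sizes {8, 4}, dtype Bool, no data
}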
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..b5cd615ecd35730f9e17cf9663799d7dab3ec000
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/i0_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor i0(const at::Tensor & self);
+TORCH_API at::Tensor & i0_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a3e479bdccd111a7f1d336dda768241b9c8919bf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/igammac_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API igammac_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::igammac")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "igammac.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API igammac {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::igammac")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "igammac(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API igammac_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::igammac_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "igammac_(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1bea57a3b35a2e987a85bcfb07abb6c344d8c34
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_distributed_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API bool is_distributed(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf.h
new file mode 100644
index 0000000000000000000000000000000000000000..a3942058f67c05cbe2dd96ecc9d770af87569804
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/isneginf_ops.h>
+
+namespace at {
+
+
+// aten::isneginf(Tensor self) -> Tensor
+inline at::Tensor isneginf(const at::Tensor & self) {
+    return at::_ops::isneginf::call(self);
+}
+
+// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::isneginf_out::call(self, out);
+}
+// aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::isneginf_out::call(self, out);
+}
+
+}
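Reviewer note: each at::_ops entry (like igammac above) exposes `call` for normal dispatch and `redispatch` for re-entering dispatch below a given key set; the inline wrappers in files like isneginf.h simply forward to `call`. A minimal sketch (illustrative only, standard libtorch assumed):

#include <ATen/ATen.h>
#include <limits>

void ops_call_demo() {
  at::Tensor t = at::full({3}, -std::numeric_limits<double>::infinity());
  at::Tensor a = at::isneginf(t);              // public inline wrapper
  at::Tensor b = at::_ops::isneginf::call(t);  // what the wrapper forwards to
}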
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h
new file mode 100644
index 0000000000000000000000000000000000000000..d6e96599a7505352bd331e7bdfc0d4f9d24ca8bb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kaiser_window.h
@@ -0,0 +1,79 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/kaiser_window_ops.h>
+
+namespace at {
+
+
+// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options={}) {
+    return at::_ops::kaiser_window::call(window_length, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::kaiser_window::call(window_length, dtype, layout, device, pin_memory);
+}
+
+// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options={}) {
+    return at::_ops::kaiser_window_periodic::call(window_length, periodic, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::kaiser_window_periodic::call(window_length, periodic, dtype, layout, device, pin_memory);
+}
+
+// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options={}) {
+    return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+    return at::_ops::kaiser_window_beta::call(window_length, periodic, beta, dtype, layout, device, pin_memory);
+}
+
+// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length) {
+    return at::_ops::kaiser_window_out::call(window_length, out);
+}
+// aten::kaiser_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_outf(int64_t window_length, at::Tensor & out) {
+    return at::_ops::kaiser_window_out::call(window_length, out);
+}
+
+// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic) {
+    return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out);
+}
+// aten::kaiser_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, at::Tensor & out) {
+    return at::_ops::kaiser_window_periodic_out::call(window_length, periodic, out);
+}
+
+// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_out(at::Tensor & out, int64_t window_length, bool periodic, double beta) {
+    return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out);
+}
+// aten::kaiser_window.beta_out(int window_length, bool periodic, float beta, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & kaiser_window_outf(int64_t window_length, bool periodic, double beta, at::Tensor & out) {
+    return at::_ops::kaiser_window_beta_out::call(window_length, periodic, beta, out);
+}
+
+}
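Reviewer note: the three kaiser_window schema overloads above (default, .periodic, .beta) as seen from user code. A minimal sketch (illustrative only, standard libtorch assumed):

#include <ATen/ATen.h>

void kaiser_window_demo() {
  at::Tensor a = at::kaiser_window(256);                      // defaults: periodic=true, beta=12.0
  at::Tensor b = at::kaiser_window(256, /*periodic=*/false);  // .periodic overload
  at::Tensor c = at::kaiser_window(256, /*periodic=*/true, /*beta=*/8.0,
                                   at::TensorOptions().dtype(at::kDouble));  // .beta overload
}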
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..5fb7386712a7fe8a0f421f01f2909832ad6481a1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/kthvalue_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> kthvalue(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> kthvalue_outf(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ad560922ed827d4eeac753bd790fefe2f8b7ce94
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lcm_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lcm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API lcm {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lcm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lcm(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+struct TORCH_API lcm_ {
+  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lcm_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
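Reviewer note: the `_out` / `_outf` pairs visible in kthvalue and lcm above differ only in argument order: `_out` takes the output(s) first, `_outf` keeps the schema order with outputs last. A minimal sketch (illustrative only, standard libtorch assumed):

#include <ATen/ATen.h>

void out_variant_demo() {
  at::Tensor a = at::arange(1, 5);             // {1, 2, 3, 4}, int64
  at::Tensor b = at::full({4}, 6, at::kLong);
  at::Tensor out = at::empty({4}, at::kLong);
  at::lcm_out(out, a, b);   // output-first spelling
  at::lcm_outf(a, b, out);  // schema-order spelling; same kernel underneath
}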
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API lcm { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lcm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lcm(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API lcm_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lcm_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu.h new file mode 100644 index 0000000000000000000000000000000000000000..0498bcc8ba3918c491fad6fd82444785bf349aef --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu_out::call(self, negative_slope, out); +} +// aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out) { + return at::_ops::leaky_relu_out::call(self, negative_slope, out); +} + +// aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor +inline at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01) { + return at::_ops::leaky_relu::call(self, negative_slope); +} + +// aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..65e3dfc3370c217b44e6dd9448b853143804901a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_cond_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_cond(const at::Tensor & self, const c10::optional<at::Scalar> & p=c10::nullopt);
+TORCH_API at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p=c10::nullopt);
+TORCH_API at::Tensor & linalg_cond_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::Tensor & out);
+TORCH_API at::Tensor linalg_cond(const at::Tensor & self, c10::string_view p);
+TORCH_API at::Tensor & linalg_cond_out(at::Tensor & out, const at::Tensor & self, c10::string_view p);
+TORCH_API at::Tensor & linalg_cond_outf(const at::Tensor & self, c10::string_view p, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4e4195c83ac435051d55bf92aa4880bbf801ff50
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/linalg_ldl_solve_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_linalg_ldl_solve_out : public at::meta::structured_linalg_ldl_solve {
+void impl(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4238e93a16d9ceec7b2c5ba9680dfee8342a0b06
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond=c10::nullopt, c10::optional<c10::string_view> driver=c10::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> linalg_lstsq_out(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
+} // namespace native
+} // namespace at
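Reviewer note: a minimal sketch of unpacking the four-tensor tuple that the linalg_lstsq declaration above returns (illustrative only, standard libtorch and C++17 assumed):

#include <ATen/ATen.h>

void lstsq_demo() {
  at::Tensor A = at::randn({6, 3});
  at::Tensor B = at::randn({6, 2});
  auto [solution, residuals, rank, singular_values] = at::linalg_lstsq(A, B);
  // With the CPU default driver ("gelsy"), residuals and singular_values come
  // back empty; pass rcond/driver explicitly to request them.
}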
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0336f0b359be4b490e5e3fb87fbf465d91b1fccf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vander_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor linalg_vander(const at::Tensor & x, c10::optional<int64_t> N=c10::nullopt);
+TORCH_API at::Tensor linalg_vander_symint(const at::Tensor & x, c10::optional<c10::SymInt> N=c10::nullopt);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ab1a87fbe5802df04b22be802c25355b1187f7d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor log_normal(const at::Tensor & self, double mean=1, double std=2, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & log_normal_out(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out);
+TORCH_API at::Tensor & log_normal_(at::Tensor & self, double mean=1, double std=2, c10::optional<at::Generator> generator=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..f7312d370d55e8dab6efea20fd6c723ba311252b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log_normal_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API log_normal_ {
+  using schema = at::Tensor & (at::Tensor &, double, double, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_normal_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator);
+};
+
+struct TORCH_API log_normal_out {
+  using schema = at::Tensor & (const at::Tensor &, double, double, c10::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_normal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_normal.out(Tensor self, float mean=1, float std=2, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator, at::Tensor & out);
+};
+
+struct TORCH_API log_normal {
+  using schema = at::Tensor (const at::Tensor &, double, double, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_normal")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_normal(Tensor self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator);
+};
+
+}} // namespace at::_ops
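Reviewer note: the three log_normal entries above are the in-place, out=, and functional spellings of one sampler (the functional and out= forms exist mainly to give the functionalization pass a mutation-free target). A minimal sketch via the tensor method (illustrative only; mean/std parametrize the underlying normal distribution):

#include <ATen/ATen.h>

void log_normal_demo() {
  at::Tensor x = at::empty({4});
  x.log_normal_();           // defaults: mean=1, std=2
  x.log_normal_(0.0, 0.25);  // explicit parameters
}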
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..24abd8d61c504e34ffe5a93a27ce3ab59b649b52
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logical_or_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor & logical_or_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & logical_or_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..322dffc2fbb5f961743483b844984c9018ba91ef
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim=false);
+TORCH_API at::Tensor & logsumexp_out(at::Tensor & out, const at::Tensor & self, at::DimnameList dim, bool keepdim=false);
+TORCH_API at::Tensor & logsumexp_outf(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..defb4de9f624422ee47c381a1ae2b13b9b25bae9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve.h
new file mode 100644
index 0000000000000000000000000000000000000000..b1c5b1ea2c5fec46bb751acac52beb86db93c7d8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lu_solve.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/lu_solve_ops.h>
+
+namespace at {
+
+
+// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lu_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
+    return at::_ops::lu_solve_out::call(self, LU_data, LU_pivots, out);
+}
+// aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lu_solve_outf(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots, at::Tensor & out) {
+    return at::_ops::lu_solve_out::call(self, LU_data, LU_pivots, out);
+}
+
+// aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor
+inline at::Tensor lu_solve(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
+    return at::_ops::lu_solve::call(self, LU_data, LU_pivots);
+}
+
+}
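Reviewer note: a minimal sketch of the lu_solve entry above (illustrative only; in current PyTorch at::linalg_lu_solve is the preferred spelling, with at::linalg_lu_factor producing the inputs, but the generated bindings above remain):

#include <ATen/ATen.h>

void lu_solve_demo() {
  at::Tensor A = at::randn({3, 3});
  at::Tensor b = at::randn({3, 1});
  auto [LU, pivots] = at::linalg_lu_factor(A);
  at::Tensor x = at::lu_solve(b, LU, pivots);  // solves A x = b
}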
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mvlgamma.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mvlgamma.h new file mode 100644 index 0000000000000000000000000000000000000000..351c77a467065831678b85ae82fce72c38a66e35 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mvlgamma.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mvlgamma_out(at::Tensor & out, const at::Tensor & self, int64_t p) { + return at::_ops::mvlgamma_out::call(self, p, out); +} +// aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & mvlgamma_outf(const at::Tensor & self, int64_t p, at::Tensor & out) { + return at::_ops::mvlgamma_out::call(self, p, out); +} + +// aten::mvlgamma(Tensor self, int p) -> Tensor +inline at::Tensor mvlgamma(const at::Tensor & self, int64_t p) { + return at::_ops::mvlgamma::call(self, p); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bfa7c81872489faadb894545b47cc4dd56be8a60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & permute_copy_out(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out); +TORCH_API at::Tensor permute_copy(const at::Tensor & self, at::IntArrayRef dims); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5f5c20b6c98efc5a62b6c75422decb0fd877a608 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pixel_shuffle_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & pixel_shuffle_out(const at::Tensor & self, int64_t upscale_factor, at::Tensor & out); +TORCH_API at::Tensor pixel_shuffle_cpu(const at::Tensor & self, int64_t upscale_factor); +TORCH_API at::Tensor math_pixel_shuffle(const at::Tensor & self, int64_t upscale_factor); +} // namespace native +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..036bfc2116166dc587516768b73662991206605a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/pow_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,27 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent);
+TORCH_API at::Tensor & pow_(at::Tensor & self, const at::Tensor & exponent);
+TORCH_API at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent);
+TORCH_API at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent);
+TORCH_API at::Tensor & pow_(at::Tensor & self, const at::Scalar & exponent);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..ccdc074aa7be93e92496184d2ed68cc25d0b6308
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & quantize_per_tensor_dynamic_out(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out);
+TORCH_API at::Tensor quantize_per_tensor_dynamic(const at::Tensor & self, at::ScalarType dtype, bool reduce_range);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randperm_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randperm_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9a65aa3b0f7e46d67c1163332c0d3426d3ffb38
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randperm_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,34 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor randperm(int64_t n, at::TensorOptions options=at::kLong);
+TORCH_API at::Tensor randperm(int64_t n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor randperm_symint(c10::SymInt n, at::TensorOptions options=at::kLong);
+TORCH_API at::Tensor randperm_symint(c10::SymInt n, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor & randperm_out(at::Tensor & out, int64_t n);
+TORCH_API at::Tensor & randperm_outf(int64_t n, at::Tensor & out);
+TORCH_API at::Tensor & randperm_symint_out(at::Tensor & out, c10::SymInt n);
+TORCH_API at::Tensor & randperm_symint_outf(c10::SymInt n, at::Tensor & out);
+TORCH_API at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong);
+TORCH_API at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+TORCH_API at::Tensor randperm_symint(c10::SymInt n, c10::optional<at::Generator> generator, at::TensorOptions options=at::kLong);
+TORCH_API at::Tensor randperm_symint(c10::SymInt n, c10::optional<at::Generator> generator, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca463b5ea7ad1f5f8c56c9bcdb89d8baacaf6ab8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & range_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step);
+TORCH_API at::Tensor & range_outf(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
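Reviewer note: the randperm overloads above default their TensorOptions to at::kLong, matching the schema's int64 result; the Generator overloads make sampling reproducible. A minimal sketch (illustrative only; assumes the at::detail::createCPUGenerator helper shipped with libtorch):

#include <ATen/ATen.h>
#include <ATen/CPUGeneratorImpl.h>

void randperm_demo() {
  at::Tensor p = at::randperm(10);  // int64 permutation of 0..9
  at::Generator gen = at::detail::createCPUGenerator(/*seed_val=*/42);
  at::Tensor q = at::randperm(10, gen);  // reproducible across runs
}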
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..77e98257def78cb0657b1ae6ede7e8c08b0a3c5c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/range_meta_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & range_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, const at::Scalar & step);
+TORCH_API at::Tensor & range_outf(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..47b9d11fb1a79d27b64486b22fe6651746856a83
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/slice_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & slice_copy_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<int64_t> start=c10::nullopt, c10::optional<int64_t> end=c10::nullopt, int64_t step=1);
+TORCH_API at::Tensor & slice_copy_outf(const at::Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step, at::Tensor & out);
+TORCH_API at::Tensor & slice_copy_symint_out(at::Tensor & out, const at::Tensor & self, int64_t dim=0, c10::optional<c10::SymInt> start=c10::nullopt, c10::optional<c10::SymInt> end=c10::nullopt, c10::SymInt step=1);
+TORCH_API at::Tensor & slice_copy_symint_outf(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..14d9612f745c57fb07bc8fe3ef4a38629e156359
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_coo_tensor_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options={}, c10::optional<bool> is_coalesced=c10::nullopt);
+TORCH_API at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced);
+TORCH_API at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}, c10::optional<bool> is_coalesced=c10::nullopt);
+TORCH_API at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..44801fc3575d5356dc4e0b964889dac29c23acb7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
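Reviewer note: the two sparse_coo_tensor overload families above differ in whether the dense shape is inferred from the indices or given explicitly. A minimal sketch (illustrative only, standard libtorch assumed):

#include <ATen/ATen.h>

void sparse_coo_demo() {
  at::Tensor indices = at::stack({at::arange(3), at::arange(3)});  // 2 x nnz (diagonal entries)
  at::Tensor values  = at::ones({3});
  at::Tensor s1 = at::sparse_coo_tensor(indices, values);          // size inferred: {3, 3}
  at::Tensor s2 = at::sparse_coo_tensor(indices, values, {5, 5});  // explicit size
}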
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a4be0141f3a2594547c4ad9817a921e650819985
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_expit_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor special_expit(const at::Tensor & self);
+TORCH_API at::Tensor & special_expit_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_expit_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..434fb83fb492f190c4ba85b19f44c19f69ad1433
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_h_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor special_hermite_polynomial_h(const at::Scalar & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_hermite_polynomial_h_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+TORCH_API at::Tensor special_hermite_polynomial_h(const at::Tensor & x, const at::Scalar & n);
+TORCH_API at::Tensor & special_hermite_polynomial_h_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n);
+TORCH_API at::Tensor & special_hermite_polynomial_h_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..397b8eebf6a5c193fe9edd8393c36566a4bc4bd6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_ndtri(const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8ed8dc8aa2c587719e4579bf175c0c1367086137
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor special_shifted_chebyshev_polynomial_u(const at::Scalar & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Scalar & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+TORCH_API at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Scalar & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Scalar & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..00e4d99047fce7b296a4d9db81564ad3d6746e3d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_w_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_shifted_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/stack_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/stack_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e4fb3f8843d78df9f69ff6e0208480a535261435
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/stack_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor stack(at::TensorList tensors, int64_t dim=0);
+TORCH_API at::Tensor & stack_out(at::TensorList tensors, int64_t dim, at::Tensor & out);
+} // namespace native
+} // namespace at
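
stack_native.h above declares the default-backend kernels behind at::stack; a hedged sketch of the public API they implement (assumes a libtorch build; shapes are illustrative):

    #include <torch/torch.h>

    int main() {
      torch::Tensor a = torch::zeros({2, 3});
      torch::Tensor b = torch::ones({2, 3});
      // Functional form: stacks along a new leading dim -> shape {2, 2, 3}.
      torch::Tensor s = at::stack({a, b}, /*dim=*/0);
      // out= form writes into a preallocated tensor.
      torch::Tensor out = torch::empty({2, 2, 3});
      at::stack_out(out, {a, b}, /*dim=*/0);
      return 0;
    }
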
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dec364e687427a1546e8d767fe7776e905a3de2f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/std_mean_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> std_mean_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, at::OptionalIntArrayRef dim=c10::nullopt, const c10::optional<at::Scalar> & correction=c10::nullopt, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> std_mean_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out0, at::Tensor & out1);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..56bb86216b37b67225000ae99e652919f75b38bf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_storage_offset_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API c10::SymInt sym_storage_offset(const at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..9610b6b562cb275c46b5993099a1c27af1a78100
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_upsample_linear1d : public at::impl::MetaBase {
+
+
+    void meta(const at::Tensor & self, at::ArrayRef<int64_t> output_size, bool align_corners, c10::optional<double> scales);
+};
+
+} // namespace meta
+} // namespace at
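
The std_mean_out/std_mean_outf declarations above back the fused std+mean reduction; a sketch of the functional form most callers use (assumes libtorch; correction=1 is Bessel's correction, and the shape is arbitrary):

    #include <torch/torch.h>

    int main() {
      torch::Tensor x = torch::randn({4, 5});
      // Returns (std, mean) in one pass over dim 1.
      auto [stddev, mean] = at::std_mean(x, /*dim=*/{1}, /*correction=*/1, /*keepdim=*/false);
      return 0;
    }
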
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..73a3e8977a44392b6e1c3994f6d377e3a1a106b7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest3d_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/upsample_nearest3d_backward_ops.h>
+
+namespace at {
+
+
+// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_nearest3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_nearest3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & upsample_nearest3d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & upsample_nearest3d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input) {
+    return at::_ops::upsample_nearest3d_backward_grad_input::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input);
+  }
+}
+
+// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_nearest3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_nearest3d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales_d, scales_h, scales_w);
+  }
+}
+
+// aten::upsample_nearest3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
+inline at::Tensor upsample_nearest3d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor upsample_nearest3d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d=c10::nullopt, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt) {
+    return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
+  }
+}
+
+}
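
The header above is normally invoked by autograd rather than by hand, but the functional variant can be exercised directly; a sketch with made-up NCDHW shapes (assumes libtorch; the scale factors default to c10::nullopt):

    #include <torch/torch.h>

    int main() {
      // grad_output has shape {N, C, D_out, H_out, W_out} = {2, 3, 8, 16, 16}.
      torch::Tensor grad_out = torch::randn({2, 3, 8, 16, 16});
      // input_size is the full NCDHW shape of the forward input.
      torch::Tensor grad_in = at::upsample_nearest3d_backward(
          grad_out, /*output_size=*/{8, 16, 16}, /*input_size=*/{2, 3, 4, 8, 8});
      return 0;
    }
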
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..91fec458ab91b112c78fd1fd6cb18955c469d394
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor view(const at::Tensor & self, at::IntArrayRef size);
+TORCH_API at::Tensor view_symint(const at::Tensor & self, c10::SymIntArrayRef size);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b3f3cd53a586a6dbd1954b2b0a2aaed364277ab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor view(const at::Tensor & self, at::IntArrayRef size);
+TORCH_API at::Tensor view_symint(const at::Tensor & self, c10::SymIntArrayRef size);
+
+} // namespace cuda
+} // namespace at
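
These per-backend headers expose the kernel behind Tensor::view without going through the dispatcher; a sketch of the difference (assumes libtorch; direct at::cpu:: calls skip autograd and device checks, so they are only safe on plain CPU tensors):

    #include <ATen/ops/view_cpu_dispatch.h>
    #include <torch/torch.h>

    int main() {
      torch::Tensor t = torch::arange(6);
      torch::Tensor v1 = t.view({2, 3});           // normal dispatched call
      torch::Tensor v2 = at::cpu::view(t, {2, 3}); // direct CPU kernel call
      return 0;
    }
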