diff --git a/ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cca957c7f0dcfe96c1f0368c399477b294e055a8
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b986e26c0ff9db848ee5e8933f6c4cf0831e6879f7f8ac3f5d50d839efc97a8
+size 33555612
diff --git a/ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..24705e795be938765b0b755608e8ab2ac8396a21
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25ccb640ce7a27d3765f84e8ed48b662caaa5c96a525a13dca2c13fd99c49478
+size 33555533
diff --git a/ckpts/universal/global_step120/zero/26.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/26.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9597174a38275b660882d171518cb2fdc4f8d719
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/26.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e046ce68813bbe4b74cb0c29f5a255d9b159e1469cae20d7c6b4b0b107e249af
+size 33555612
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..cb340b513915e6b58b359f1683f8d949ebc10214
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _autocast_to_reduced_precision {
+  using schema = at::Tensor (const at::Tensor &, bool, bool, at::ScalarType, at::ScalarType);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_autocast_to_reduced_precision")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype);
+};
+
+}} // namespace at::_ops
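// ---------------------------------------------------------------------------
// Editor's note (not part of the diff): a minimal usage sketch of the
// generated Operator.h pattern above. Each *_ops.h header exposes one struct
// per overload; call() runs the full dispatcher, while redispatch() takes an
// explicit c10::DispatchKeySet and continues dispatch below the current key.
// `t` here is an assumed, pre-existing at::Tensor.
//
//   #include <ATen/ops/_autocast_to_reduced_precision_ops.h>
//
//   at::Tensor reduced = at::_ops::_autocast_to_reduced_precision::call(
//       t, /*cuda_enabled=*/true, /*cpu_enabled=*/true,
//       /*cuda_dtype=*/at::kHalf, /*cpu_dtype=*/at::kBFloat16);
// ---------------------------------------------------------------------------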
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper.h
new file mode 100644
index 0000000000000000000000000000000000000000..811beb2d74d6b7415a76fee90b3082b79c2d63a5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cholesky_solve_helper.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_cholesky_solve_helper_ops.h>
+
+namespace at {
+
+
+// aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor
+inline at::Tensor _cholesky_solve_helper(const at::Tensor & self, const at::Tensor & A, bool upper) {
+    return at::_ops::_cholesky_solve_helper::call(self, A, upper);
+}
+
+// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _cholesky_solve_helper_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & A, bool upper) {
+    return at::_ops::_cholesky_solve_helper_out::call(self, A, upper, out);
+}
+// aten::_cholesky_solve_helper.out(Tensor self, Tensor A, bool upper, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _cholesky_solve_helper_outf(const at::Tensor & self, const at::Tensor & A, bool upper, at::Tensor & out) {
+    return at::_ops::_cholesky_solve_helper_out::call(self, A, upper, out);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_compute_linear_combination_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_compute_linear_combination_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5b59976642cf895ff77a7a285249c40a6cfc072
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_compute_linear_combination_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _compute_linear_combination {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_compute_linear_combination")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & coefficients);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients);
+};
+
+struct TORCH_API _compute_linear_combination_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_compute_linear_combination")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & coefficients, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5fafbcdec1593de4b5a5509005a806efbeb4fbcd
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _cufft_get_plan_cache_size {
+  using schema = int64_t (at::DeviceIndex);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_cufft_get_plan_cache_size")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_cufft_get_plan_cache_size(DeviceIndex device_index) -> int")
+  static int64_t call(at::DeviceIndex device_index);
+  static int64_t redispatch(c10::DispatchKeySet dispatchKeySet, at::DeviceIndex device_index);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b404e6115e93d3d4a8f09e61006f6a1400e48a4f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_debug_has_internal_overlap_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API int64_t _debug_has_internal_overlap(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b2f6197eee49d9f28c3b81431eef18699efbdaf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, at::TensorList weights);
+TORCH_API void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, at::TensorList weights);
+TORCH_API ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
+TORCH_API void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_norm_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_norm_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a7f8ece3c3fced9110d63a4030081c6283958d68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_norm_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _foreach_norm_Scalar {
+  using schema = ::std::vector<at::Tensor> (at::TensorList, const at::Scalar &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_norm.Scalar(Tensor[] self, Scalar ord=2) -> Tensor[]")
+  static ::std::vector<at::Tensor> call(at::TensorList self, const at::Scalar & ord);
+  static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord);
+};
+
+struct TORCH_API _foreach_norm_Scalar_out {
+  using schema = void (at::TensorList, const at::Scalar &, at::TensorList);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_norm")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_norm.Scalar_out(Tensor[] self, Scalar ord=2, *, Tensor(a!)[] out) -> ()")
+  static void call(at::TensorList self, const at::Scalar & ord, at::TensorList out);
+  static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, const at::Scalar & ord, at::TensorList out);
+};
+
+}} // namespace at::_ops
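// ---------------------------------------------------------------------------
// Editor's note (sketch, not part of the diff): the _foreach_* ops declared
// above act on whole tensor lists in a single dispatcher call, e.g.:
//
//   std::vector<at::Tensor> params = {at::randn({4}), at::randn({4})};
//   std::vector<at::Tensor> norms = at::_foreach_norm(params, /*ord=*/2);
//   at::_foreach_round_(params);  // in-place variant mutates the list
//
// The Scalar_out overload writes into a caller-provided TensorList, which is
// why its schema returns `()` rather than `Tensor[]`.
// ---------------------------------------------------------------------------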
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a68be9ae458e49a2659df51e7e3e02b21b6e506
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_round_cpu_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_round(at::TensorList self);
+TORCH_API void _foreach_round_(at::TensorList self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7fda9572c3d229e49dc94c440cf196662f650104
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_is_any_true_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _is_any_true {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_is_any_true")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_is_any_true(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7d03963449be645459df4ef6ce709c7b97aa6739
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _make_per_channel_quantized_tensor {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_make_per_channel_quantized_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis);
+};
+
+struct TORCH_API _make_per_channel_quantized_tensor_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_make_per_channel_quantized_tensor")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_make_per_channel_quantized_tensor.out(Tensor self, Tensor scale, Tensor zero_point, int axis, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out);
+};
+
+}} // namespace at::_ops
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list.h new file mode 100644 index 0000000000000000000000000000000000000000..82b1ad48811acaaccd0d1595ffed5d4ca152cae7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_from_tensor_list.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_nested_tensor_from_tensor_list(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor +inline at::Tensor _nested_tensor_from_tensor_list(at::TensorList list, c10::optional dtype=c10::nullopt, c10::optional layout=c10::nullopt, c10::optional device=c10::nullopt, c10::optional pin_memory=c10::nullopt) { + return at::_ops::_nested_tensor_from_tensor_list::call(list, dtype, layout, device, pin_memory); +} + +// aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _nested_tensor_from_tensor_list_out(at::Tensor & out, at::TensorList list, c10::optional dtype=c10::nullopt, c10::optional layout=c10::nullopt, c10::optional device=c10::nullopt, c10::optional pin_memory=c10::nullopt) { + return at::_ops::_nested_tensor_from_tensor_list_out::call(list, dtype, layout, device, pin_memory, out); +} +// aten::_nested_tensor_from_tensor_list.out(Tensor[] list, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _nested_tensor_from_tensor_list_outf(at::TensorList list, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, at::Tensor & out) { + return at::_ops::_nested_tensor_from_tensor_list_out::call(list, dtype, layout, device, pin_memory, out); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..535a0842e791d840f0a16f1fa61096339a7fb17a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f458d70733b0a423c6361bf8ec6f6b8d619a671
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _reshape_alias_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride);
+TORCH_API at::Tensor & _reshape_alias_copy_outf(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, at::Tensor & out);
+TORCH_API at::Tensor & _reshape_alias_copy_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride);
+TORCH_API at::Tensor & _reshape_alias_copy_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..cd2dbb7d9475ac9d7eddd19d9d561897007efb2f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias_cuda_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride);
+TORCH_API at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sample_dirichlet_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sample_dirichlet_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ca307996c68fe77546a3dc665c7df991dd07ef8c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sample_dirichlet_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _sample_dirichlet {
+  using schema = at::Tensor (const at::Tensor &, c10::optional<at::Generator>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sample_dirichlet")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::optional<at::Generator> generator);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator);
+};
+
+struct TORCH_API _sample_dirichlet_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::optional<at::Generator>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sample_dirichlet")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::Generator> generator, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_for_cpu_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_for_cpu_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..12f5b16add2363627a3bf6e418a1627a400974b8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_for_cpu_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_cpu(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p=0.0, bool is_causal=false, const c10::optional<at::Tensor> & attn_mask={}, c10::optional<double> scale=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fbb20fb0b8d714d2aa1891998a5f95ff243c46db
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_with_dims_meta_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options);
+TORCH_API at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..bebeeb126d48388d8ebf694aeea74cc0f2d33f66
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _test_autograd_multiple_dispatch_view_copy_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & _test_autograd_multiple_dispatch_view_copy_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..801b1fc1ca4ac3aa5f85dc5b87ce680d366934cb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _test_autograd_multiple_dispatch_view(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2f9d0441ea8602dd59eb24b5f49a9220a6aad07d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _validate_sparse_compressed_tensor_args(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout);
+} // namespace native
+} // namespace at
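// ---------------------------------------------------------------------------
// Editor's note (hedged sketch, not part of the diff): assuming the public
// at:: entry point generated from the native declaration above, the validator
// can be exercised on the components of a 2x3 CSR tensor; it throws if the
// indices are out of range or inconsistent with `size`:
//
//   at::Tensor crow = at::tensor({0, 2, 3}, at::kLong);  // compressed_indices
//   at::Tensor col  = at::tensor({0, 2, 1}, at::kLong);  // plain_indices
//   at::Tensor vals = at::tensor({1.f, 2.f, 3.f});
//   at::_validate_sparse_compressed_tensor_args(
//       crow, col, vals, /*size=*/{2, 3}, at::kSparseCsr);
// ---------------------------------------------------------------------------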
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a7dcd9fdf3dcee3c1ccecdba27f8943d9425a0b7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor & adaptive_avg_pool3d_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size);
+TORCH_API at::Tensor & adaptive_avg_pool3d_outf(const at::Tensor & self, at::IntArrayRef output_size, at::Tensor & out);
+TORCH_API at::Tensor & adaptive_avg_pool3d_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size);
+TORCH_API at::Tensor & adaptive_avg_pool3d_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adjoint_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adjoint_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..fb3cda9e9737af4299c50e23609805f41f64b08f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/adjoint_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API adjoint {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adjoint")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adjoint(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7785e6643a7327822b3fb18026f0fbfdd2e28781
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/arctanh_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor arctanh(const at::Tensor & self);
+TORCH_API at::Tensor & arctanh_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & arctanh_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & arctanh_(at::Tensor & self);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..bb6d387cee782a52fec92797509b4f329babb587
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/atan_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API atan {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API atan_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API atan_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::atan")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3aea664356d0e3a8182d93708177547d2a02bc3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API avg_pool2d_backward_grad_input {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool2d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
+};
+
+struct TORCH_API avg_pool2d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool2d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_solve.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_solve.h
new file mode 100644
index 0000000000000000000000000000000000000000..184497f12d6a4811622f47c58bb08e5cb1ef08b2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cholesky_solve.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cholesky_solve_ops.h>
+
+namespace at {
+
+
+// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cholesky_solve_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2, bool upper=false) {
+    return at::_ops::cholesky_solve_out::call(self, input2, upper, out);
+}
+// aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cholesky_solve_outf(const at::Tensor & self, const at::Tensor & input2, bool upper, at::Tensor & out) {
+    return at::_ops::cholesky_solve_out::call(self, input2, upper, out);
+}
+
+// aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor
+inline at::Tensor cholesky_solve(const at::Tensor & self, const at::Tensor & input2, bool upper=false) {
+    return at::_ops::cholesky_solve::call(self, input2, upper);
+}
+
+}
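// ---------------------------------------------------------------------------
// Editor's note (usage sketch, not part of the diff): cholesky_solve solves
// A X = B given a Cholesky factor of A (lower-triangular by default):
//
//   at::Tensor A = at::rand({3, 3});
//   A = at::matmul(A, A.t()) + 3 * at::eye(3);  // make A positive definite
//   at::Tensor L = at::linalg_cholesky(A);
//   at::Tensor B = at::rand({3, 2});
//   at::Tensor X = at::cholesky_solve(B, L, /*upper=*/false);
//   // at::allclose(at::matmul(A, X), B) should then hold
// ---------------------------------------------------------------------------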
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/combinations_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/combinations_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..19a66c44ef26352d236e5a49103faf3af61ba348
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/combinations_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor combinations(const at::Tensor & self, int64_t r=2, bool with_replacement=false);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices.h
new file mode 100644
index 0000000000000000000000000000000000000000..f50167a832a3808b34033868487c9f6cb2d3172e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/crow_indices_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3351ab2d648e017c1ee90bf925f8f39d3d9f183b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_convolution_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor cudnn_convolution(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
+TORCH_API at::Tensor cudnn_convolution_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32);
+TORCH_API at::Tensor & cudnn_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32);
+TORCH_API at::Tensor & cudnn_convolution_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out);
+TORCH_API at::Tensor & cudnn_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32);
+TORCH_API at::Tensor & cudnn_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8a8599208b31217cae97fa3f801065dd67c0b7a9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor exp2(const at::Tensor & self);
+TORCH_API at::Tensor & exp2_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & exp2_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & exp2_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
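// ---------------------------------------------------------------------------
// Editor's note (sketch, not part of the diff): *_cpu_dispatch.h and
// *_cuda_dispatch.h headers expose backend-specific entry points that bypass
// the dispatcher, e.g. at::cpu::exp2 invokes the CPU kernel directly:
//
//   at::Tensor x = at::arange(4, at::kFloat);  // CPU tensor assumed
//   at::Tensor y = at::cpu::exp2(x);           // no dispatcher round-trip
//
// These skip autograd and device checks, so ordinary user code should call
// at::exp2(x) instead.
// ---------------------------------------------------------------------------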
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..20a7f4db77c793f6fd673f1a7248b9e9cea7f4d1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_fp16_weight_fp32_activation_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fbgemm_linear_fp16_weight_fp32_activation(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2704b39f184b620509594d67052fd03f9fd20663
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmin_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor fmin(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmin_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a8a5a06a78573884dc4de447a89bde23b3d63ec
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/full_ops.h
@@ -0,0 +1,61 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API full_names {
+  using schema = at::Tensor (at::IntArrayRef, const at::Scalar &, c10::optional<at::DimnameList>, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::full")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API full {
+  using schema = at::Tensor (c10::SymIntArrayRef, const at::Scalar &, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::full")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+  static at::Tensor call(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+};
+
+struct TORCH_API full_out {
+  using schema = at::Tensor & (c10::SymIntArrayRef, const at::Scalar &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::full")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out);
+};
+
+struct TORCH_API full_names_out {
+  using schema = at::Tensor & (at::IntArrayRef, const at::Scalar &, c10::optional<at::DimnameList>, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::full")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "names_out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out);
+};
+
+}} // namespace at::_ops
out) -> Tensor(a!)") + static at::Tensor & call(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional names, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional names, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge.h new file mode 100644 index 0000000000000000000000000000000000000000..bad352c7138253dbf011b50bca2454781b6b3bea --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ge.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge_Scalar_out::call(self, other, out); +} +// aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ge_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) { + return at::_ops::ge_Scalar_out::call(self, other, out); +} + +// aten::ge.Scalar(Tensor self, Scalar other) -> Tensor +inline at::Tensor ge(const at::Tensor & self, const at::Scalar & other) { + return at::_ops::ge_Scalar::call(self, other); +} + +// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ge_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge_Tensor_out::call(self, other, out); +} +// aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ge_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::ge_Tensor_out::call(self, other, out); +} + +// aten::ge.Tensor(Tensor self, Tensor other) -> Tensor +inline at::Tensor ge(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::ge_Tensor::call(self, other); +} + +} diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ger_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ger_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1329ffbb662862ee4e443e5403305dafdc8a6ea6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ger_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ger_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ger_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1329ffbb662862ee4e443e5403305dafdc8a6ea6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/ger_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor ger(const at::Tensor & self, const at::Tensor & vec2);
+TORCH_API at::Tensor & ger_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2);
+TORCH_API at::Tensor & ger_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/greater_equal.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/greater_equal.h
new file mode 100644
index 0000000000000000000000000000000000000000..14be05f2c3f4735edb36e99fa51a7cf31bc281a0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/greater_equal.h
@@ -0,0 +1,53 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/greater_equal_ops.h>
+
+namespace at {
+
+
+// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::greater_equal_Scalar_out::call(self, other, out);
+}
+// aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Scalar & other, at::Tensor & out) {
+    return at::_ops::greater_equal_Scalar_out::call(self, other, out);
+}
+
+// aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor
+inline at::Tensor greater_equal(const at::Tensor & self, const at::Scalar & other) {
+    return at::_ops::greater_equal_Scalar::call(self, other);
+}
+
+// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & greater_equal_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::greater_equal_Tensor_out::call(self, other, out);
+}
+// aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & greater_equal_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) {
+    return at::_ops::greater_equal_Tensor_out::call(self, other, out);
+}
+
+// aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor
+inline at::Tensor greater_equal(const at::Tensor & self, const at::Tensor & other) {
+    return at::_ops::greater_equal_Tensor::call(self, other);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..7df1d154e1ec3b7475058ddf6e3f0ebaffcff6c6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val);
+TORCH_API at::Tensor & hardtanh_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e35dd4d2f6db18e8fa429df433c484d5ce1feabe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/index_select_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor index_select_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c3dabf7ad9273918075bf7175f1d5767a8f4eebe
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/inner_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor inner(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & inner_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+} // namespace native
+} // namespace at
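inner_native.h above declares the kernels behind at::inner, which contracts over the last dimension of both tensors; a hedged sketch of the public entry point (function name and shapes assumed):

  #include <ATen/ATen.h>

  void inner_example() {
    at::Tensor a = at::randn({4});
    at::Tensor b = at::randn({4});
    at::Tensor dot = at::inner(a, b);   // 0-dim result: sum_i a[i] * b[i]
    at::Tensor out = at::empty({});
    at::inner_out(out, a, b);           // out-variant backed by inner_out above
  }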
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a2a0c37e8f4292e49eb43edcba268ac87c394e9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_leaf.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/is_leaf_ops.h>
+
+namespace at {
+
+
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c89b52a061aa1972e6433b77e583383b8d32f96
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/l1_loss_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API l1_loss {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::l1_loss")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, int64_t reduction);
+};
+
+}} // namespace at::_ops
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..caa9a8c54f7256eaf657580873e6e7c3b6a058b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log1p { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log1p") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log1p(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API log1p_ { + using schema = at::Tensor & (at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log1p_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log1p_(Tensor(a!) self) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self); +}; + +struct TORCH_API log1p_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log1p") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d2b8eac109b20804ffb200e3322910ceb4e080d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2b8eac109b20804ffb200e3322910ceb4e080d4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lstm_cell {
+  using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, at::TensorList, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lstm_cell")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh);
+  static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh);
+};
+
+}} // namespace at::_ops
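The lstm_cell schema above takes hx as a two-element Tensor[] (hidden and cell state) and returns the updated pair; a minimal sketch with assumed sizes (batch 2, input 4, hidden 3):

  #include <ATen/ATen.h>
  #include <tuple>

  void lstm_cell_example() {
    at::Tensor input = at::randn({2, 4});
    at::Tensor hx = at::zeros({2, 3});
    at::Tensor cx = at::zeros({2, 3});
    at::Tensor w_ih = at::randn({12, 4});   // 4*hidden x input
    at::Tensor w_hh = at::randn({12, 3});   // 4*hidden x hidden
    auto hc = at::lstm_cell(input, {hx, cx}, w_ih, w_hh, {}, {});  // biases omitted
    at::Tensor h = std::get<0>(hc);
    at::Tensor c = std::get<1>(hc);
  }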
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c9010ecd31f38940df469869d36ed47a829ecc6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/masked_select_backward.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/masked_select_backward_ops.h>
+
+namespace at {
+
+
+// aten::masked_select_backward(Tensor grad, Tensor input, Tensor mask) -> Tensor
+inline at::Tensor masked_select_backward(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
+    return at::_ops::masked_select_backward::call(grad, input, mask);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..9e84e0da22a0334484763724815c4b74531c9f90
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_rnn_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API miopen_rnn {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_rnn")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state);
+};
+
+struct TORCH_API miopen_rnn_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, at::TensorList, int64_t, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, int64_t, int64_t, bool, double, bool, bool, at::IntArrayRef, const c10::optional<at::Tensor> &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_rnn")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_rnn.out(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4);
+};
+
+}} // namespace at::_ops
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..806e108437c6b578d888da3c02051172513ce3f7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor mish(const at::Tensor & self);
+TORCH_API at::Tensor & mish_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & mish_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & mish_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6986cd4175566a61dbc891668933aedba814b30d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mish_cuda_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor mish(const at::Tensor & self);
+TORCH_API at::Tensor & mish_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & mish_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & mish_(at::Tensor & self);
+
+} // namespace cuda
+} // namespace at
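Both dispatch headers above expose the same four entry points for the Mish activation, x * tanh(softplus(x)), on CPU and CUDA; a sketch of the device-neutral API (function name assumed):

  #include <ATen/ATen.h>

  void mish_example() {
    at::Tensor x = at::randn({4});
    at::Tensor y = at::mish(x);   // x * tanh(softplus(x))
    at::mish_(x);                 // in-place variant
  }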
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..99494775bfb89973642b2a4d8934658baf35ebee
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_linear_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> mkldnn_linear_backward_outf(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_backward_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_backward_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..befc09caef6cda860e8d81886285f4d7eec69a81
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool3d_backward_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API mkldnn_max_pool3d_backward {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_max_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_max_pool3d_backward(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor")
+  static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode);
+};
+
+struct TORCH_API mkldnn_max_pool3d_backward_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_max_pool3d_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_max_pool3d_backward.out(Tensor grad_output, Tensor output, Tensor input, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out);
+};
+
+}} // namespace at::_ops
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..aaebcf52053ef71d650c39eac04057f1f79d1ea9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_backward_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask); +TORCH_API ::std::tuple native_layer_norm_backward_symint(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional & weight, const c10::optional & bias, ::std::array output_mask); + +} // namespace cpu +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..9ba143b1c05655c4f6cffd03f3e70ab3ebb3a91f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/native_layer_norm_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b813312b0a9030af7c8cd918d9585f327e458e7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype);
+TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype);
+TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out);
+TORCH_API at::Tensor norm(const at::Tensor & self, const at::Scalar & p=2);
+TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & p=2);
+TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const at::Scalar & p, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..faf2a9ca6dbe27e90400960d39c9cb758a9694b8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/norm_cuda_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype);
+TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype);
+TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out);
+TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false);
+TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
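The two norm headers above split the overload set by dispatch key: the composite kernels cover the whole-tensor reductions, the CUDA ones the dim-wise reductions. A sketch of the public calls (function name and shapes assumed):

  #include <ATen/ATen.h>

  void norm_example() {
    at::Tensor m = at::randn({3, 4});
    at::Tensor frob = at::norm(m);                  // p defaults to 2
    at::Tensor rows = at::norm(m, 2, {1}, false);   // p=2 along dim 1, shape {3}
  }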
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr.h
new file mode 100644
index 0000000000000000000000000000000000000000..19c0e2f7e6b26ddf93878d3247b44a8865bc7fef
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/orgqr.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/orgqr_ops.h>
+
+namespace at {
+
+
+// aten::orgqr(Tensor self, Tensor input2) -> Tensor
+inline at::Tensor orgqr(const at::Tensor & self, const at::Tensor & input2) {
+    return at::_ops::orgqr::call(self, input2);
+}
+
+// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & orgqr_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & input2) {
+    return at::_ops::orgqr_out::call(self, input2, out);
+}
+// aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & orgqr_outf(const at::Tensor & self, const at::Tensor & input2, at::Tensor & out) {
+    return at::_ops::orgqr_out::call(self, input2, out);
+}
+
+}
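orgqr assembles the explicit Q factor from the Householder reflectors that geqrf produces, so the two are normally used together; a sketch with assumed shapes:

  #include <ATen/ATen.h>
  #include <tuple>

  void orgqr_example() {
    at::Tensor a = at::randn({5, 3});
    auto qr = at::geqrf(a);                                      // (reflectors, tau)
    at::Tensor q = at::orgqr(std::get<0>(qr), std::get<1>(qr));  // 5x3, orthonormal columns
  }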
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_zero_point.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_zero_point.h
new file mode 100644
index 0000000000000000000000000000000000000000..83c3fdd1056961e0e10a8a97c9ff92e941c7cf8b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/q_zero_point.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/q_zero_point_ops.h>
+
+namespace at {
+
+
+// aten::q_zero_point(Tensor self) -> int
+inline int64_t q_zero_point(const at::Tensor & self) {
+    return at::_ops::q_zero_point::call(self);
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_ops.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0b2e1a0c1e471b062623f236811c9caa9fbbc8d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/quantize_per_tensor_dynamic_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API quantize_per_tensor_dynamic {
+  using schema = at::Tensor (const at::Tensor &, at::ScalarType, bool);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantize_per_tensor_dynamic")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::ScalarType dtype, bool reduce_range);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range);
+};
+
+struct TORCH_API quantize_per_tensor_dynamic_out {
+  using schema = at::Tensor & (const at::Tensor &, at::ScalarType, bool, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::quantize_per_tensor_dynamic")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out);
+};
+
+}} // namespace at::_ops
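quantize_per_tensor_dynamic picks scale and zero point from the observed min/max of the input at call time, and q_zero_point (declared in the previous file) reads the chosen zero point back; a sketch, assuming a recent ATen build where this op is available:

  #include <ATen/ATen.h>

  void dynamic_quant_example() {
    at::Tensor x = at::randn({4});
    // Scale/zero-point derived from x's value range at call time.
    at::Tensor qx = at::quantize_per_tensor_dynamic(x, at::kQUInt8, /*reduce_range=*/false);
    int64_t zp = at::q_zero_point(qx);
  }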
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::ScalarType dtype, bool reduce_range, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..41c2a281a701afbf8c6f4f6c8f92f7cb6c8862ae --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeimplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size); +TORCH_API at::Tensor & randn_outf(at::IntArrayRef size, at::Tensor & out); +TORCH_API at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size); +TORCH_API at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, at::Tensor & out); +TORCH_API at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional generator); +TORCH_API at::Tensor & randn_outf(at::IntArrayRef size, c10::optional generator, at::Tensor & out); +TORCH_API at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator); +TORCH_API at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional generator, at::Tensor & out); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..4e8ffdf9ad4b4043be13a3497e36fc6519460c0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..4e8ffdf9ad4b4043be13a3497e36fc6519460c0b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad1d_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/reflection_pad1d_backward_ops.h>
+
+namespace at {
+
+
+// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
+  }
+}
+
+// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, c10::fromIntArrayRefSlow(padding), grad_input);
+  }
+}
+
+// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & reflection_pad1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & reflection_pad1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
+  }
+}
+
+// aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, SymInt[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+inline at::Tensor & reflection_pad1d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & reflection_pad1d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input) {
+    return at::_ops::reflection_pad1d_backward_grad_input::call(grad_output, self, padding, grad_input);
+  }
+}
+
+// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
+inline at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward::call(grad_output, self, c10::fromIntArrayRefSlow(padding));
+  }
+}
+
+// aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, SymInt[2] padding) -> Tensor
+inline at::Tensor reflection_pad1d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor reflection_pad1d_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
+    return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
+  }
+}
+
+}
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..65f507dfba36f9f5a4b8c980d2dd56cba2293532
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad2d_native.h
@@ -0,0 +1,25 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor reflection_pad2d_cpu(const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad2d_out_cpu(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out);
+TORCH_API at::Tensor reflection_pad2d_cuda(const at::Tensor & self, at::IntArrayRef padding);
+TORCH_API at::Tensor & reflection_pad2d_out_cuda(const at::Tensor & self, at::IntArrayRef padding, at::Tensor & out);
+TORCH_API at::Tensor reflection_pad2d_quantized_cpu(const at::Tensor & self, at::IntArrayRef padding);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_compositeimplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9ca3e1ffa90e2ae830644b69c8c6fef7f4e4e2ec
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/scatter_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src);
+TORCH_API at::Tensor scatter(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/select_backward.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/select_backward.h
new file mode 100644
index 0000000000000000000000000000000000000000..be51f278ca681417c5f9eee0acbd5b3401855105
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/select_backward.h
@@ -0,0 +1,91 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/select_backward_ops.h>
+
+namespace at {
+
+
+// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
+inline at::Tensor select_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
+    return at::_ops::select_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor select_backward(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
+    return at::_ops::select_backward::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index);
+  }
+}
+
+// aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor
+inline at::Tensor select_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
+    return at::_ops::select_backward::call(grad_output, input_sizes, dim, index);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor select_backward(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
+    return at::_ops::select_backward::call(grad_output, input_sizes, dim, index);
+  }
+}
+
+// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & select_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
+    return at::_ops::select_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & select_backward_out(at::Tensor & out, const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index) {
+    return at::_ops::select_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out);
+  }
+}
+
+// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & select_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index, at::Tensor & out) {
+    return at::_ops::select_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+  at::Tensor & select_backward_outf(const at::Tensor & grad_output, at::IntArrayRef input_sizes, int64_t dim, int64_t index, at::Tensor & out) {
+    return at::_ops::select_backward_out::call(grad_output, c10::fromIntArrayRefSlow(input_sizes), dim, index, out);
+  }
+}
+
+// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & select_backward_symint_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
+    return at::_ops::select_backward_out::call(grad_output, input_sizes, dim, index, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & select_backward_out(at::Tensor & out, const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
+    return at::_ops::select_backward_out::call(grad_output, input_sizes, dim, index, out);
+  }
+}
+
+// aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & select_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {
+    return at::_ops::select_backward_out::call(grad_output, input_sizes, dim, index, out);
+}
+namespace symint {
+  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+  at::Tensor & select_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out) {
+    return at::_ops::select_backward_out::call(grad_output, input_sizes, dim, index, out);
+  }
+}
+
+}
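select_backward scatters an incoming gradient back into the shape of the original input of select; a worked sketch for y = x.select(0, 2) with x of size {4, 5} (names and sizes assumed):

  #include <ATen/ATen.h>

  void select_backward_example() {
    at::Tensor grad_y = at::ones({5});
    at::Tensor grad_x = at::select_backward(grad_y, {4, 5}, /*dim=*/0, /*index=*/2);
    // grad_x has size {4, 5}: zeros everywhere except row 2, which holds grad_y.
  }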
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e614a6eb1dde781783a148f64d11fb159543b71
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sigmoid_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor sigmoid(const at::Tensor & self);
+TORCH_API at::Tensor & sigmoid_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e7b8d071054b8dfcf0fd54188b13920b1cceff69
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/softplus_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/softplus_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_softplus_out : public at::meta::structured_softplus {
+void impl(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold, const at::Tensor & out);
+};
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6b6ea186c66e42435ffef926eef280ebf72b279e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_resize_and_clear_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API const at::Tensor & sparse_resize_and_clear_(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_cuda_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f2806edb8eda61529f9f3311d43f98a76c16c41
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_bessel_j1_cuda_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor special_bessel_j1(const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_j1_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_bessel_j1_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..42cb5aae5e686fd8dcf846a62d80f0c2e111c95f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_t_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_chebyshev_polynomial_t(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_chebyshev_polynomial_t_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_chebyshev_polynomial_t_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..40d3fd1a0ea358b4ab6626f3f21c5f2ceabaf8ab
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_scaled_modified_bessel_k0_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_scaled_modified_bessel_k0(const at::Tensor & x);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_scaled_modified_bessel_k0(const at::Tensor & x);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..7b18842d2e184d53eae95419c95813752191b056
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_v_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor special_shifted_chebyshev_polynomial_v(const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_v_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_v_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_meta_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f29cb4d4f18f09da427b2640289159c72c06370f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sum_meta_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor sum(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & sum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & sum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd15e5e4ce49c3e24714ed029884c1d6d06239ce
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/sym_constrain_range_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API void sym_constrain_range(const at::Scalar & size, c10::optional<int64_t> min=c10::nullopt, c10::optional<int64_t> max=c10::nullopt);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_meta.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c862033b72f6dc126d9bfbe87625c504f33d5f4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_threshold_backward : public TensorIteratorBase {
+
+
+ void meta(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold);
+};
+
+} // namespace meta
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..a36768972ac1e792fe28e317c81ba69bfd186f3c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/trunc_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/trunc_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_trunc_out : public at::meta::structured_trunc {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor trunc_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & trunc_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & trunc_sparse_(at::Tensor & self);
+TORCH_API at::Tensor trunc_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & trunc_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & trunc_sparse_csr_(at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_native.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b35319b6b3a9eed962f1b5adc3e1a6845a18f17
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/upsample_nearest2d_meta.h>
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor upsample_nearest2d(const at::Tensor & input, at::OptionalIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors);
+struct TORCH_API structured_upsample_nearest2d_out_cpu : public at::meta::structured_upsample_nearest2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & out);
+};
+struct TORCH_API structured_upsample_nearest2d_out_cuda : public at::meta::structured_upsample_nearest2d {
+void impl(const at::Tensor & self, at::ArrayRef<int64_t> output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, const at::Tensor & out);
+};
+TORCH_API at::Tensor upsample_nearest2d_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+} // namespace native
+} // namespace at
diff --git a/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_cpu_dispatch.h b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d830e8bb2aa6e3b6f518e3168899d50c72a416c3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor view_as_real(const at::Tensor & self);
+
+} // namespace cpu
+} // namespace at