applied-ai-018 commited on
Commit
aad2d1b
·
verified ·
1 Parent(s): fe67dbb

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step20/zero/25.attention.dense.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step20/zero/25.attention.dense.weight/exp_avg_sq.pt +3 -0
  6. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_compositeexplicitautograd_dispatch.h +24 -0
  7. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat.h +39 -0
  8. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search_cuda_dispatch.h +23 -0
  9. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h +21 -0
  10. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_ops.h +50 -0
  11. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lgamma_native.h +25 -0
  12. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow.h +87 -0
  13. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_compositeexplicitautograd_dispatch.h +24 -0
  14. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data.h +39 -0
  15. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_native.h +21 -0
  16. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward.h +30 -0
  17. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_ops.h +28 -0
  18. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnz.h +26 -0
  19. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias.h +47 -0
  20. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output_meta_dispatch.h +24 -0
  21. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state.h +30 -0
  22. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_compositeexplicitautograd_dispatch.h +25 -0
  23. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_native.h +24 -0
  24. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_compositeexplicitautograd_dispatch.h +26 -0
  25. venv/lib/python3.10/site-packages/torch/include/ATen/ops/_version_native.h +21 -0
  26. venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h +24 -0
  27. venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_native.h +35 -0
  28. venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h +39 -0
  29. venv/lib/python3.10/site-packages/torch/include/ATen/ops/amax_cpu_dispatch.h +25 -0
  30. venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_native.h +23 -0
  31. venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_ops.h +39 -0
  32. venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_ops.h +50 -0
  33. venv/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_cuda_dispatch.h +26 -0
  34. venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp.h +63 -0
  35. venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h +26 -0
  36. venv/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce.h +26 -0
  37. venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_compositeexplicitautograd_dispatch.h +26 -0
  38. venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos.h +44 -0
  39. venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta.h +27 -0
  40. venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_similarity_compositeimplicitautograd_dispatch.h +23 -0
  41. venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_native.h +22 -0
  42. venv/lib/python3.10/site-packages/torch/include/ATen/ops/diff.h +39 -0
  43. venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_native.h +21 -0
  44. venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_ops.h +28 -0
  45. venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq_ops.h +39 -0
  46. venv/lib/python3.10/site-packages/torch/include/ATen/ops/full.h +131 -0
  47. venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h +25 -0
  48. venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h +26 -0
  49. venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  50. venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta.h +32 -0
ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63afb1725252102af0d44a32201d9adc57308a0cc664df747afbb06353ded103
3
+ size 50332828
ckpts/universal/global_step20/zero/18.attention.query_key_value.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6fc2446f23e5b2188303f46dd42a90e97d53c6a7ecd39d7e64f27272c0dbace
3
+ size 50332749
ckpts/universal/global_step20/zero/22.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57952091de2a0c3ea99f3aba93eb66382c80348caf99cbfe50349f28173d6703
3
+ size 33555612
ckpts/universal/global_step20/zero/25.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f23472630b9cab8bc0802e68f8dd357231655171f65e83bb761c9439b7e0302
3
+ size 16778396
ckpts/universal/global_step20/zero/25.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cae3d41ac684e116eddff628753c6320266e711f6169cd292093198e04df9f6d
3
+ size 16778411
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _cdist_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
21
+ TORCH_API at::Tensor & _cdist_backward_outf(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_chunk_cat.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_chunk_cat_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_chunk_cat(Tensor[] tensors, int dim, int num_chunks) -> Tensor
26
+ inline at::Tensor _chunk_cat(at::TensorList tensors, int64_t dim, int64_t num_chunks) {
27
+ return at::_ops::_chunk_cat::call(tensors, dim, num_chunks);
28
+ }
29
+
30
+ // aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _chunk_cat_out(at::Tensor & out, at::TensorList tensors, int64_t dim, int64_t num_chunks) {
32
+ return at::_ops::_chunk_cat_out::call(tensors, dim, num_chunks, out);
33
+ }
34
+ // aten::_chunk_cat.out(Tensor[] tensors, int dim, int num_chunks, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _chunk_cat_outf(at::TensorList tensors, int64_t dim, int64_t num_chunks, at::Tensor & out) {
36
+ return at::_ops::_chunk_cat_out::call(tensors, dim, num_chunks, out);
37
+ }
38
+
39
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cslt_sparse_mm_search_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API int64_t _cslt_sparse_mm_search(const at::Tensor & compressed_A, const at::Tensor & dense_B, const c10::optional<at::Tensor> & bias={}, const c10::optional<at::Tensor> & alpha={}, c10::optional<at::ScalarType> out_dtype=c10::nullopt, bool transpose_result=false);
21
+
22
+ } // namespace cuda
23
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cufft_get_plan_cache_size_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API int64_t _cufft_get_plan_cache_size(at::DeviceIndex device_index);
20
+ } // namespace native
21
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _foreach_acos {
18
+ using schema = ::std::vector<at::Tensor> (at::TensorList);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_acos")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_acos(Tensor[] self) -> Tensor[]")
24
+ static ::std::vector<at::Tensor> call(at::TensorList self);
25
+ static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
26
+ };
27
+
28
+ struct TORCH_API _foreach_acos_ {
29
+ using schema = void (at::TensorList);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_acos_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_acos_(Tensor(a!)[] self) -> ()")
35
+ static void call(at::TensorList self);
36
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
37
+ };
38
+
39
+ struct TORCH_API _foreach_acos_out {
40
+ using schema = void (at::TensorList, at::TensorList);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_acos")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_acos.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
46
+ static void call(at::TensorList self, at::TensorList out);
47
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
48
+ };
49
+
50
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lgamma_native.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API void _foreach_lgamma_out(at::TensorList self, at::TensorList out);
20
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_lgamma_slow(at::TensorList self);
21
+ TORCH_API void foreach_tensor_lgamma_slow_(at::TensorList self);
22
+ TORCH_API ::std::vector<at::Tensor> foreach_tensor_lgamma_cuda(at::TensorList self);
23
+ TORCH_API void foreach_tensor_lgamma_cuda_(at::TensorList self);
24
+ } // namespace native
25
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_pow.h ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_foreach_pow_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_foreach_pow.List(Tensor[] self, Tensor[] exponent) -> Tensor[]
26
+ inline ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, at::TensorList exponent) {
27
+ return at::_ops::_foreach_pow_List::call(self, exponent);
28
+ }
29
+
30
+ // aten::_foreach_pow.Scalar(Tensor[] self, Scalar exponent) -> Tensor[]
31
+ inline ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, const at::Scalar & exponent) {
32
+ return at::_ops::_foreach_pow_Scalar::call(self, exponent);
33
+ }
34
+
35
+ // aten::_foreach_pow.ScalarList(Tensor[] self, Scalar[] exponent) -> Tensor[]
36
+ inline ::std::vector<at::Tensor> _foreach_pow(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
37
+ return at::_ops::_foreach_pow_ScalarList::call(self, exponent);
38
+ }
39
+
40
+ // aten::_foreach_pow.ScalarAndTensor(Scalar self, Tensor[] exponent) -> Tensor[]
41
+ inline ::std::vector<at::Tensor> _foreach_pow(const at::Scalar & self, at::TensorList exponent) {
42
+ return at::_ops::_foreach_pow_ScalarAndTensor::call(self, exponent);
43
+ }
44
+
45
+ // aten::_foreach_pow_.List(Tensor(a!)[] self, Tensor[] exponent) -> ()
46
+ inline void _foreach_pow_(at::TensorList self, at::TensorList exponent) {
47
+ return at::_ops::_foreach_pow__List::call(self, exponent);
48
+ }
49
+
50
+ // aten::_foreach_pow_.Scalar(Tensor(a!)[] self, Scalar exponent) -> ()
51
+ inline void _foreach_pow_(at::TensorList self, const at::Scalar & exponent) {
52
+ return at::_ops::_foreach_pow__Scalar::call(self, exponent);
53
+ }
54
+
55
+ // aten::_foreach_pow_.ScalarList(Tensor(a!)[] self, Scalar[] exponent) -> ()
56
+ inline void _foreach_pow_(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
57
+ return at::_ops::_foreach_pow__ScalarList::call(self, exponent);
58
+ }
59
+
60
+ // aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
61
+ inline void _foreach_pow_out(at::TensorList out, at::TensorList self, at::TensorList exponent) {
62
+ return at::_ops::_foreach_pow_List_out::call(self, exponent, out);
63
+ }
64
+ // aten::_foreach_pow.List_out(Tensor[] self, Tensor[] exponent, *, Tensor(a!)[] out) -> ()
65
+ inline void _foreach_pow_outf(at::TensorList self, at::TensorList exponent, at::TensorList out) {
66
+ return at::_ops::_foreach_pow_List_out::call(self, exponent, out);
67
+ }
68
+
69
+ // aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()
70
+ inline void _foreach_pow_out(at::TensorList out, at::TensorList self, const at::Scalar & exponent) {
71
+ return at::_ops::_foreach_pow_Scalar_out::call(self, exponent, out);
72
+ }
73
+ // aten::_foreach_pow.Scalar_out(Tensor[] self, Scalar exponent, *, Tensor(a!)[] out) -> ()
74
+ inline void _foreach_pow_outf(at::TensorList self, const at::Scalar & exponent, at::TensorList out) {
75
+ return at::_ops::_foreach_pow_Scalar_out::call(self, exponent, out);
76
+ }
77
+
78
+ // aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()
79
+ inline void _foreach_pow_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
80
+ return at::_ops::_foreach_pow_ScalarList_out::call(self, exponent, out);
81
+ }
82
+ // aten::_foreach_pow.ScalarList_out(Tensor[] self, Scalar[] exponent, *, Tensor(a!)[] out) -> ()
83
+ inline void _foreach_pow_outf(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) {
84
+ return at::_ops::_foreach_pow_ScalarList_out::call(self, exponent, out);
85
+ }
86
+
87
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_reciprocal_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API void _foreach_reciprocal_out(at::TensorList out, at::TensorList self);
21
+ TORCH_API void _foreach_reciprocal_outf(at::TensorList self, at::TensorList out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_log_softmax_backward_data_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor
26
+ inline at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
27
+ return at::_ops::_log_softmax_backward_data::call(grad_output, output, dim, input_dtype);
28
+ }
29
+
30
+ // aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
32
+ return at::_ops::_log_softmax_backward_data_out::call(grad_output, output, dim, input_dtype, out);
33
+ }
34
+ // aten::_log_softmax_backward_data.out(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out) {
36
+ return at::_ops::_log_softmax_backward_data_out::call(grad_output, output, dim, input_dtype, out);
37
+ }
38
+
39
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _neg_view(const at::Tensor & self);
20
+ } // namespace native
21
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_nested_sum_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_nested_sum_backward(Tensor grad, Tensor self, int[1]? dim, bool keepdim=False) -> Tensor
26
+ inline at::Tensor _nested_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false) {
27
+ return at::_ops::_nested_sum_backward::call(grad, self, dim, keepdim);
28
+ }
29
+
30
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_tensor_softmax_with_shape_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _nested_tensor_softmax_with_shape {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_tensor_softmax_with_shape")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_tensor_softmax_with_shape(Tensor self, Tensor query) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & query);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & query);
26
+ };
27
+
28
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nnz.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_nnz_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+
26
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_reshape_alias.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_reshape_alias_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
26
+ inline at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
27
+ return at::_ops::_reshape_alias::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
28
+ }
29
+ namespace symint {
30
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
31
+ at::Tensor _reshape_alias(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride) {
32
+ return at::_ops::_reshape_alias::call(self, c10::fromIntArrayRefSlow(size), c10::fromIntArrayRefSlow(stride));
33
+ }
34
+ }
35
+
36
+ // aten::_reshape_alias(Tensor(a) self, SymInt[] size, SymInt[] stride) -> Tensor(a)
37
+ inline at::Tensor _reshape_alias_symint(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
38
+ return at::_ops::_reshape_alias::call(self, size, stride);
39
+ }
40
+ namespace symint {
41
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
42
+ at::Tensor _reshape_alias(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
43
+ return at::_ops::_reshape_alias::call(self, size, stride);
44
+ }
45
+ }
46
+
47
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_resize_output_meta_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API const at::Tensor & _resize_output_(const at::Tensor & self, at::IntArrayRef size, at::Device device);
21
+ TORCH_API const at::Tensor & _resize_output__symint(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device);
22
+
23
+ } // namespace meta
24
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_sobol_engine_initialize_state.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_sobol_engine_initialize_state_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!)
26
+ inline at::Tensor & _sobol_engine_initialize_state_(at::Tensor & self, int64_t dimension) {
27
+ return at::_ops::_sobol_engine_initialize_state_::call(self, dimension);
28
+ }
29
+
30
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_test_warn_in_autograd_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor _test_warn_in_autograd(const at::Tensor & self);
21
+ TORCH_API at::Tensor & _test_warn_in_autograd_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & _test_warn_in_autograd_outf(const at::Tensor & self, at::Tensor & out);
23
+
24
+ } // namespace compositeexplicitautograd
25
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_native.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _to_dense_out(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<bool> masked_grad, at::Tensor & out);
20
+ TORCH_API at::Tensor sparse_to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<bool> masked_grad=c10::nullopt);
21
+ TORCH_API at::Tensor sparse_compressed_to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<bool> masked_grad=c10::nullopt);
22
+ TORCH_API at::Tensor mkldnn_to_dense(const at::Tensor & self, c10::optional<at::ScalarType> dtype=c10::nullopt, c10::optional<bool> masked_grad=c10::nullopt);
23
+ } // namespace native
24
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _to_sparse_out(at::Tensor & out, const at::Tensor & self, int64_t sparse_dim);
21
+ TORCH_API at::Tensor & _to_sparse_outf(const at::Tensor & self, int64_t sparse_dim, at::Tensor & out);
22
+ TORCH_API at::Tensor & _to_sparse_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::Layout> layout=c10::nullopt, at::OptionalIntArrayRef blocksize=c10::nullopt, c10::optional<int64_t> dense_dim=c10::nullopt);
23
+ TORCH_API at::Tensor & _to_sparse_outf(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out);
24
+
25
+ } // namespace compositeexplicitautograd
26
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_version_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API int64_t _version(const at::Tensor & self);
20
+ } // namespace native
21
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_avg_pool3d_backward_cuda_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor & adaptive_avg_pool3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self);
21
+ TORCH_API at::Tensor & adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & grad_input);
22
+
23
+ } // namespace cuda
24
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_native.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/addmm_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ struct TORCH_API structured_addmm_out_cpu : public at::meta::structured_addmm {
20
+ void impl(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, const at::Tensor & out);
21
+ };
22
+ struct TORCH_API structured_addmm_out_cuda : public at::meta::structured_addmm {
23
+ void impl(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, const at::Tensor & out);
24
+ };
25
+ TORCH_API at::Tensor addmm_sparse_dense_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
26
+ TORCH_API at::Tensor & addmm_out_sparse_dense_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
27
+ TORCH_API at::Tensor & s_addmm_sparse_dense_cpu_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
28
+ TORCH_API at::Tensor addmm_sparse_dense_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
29
+ TORCH_API at::Tensor & addmm_out_sparse_dense_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
30
+ TORCH_API at::Tensor & s_addmm_sparse_dense_cuda_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
31
+ TORCH_API at::Tensor addmm_sparse_compressed_dense(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
32
+ TORCH_API at::Tensor & addmm_out_sparse_compressed_cpu(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
33
+ TORCH_API at::Tensor & addmm_out_sparse_compressed_cuda(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out);
34
+ } // namespace native
35
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/affine_grid_generator_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API affine_grid_generator {
18
+ using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::affine_grid_generator")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners);
26
+ };
27
+
28
+ struct TORCH_API affine_grid_generator_out {
29
+ using schema = at::Tensor & (const at::Tensor &, c10::SymIntArrayRef, bool, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::affine_grid_generator")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/amax_cpu_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor amax(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
21
+ TORCH_API at::Tensor & amax_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
22
+ TORCH_API at::Tensor & amax_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
23
+
24
+ } // namespace cpu
25
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_update_stats_native.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> batch_norm_update_stats_out(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, at::Tensor & out0, at::Tensor & out1);
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats_cpu(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum);
21
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats_cuda(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum);
22
+ } // namespace native
23
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_backward_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API binary_cross_entropy_backward {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy_backward")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction);
26
+ };
27
+
28
+ struct TORCH_API binary_cross_entropy_backward_grad_input {
29
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, int64_t, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::binary_cross_entropy_backward")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & grad_input);
37
+ };
38
+
39
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_not_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API bitwise_not {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_not")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_not(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API bitwise_not_ {
29
+ using schema = at::Tensor & (at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_not_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_not_(Tensor(a!) self) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
37
+ };
38
+
39
+ struct TORCH_API bitwise_not_out {
40
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::bitwise_not")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
48
+ };
49
+
50
+ }} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_cuda_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor ceil(const at::Tensor & self);
21
+ TORCH_API at::Tensor & ceil_out(at::Tensor & out, const at::Tensor & self);
22
+ TORCH_API at::Tensor & ceil_outf(const at::Tensor & self, at::Tensor & out);
23
+ TORCH_API at::Tensor & ceil_(at::Tensor & self);
24
+
25
+ } // namespace cuda
26
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp.h ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/clamp_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
26
+ inline at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
27
+ return at::_ops::clamp::call(self, min, max);
28
+ }
29
+
30
+ // aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor
31
+ inline at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
32
+ return at::_ops::clamp_Tensor::call(self, min, max);
33
+ }
34
+
35
+ // aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
36
+ inline at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
37
+ return at::_ops::clamp_::call(self, min, max);
38
+ }
39
+
40
+ // aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)
41
+ inline at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
42
+ return at::_ops::clamp__Tensor::call(self, min, max);
43
+ }
44
+
45
+ // aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
46
+ inline at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt) {
47
+ return at::_ops::clamp_out::call(self, min, max, out);
48
+ }
49
+ // aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)
50
+ inline at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out) {
51
+ return at::_ops::clamp_out::call(self, min, max, out);
52
+ }
53
+
54
+ // aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
55
+ inline at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={}) {
56
+ return at::_ops::clamp_Tensor_out::call(self, min, max, out);
57
+ }
58
+ // aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)
59
+ inline at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out) {
60
+ return at::_ops::clamp_Tensor_out::call(self, min, max, out);
61
+ }
62
+
63
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt);
21
+ TORCH_API at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt);
22
+ TORCH_API at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={});
23
+ TORCH_API at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={});
24
+
25
+ } // namespace compositeexplicitautogradnonfunctional
26
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/coalesce.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/coalesce_ops.h>
+
+ namespace at {
+
+
+
+ }
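
Editor's note: coalesce() is exposed as a Tensor method, which is why the Function.h header above only pulls in coalesce_ops.h without declaring a free function. A minimal sketch (assumes libtorch; values are illustrative):

#include <ATen/ATen.h>

void coalesce_sketch() {
  // Two sparse entries at the same coordinate (0, 0); coalesce() sums them into one.
  at::Tensor indices = at::zeros({2, 2}, at::kLong);
  at::Tensor values  = at::ones({2});
  at::Tensor sparse  = at::sparse_coo_tensor(indices, values, {3, 3});
  bool had_duplicates = !sparse.is_coalesced();   // true: duplicate coordinates present
  at::Tensor deduped  = sparse.coalesce();        // single entry with value 2 at (0, 0)
}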
venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_depthwise3d_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor & conv_depthwise3d_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation);
+ TORCH_API at::Tensor & conv_depthwise3d_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, at::Tensor & out);
+ TORCH_API at::Tensor & conv_depthwise3d_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation);
+ TORCH_API at::Tensor & conv_depthwise3d_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
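
Editor's note: an illustrative sketch of the argument layout behind the out= declarations above, calling the functional form at::conv_depthwise3d. It assumes a libtorch build with CUDA (this op ships a CUDA kernel); the shapes follow the depthwise convention of an N, C, T, H, W input with one filter per channel.

#include <ATen/ATen.h>

void conv_depthwise3d_sketch() {
  const int64_t channels = 8;
  at::Tensor input  = at::randn({1, channels, 4, 16, 16}, at::kCUDA);  // N, C, T, H, W
  at::Tensor weight = at::randn({channels, 1, 3, 3, 3}, at::kCUDA);    // one 3x3x3 filter per channel
  at::Tensor out = at::conv_depthwise3d(input, weight, /*kernel_size=*/{3, 3, 3},
                                        /*bias=*/c10::nullopt,
                                        /*stride=*/{1, 1, 1},
                                        /*padding=*/{1, 1, 1},
                                        /*dilation=*/{1, 1, 1});
}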
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/cos_ops.h>
+
+ namespace at {
+
+
+ // aten::cos(Tensor self) -> Tensor
+ inline at::Tensor cos(const at::Tensor & self) {
+ return at::_ops::cos::call(self);
+ }
+
+ // aten::cos_(Tensor(a!) self) -> Tensor(a!)
+ inline at::Tensor & cos_(at::Tensor & self) {
+ return at::_ops::cos_::call(self);
+ }
+
+ // aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cos_out(at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::cos_out::call(self, out);
+ }
+ // aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & cos_outf(const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::cos_out::call(self, out);
+ }
+
+ }
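
Editor's note: a minimal sketch (assumes libtorch) exercising the four cos entry points declared above: functional, in-place, and the two out= spellings that forward to the same operator.

#include <ATen/ATen.h>

void cos_sketch() {
  at::Tensor x = at::linspace(0.0, 3.14159, 5);
  at::Tensor y = at::cos(x);           // functional form
  at::Tensor out = at::empty_like(x);
  at::cos_out(out, x);                 // out-argument-first spelling
  at::cos_outf(x, out);                // schema-order spelling, same operator
  at::cos_(x);                         // in-place
}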
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_meta.h ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/TensorMeta.h>
+ #include <tuple>
+ #include <vector>
+
+ namespace at {
+ namespace meta {
+
+ struct TORCH_API structured_cos : public TensorIteratorBase {
+
+
+ void meta(const at::Tensor & self);
+ };
+
+ } // namespace native
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosine_similarity_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor cosine_similarity(const at::Tensor & x1, const at::Tensor & x2, int64_t dim=1, double eps=1e-08);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
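
Editor's note: a minimal sketch (assumes libtorch) of the composite op declared above; it reduces over `dim` and returns one similarity per remaining row.

#include <ATen/ATen.h>

void cosine_similarity_sketch() {
  at::Tensor a = at::randn({4, 128});
  at::Tensor b = at::randn({4, 128});
  at::Tensor sim = at::cosine_similarity(a, b, /*dim=*/1, /*eps=*/1e-8);  // shape [4], values in [-1, 1]
}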
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_native.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor & cudnn_grid_sampler_out(const at::Tensor & self, const at::Tensor & grid, at::Tensor & out);
+ TORCH_API at::Tensor cudnn_grid_sampler_forward(const at::Tensor & self, const at::Tensor & grid);
+ } // namespace native
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/diff.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/diff_ops.h>
+
+ namespace at {
+
+
+ // aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor
+ inline at::Tensor diff(const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional<at::Tensor> & prepend={}, const c10::optional<at::Tensor> & append={}) {
+ return at::_ops::diff::call(self, n, dim, prepend, append);
+ }
+
+ // aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & diff_out(at::Tensor & out, const at::Tensor & self, int64_t n=1, int64_t dim=-1, const c10::optional<at::Tensor> & prepend={}, const c10::optional<at::Tensor> & append={}) {
+ return at::_ops::diff_out::call(self, n, dim, prepend, append, out);
+ }
+ // aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & diff_outf(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append, at::Tensor & out) {
+ return at::_ops::diff_out::call(self, n, dim, prepend, append, out);
+ }
+
+ }
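
Editor's note: a minimal sketch (assumes libtorch) of at::diff as declared above; `n` sets the order of the finite difference, and the out= tensor must be pre-sized to the result shape.

#include <ATen/ATen.h>

void diff_sketch() {
  at::Tensor x  = at::arange(6).pow(2);        // {0, 1, 4, 9, 16, 25}
  at::Tensor d1 = at::diff(x);                 // first differences: {1, 3, 5, 7, 9}
  at::Tensor d2 = at::diff(x, /*n=*/2);        // second differences: {2, 2, 2, 2}
  at::Tensor out = at::empty({5}, x.options());
  at::diff_out(out, x);                        // out= variant, defaults n=1, dim=-1
}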
venv/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_backward_native.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor embedding_backward_symint(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse);
+ } // namespace native
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/expand_ops.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API expand {
+ using schema = at::Tensor (const at::Tensor &, c10::SymIntArrayRef, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::expand")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)")
+ static at::Tensor call(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymIntArrayRef size, bool implicit);
+ };
+
+ }} // namespace at::_ops
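
Editor's note: this Operator.h struct backs Tensor::expand. A minimal sketch (assumes libtorch) of the user-facing call, which broadcasts size-1 dimensions as a zero-stride view rather than copying data.

#include <ATen/ATen.h>

void expand_sketch() {
  at::Tensor row = at::randn({1, 4});
  at::Tensor big = row.expand({3, 4});   // shares storage with row; stride 0 along dim 0
  at::Tensor own = big.contiguous();     // materialize a real copy when one is needed
}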
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq_ops.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API fft_fftfreq {
+ using schema = at::Tensor (int64_t, double, c10::optional<at::ScalarType>, c10::optional<at::Layout>, c10::optional<at::Device>, c10::optional<bool>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_fftfreq")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor")
+ static at::Tensor call(int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
+ };
+
+ struct TORCH_API fft_fftfreq_out {
+ using schema = at::Tensor & (int64_t, double, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fft_fftfreq")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(int64_t n, double d, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, int64_t n, double d, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
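
Editor's note: a minimal sketch (assumes libtorch) of the two schemas above; fft_fftfreq(n, d) returns the DFT sample frequencies for a length-n signal with sample spacing d.

#include <ATen/ATen.h>

void fftfreq_sketch() {
  at::Tensor f = at::fft_fftfreq(8, /*d=*/0.5);  // {0, 0.25, 0.5, 0.75, -1, -0.75, -0.5, -0.25}
  at::Tensor out = at::empty({8});
  at::fft_fftfreq_out(out, 8, 0.5);              // out= overload from the second schema
}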
venv/lib/python3.10/site-packages/torch/include/ATen/ops/full.h ADDED
@@ -0,0 +1,131 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/full_ops.h>
+
+ namespace at {
+
+
+ // aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::TensorOptions options={}) {
+ return at::_ops::full_names::call(size, fill_value, names, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ // aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+ return at::_ops::full_names::call(size, fill_value, names, dtype, layout, device, pin_memory);
+ }
+
+ // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+ return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+ return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+ return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+ return at::_ops::full::call(c10::fromIntArrayRefSlow(size), fill_value, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+ return at::_ops::full::call(size, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options={}) {
+ return at::_ops::full::call(size, fill_value, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ }
+
+ // aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+ return at::_ops::full::call(size, fill_value, dtype, layout, device, pin_memory);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ at::Tensor full(c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+ return at::_ops::full::call(size, fill_value, dtype, layout, device, pin_memory);
+ }
+ }
+
+ // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) {
+ return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value) {
+ return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+ }
+ }
+
+ // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+ return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+ return at::_ops::full_out::call(c10::fromIntArrayRefSlow(size), fill_value, out);
+ }
+ }
+
+ // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & full_symint_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
+ return at::_ops::full_out::call(size, fill_value, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ at::Tensor & full_out(at::Tensor & out, c10::SymIntArrayRef size, const at::Scalar & fill_value) {
+ return at::_ops::full_out::call(size, fill_value, out);
+ }
+ }
+
+ // aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & full_symint_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+ return at::_ops::full_out::call(size, fill_value, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ at::Tensor & full_outf(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::Tensor & out) {
+ return at::_ops::full_out::call(size, fill_value, out);
+ }
+ }
+
+ // aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & full_out(at::Tensor & out, at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names) {
+ return at::_ops::full_names_out::call(size, fill_value, names, out);
+ }
+ // aten::full.names_out(int[] size, Scalar fill_value, *, Dimname[]? names, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & full_outf(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::Tensor & out) {
+ return at::_ops::full_names_out::call(size, fill_value, names, out);
+ }
+
+ }
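
Editor's note: a minimal sketch (assumes libtorch) of the options-based, dtype-explicit, and out= forms above; the symint overloads take the same arguments with symbolic sizes.

#include <ATen/ATen.h>

void full_sketch() {
  at::Tensor a = at::full({2, 3}, 7.0);                                        // default dtype, filled with 7
  at::Tensor b = at::full({2, 3}, 0.5, at::TensorOptions().dtype(at::kHalf));  // explicit dtype via options
  at::Tensor out = at::empty({2, 3});
  at::full_out(out, {2, 3}, 3.0);                                              // writes into out
}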
venv/lib/python3.10/site-packages/torch/include/ATen/ops/geqrf_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> geqrf(const at::Tensor & self);
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_out(at::Tensor & a, at::Tensor & tau, const at::Tensor & self);
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> geqrf_outf(const at::Tensor & self, at::Tensor & a, at::Tensor & tau);
+
+ } // namespace cuda
+ } // namespace at
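
Editor's note: the declarations above are the CUDA dispatch entries; the same op is reachable through at::geqrf on other backends as well. A minimal sketch (assumes libtorch): geqrf returns the packed Householder factorization in the LAPACK convention.

#include <ATen/ATen.h>
#include <tuple>

void geqrf_sketch() {
  at::Tensor m = at::randn({5, 3});
  at::Tensor a, tau;
  std::tie(a, tau) = at::geqrf(m);   // reflectors packed in `a`, scalar factors in `tau`
  // R is a.triu(); Q can be rebuilt with at::linalg_householder_product(a, tau).
}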
venv/lib/python3.10/site-packages/torch/include/ATen/ops/igamma_cuda_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API at::Tensor igamma(const at::Tensor & self, const at::Tensor & other);
+ TORCH_API at::Tensor & igamma_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+ TORCH_API at::Tensor & igamma_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+ TORCH_API at::Tensor & igamma_(at::Tensor & self, const at::Tensor & other);
+
+ } // namespace cuda
+ } // namespace at
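
Editor's note: a minimal sketch (assumes libtorch; the header above lists the CUDA entries, but the op is also available on CPU). igamma(self, other) is the regularized lower incomplete gamma function, with igammac as its upper complement.

#include <ATen/ATen.h>

void igamma_sketch() {
  at::Tensor a = at::rand({3}) + 0.5;   // shape parameters > 0
  at::Tensor x = at::rand({3});         // evaluation points >= 0
  at::Tensor p = at::igamma(a, x);
  at::Tensor q = at::igammac(a, x);     // p + q == 1 elementwise
}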
venv/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautogradnonfunctional {
+
+ TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps=c10::nullopt);
+
+ } // namespace compositeexplicitautogradnonfunctional
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/lt_meta.h ADDED
@@ -0,0 +1,32 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/TensorMeta.h>
+ #include <tuple>
+ #include <vector>
+
+ namespace at {
+ namespace meta {
+
+ struct TORCH_API structured_lt_Scalar : public TensorIteratorBase {
+
+
+ void meta(const at::Tensor & self, const at::Scalar & other);
+ };
+ struct TORCH_API structured_lt_Tensor : public TensorIteratorBase {
+
+
+ void meta(const at::Tensor & self, const at::Tensor & other);
+ };
+
+ } // namespace native
+ } // namespace at
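
Editor's note: these meta structs back the structured lt kernels (Scalar and Tensor overloads); ordinary code reaches them through the comparison API. A minimal sketch (assumes libtorch):

#include <ATen/ATen.h>

void lt_sketch() {
  at::Tensor x  = at::arange(5);                    // {0, 1, 2, 3, 4}
  at::Tensor m1 = at::lt(x, 3);                     // Scalar overload -> bool mask {1, 1, 1, 0, 0}
  at::Tensor m2 = at::lt(x, at::full_like(x, 2));   // Tensor overload
  at::Tensor m3 = x < 3;                            // operator form, same kernels
}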