Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- ckpts/universal/global_step120/zero/5.attention.dense.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/7.input_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/7.input_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/7.input_layernorm.weight/fp32.pt +3 -0
- ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale.h +44 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Long_compositeimplicitautograd_dispatch.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_copy_from_compositeexplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_cuda_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_native.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mixed_dtypes_linear.h +30 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values.h +30 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_backward_compositeimplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_ops.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_ops.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transform_bias_rescale_qkv_ops.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_multi_head_attention.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_scaled_dot_attention.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put.h +30 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/amax_meta_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_cpu_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_cuda_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_compositeimplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_native.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_left_shift_compositeexplicitautogradnonfunctional_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_ops.h +50 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_ops.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_ops.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d_ops.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_cpu_dispatch.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh.h +44 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_native.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_like.h +43 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_compositeimplicitautograd_dispatch.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h +30 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_cuda_dispatch.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_native.h +26 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_ops.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_native.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cuda_dispatch.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
ckpts/universal/global_step120/zero/5.attention.dense.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:453a34466b8e0b54752b3e6fa87497dce93bb34a675cdf3d288f7ee8554ae9e3
+size 16778396
ckpts/universal/global_step120/zero/7.input_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7610a94c1887af17c500c56e80ac33e12841e49454322f7308369e1533f23cda
+size 9372
ckpts/universal/global_step120/zero/7.input_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f47e7e901dcb2e965f639ea73ebe252dd0e099f6e6e40d9cb9f7f150f9a6dde
+size 9387
ckpts/universal/global_step120/zero/7.input_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75d1b541bc9860a9cae0f56e32fb723e10ffa5e398074232ed81e7521cc6d8e7
+size 9293
ckpts/universal/global_step20/zero/14.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:545a662bdf8aacfba8b2d909b7297c531f377aea82eeff4bb7799edbf4765981
+size 33555612
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale.h
ADDED
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale_ops.h>
+
+namespace at {
+
+
+// aten::_amp_foreach_non_finite_check_and_unscale_(Tensor(a!)[] self, Tensor(b!) found_inf, Tensor inv_scale) -> ()
+inline void _amp_foreach_non_finite_check_and_unscale_(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale);
+}
+
+// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
+inline void _amp_foreach_non_finite_check_and_unscale_out(at::TensorList out, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
+}
+// aten::_amp_foreach_non_finite_check_and_unscale.out(Tensor[] self, Tensor(b!) found_inf, Tensor inv_scale, *, Tensor(a!)[] out) -> ()
+inline void _amp_foreach_non_finite_check_and_unscale_outf(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
+}
+
+// aten::_amp_foreach_non_finite_check_and_unscale(Tensor[] self, Tensor found_inf, Tensor inv_scale) -> (Tensor[] self_out, Tensor found_inf_out)
+inline ::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
+    return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale);
+}
+
+}
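
Note: the header above is the C++ surface for the in-place AMP unscale primitive that drives dynamic loss scaling. A minimal usage sketch, assuming a libtorch build where this op is implemented for the tensors' device (historically it was CUDA-first); the tensor values are illustrative, not from this diff:

#include <ATen/ATen.h>
#include <ATen/ops/_amp_foreach_non_finite_check_and_unscale.h>

int main() {
  // A gradient computed under a loss scale of 4 (illustrative values).
  at::Tensor grad = at::full({4}, 8.0f);
  at::Tensor found_inf = at::zeros({1});        // becomes 1.0 if any value is inf/nan
  at::Tensor inv_scale = at::full({1}, 0.25f);  // reciprocal of the loss scale
  // Unscales every tensor in the list in place and flags non-finite values.
  at::_amp_foreach_non_finite_check_and_unscale_({grad}, found_inf, inv_scale);
  // grad is now 2.0 everywhere; found_inf stays 0 for finite inputs.
  return 0;
}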
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Long_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _cast_Long(const at::Tensor & self, bool non_blocking=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_cdist_backward_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & _cdist_backward_out(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist, at::Tensor & out);
+TORCH_API at::Tensor _cdist_backward(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_copy_from_compositeexplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _copy_from_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & dst, bool non_blocking=false);
+TORCH_API at::Tensor & _copy_from_outf(const at::Tensor & self, const at::Tensor & dst, bool non_blocking, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_acos_cuda_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_acos(at::TensorList self);
+TORCH_API void _foreach_acos_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_moving_avg_obs_fq_helper_native.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _fused_moving_avg_obs_fq_helper_out(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant, at::Tensor & out0, at::Tensor & out1);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fused_moving_avg_obs_fake_quant_cpu(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fused_moving_avg_obs_fake_quant_cuda(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant=false, bool symmetric_quant=false);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_mixed_dtypes_linear.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_mixed_dtypes_linear_ops.h>
+
+namespace at {
+
+
+// aten::_mixed_dtypes_linear(Tensor input, Tensor weight, Tensor scale, *, Tensor? bias=None, str? activation=None) -> Tensor
+inline at::Tensor _mixed_dtypes_linear(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & scale, const c10::optional<at::Tensor> & bias={}, c10::optional<c10::string_view> activation=c10::nullopt) {
+    return at::_ops::_mixed_dtypes_linear::call(input, weight, scale, bias, activation);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_get_values_ops.h>
+
+namespace at {
+
+
+// aten::_nested_get_values(Tensor(a) self) -> Tensor(a)
+inline at::Tensor _nested_get_values(const at::Tensor & self) {
+    return at::_ops::_nested_get_values::call(self);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_copy.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_nested_view_from_buffer_copy_ops.h>
+
+namespace at {
+
+
+// aten::_nested_view_from_buffer_copy(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets) -> Tensor
+inline at::Tensor _nested_view_from_buffer_copy(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
+    return at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets);
+}
+
+// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_view_from_buffer_copy_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
+    return at::_ops::_nested_view_from_buffer_copy_out::call(self, nested_size, nested_strides, offsets, out);
+}
+// aten::_nested_view_from_buffer_copy.out(Tensor self, Tensor nested_size, Tensor nested_strides, Tensor offsets, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _nested_view_from_buffer_copy_outf(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets, at::Tensor & out) {
+    return at::_ops::_nested_view_from_buffer_copy_out::call(self, nested_size, nested_strides, offsets, out);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_backward_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _pack_padded_sequence_backward(const at::Tensor & grad, at::IntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first);
+TORCH_API at::Tensor _pack_padded_sequence_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first);
+
+} // namespace compositeimplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _scaled_dot_product_efficient_attention_backward {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, double, ::std::array<bool,4>, bool, c10::optional<double>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_scaled_dot_product_efficient_attention_backward")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal, c10::optional<double> scale);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal, c10::optional<double> scale);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _standard_gamma_grad {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_standard_gamma_grad")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_standard_gamma_grad(Tensor self, Tensor output) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & output);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output);
+};
+
+struct TORCH_API _standard_gamma_grad_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_standard_gamma_grad")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & output, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & output, at::Tensor & out);
+};
+
+}} // namespace at::_ops
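
Note: every *_ops.h header generated from Operator.h follows the pattern above: one struct per overload whose static call() is the unboxed entry point (it runs a full dispatcher lookup), while redispatch() continues dispatch from an explicit DispatchKeySet. The convenience wrappers in the corresponding Function.h forward to call(). A minimal sketch using the operator declared above, with illustrative inputs:

#include <ATen/ATen.h>
#include <ATen/ops/_standard_gamma_grad_ops.h>

int main() {
  at::Tensor alpha = at::rand({3}) + 0.5;           // gamma shape parameters
  at::Tensor sample = at::_standard_gamma(alpha);   // draw gamma samples
  // Equivalent to the public at::_standard_gamma_grad(alpha, sample).
  at::Tensor grad = at::_ops::_standard_gamma_grad::call(alpha, sample);
  return 0;
}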
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_transform_bias_rescale_qkv_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _transform_bias_rescale_qkv {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, int64_t);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_transform_bias_rescale_qkv")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_transform_bias_rescale_qkv(Tensor qkv, Tensor qkv_bias, int num_heads) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads);
+};
+
+struct TORCH_API _transform_bias_rescale_qkv_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, int64_t, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_transform_bias_rescale_qkv")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_transform_bias_rescale_qkv.out(Tensor qkv, Tensor qkv_bias, int num_heads, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_multi_head_attention.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_triton_multi_head_attention_ops.h>
+
+namespace at {
+
+
+// aten::_triton_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None) -> Tensor
+inline at::Tensor _triton_multi_head_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}) {
+    return at::_ops::_triton_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
+}
+
+// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _triton_multi_head_attention_out(at::Tensor & out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask={}) {
+    return at::_ops::_triton_multi_head_attention_out::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
+}
+// aten::_triton_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _triton_multi_head_attention_outf(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, at::Tensor & out) {
+    return at::_ops::_triton_multi_head_attention_out::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, out);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_triton_scaled_dot_attention.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_triton_scaled_dot_attention_ops.h>
+
+namespace at {
+
+
+// aten::_triton_scaled_dot_attention(Tensor q, Tensor k, Tensor v, float dropout_p=0.0) -> Tensor
+inline at::Tensor _triton_scaled_dot_attention(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p=0.0) {
+    return at::_ops::_triton_scaled_dot_attention::call(q, k, v, dropout_p);
+}
+
+// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _triton_scaled_dot_attention_out(at::Tensor & out, const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p=0.0) {
+    return at::_ops::_triton_scaled_dot_attention_out::call(q, k, v, dropout_p, out);
+}
+// aten::_triton_scaled_dot_attention.out(Tensor q, Tensor k, Tensor v, float dropout_p=0.0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & _triton_scaled_dot_attention_outf(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p, at::Tensor & out) {
+    return at::_ops::_triton_scaled_dot_attention_out::call(q, k, v, dropout_p, out);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/_unsafe_index_put.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_unsafe_index_put_ops.h>
+
+namespace at {
+
+
+// aten::_unsafe_index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
+inline at::Tensor _unsafe_index_put(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false) {
+    return at::_ops::_unsafe_index_put::call(self, indices, values, accumulate);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/amax_meta_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor amax(const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+TORCH_API at::Tensor & amax_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim={}, bool keepdim=false);
+TORCH_API at::Tensor & amax_outf(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
+
+} // namespace meta
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_cpu_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> aminmax(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> aminmax_out(at::Tensor & min, at::Tensor & max, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> aminmax_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & min, at::Tensor & max);
+
+} // namespace cpu
+} // namespace at
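
Note: the at::cpu (and at::cuda, at::meta) namespaces in these dispatch headers expose the registered backend kernel directly, bypassing the dispatcher; the unqualified at:: function is the normal dispatched entry. A minimal sketch, assuming CPU tensors:

#include <ATen/ATen.h>
#include <ATen/ops/aminmax_cpu_dispatch.h>

int main() {
  at::Tensor t = at::rand({5});
  // Dispatched call: the dispatcher routes to the CPU kernel.
  auto [mn, mx] = at::aminmax(t);
  // Direct call into the registered CPU kernel, skipping dispatch
  // (valid only when the inputs really are CPU tensors).
  auto [mn2, mx2] = at::cpu::aminmax(t);
  return 0;
}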
venv/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_cuda_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor argmin(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & argmin_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & argmin_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor argsort(const at::Tensor & self, int64_t dim=-1, bool descending=false);
+TORCH_API at::Tensor argsort(const at::Tensor & self, at::Dimname dim, bool descending=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor atanh(const at::Tensor & self);
+TORCH_API at::Tensor & atanh_(at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_and_native.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/bitwise_and_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_bitwise_and_out : public at::meta::structured_bitwise_and_Tensor {
+void impl(const at::Tensor & self, const at::Tensor & other, const at::Tensor & out);
+};
+TORCH_API at::Tensor bitwise_and(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & bitwise_and_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & bitwise_and_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor bitwise_and(const at::Scalar & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_and_Scalar_Tensor_out(const at::Scalar & self, const at::Tensor & other, at::Tensor & out);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/bitwise_left_shift_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor bitwise_left_shift(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & bitwise_left_shift_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_ops.h
ADDED
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API ceil {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ceil")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ceil(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API ceil_ {
+  using schema = at::Tensor & (at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ceil_")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ceil_(Tensor(a!) self) -> Tensor(a!)")
+  static at::Tensor & call(at::Tensor & self);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API ceil_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::ceil")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
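
Note: the three structs above illustrate the functional / in-place / out triple that torchgen emits for most element-wise ops; each maps onto a public ATen call. A minimal sketch with illustrative values:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::rand({4}) * 3.0;   // values in [0, 3)
  at::Tensor y = at::ceil(x);           // functional -> at::_ops::ceil::call
  x.ceil_();                            // in-place   -> at::_ops::ceil_::call
  at::Tensor out = at::empty_like(y);
  at::ceil_out(out, y);                 // out form   -> at::_ops::ceil_out::call
  return 0;
}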
venv/lib/python3.10/site-packages/torch/include/ATen/ops/channel_shuffle_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API channel_shuffle {
+  using schema = at::Tensor (const at::Tensor &, c10::SymInt);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::channel_shuffle")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "channel_shuffle(Tensor self, SymInt groups) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, c10::SymInt groups);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups);
+};
+
+struct TORCH_API channel_shuffle_out {
+  using schema = at::Tensor & (const at::Tensor &, c10::SymInt, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::channel_shuffle")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, c10::SymInt groups, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::SymInt groups, at::Tensor & out);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/col_indices_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API col_indices {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::col_indices")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "col_indices(Tensor(a) self) -> Tensor(a)")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
venv/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose3d_ops.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API conv_transpose3d_input {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, c10::SymIntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conv_transpose3d")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "input")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor")
+  static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation);
+};
+
+}} // namespace at::_ops
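Note: the schema string above is the source of truth for the defaults (stride=1, padding=0, output_padding=0, groups=1, dilation=1). A hedged usage sketch of the public wrapper that forwards to this op; shapes are illustrative:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor input = at::randn({1, 2, 4, 4, 4});   // NCDHW layout
  // Transposed-conv weights are (in_channels, out_channels/groups, kD, kH, kW).
  at::Tensor weight = at::randn({2, 3, 3, 3, 3});
  at::Tensor out = at::conv_transpose3d(input, weight);
  // With stride 1 and no padding, each spatial dim grows by kernel_size - 1.
  std::cout << out.sizes() << "\n";  // [1, 3, 6, 6, 6]
}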
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cos_cpu_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor cos(const at::Tensor & self);
+TORCH_API at::Tensor & cos_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & cos_outf(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & cos_(at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
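Note: the at::cpu:: namespace exposes the kernel registered for the CPU dispatch key directly, without going through the full dispatcher. A sketch, assuming the aggregate header ATen/CPUFunctions.h is the intended include for these per-op dispatch declarations:

#include <ATen/ATen.h>
#include <ATen/CPUFunctions.h>  // aggregates the *_cpu_dispatch.h headers
#include <iostream>

int main() {
  at::Tensor t = at::linspace(0, 3.14, 5);  // CPU tensor
  at::Tensor a = at::cos(t);                // full dispatcher path
  at::Tensor b = at::cpu::cos(t);           // direct CPU kernel; only valid for CPU tensors
  std::cout << at::allclose(a, b) << "\n";  // 1
}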
venv/lib/python3.10/site-packages/torch/include/ATen/ops/cosh.h
ADDED
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/cosh_ops.h>
+
+namespace at {
+
+
+// aten::cosh(Tensor self) -> Tensor
+inline at::Tensor cosh(const at::Tensor & self) {
+    return at::_ops::cosh::call(self);
+}
+
+// aten::cosh_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & cosh_(at::Tensor & self) {
+    return at::_ops::cosh_::call(self);
+}
+
+// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cosh_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::cosh_out::call(self, out);
+}
+// aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & cosh_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::cosh_out::call(self, out);
+}
+
+}
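Note: Function.h headers generate three calling conventions per op: functional, in-place (trailing underscore), and two out spellings, where *_out takes out first and *_outf takes it last, both routing to the same at::_ops::cosh_out. A small sketch:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::ones({3});
  at::Tensor y = at::cosh(x);     // functional: allocates a new tensor
  at::Tensor out = at::empty({3});
  at::cosh_out(out, x);           // out-first spelling
  at::cosh_outf(x, out);          // out-last spelling, identical op
  at::cosh_(x);                   // in-place: x now holds cosh(1) ~ 1.5431
  TORCH_CHECK(at::allclose(x, y) && at::allclose(out, y));
}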
venv/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_native.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & diag_embed_out(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+TORCH_API at::Tensor diag_embed(const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/empty_like.h
ADDED
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/empty_like_ops.h>
+
+namespace at {
+
+
+// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::empty_like::call(self, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+}
+// aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+inline at::Tensor empty_like(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+    return at::_ops::empty_like::call(self, dtype, layout, device, pin_memory, memory_format);
+}
+
+// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & empty_like_out(at::Tensor & out, const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+    return at::_ops::empty_like_out::call(self, memory_format, out);
+}
+// aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & empty_like_outf(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
+    return at::_ops::empty_like_out::call(self, memory_format, out);
+}
+
+}
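Note: the first overload packs dtype/layout/device/pin_memory into at::TensorOptions and unpacks them before hitting the dispatcher; the second takes the optionals already unpacked. A hedged sketch of both (values are arbitrary):

#include <ATen/ATen.h>

int main() {
  at::Tensor src = at::randn({2, 3});
  // TensorOptions overload: keep the shape, override only the dtype.
  at::Tensor a = at::empty_like(src, at::TensorOptions().dtype(at::kDouble));
  // Unpacked overload: each option passed as an explicit optional.
  at::Tensor b = at::empty_like(src, at::kFloat, c10::nullopt, c10::nullopt,
                                c10::nullopt, at::MemoryFormat::Contiguous);
  TORCH_CHECK(a.dtype() == at::kDouble && b.sizes() == src.sizes());
}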
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fake_quantize_per_channel_affine_cachemask_ops.h>
+
+namespace at {
+
+
+// aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)
+inline ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
+    return at::_ops::fake_quantize_per_channel_affine_cachemask::call(self, scale, zero_point, axis, quant_min, quant_max);
+}
+
+// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
+    return at::_ops::fake_quantize_per_channel_affine_cachemask_out::call(self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
+}
+// aten::fake_quantize_per_channel_affine_cachemask.out(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
+inline ::std::tuple<at::Tensor &,at::Tensor &> fake_quantize_per_channel_affine_cachemask_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, at::Tensor & out0, at::Tensor & out1) {
+    return at::_ops::fake_quantize_per_channel_affine_cachemask_out::call(self, scale, zero_point, axis, quant_min, quant_max, out0, out1);
+}
+
+}
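Note: unlike plain fake_quantize_per_channel_affine, the cachemask variant also returns the bool mask of elements that fell inside [quant_min, quant_max], which the backward pass reuses. A sketch with illustrative shapes:

#include <ATen/ATen.h>

int main() {
  at::Tensor x = at::randn({2, 4});
  at::Tensor scale = at::full({2}, 0.1);            // one scale per channel (axis 0)
  at::Tensor zero_point = at::zeros({2}, at::kInt);
  auto [output, mask] = at::fake_quantize_per_channel_affine_cachemask(
      x, scale, zero_point, /*axis=*/0, /*quant_min=*/-128, /*quant_max=*/127);
  TORCH_CHECK(output.sizes() == x.sizes() && mask.dtype() == at::kBool);
}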
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_compositeimplicitautograd_dispatch.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
+TORCH_API at::Tensor fake_quantize_per_tensor_affine(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max);
+
+} // namespace compositeimplicitautograd
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/fbgemm_linear_int8_weight_fp32_activation_ops.h>
+
+namespace at {
+
+
+// aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor
+inline at::Tensor fbgemm_linear_int8_weight_fp32_activation(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
+    return at::_ops::fbgemm_linear_int8_weight_fp32_activation::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/fmod_cuda_dispatch.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor fmod(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmod_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & fmod_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & fmod_(at::Tensor & self, const at::Tensor & other);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_cpu_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none");
+TORCH_API at::Tensor & gelu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none");
+TORCH_API at::Tensor & gelu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input);
+
+} // namespace cpu
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/is_conj_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API bool is_conj(const at::Tensor & self);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_native.h
ADDED
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & less_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & less_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at
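Note: less_equal is the NumPy-style alias of le, declared here with Scalar and Tensor right-hand sides plus out and in-place forms. A minimal sketch of the two functional overloads:

#include <ATen/ATen.h>

int main() {
  at::Tensor a = at::arange(4);                                    // [0, 1, 2, 3]
  at::Tensor m1 = at::less_equal(a, 2);                            // Scalar overload, broadcasts
  at::Tensor m2 = at::less_equal(a, at::full({4}, 2, at::kLong));  // Tensor overload
  TORCH_CHECK(at::equal(m1, m2));  // both are [1, 1, 1, 0] (bool)
}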
venv/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/lift_fresh_copy_ops.h>
+
+namespace at {
+
+
+// aten::lift_fresh_copy(Tensor self) -> Tensor
+inline at::Tensor lift_fresh_copy(const at::Tensor & self) {
+    return at::_ops::lift_fresh_copy::call(self);
+}
+
+// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lift_fresh_copy_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::lift_fresh_copy_out::call(self, out);
+}
+// aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & lift_fresh_copy_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::lift_fresh_copy_out::call(self, out);
+}
+
+}
venv/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lu_factor_ex_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex(const at::Tensor & A, bool pivot=true, bool check_errors=false);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
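Note: the _ex suffix means errors are reported through the info tensor instead of raising, matching LAPACK getrf semantics; the defaults above are pivot=true, check_errors=false. A sketch:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor A = at::randn({3, 3});
  auto [LU, pivots, info] = at::linalg_lu_factor_ex(A);
  // info == 0 signals success; a positive value marks the first zero pivot.
  std::cout << "info = " << info.item<int>() << "\n";
}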
venv/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp_ops.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API logaddexp_out {
+  using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logaddexp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+};
+
+struct TORCH_API logaddexp {
+  using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logaddexp")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logaddexp(Tensor self, Tensor other) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
+};
+
+}} // namespace at::_ops
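Note: this op computes log(exp(self) + exp(other)) without materializing the exponentials, so it stays finite where the naive expression underflows. A sketch of the public wrapper and the generated _ops entry point declared above:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::full({1}, -1000.0);
  at::Tensor b = at::full({1}, -1000.0);
  at::Tensor r = at::logaddexp(a, b);               // ~ -1000 + log(2) = -999.3069
  at::Tensor r2 = at::_ops::logaddexp::call(a, b);  // same op via the unboxed API
  std::cout << r.item<float>() << " " << at::allclose(r, r2) << "\n";
}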
venv/lib/python3.10/site-packages/torch/include/ATen/ops/mH_native.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor mH(const at::Tensor & self);
+} // namespace native
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_cuda_dispatch.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices);
+TORCH_API at::Tensor & max_pool2d_with_indices_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices);
+TORCH_API at::Tensor & max_pool2d_with_indices_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices, at::Tensor & grad_input);
+
+} // namespace cuda
+} // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/ops/minimum_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor minimum(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at