applied-ai-018 committed
Commit d831d88 · verified
1 Parent(s): 83d18ca

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUGeneratorImpl.h +49 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CollapseDims.h +94 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DimVector.h +2 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapMode.h +26 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorImpl.h +186 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorAccessor.h +2 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h +24 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/code_template.h +243 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/jiterator_macros.h +38 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision.h +26 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_backward.h +47 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_sparse_backward.h +47 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_native.h +22 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_solve_ex_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token.h +34 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_native.h +21 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_backward_native.h +21 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsr_ops.h +39 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_meta_dispatch.h +28 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args_native.h +21 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/abs.h +44 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_ops.h +39 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_cuda_dispatch.h +25 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_native.h +23 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_native.h +24 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_cpu_dispatch.h +30 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/clip_compositeimplicitautograd_dispatch.h +30 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_meta_dispatch.h +26 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h +24 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward_compositeexplicitautograd_dispatch.h +24 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_native.h +22 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_cuda_dispatch.h +26 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_strided_native.h +25 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/exp_ops.h +50 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft2_compositeimplicitautograd_dispatch.h +28 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h +28 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftn_native.h +22 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_meta.h +27 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h +28 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_native.h +21 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h +23 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_native.h +26 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy.h +39 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h +23 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta.h +27 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linspace.h +97 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log10.h +44 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_cuda_dispatch.h +26 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cuda_dispatch.h +24 -0
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CPUGeneratorImpl.h ADDED
@@ -0,0 +1,49 @@
+ #pragma once
+
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/MT19937RNGEngine.h>
+ #include <c10/core/GeneratorImpl.h>
+ #include <c10/util/Optional.h>
+
+ namespace at {
+
+ struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
+ // Constructors
+ CPUGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
+ ~CPUGeneratorImpl() override = default;
+
+ // CPUGeneratorImpl methods
+ std::shared_ptr<CPUGeneratorImpl> clone() const;
+ void set_current_seed(uint64_t seed) override;
+ void set_offset(uint64_t offset) override;
+ uint64_t get_offset() const override;
+ uint64_t current_seed() const override;
+ uint64_t seed() override;
+ void set_state(const c10::TensorImpl& new_state) override;
+ c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
+ static c10::DeviceType device_type();
+ uint32_t random();
+ uint64_t random64();
+ c10::optional<float> next_float_normal_sample();
+ c10::optional<double> next_double_normal_sample();
+ void set_next_float_normal_sample(c10::optional<float> randn);
+ void set_next_double_normal_sample(c10::optional<double> randn);
+ at::mt19937 engine();
+ void set_engine(at::mt19937 engine);
+
+ private:
+ CPUGeneratorImpl* clone_impl() const override;
+ at::mt19937 engine_;
+ c10::optional<float> next_float_normal_sample_;
+ c10::optional<double> next_double_normal_sample_;
+ };
+
+ namespace detail {
+
+ TORCH_API const Generator& getDefaultCPUGenerator();
+ TORCH_API Generator
+ createCPUGenerator(uint64_t seed_val = default_rng_seed_val);
+
+ } // namespace detail
+
+ } // namespace at
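A minimal usage sketch of the generator API declared above (illustrative only; it assumes a translation unit compiled against libtorch and uses only declarations from this header):

    #include <ATen/CPUGeneratorImpl.h>

    void sample_from_cpu_generator() {
      // Construct a CPU generator with a fixed seed (default_rng_seed_val otherwise).
      at::CPUGeneratorImpl gen(/*seed_in=*/42);
      uint32_t r32 = gen.random();    // next 32-bit draw from the MT19937 engine
      uint64_t r64 = gen.random64();  // next 64-bit draw
      gen.set_current_seed(7);        // reseed the engine
      // The process-wide default generator is reachable via the detail namespace.
      const at::Generator& def = at::detail::getDefaultCPUGenerator();
      (void)def; (void)r32; (void)r64;
    }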
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/CollapseDims.h ADDED
@@ -0,0 +1,94 @@
+ #include <c10/util/Exception.h>
+ #include <utility>
+
+ namespace at {
+
+ /*
+ [collapse dims] Updates sizes, and strides to reflect a "collapse" of
+ the info, possibly excluding the optional excludeDim. A "collapsed" version
+ of the info is the fewest dims that order the tensor's elements in the same
+ way as the original info. If excludeDim is specified, the collapse is the
+ fewest dims that order the tensor's elements as the original and preserve the
+ excluded dimension, unless the tensor collapses to a point.
+
+ This function returns a pair of values.
+
+ 1) The (new) index of the preserved dimension if excludeDim is
+ specified. 0 if the tensor is collapsed to a point. -1
+ otherwise.
+
+ 2) The new number of dimensions.
+ */
+ template <typename T>
+ inline std::pair<int64_t, int64_t> collapse_dims(
+ T* sizes,
+ T* strides,
+ int64_t dims,
+ const int excludeDim = -1) {
+ TORCH_CHECK(
+ excludeDim >= -1 && excludeDim < dims,
+ "expected excluded dim between -1 and dims - 1");
+
+ int64_t stopDim = (excludeDim == -1) ? dims : excludeDim;
+ int64_t newIndex = -1;
+ int64_t oldIndex = 0;
+ int64_t remappedExcludedDim = -1;
+
+ while (oldIndex < dims) {
+ // Finds a dimension to collapse into
+ for (; oldIndex < stopDim; ++oldIndex) {
+ if (sizes[oldIndex] == 1) {
+ continue;
+ }
+
+ ++newIndex;
+ sizes[newIndex] = sizes[oldIndex];
+ strides[newIndex] = strides[oldIndex];
+ ++oldIndex;
+ break;
+ }
+
+ // Collapses dims
+ for (; oldIndex < stopDim; ++oldIndex) {
+ if (sizes[oldIndex] == 1) {
+ continue;
+ }
+
+ if (strides[newIndex] == sizes[oldIndex] * strides[oldIndex]) {
+ sizes[newIndex] *= sizes[oldIndex];
+ strides[newIndex] = strides[oldIndex];
+ } else {
+ ++newIndex;
+ sizes[newIndex] = sizes[oldIndex];
+ strides[newIndex] = strides[oldIndex];
+ }
+ }
+
+ // Handles excludeDim being set (oldIndex == excludeDim)
+ if (oldIndex != dims) {
+ // Preserves excluded dimension
+ ++newIndex;
+ sizes[newIndex] = sizes[oldIndex];
+ strides[newIndex] = strides[oldIndex];
+ remappedExcludedDim = newIndex;
+
+ // Restarts iteration after excludeDim
+ ++oldIndex;
+ stopDim = dims;
+ }
+ }
+
+ // Handles special case of all dims size 1
+ if (newIndex == -1 || (newIndex == 0 && sizes[0] == 1)) {
+ dims = 1;
+ sizes[0] = 1;
+ strides[0] = 1;
+
+ return std::pair<int64_t, int64_t>(0, 1);
+ }
+
+ dims = newIndex + 1;
+ return std::pair<int64_t, int64_t>(remappedExcludedDim, dims);
+ }
+
+ } // namespace at
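A worked example of the collapse (a sketch, assuming the header above is on the include path): for a contiguous 2x3x4 tensor every dimension can be folded into one, while excluding a dimension blocks merging across it.

    #include <ATen/CollapseDims.h>
    #include <cassert>

    int main() {
      // Contiguous 2x3x4 tensor: strides are {12, 4, 1}.
      int64_t sizes[3] = {2, 3, 4};
      int64_t strides[3] = {12, 4, 1};
      auto collapsed = at::collapse_dims(sizes, strides, /*dims=*/3);
      assert(collapsed.second == 1);              // one dimension remains
      assert(collapsed.first == -1);              // nothing was excluded
      assert(sizes[0] == 24 && strides[0] == 1);  // arrays are rewritten in place

      // Same tensor, but dimension 1 must survive, so nothing merges across it.
      int64_t sizes2[3] = {2, 3, 4};
      int64_t strides2[3] = {12, 4, 1};
      auto kept = at::collapse_dims(sizes2, strides2, /*dims=*/3, /*excludeDim=*/1);
      assert(kept.second == 3);  // still three dims
      assert(kept.first == 1);   // the excluded dim keeps index 1
      return 0;
    }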
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/DimVector.h ADDED
@@ -0,0 +1,2 @@
+ #pragma once
+ #include <ATen/core/DimVector.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/LegacyVmapMode.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+
+ #include <c10/core/impl/LocalDispatchKeySet.h>
+
+ namespace at::impl {
+
+ // VmapMode contains a thread local count of how many nested vmaps
+ // we are currently inside. That number is known as the `vmap level`.
+ // VmapMode is used in the implementation of the Python `torch.vmap` API.
+ //
+ // NOTE: this is NOT the c++ api for torch.vmap. That doesn't exist yet.
+
+ struct TORCH_API VmapMode {
+ // Returns the vmap level, aka the count of how many nested vmaps we're in.
+ static int64_t current_vmap_level();
+
+ // Increment the count of nested vmaps. If this causes the vmap level to be
+ // greater than 0, then it enables DispatchKey::VmapMode on all tensors.
+ static int64_t increment_nesting();
+
+ // Decrements the count of nested vmaps. If this causes the vmap level to be
+ // equal to 0, then it disables DispatchKey::VmapMode on all tensors.
+ static int64_t decrement_nesting();
+ };
+
+ } // namespace at::impl
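A small sketch of the intended call pattern (hypothetical; in practice these calls are driven by the Python torch.vmap machinery rather than user code):

    #include <ATen/LegacyVmapMode.h>

    void vmap_region_sketch() {
      // Entering a vmapped region: the level becomes >= 1 and DispatchKey::VmapMode
      // is enabled on all tensors while we are inside.
      int64_t level = at::impl::VmapMode::increment_nesting();
      // ... run the batched computation at `level` ...
      (void)level;
      // Leaving the region: when the count returns to 0 the key is disabled again.
      at::impl::VmapMode::decrement_nesting();
    }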
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/SparseCsrTensorImpl.h ADDED
@@ -0,0 +1,186 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+ #include <c10/core/TensorImpl.h>
+ #include <c10/util/Exception.h>
+ namespace at {
+
+ // Struct implementing a sparse CSR tensor. It uses three 1-D tensors for
+ // denoting the data: `crow_indices_`, `col_indices_` and `values_`.
+ // The `crow_indices_` tensor is a integer tensor of shape `(size(0) + 1)`
+ // that represents the compressed row indices of the CSR tensor. The
+ // `col_indices_` tensor is an integer tensor of shape `(nnz())`
+ // that explicitly stores the column indices of each value of the sparse
+ // tensor. The `values_` tensor can be of any pytorch-supported data type
+ // and has shape `(nnz())`.
+ //
+ // Since the main advantage of the CSR format over the COO format is speed of
+ // computation, care must be taken to facilitate smooth interfacing of
+ // these data structures with optimized libraries such as MKL and MAGMA.
+ // Since the MKL interface for pytorch currently uses indexing with int32
+ // type, it is important to make sure that the `crow_indices` and `col_indices`
+ // are of type int32 when calling MKL routines such as SPMM or SPMV.
+ //
+ // If not calling MKL, it should be alright to use 64 bit integer tensors
+ // for indexing.
+ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
+ Tensor crow_indices_;
+ Tensor col_indices_;
+ Tensor values_;
+ Layout layout_;
+
+ public:
+ explicit SparseCsrTensorImpl(
+ at::DispatchKeySet,
+ at::Device device,
+ Layout layout,
+ const caffe2::TypeMeta);
+
+ void resize_(int64_t nnz, IntArrayRef size);
+ void resize_and_clear_(
+ int64_t sparse_dim,
+ int64_t dense_dim,
+ IntArrayRef size);
+ void resize_as_sparse_compressed_tensor_(const Tensor& src);
+ void set_member_tensors(
+ const Tensor& crow_indices,
+ const Tensor& col_indices,
+ const Tensor& values,
+ c10::SymIntArrayRef size);
+ void set_member_tensors(
+ const Tensor& crow_indices,
+ const Tensor& col_indices,
+ const Tensor& values,
+ IntArrayRef size);
+ const Tensor& compressed_indices() const {
+ return crow_indices_;
+ }
+ const Tensor& plain_indices() const {
+ return col_indices_;
+ }
+ const Tensor& values() const {
+ return values_;
+ }
+ int64_t nnz() {
+ return col_indices_.size(-1);
+ }
+
+ inline int64_t batch_dim() const noexcept {
+ return crow_indices_.dim() - 1;
+ }
+
+ inline int64_t sparse_dim() const noexcept {
+ return 2;
+ }
+
+ inline int64_t dense_dim() const noexcept {
+ return values_.dim() - batch_dim() - block_dim() - 1;
+ }
+
+ private:
+ inline int64_t block_dim() const noexcept {
+ return (layout_ == kSparseBsr || layout_ == kSparseBsc ? 2 : 0);
+ }
+
+ protected:
+ IntArrayRef strides_custom() const override;
+ SymIntArrayRef sym_strides_custom() const override;
+ bool is_contiguous_custom(MemoryFormat) const override;
+
+ public:
+ void set_size(int64_t dim, int64_t new_size) override;
+ void set_stride(int64_t dim, int64_t new_stride) override;
+ void set_storage_offset(int64_t storage_offset) override;
+ Layout layout_impl() const override {
+ return layout_;
+ }
+ void set_layout(Layout layout) {
+ switch (layout) {
+ case kSparseCsr:
+ case kSparseCsc:
+ case kSparseBsr:
+ case kSparseBsc:
+ layout_ = layout;
+ break;
+ default:
+ TORCH_CHECK(false, "unsupported layout ", layout);
+ }
+ }
+
+ /**
+ * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+ *
+ * For usage of `version_counter` and `allow_tensor_metadata_change`,
+ * see NOTE [ TensorImpl Shallow-Copying ].
+ */
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+ const c10::VariableVersion& version_counter,
+ bool allow_tensor_metadata_change) const override {
+ auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
+ key_set(), device(), layout_impl(), dtype());
+ copy_tensor_metadata(
+ /*src_sparse_impl=*/this,
+ /*dest_sparse_impl=*/impl.get(),
+ /*version_counter=*/version_counter,
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+ impl->refresh_numel();
+ return impl;
+ }
+
+ /**
+ * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+ *
+ * For usage of `version_counter` and `allow_tensor_metadata_change`,
+ * see NOTE [ TensorImpl Shallow-Copying ].
+ */
+ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+ c10::VariableVersion&& version_counter,
+ bool allow_tensor_metadata_change) const override {
+ auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
+ key_set(), device(), layout_impl(), dtype());
+ copy_tensor_metadata(
+ /*src_sparse_impl=*/this,
+ /*dest_sparse_impl=*/impl.get(),
+ /*version_counter=*/std::move(version_counter),
+ /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+ impl->refresh_numel();
+ return impl;
+ }
+
+ private:
+ explicit SparseCsrTensorImpl(
+ at::DispatchKeySet key_set,
+ const caffe2::TypeMeta data_type,
+ at::Tensor crow_indices,
+ at::Tensor col_indices,
+ at::Tensor values,
+ at::Layout layout);
+
+ const char* tensorimpl_type_name() const override;
+
+ /**
+ * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
+ * storage_offset) from one TensorImpl to another TensorImpl.
+ *
+ * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
+ * [ TensorImpl Shallow-Copying ].
+ */
+ static void copy_tensor_metadata(
+ const SparseCsrTensorImpl* src_sparse_impl,
+ SparseCsrTensorImpl* dest_sparse_impl,
+ c10::VariableVersion version_counter,
+ bool allow_tensor_metadata_change) {
+ TensorImpl::copy_tensor_metadata(
+ src_sparse_impl,
+ dest_sparse_impl,
+ std::move(version_counter),
+ allow_tensor_metadata_change);
+
+ // Sparse-specific fields
+ dest_sparse_impl->crow_indices_ = src_sparse_impl->compressed_indices();
+ dest_sparse_impl->col_indices_ = src_sparse_impl->plain_indices();
+ dest_sparse_impl->values_ = src_sparse_impl->values();
+ dest_sparse_impl->layout_ = src_sparse_impl->layout_impl();
+ }
+ };
+ } // namespace at
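A worked example of the member-tensor layout described in the comment above (illustrative values only):

    // The 3x4 dense matrix
    //   [[1, 0, 2, 0],
    //    [0, 0, 3, 0],
    //    [0, 4, 0, 5]]
    // is held by SparseCsrTensorImpl as three 1-D tensors:
    //   crow_indices_ = [0, 2, 3, 5]     // size(0) + 1 entries; row i owns slots [crow[i], crow[i+1])
    //   col_indices_  = [0, 2, 2, 1, 3]  // nnz() == 5 column indices
    //   values_       = [1, 2, 3, 4, 5]  // nnz() == 5 values, any supported dtype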
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/TensorAccessor.h ADDED
@@ -0,0 +1,2 @@
+ #pragma once
+ #include <ATen/core/TensorAccessor.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ceil_div.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ #include <c10/macros/Macros.h>
+ #include <type_traits>
+
+ namespace at {
+
+ /**
+ Computes ceil(a / b)
+ */
+ template <typename T, typename = std::enable_if_t<std::is_integral_v<T>>>
+ C10_ALWAYS_INLINE C10_HOST_DEVICE T ceil_div(T a, T b) {
+ return (a + b - 1) / b;
+ }
+
+ /**
+ Computes ceil(a / b) * b; i.e., rounds up `a` to the next highest
+ multiple of b
+ */
+ template <typename T>
+ C10_ALWAYS_INLINE C10_HOST_DEVICE T round_up(T a, T b) {
+ return ceil_div(a, b) * b;
+ }
+
+ } // namespace at
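A brief illustration of the two helpers (a sketch; they are host/device-callable for any integral type):

    #include <ATen/ceil_div.h>

    void ceil_div_example() {
      int blocks = at::ceil_div(1000, 256);  // 4: number of 256-wide blocks covering 1000 elements
      int padded = at::round_up(1000, 256);  // 1024: 1000 rounded up to the next multiple of 256
      (void)blocks;
      (void)padded;
    }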
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/code_template.h ADDED
@@ -0,0 +1,243 @@
+ #pragma once
+
+ #include <c10/util/irange.h>
+
+ #include <sstream>
+ #include <string>
+ #include <unordered_map>
+ #include <vector>
+
+ namespace at::jit {
+
+ // A template environment is a mapping from template variable names, e.g.,
+ // identifier (corresponding to $identifier) to their expansions.
+ //
+ // This template environment supports storing strings, numbers and lists
+ // of strings, and can be chained together (so that lookup proceeds in
+ // in the top level environment, and then recurses into a parent
+ // environment if the key is not found.)
+ struct TemplateEnv {
+ TemplateEnv() = default;
+ TemplateEnv(TemplateEnv& parent) : parent(&parent) {}
+
+ using string_list = std::vector<std::string>;
+
+ // Add a string 'v' to the map at key 'k'.
+ void s(const std::string& k, const std::string& v) {
+ strings_[k] = v;
+ lists_.erase(k);
+ }
+
+ // Add a number 'v' to the map at key 'k'
+ template <typename T>
+ void d(const std::string& k, const T& v) {
+ strings_[k] = c10::to_string(v);
+ lists_.erase(k);
+ }
+
+ // Retrieve the string representation of the value stored at 'k' from the map.
+ // Raises an exception if the key is not found.
+ const std::string& s(const std::string& k) const {
+ if (strings_.count(k) == 0) {
+ if (parent) {
+ return parent->s(k);
+ }
+ notFound(k);
+ }
+ return strings_.at(k);
+ }
+
+ // Store a list of strings 'v' in the map at 'k'.
+ void v(const std::string& k, const string_list& v) {
+ lists_[k] = v;
+ strings_.erase(k);
+ }
+
+ // Retrieve a list of strings stored at 'k' from the map.
+ // Raises an exception if the key is not found.
+ const string_list& v(const std::string& k) const {
+ if (lists_.count(k) == 0) {
+ if (parent) {
+ return parent->v(k);
+ }
+ notFound(k);
+ }
+ return lists_.at(k);
+ }
+
+ // Test if a string 'k' is a string (as opposed to a list.)
+ bool keyIsString(const std::string& k) const {
+ if (strings_.count(k) > 0)
+ return true;
+ if (lists_.count(k) > 0)
+ return false;
+ if (parent)
+ return parent->keyIsString(k);
+ notFound(k);
+ }
+
+ private:
+ [[noreturn]] void notFound(const std::string& k) const {
+ std::stringstream ss;
+ ss << "key not found: " << k;
+ throw std::logic_error(ss.str());
+ }
+
+ std::unordered_map<std::string, std::string> strings_;
+ std::unordered_map<std::string, string_list> lists_;
+ TemplateEnv* parent{nullptr};
+ };
+
+ /*
+ # Match $identifier or ${identifier} and replace with the value in env.
+ # If this identifier is at the beginning of whitespace on a line
+ # and its value is a list then it is treated as
+ # block substitution by indenting all lines of all elements.
+ # If the identifier is on a line starting with non-whitespace and a list
+ # then it is comma separated. ${,foo} will insert a comma before the list
+ # if this list is not empty and ${foo,} will insert one after.
+ */
+ struct CodeTemplate {
+ /* implicit */ CodeTemplate(std::string t) : template_text(std::move(t)) {}
+
+ std::string format(const TemplateEnv& env) const {
+ std::stringstream out;
+ size_t pos = 0;
+ size_t indent = 0;
+ bool all_whitespace = true;
+ while (pos < template_text.size()) {
+ char c = template_text[pos];
+ if (c == '$') {
+ std::stringstream kss;
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+ bool comma_before;
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+ bool comma_after;
+ size_t new_pos = parseKey(pos, kss, comma_before, comma_after);
+ std::string k = kss.str();
+ bool is_string = env.keyIsString(k);
+ if (all_whitespace) {
+ if (is_string)
+ emitStringWithIndents(out, indent, env.s(k));
+ else
+ emitLinesIndented(out, indent, env.v(k));
+ } else {
+ if (is_string)
+ out << env.s(k);
+ else
+ emitCommaSeparatedList(out, env.v(k), comma_before, comma_after);
+ }
+ all_whitespace = false;
+ pos = new_pos;
+ } else {
+ out << c;
+ if (!isspace(c))
+ all_whitespace = false;
+ indent++;
+ if (c == '\n') {
+ indent = 0;
+ all_whitespace = true;
+ }
+ pos++;
+ }
+ }
+ return out.str();
+ }
+
+ private:
+ using string_list = std::vector<std::string>;
+ char charAt(size_t p) const {
+ if (p >= template_text.size())
+ throw std::logic_error("EOS found in key");
+ return template_text[p];
+ }
+ size_t parseKey(
+ size_t pos,
+ std::ostream& k,
+ bool& comma_before,
+ bool& comma_after) const {
+ comma_before = false;
+ comma_after = false;
+ pos++;
+ if (charAt(pos) == '{') {
+ pos++;
+ if (charAt(pos) == ',') {
+ comma_before = true;
+ pos++;
+ }
+ pos = parseIdent(pos, k);
+ if (charAt(pos) == ',') {
+ comma_after = true;
+ pos++;
+ }
+ if (charAt(pos) != '}')
+ throw std::logic_error("missing terminating '}'");
+ pos++;
+ return pos;
+ } else {
+ return parseIdent(pos, k);
+ }
+ }
+ size_t parseIdent(size_t pos, std::ostream& k) const {
+ while (pos < template_text.size() &&
+ (isalnum(template_text[pos]) || template_text[pos] == '_')) {
+ k << template_text[pos];
+ pos++;
+ }
+ return pos;
+ }
+ void emitCommaSeparatedList(
+ std::ostream& out,
+ const string_list& strings,
+ bool comma_before,
+ bool comma_after) const {
+ if (comma_before && !strings.empty())
+ out << ", ";
+ for (const auto i : c10::irange(strings.size())) {
+ if (i > 0)
+ out << ", ";
+ out << strings[i];
+ }
+ if (comma_after && !strings.empty())
+ out << ", ";
+ }
+ // These indentation functions follow the convention that they never emit
+ // leading or trailing newlines when the input string does not have leading
+ // or trailing newlines. It's the responsibility of the calling function
+ // to indent correctly in the context.
+ void emitIndent(std::ostream& out, size_t indent) const {
+ for (C10_UNUSED const auto i : c10::irange(indent)) {
+ out << " ";
+ }
+ }
+ void emitStringWithIndents(
+ std::ostream& out,
+ size_t indent,
+ const std::string& str) const {
+ for (auto c : str) {
+ out << c;
+ if (c == '\n') {
+ emitIndent(out, indent);
+ }
+ }
+ }
+ void emitLinesIndented(
+ std::stringstream& out,
+ size_t indent,
+ const string_list& strings) const {
+ for (const auto i : c10::irange(strings.size())) {
+ if (i > 0)
+ emitIndent(out, indent);
+ emitStringWithIndents(out, indent, strings[i]);
+ if (i + 1 != strings.size())
+ out << "\n";
+ }
+ }
+ std::string template_text;
+ };
+
+ static inline std::string format(const std::string& fmt, TemplateEnv& env) {
+ return CodeTemplate(fmt).format(env);
+ }
+
+ } // namespace at::jit
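A minimal usage sketch of TemplateEnv/CodeTemplate (assumes a C++ translation unit built against libtorch; the kernel text is purely illustrative):

    #include <ATen/code_template.h>
    #include <iostream>

    int main() {
      at::jit::TemplateEnv env;
      env.s("name", "relu");                             // string substitution for $name
      env.d("n", 4);                                     // number, stored via c10::to_string
      env.v("args", {"const float* in", "float* out"});  // list: comma-separated inside a line

      at::jit::CodeTemplate t(R"(
    void ${name}_kernel(${args}) {
      for (int i = 0; i < $n; ++i)
        out[i] = in[i] > 0 ? in[i] : 0;
    })");
      // Prints the template with $name, ${args} and $n expanded.
      std::cout << t.format(env) << std::endl;
      return 0;
    }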
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/jiterator_macros.h ADDED
@@ -0,0 +1,38 @@
+ #pragma once
+ #include <c10/macros/Macros.h>
+ #include <string>
+
+ #define JITERATOR_HOST_DEVICE C10_HOST_DEVICE
+ #if defined(_MSC_VER) && defined(__CUDACC__)
+ // NVRTC on Windows errors if __host__ __device__ attribute is
+ // present on kernel.
+ // error: attribute "__host__" does not apply here
+ // error: attribute "__device__" does not apply here
+ #define JITERATOR_HOST_DEVICE
+ #endif
+
+ // jiterator_also_stringify_as macro is used to define code (for CPU/ROCm)
+ // and generate code string for `jiterator` (only when compiling for CUDA).
+ // Usage :
+ // jiterator_also_stringify_as(
+ // jiterator_code(template <typename T> T identity(T x) { return x; }),
+ // identity_string);
+ // This will define the template `identity` as present in code and
+ // also define `std::string identity_string` with the code as the string
+ // if this is being compiled for CUDA.
+
+ // `jiterator_code` macro is to deal with `,` in the kernel code.
+ // These `,`s confuse the preprocessor into thinking we are passing
+ // multiple arguments to the macro.
+ #define jiterator_code(...) __VA_ARGS__
+ #if defined(__CUDACC__) || defined(__HIPCC__)
+ // CPU and CUDA and ROCm case
+ #define stringify_code(...) #__VA_ARGS__
+ #define jiterator_also_stringify_as(code, str_name) \
+ code /* define the function */ \
+ const std::string str_name = std::string(stringify_code(code));
+ #else
+ // CPU only or CPU and ROCm case
+ // Only needs the function
+ #define jiterator_also_stringify_as(code, str_name) code
+ #endif
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_reduced_precision.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/_autocast_to_reduced_precision_ops.h>
+
+ namespace at {
+
+
+
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_efficient_attention_backward.h ADDED
@@ -0,0 +1,47 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/_efficient_attention_backward_ops.h>
+
+ namespace at {
+
+
+ // aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None) -> (Tensor, Tensor, Tensor, Tensor)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const at::Tensor & out, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, int64_t max_seqlen_q, int64_t max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional<double> scale=c10::nullopt, c10::optional<int64_t> num_splits_key=c10::nullopt) {
+ return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const at::Tensor & out, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, int64_t max_seqlen_q, int64_t max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional<double> scale=c10::nullopt, c10::optional<int64_t> num_splits_key=c10::nullopt) {
+ return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key);
+ }
+ }
+
+ // aten::_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor? bias, Tensor out, Tensor? cu_seqlens_q, Tensor? cu_seqlens_k, SymInt max_seqlen_q, SymInt max_seqlen_k, Tensor logsumexp, float dropout_p, Tensor philox_seed, Tensor philox_offset, int custom_mask_type, bool bias_requires_grad, *, float? scale=None, int? num_splits_key=None) -> (Tensor, Tensor, Tensor, Tensor)
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward_symint(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const at::Tensor & out, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional<double> scale=c10::nullopt, c10::optional<int64_t> num_splits_key=c10::nullopt) {
+ return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const at::Tensor & out, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional<double> scale=c10::nullopt, c10::optional<int64_t> num_splits_key=c10::nullopt) {
+ return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key);
+ }
+ }
+
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_sparse_backward.h ADDED
@@ -0,0 +1,47 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/_embedding_bag_sparse_backward_ops.h>
+
+ namespace at {
+
+
+ // aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
+ inline at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
+ return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
+ at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
+ return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
+ }
+ }
+
+ // aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
+ inline at::Tensor _embedding_bag_sparse_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
+ return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
+ at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
+ return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
+ }
+ }
+
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_native.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor _grid_sampler_2d_cpu_fallback(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+ TORCH_API at::Tensor & _grid_sampler_2d_cpu_fallback_out(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_solve_ex_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautogradnonfunctional {
+
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex(const at::Tensor & A, const at::Tensor & B, bool left=true, bool check_errors=false);
+
+ } // namespace compositeexplicitautogradnonfunctional
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dep_token.h ADDED
@@ -0,0 +1,34 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/_make_dep_token_ops.h>
+
+ namespace at {
+
+
+ // aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor _make_dep_token(at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
+ return at::_ops::_make_dep_token::call(c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
+ }
+ // aten::_make_dep_token(*, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+ inline at::Tensor _make_dep_token(c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
+ return at::_ops::_make_dep_token::call(dtype, layout, device, pin_memory, memory_format);
+ }
+
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_native.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor _neg_view(const at::Tensor & self);
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_pack_padded_sequence_backward_native.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor _pack_padded_sequence_backward_symint(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first);
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_to_sparse_bsr_ops.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API _to_sparse_bsr {
+ using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, c10::optional<int64_t>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_sparse_bsr")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor")
+ static at::Tensor call(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim);
+ };
+
+ struct TORCH_API _to_sparse_bsr_out {
+ using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, c10::optional<int64_t>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_to_sparse_bsr")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_to_sparse_bsr.out(Tensor self, int[2] blocksize, int? dense_dim=None, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact2d_backward_meta_dispatch.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace meta {
+
+ TORCH_API at::Tensor _upsample_nearest_exact2d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+ TORCH_API at::Tensor _upsample_nearest_exact2d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+ TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+ TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+ TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h=c10::nullopt, c10::optional<double> scales_w=c10::nullopt);
+ TORCH_API at::Tensor & _upsample_nearest_exact2d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, at::Tensor & grad_input);
+
+ } // namespace meta
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args_native.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size);
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/abs.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/abs_ops.h>
+
+ namespace at {
+
+
+ // aten::abs(Tensor self) -> Tensor
+ inline at::Tensor abs(const at::Tensor & self) {
+ return at::_ops::abs::call(self);
+ }
+
+ // aten::abs_(Tensor(a!) self) -> Tensor(a!)
+ inline at::Tensor & abs_(at::Tensor & self) {
+ return at::_ops::abs_::call(self);
+ }
+
+ // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & abs_out(at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::abs_out::call(self, out);
+ }
+ // aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & abs_outf(const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::abs_out::call(self, out);
+ }
+
+ }
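A short usage sketch of the three generated entry points above (assumes libtorch; at::randn and at::empty_like are standard ATen factories pulled in via the umbrella header):

    #include <ATen/ATen.h>  // provides at::randn, at::empty_like and the abs entry points

    void abs_variants() {
      at::Tensor x = at::randn({4});
      at::Tensor y = at::abs(x);         // functional: allocates and returns |x|
      at::abs_(x);                       // in-place: rewrites x with |x|
      at::Tensor out = at::empty_like(x);
      at::abs_out(out, x);               // out-variant: fills the preallocated `out`
      at::abs_outf(x, out);              // same op, arguments in schema order
      (void)y;
    }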
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool2d_backward_ops.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API adaptive_max_pool2d_backward_grad_input {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool2d_backward")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, at::Tensor & grad_input);
+ };
+
+ struct TORCH_API adaptive_max_pool2d_backward {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::adaptive_max_pool2d_backward")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor")
+ static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices);
+ };
+
+ }} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API at::Tensor argmin(const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+ TORCH_API at::Tensor & argmin_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
+ TORCH_API at::Tensor & argmin_outf(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, at::Tensor & out);
+
+ } // namespace cuda
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_native.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+ #include <ATen/ops/argmin_meta.h>
+
+ namespace at {
+ namespace native {
+ struct TORCH_API structured_argmin_out : public at::meta::structured_argmin {
+ void impl(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim, const at::Tensor & out);
+ };
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/argsort_native.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor argsort(const at::Tensor & self, int64_t dim=-1, bool descending=false);
+ TORCH_API at::Tensor & argsort_stable_out(const at::Tensor & self, bool stable, int64_t dim, bool descending, at::Tensor & out);
+ TORCH_API at::Tensor argsort_stable(const at::Tensor & self, bool stable, int64_t dim=-1, bool descending=false);
+ TORCH_API at::Tensor argsort(const at::Tensor & self, at::Dimname dim, bool descending=false);
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_cpu_dispatch.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cpu {
+
+ TORCH_API at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt);
+ TORCH_API at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt);
+ TORCH_API at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out);
+ TORCH_API at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt);
+ TORCH_API at::Tensor clamp(const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={});
+ TORCH_API at::Tensor & clamp_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={});
+ TORCH_API at::Tensor & clamp_outf(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out);
+ TORCH_API at::Tensor & clamp_(at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={});
+
+ } // namespace cpu
+ } // namespace at
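For context only (not part of the commit): the clamp overloads above take optional min/max, so either bound may be omitted. A minimal sketch using the public at:: API:

    #include <ATen/ATen.h>

    at::Tensor t = at::randn({3, 3});
    at::Tensor lo = at::clamp(t, /*min=*/0.0, /*max=*/c10::nullopt);  // lower bound only
    at::Tensor both = at::clamp(t, /*min=*/-1.0, /*max=*/1.0);        // both bounds
    at::clamp_(t, c10::nullopt, /*max=*/0.5);                         // in-place, upper bound only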
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/clip_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor clip(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt);
+ TORCH_API at::Tensor & clip_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt);
+ TORCH_API at::Tensor & clip_outf(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max, at::Tensor & out);
+ TORCH_API at::Tensor & clip_(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max=c10::nullopt);
+ TORCH_API at::Tensor clip(const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={});
+ TORCH_API at::Tensor & clip_out(at::Tensor & out, const at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={});
+ TORCH_API at::Tensor & clip_outf(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max, at::Tensor & out);
+ TORCH_API at::Tensor & clip_(at::Tensor & self, const c10::optional<at::Tensor> & min={}, const c10::optional<at::Tensor> & max={});
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cosh_meta_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace meta {
+
+ TORCH_API at::Tensor cosh(const at::Tensor & self);
+ TORCH_API at::Tensor & cosh_out(at::Tensor & out, const at::Tensor & self);
+ TORCH_API at::Tensor & cosh_outf(const at::Tensor & self, at::Tensor & out);
+ TORCH_API at::Tensor & cosh_(at::Tensor & self);
+
+ } // namespace meta
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_backward_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API at::Tensor & cudnn_affine_grid_generator_backward_out(at::Tensor & out, const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W);
+ TORCH_API at::Tensor & cudnn_affine_grid_generator_backward_outf(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_backward_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace);
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_backward_outf(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_batch_norm_native.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> cudnn_batch_norm_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3);
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/digamma_cuda_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API at::Tensor digamma(const at::Tensor & self);
+ TORCH_API at::Tensor & digamma_out(at::Tensor & out, const at::Tensor & self);
+ TORCH_API at::Tensor & digamma_outf(const at::Tensor & self, at::Tensor & out);
+ TORCH_API at::Tensor & digamma_(at::Tensor & self);
+
+ } // namespace cuda
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/empty_strided_native.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor & empty_strided_out_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out);
+ TORCH_API at::Tensor empty_strided_cpu(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+ TORCH_API at::Tensor empty_strided_cuda(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+ TORCH_API at::Tensor empty_strided_meta_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+ TORCH_API at::Tensor empty_strided_unknown_quantized(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/exp_ops.h ADDED
@@ -0,0 +1,50 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API exp {
+ using schema = at::Tensor (const at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::exp")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "exp(Tensor self) -> Tensor")
+ static at::Tensor call(const at::Tensor & self);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+ };
+
+ struct TORCH_API exp_ {
+ using schema = at::Tensor & (at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::exp_")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "exp_(Tensor(a!) self) -> Tensor(a!)")
+ static at::Tensor & call(at::Tensor & self);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+ };
+
+ struct TORCH_API exp_out {
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::exp")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
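For context only (not part of the commit): each Operator.h struct above bundles the operator's schema string with static call/redispatch entry points; the ordinary at:: wrapper simply forwards to the call member. A minimal sketch:

    #include <ATen/ATen.h>
    #include <ATen/ops/exp_ops.h>

    at::Tensor x = at::ones({2, 2});
    at::Tensor a = at::exp(x);              // public API
    at::Tensor b = at::_ops::exp::call(x);  // same dispatcher entry point, invoked directly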
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fft2_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor fft_fft2(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+ TORCH_API at::Tensor fft_fft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+ TORCH_API at::Tensor & fft_fft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+ TORCH_API at::Tensor & fft_fft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+ TORCH_API at::Tensor & fft_fft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::IntArrayRef dim={-2,-1}, c10::optional<c10::string_view> norm=c10::nullopt);
+ TORCH_API at::Tensor & fft_fft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftn_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor fft_fftn(const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+ TORCH_API at::Tensor fft_fftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+ TORCH_API at::Tensor & fft_fftn_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+ TORCH_API at::Tensor & fft_fftn_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+ TORCH_API at::Tensor & fft_fftn_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+ TORCH_API at::Tensor & fft_fftn_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fft_ifftn_native.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor fft_ifftn_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=c10::nullopt, at::OptionalIntArrayRef dim=c10::nullopt, c10::optional<c10::string_view> norm=c10::nullopt);
+ TORCH_API at::Tensor & fft_ifftn_symint_out(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm, at::Tensor & out);
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_meta.h ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/TensorMeta.h>
+ #include <tuple>
+ #include <vector>
+
+ namespace at {
+ namespace meta {
+
+ struct TORCH_API structured_fractional_max_pool2d_backward : public at::impl::MetaBase {
+
+
+ void meta(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
+ };
+
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautogradnonfunctional {
+
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples);
+
+ } // namespace compositeexplicitautogradnonfunctional
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/fused_moving_avg_obs_fake_quant_ops.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API fused_moving_avg_obs_fake_quant {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, double, int64_t, int64_t, int64_t, bool, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fused_moving_avg_obs_fake_quant")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor")
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, at::Tensor & running_min, at::Tensor & running_max, at::Tensor & scale, at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant);
+ };
+
+ }} // namespace at::_ops
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/grid_sampler_native.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor grid_sampler(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/infinitely_differentiable_gelu_backward_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor infinitely_differentiable_gelu_backward(const at::Tensor & grad, const at::Tensor & self);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_native.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other);
+ TORCH_API at::Tensor & less_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+ TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Scalar & other);
+ TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other);
+ TORCH_API at::Tensor & less_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+ TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Tensor & other);
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/lift_fresh_copy.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/lift_fresh_copy_ops.h>
+
+ namespace at {
+
+
+ // aten::lift_fresh_copy(Tensor self) -> Tensor
+ inline at::Tensor lift_fresh_copy(const at::Tensor & self) {
+ return at::_ops::lift_fresh_copy::call(self);
+ }
+
+ // aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & lift_fresh_copy_out(at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::lift_fresh_copy_out::call(self, out);
+ }
+ // aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & lift_fresh_copy_outf(const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::lift_fresh_copy_out::call(self, out);
+ }
+
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_eigvals_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor linalg_eigvals(const at::Tensor & self);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta.h ADDED
@@ -0,0 +1,27 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeMetaFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/TensorIterator.h>
+ #include <ATen/TensorMeta.h>
+ #include <tuple>
+ #include <vector>
+
+ namespace at {
+ namespace meta {
+
+ struct TORCH_API structured_linalg_ldl_factor_ex : public at::impl::MetaBase {
+
+
+ void meta(const at::Tensor & self, bool hermitian, bool check_errors);
+ };
+
+ } // namespace native
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/linspace.h ADDED
@@ -0,0 +1,97 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/linspace_ops.h>
+
+ namespace at {
+
+
+ // aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}) {
+ return at::_ops::linspace::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ // aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+ return at::_ops::linspace::call(start, end, steps, dtype, layout, device, pin_memory);
+ }
+
+ // aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}) {
+ return at::_ops::linspace_Tensor_Tensor::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ // aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+ return at::_ops::linspace_Tensor_Tensor::call(start, end, steps, dtype, layout, device, pin_memory);
+ }
+
+ // aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options={}) {
+ return at::_ops::linspace_Tensor_Scalar::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ // aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+ return at::_ops::linspace_Tensor_Scalar::call(start, end, steps, dtype, layout, device, pin_memory);
+ }
+
+ // aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options={}) {
+ return at::_ops::linspace_Scalar_Tensor::call(start, end, steps, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+ }
+ // aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+ inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
+ return at::_ops::linspace_Scalar_Tensor::call(start, end, steps, dtype, layout, device, pin_memory);
+ }
+
+ // aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Scalar & end, int64_t steps) {
+ return at::_ops::linspace_out::call(start, end, steps, out);
+ }
+ // aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linspace_outf(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
+ return at::_ops::linspace_out::call(start, end, steps, out);
+ }
+
+ // aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linspace_out(at::Tensor & out, const at::Tensor & start, const at::Tensor & end, int64_t steps) {
+ return at::_ops::linspace_Tensor_Tensor_out::call(start, end, steps, out);
+ }
+ // aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linspace_outf(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::Tensor & out) {
+ return at::_ops::linspace_Tensor_Tensor_out::call(start, end, steps, out);
+ }
+
+ // aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linspace_out(at::Tensor & out, const at::Tensor & start, const at::Scalar & end, int64_t steps) {
+ return at::_ops::linspace_Tensor_Scalar_out::call(start, end, steps, out);
+ }
+ // aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linspace_outf(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::Tensor & out) {
+ return at::_ops::linspace_Tensor_Scalar_out::call(start, end, steps, out);
+ }
+
+ // aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linspace_out(at::Tensor & out, const at::Scalar & start, const at::Tensor & end, int64_t steps) {
+ return at::_ops::linspace_Scalar_Tensor_out::call(start, end, steps, out);
+ }
+ // aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & linspace_outf(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::Tensor & out) {
+ return at::_ops::linspace_Scalar_Tensor_out::call(start, end, steps, out);
+ }
+
+ }
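For context only (not part of the commit): the TensorOptions overloads above simply unpack dtype/layout/device/pin_memory before reaching the dispatcher. A minimal sketch of the public API:

    #include <ATen/ATen.h>

    // 5 evenly spaced values in [0, 1], with an explicit dtype via TensorOptions.
    at::Tensor v = at::linspace(0.0, 1.0, /*steps=*/5, at::TensorOptions().dtype(at::kDouble));
    // Out-variant fills a preallocated tensor.
    at::Tensor out = at::empty({5});
    at::linspace_out(out, 0.0, 1.0, /*steps=*/5);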
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log10.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <c10/util/Optional.h>
+
+
+
+ #include <ATen/ops/log10_ops.h>
+
+ namespace at {
+
+
+ // aten::log10(Tensor self) -> Tensor
+ inline at::Tensor log10(const at::Tensor & self) {
+ return at::_ops::log10::call(self);
+ }
+
+ // aten::log10_(Tensor(a!) self) -> Tensor(a!)
+ inline at::Tensor & log10_(at::Tensor & self) {
+ return at::_ops::log10_::call(self);
+ }
+
+ // aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & log10_out(at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::log10_out::call(self, out);
+ }
+ // aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & log10_outf(const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::log10_out::call(self, out);
+ }
+
+ }
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/log1p_cuda_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API at::Tensor log1p(const at::Tensor & self);
+ TORCH_API at::Tensor & log1p_out(at::Tensor & out, const at::Tensor & self);
+ TORCH_API at::Tensor & log1p_outf(const at::Tensor & self, at::Tensor & out);
+ TORCH_API at::Tensor & log1p_(at::Tensor & self);
+
+ } // namespace cuda
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/ops/logical_xor_cuda_dispatch.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API at::Tensor & logical_xor_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
+ TORCH_API at::Tensor & logical_xor_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+
+ } // namespace cuda
+ } // namespace at