Add files using upload-large-folder tool
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/base.h +103 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/map.h +118 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/mnist.h +48 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/shared.h +83 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/stateful.h +70 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/tensor.h +38 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/custom_batch_request.h +21 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/distributed.h +139 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/random.h +54 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/sequential.h +50 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/serialize.h +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/stream.h +63 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/TensorDataContainer.h +372 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/static.h +65 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/lr_scheduler.h +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/reduce_on_plateau_scheduler.h +63 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/step_lr.h +22 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize/archive.h +4 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize/input-archive.h +117 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize/output-archive.h +82 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize/tensor.h +20 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/FunctionsManual.h +1105 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/InferenceMode.h +12 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/VariableTypeUtils.h +532 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/anomaly_mode.h +73 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd.h +106 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd_not_implemented_fallback.h +34 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h +31 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/custom_function.h +438 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/edge.h +58 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/engine.h +295 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/forward_grad.h +212 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function.h +761 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function_hook.h +66 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h +25 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/variable_factories.h +728 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/grad_mode.h +13 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/graph_task.h +243 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_buffer.h +48 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_metadata.h +113 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/jit_decomp_interface.h +54 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler.h +4 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_kineto.h +192 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_legacy.h +417 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_python.h +13 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_anomaly_mode.h +43 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_autograd.h +19 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_cpp_function.h +107 -0
- env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_engine.h +48 -0
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/base.h
ADDED
@@ -0,0 +1,103 @@
#pragma once

#include <torch/data/example.h>
#include <torch/types.h>

#include <c10/util/ArrayRef.h>

#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>
#include <vector>

namespace torch {
namespace data {
namespace datasets {
template <typename S, typename T>
class MapDataset;
template <typename D, typename T>
MapDataset<D, T> map(D, T); // NOLINT
} // namespace datasets
} // namespace data
} // namespace torch

namespace torch {
namespace data {
namespace datasets {
namespace detail {
template <typename T>
struct is_optional : std::false_type {};
template <typename T>
struct is_optional<optional<T>> : std::true_type {};
} // namespace detail

/// A dataset that can yield data only in batches.
template <
    typename Self,
    typename Batch = std::vector<Example<>>,
    typename BatchRequest = ArrayRef<size_t>>
class BatchDataset {
 public:
  using SelfType = Self;
  using BatchType = Batch;
  using BatchRequestType = BatchRequest;
  constexpr static bool is_stateful = detail::is_optional<BatchType>::value;

  virtual ~BatchDataset() = default;

  /// Returns a batch of data given an index.
  virtual Batch get_batch(BatchRequest request) = 0;

  /// Returns the size of the dataset, or an empty optional if it is unsized.
  virtual optional<size_t> size() const = 0;

  /// Creates a `MapDataset` that applies the given `transform` to this dataset.
  template <typename TransformType>
  MapDataset<Self, TransformType> map(TransformType transform) & {
    return datasets::map(static_cast<Self&>(*this), std::move(transform));
  }

  /// Creates a `MapDataset` that applies the given `transform` to this dataset.
  template <typename TransformType>
  MapDataset<Self, TransformType> map(TransformType transform) && {
    return datasets::map(
        std::move(static_cast<Self&>(*this)), std::move(transform));
  }
};

/// A dataset that can yield data in batches, or as individual examples.
///
/// A `Dataset` is a `BatchDataset`, because it supports random access and
/// therefore batched access is implemented (by default) by calling the random
/// access indexing function for each index in the requested batch of indices.
/// This can be customized.
template <typename Self, typename SingleExample = Example<>>
class Dataset : public BatchDataset<Self, std::vector<SingleExample>> {
 public:
  using ExampleType = SingleExample;

  /// Returns the example at the given index.
  virtual ExampleType get(size_t index) = 0;

  /// Returns a batch of data.
  /// The default implementation calls `get()` for every requested index
  /// in the batch.
  std::vector<ExampleType> get_batch(ArrayRef<size_t> indices) override {
    std::vector<ExampleType> batch;
    batch.reserve(indices.size());
    for (const auto i : indices) {
      batch.push_back(get(i));
    }
    return batch;
  }
};

/// A `StreamDataset` represents a dataset that is a potentially infinite
/// stream. It takes as batch index only a number, which is the batch size, and
/// yields that many elements from the stream.
template <typename Self, typename Batch = std::vector<Example<>>>
using StreamDataset = BatchDataset<Self, Batch, /*BatchRequest=*/size_t>;
} // namespace datasets
} // namespace data
} // namespace torch
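
Illustrative usage sketch (not part of the diff): the `Dataset` CRTP base above only requires `get()` and `size()`; the class and tensor shapes below are hypothetical.

// Minimal random-access dataset built on torch::data::datasets::Dataset.
#include <torch/torch.h>
#include <iostream>

class TenRandomExamples
    : public torch::data::datasets::Dataset<TenRandomExamples> {
 public:
  // Returns the example at `index`: a random feature vector and a label.
  torch::data::Example<> get(size_t index) override {
    return {torch::randn({3}), torch::full({1}, static_cast<int64_t>(index))};
  }

  // Reports the dataset size so samplers know the valid index range.
  torch::optional<size_t> size() const override {
    return 10;
  }
};

int main() {
  TenRandomExamples dataset;
  // The inherited default `get_batch()` calls `get()` once per index.
  auto batch = dataset.get_batch({0, 1, 2});
  std::cout << batch.size() << " examples in the batch\n";
}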
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/map.h
ADDED
@@ -0,0 +1,118 @@
#pragma once

#include <torch/data/datasets/base.h>
#include <torch/types.h>

#include <c10/util/ArrayRef.h>

#include <cstddef>
#include <type_traits>
#include <utility>

namespace torch {
namespace data {
namespace datasets {
namespace detail {
template <bool C, typename T>
using optional_if_t = typename std::conditional<C, torch::optional<T>, T>::type;
} // namespace detail

/// A `MapDataset` is a dataset that applies a transform to a source dataset.
template <typename SourceDataset, typename AppliedTransform>
class MapDataset : public BatchDataset<
                       MapDataset<SourceDataset, AppliedTransform>,
                       detail::optional_if_t<
                           SourceDataset::is_stateful,
                           typename AppliedTransform::OutputBatchType>,
                       typename SourceDataset::BatchRequestType> {
 public:
  using DatasetType = SourceDataset;
  using TransformType = AppliedTransform;
  using BatchRequestType = typename SourceDataset::BatchRequestType;
  using OutputBatchType = detail::optional_if_t<
      SourceDataset::is_stateful,
      typename AppliedTransform::OutputBatchType>;

  MapDataset(DatasetType dataset, TransformType transform)
      : dataset_(std::move(dataset)), transform_(std::move(transform)) {}

  /// Gets a batch from the source dataset and applies the transform to it,
  /// returning the result.
  OutputBatchType get_batch(BatchRequestType indices) override {
    return get_batch_impl(std::move(indices));
  }

  /// Returns the size of the source dataset.
  // NOLINTNEXTLINE(bugprone-exception-escape)
  optional<size_t> size() const noexcept override {
    return dataset_.size();
  }

  /// Calls `reset()` on the underlying dataset.
  /// NOTE: Stateless datasets do not have a reset() method, so a call to this
  /// method will only compile for stateful datasets (which have a reset()
  /// method).
  void reset() {
    dataset_.reset();
  }

  /// Returns the underlying dataset.
  const SourceDataset& dataset() noexcept {
    return dataset_;
  }

  /// Returns the transform being applied.
  const AppliedTransform& transform() noexcept {
    return transform_;
  }

 private:
  /// The implementation of `get_batch()` for the stateless case, which simply
  /// applies the transform to the output of `get_batch()` from the dataset.
  template <
      typename D = SourceDataset,
      typename = torch::disable_if_t<D::is_stateful>>
  OutputBatchType get_batch_impl(BatchRequestType indices) {
    return transform_.apply_batch(dataset_.get_batch(std::move(indices)));
  }

  /// The implementation of `get_batch()` for the stateful case. Here, we
  /// follow the semantics of `Optional.map()` in many functional languages,
  /// which applies a transformation to the optional's content when the
  /// optional contains a value, and returns a new optional (of a different
  /// type) if the original optional returned by `get_batch()` was empty.
  template <typename D = SourceDataset>
  torch::enable_if_t<D::is_stateful, OutputBatchType> get_batch_impl(
      BatchRequestType indices) {
    if (auto batch = dataset_.get_batch(std::move(indices))) {
      return transform_.apply_batch(std::move(*batch));
    }
    return nullopt;
  }

  /// The underlying dataset being transformed.
  SourceDataset dataset_;

  // The transformation that is applied to batches received from the dataset.
  AppliedTransform transform_;
};

/// Creates a `MapDataset` with the given dataset and transform.
template <typename DatasetType, typename TransformType>
MapDataset<DatasetType, TransformType> map(
    DatasetType dataset,
    TransformType transform) {
  static_assert(
      std::is_same<
          typename std::conditional<
              DatasetType::is_stateful,
              typename DatasetType::BatchType::value_type,
              typename DatasetType::BatchType>::type,
          typename TransformType::InputBatchType>::value,
      "BatchType type of dataset does not match input type of transform");
  return {std::move(dataset), std::move(transform)};
}

} // namespace datasets
} // namespace data
} // namespace torch
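
Illustrative usage sketch (not part of the diff): chaining `.map()` to build a `MapDataset`. It assumes the `Normalize`/`Stack` transforms from the libtorch data API; the MNIST data path is a placeholder.

// Applying transforms to a dataset via MapDataset.
#include <torch/torch.h>
#include <iostream>

int main() {
  auto dataset = torch::data::datasets::MNIST("./mnist-data") // placeholder path
                     .map(torch::data::transforms::Normalize<>(0.1307, 0.3081))
                     .map(torch::data::transforms::Stack<>());
  // After `Stack<>`, a batch is a single `Example<>` whose data/target tensors
  // carry a leading batch dimension.
  auto batch = dataset.get_batch({0, 1, 2, 3});
  std::cout << batch.data.sizes() << "\n"; // e.g. [4, 1, 28, 28]
}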
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/mnist.h
ADDED
@@ -0,0 +1,48 @@
#pragma once

#include <torch/data/datasets/base.h>
#include <torch/data/example.h>
#include <torch/types.h>

#include <torch/csrc/Export.h>

#include <cstddef>
#include <string>

namespace torch {
namespace data {
namespace datasets {
/// The MNIST dataset.
class TORCH_API MNIST : public Dataset<MNIST> {
 public:
  /// The mode in which the dataset is loaded.
  enum class Mode { kTrain, kTest };

  /// Loads the MNIST dataset from the `root` path.
  ///
  /// The supplied `root` path should contain the *content* of the unzipped
  /// MNIST dataset, available from http://yann.lecun.com/exdb/mnist.
  explicit MNIST(const std::string& root, Mode mode = Mode::kTrain);

  /// Returns the `Example` at the given `index`.
  Example<> get(size_t index) override;

  /// Returns the size of the dataset.
  optional<size_t> size() const override;

  /// Returns true if this is the training subset of MNIST.
  // NOLINTNEXTLINE(bugprone-exception-escape)
  bool is_train() const noexcept;

  /// Returns all images stacked into a single tensor.
  const Tensor& images() const;

  /// Returns all targets stacked into a single tensor.
  const Tensor& targets() const;

 private:
  Tensor images_, targets_;
};
} // namespace datasets
} // namespace data
} // namespace torch
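
Illustrative usage sketch (not part of the diff): feeding the MNIST dataset into a data loader. The `./mnist-data` path is a placeholder for a directory containing the unzipped MNIST files.

// Iterating MNIST through a data loader.
#include <torch/torch.h>
#include <iostream>

int main() {
  auto dataset = torch::data::datasets::MNIST(
                     "./mnist-data", torch::data::datasets::MNIST::Mode::kTrain)
                     .map(torch::data::transforms::Stack<>());
  // Default sampler; batch size 64.
  auto loader = torch::data::make_data_loader(std::move(dataset), /*batch_size=*/64);
  for (const auto& batch : *loader) {
    std::cout << "data: " << batch.data.sizes()
              << ", targets: " << batch.target.sizes() << "\n";
    break; // one batch is enough for the sketch
  }
}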
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/shared.h
ADDED
@@ -0,0 +1,83 @@
#pragma once

#include <torch/data/datasets/base.h>

#include <memory>
#include <utility>

namespace torch {
namespace data {
namespace datasets {

/// A dataset that wraps another dataset in a shared pointer and implements the
/// `BatchDataset` API, delegating all calls to the shared instance. This is
/// useful when you want all worker threads in the dataloader to access the
/// same dataset instance. The dataset must take care of synchronization and
/// thread-safe access itself.
///
/// Use `torch::data::datasets::make_shared_dataset()` to create a new
/// `SharedBatchDataset` like you would a `std::shared_ptr`.
template <typename UnderlyingDataset>
class SharedBatchDataset : public BatchDataset<
                               SharedBatchDataset<UnderlyingDataset>,
                               typename UnderlyingDataset::BatchType,
                               typename UnderlyingDataset::BatchRequestType> {
 public:
  using BatchType = typename UnderlyingDataset::BatchType;
  using BatchRequestType = typename UnderlyingDataset::BatchRequestType;

  /// Constructs a new `SharedBatchDataset` from a `shared_ptr` to the
  /// `UnderlyingDataset`.
  /* implicit */ SharedBatchDataset(
      std::shared_ptr<UnderlyingDataset> shared_dataset)
      : dataset_(std::move(shared_dataset)) {}

  /// Calls `get_batch` on the underlying dataset.
  BatchType get_batch(BatchRequestType request) override {
    return dataset_->get_batch(std::move(request));
  }

  /// Returns the `size` from the underlying dataset.
  optional<size_t> size() const override {
    return dataset_->size();
  }

  /// Accesses the underlying dataset.
  UnderlyingDataset& operator*() {
    return *dataset_;
  }

  /// Accesses the underlying dataset.
  const UnderlyingDataset& operator*() const {
    return *dataset_;
  }

  /// Accesses the underlying dataset.
  UnderlyingDataset* operator->() {
    return dataset_.get();
  }

  /// Accesses the underlying dataset.
  const UnderlyingDataset* operator->() const {
    return dataset_.get();
  }

  /// Calls `reset()` on the underlying dataset.
  void reset() {
    dataset_->reset();
  }

 private:
  std::shared_ptr<UnderlyingDataset> dataset_;
};

/// Constructs a new `SharedBatchDataset` by creating a
/// `shared_ptr<UnderlyingDataset>`. All arguments are forwarded to
/// `make_shared<UnderlyingDataset>`.
template <typename UnderlyingDataset, typename... Args>
SharedBatchDataset<UnderlyingDataset> make_shared_dataset(Args&&... args) {
  return std::make_shared<UnderlyingDataset>(std::forward<Args>(args)...);
}
} // namespace datasets
} // namespace data
} // namespace torch
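
Illustrative usage sketch (not part of the diff): sharing one dataset instance between several consumers via `make_shared_dataset`. The counting dataset below is hypothetical; a real shared dataset would guard any mutable state for thread safety.

// Wrapping a dataset in a SharedBatchDataset.
#include <torch/torch.h>
#include <iostream>

struct CountingDataset : torch::data::datasets::Dataset<CountingDataset> {
  torch::data::Example<> get(size_t index) override {
    return {torch::tensor(static_cast<int64_t>(index)), torch::tensor(0)};
  }
  torch::optional<size_t> size() const override {
    return 100;
  }
};

int main() {
  // Every copy of `shared` delegates to the same CountingDataset instance.
  auto shared = torch::data::datasets::make_shared_dataset<CountingDataset>();
  auto copy = shared; // cheap: copies the shared_ptr, not the dataset
  std::cout << *copy->size() << " examples, first batch has "
            << shared.get_batch({0, 1, 2}).size() << " elements\n";
}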
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/stateful.h
ADDED
@@ -0,0 +1,70 @@
#pragma once

#include <torch/data/datasets/base.h>
#include <torch/data/example.h>

#include <cstddef>
#include <vector>

namespace torch {
namespace serialize {
class OutputArchive;
class InputArchive;
} // namespace serialize
} // namespace torch

namespace torch {
namespace data {
namespace datasets {

/// A stateful dataset is a dataset that maintains some internal state, which
/// will be `reset()` at the beginning of each epoch. Subclasses can override
/// the `reset()` method to configure this behavior. Further, the return type
/// of a stateful dataset's `get_batch()` method is always an `optional`. When
/// the stateful dataset wants to indicate to the dataloader that its epoch has
/// ended, it should return an empty optional. The dataloader knows to modify
/// its implementation based on whether the dataset is stateless or stateful.
///
/// Note that when subclassing from `StatefulDataset<Self, T>`, the return
/// type of `get_batch()`, which the subclass must override, will be
/// `optional<T>` (i.e. the type specified in the `StatefulDataset`
/// specialization is automatically boxed into an `optional` for the dataset's
/// `BatchType`).
template <
    typename Self,
    typename Batch = std::vector<Example<>>,
    typename BatchRequest = size_t>
class StatefulDataset
    : public BatchDataset<Self, optional<Batch>, BatchRequest> {
 public:
  /// Resets internal state of the dataset.
  virtual void reset() = 0;

  /// Saves the statefulDataset's state to OutputArchive.
  virtual void save(serialize::OutputArchive& archive) const = 0;

  /// Deserializes the statefulDataset's state from the `archive`.
  virtual void load(serialize::InputArchive& archive) = 0;
};

/// Serializes a statefulDataset to `OutputArchive`.
template <typename... Args>
serialize::OutputArchive& operator<<(
    serialize::OutputArchive& archive,
    const StatefulDataset<Args...>& statefulDataset) {
  statefulDataset.save(archive);
  return archive;
}

/// Deserializes a statefulDataset from an `InputArchive`.
template <typename... Args>
serialize::InputArchive& operator>>(
    serialize::InputArchive& archive,
    StatefulDataset<Args...>& statefulDataset) {
  statefulDataset.load(archive);
  return archive;
}

} // namespace datasets
} // namespace data
} // namespace torch
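
Illustrative usage sketch (not part of the diff): a minimal stateful dataset that serves a fixed number of batches per epoch and then signals exhaustion with an empty optional. The counter-based epoch logic and archive key are made up for the example.

// A StatefulDataset subclass with reset/save/load.
#include <torch/torch.h>

class ChunkedDataset
    : public torch::data::datasets::StatefulDataset<ChunkedDataset> {
 public:
  torch::optional<std::vector<torch::data::Example<>>> get_batch(
      size_t batch_size) override {
    if (batches_served_ == 3) {
      return torch::nullopt; // epoch is over
    }
    ++batches_served_;
    return std::vector<torch::data::Example<>>(
        batch_size, {torch::randn({3}), torch::zeros({1})});
  }

  torch::optional<size_t> size() const override {
    return torch::nullopt; // size not known up front
  }

  void reset() override {
    batches_served_ = 0; // called at the start of every epoch
  }

  void save(torch::serialize::OutputArchive& archive) const override {
    archive.write(
        "batches_served", torch::tensor(static_cast<int64_t>(batches_served_)));
  }

  void load(torch::serialize::InputArchive& archive) override {
    torch::Tensor t;
    archive.read("batches_served", t);
    batches_served_ = static_cast<size_t>(t.item<int64_t>());
  }

 private:
  size_t batches_served_ = 0;
};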
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/tensor.h
ADDED
@@ -0,0 +1,38 @@
#pragma once

#include <torch/data/datasets/base.h>
#include <torch/data/example.h>
#include <torch/types.h>

#include <cstddef>
#include <vector>

namespace torch {
namespace data {
namespace datasets {

/// A dataset of tensors.
/// Stores a single tensor internally, which is then indexed inside `get()`.
struct TensorDataset : public Dataset<TensorDataset, TensorExample> {
  /// Creates a `TensorDataset` from a vector of tensors.
  explicit TensorDataset(const std::vector<Tensor>& tensors)
      : TensorDataset(torch::stack(tensors)) {}

  explicit TensorDataset(torch::Tensor tensor) : tensor(std::move(tensor)) {}

  /// Returns a single `TensorExample`.
  TensorExample get(size_t index) override {
    return tensor[index];
  }

  /// Returns the number of tensors in the dataset.
  optional<size_t> size() const override {
    return tensor.size(0);
  }

  Tensor tensor;
};

} // namespace datasets
} // namespace data
} // namespace torch
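
Illustrative usage sketch (not part of the diff): building a `TensorDataset` from a single stacked tensor and reading examples back out. The shapes are arbitrary.

// TensorDataset indexing along the first dimension.
#include <torch/torch.h>
#include <iostream>

int main() {
  // Five 3-element feature vectors stored as one [5, 3] tensor.
  torch::data::datasets::TensorDataset dataset(torch::randn({5, 3}));

  std::cout << "size: " << *dataset.size() << "\n";               // 5
  auto example = dataset.get(2);                                   // row 2
  std::cout << "example shape: " << example.data.sizes() << "\n"; // [3]
}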
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/custom_batch_request.h
ADDED
@@ -0,0 +1,21 @@
#pragma once

#include <torch/csrc/Export.h>
#include <cstddef>

namespace torch {
namespace data {
namespace samplers {
/// A base class for custom index types.
struct TORCH_API CustomBatchRequest {
  CustomBatchRequest() = default;
  CustomBatchRequest(const CustomBatchRequest&) = default;
  CustomBatchRequest(CustomBatchRequest&&) noexcept = default;
  virtual ~CustomBatchRequest() = default;

  /// The number of elements accessed by this index.
  virtual size_t size() const = 0;
};
} // namespace samplers
} // namespace data
} // namespace torch
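
Illustrative sketch (not part of the diff): a custom index type implementing the `CustomBatchRequest` interface, here a hypothetical half-open index range.

// A custom batch request describing a contiguous index range.
#include <torch/torch.h>

struct IndexRange : torch::data::samplers::CustomBatchRequest {
  IndexRange(size_t begin, size_t end) : begin_(begin), end_(end) {}

  // Number of elements covered by this request.
  size_t size() const override {
    return end_ - begin_;
  }

  size_t begin_;
  size_t end_;
};

A sampler that produces `IndexRange` values and a dataset whose `BatchRequestType` consumes them would have to agree on this type.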
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/distributed.h
ADDED
@@ -0,0 +1,139 @@
#pragma once

#include <torch/csrc/Export.h>
#include <torch/data/samplers/base.h>

#include <cstddef>
#include <vector>

namespace torch {
namespace serialize {
class OutputArchive;
class InputArchive;
} // namespace serialize
} // namespace torch

namespace torch {
namespace data {
namespace samplers {

/// A `Sampler` that selects a subset of indices to sample from and defines a
/// sampling behavior. In a distributed setting, this selects a subset of the
/// indices depending on the provided num_replicas and rank parameters. The
/// `Sampler` performs a rounding operation based on the `allow_duplicates`
/// parameter to decide the local sample count.
template <typename BatchRequest = std::vector<size_t>>
class DistributedSampler : public Sampler<BatchRequest> {
 public:
  DistributedSampler(
      size_t size,
      size_t num_replicas = 1,
      size_t rank = 0,
      bool allow_duplicates = true)
      : size_(size),
        num_replicas_(num_replicas),
        rank_(rank),
        epoch_(0),
        allow_duplicates_(allow_duplicates) {}

  /// Set the epoch for the current enumeration. This can be used to alter the
  /// sample selection and shuffling behavior.
  void set_epoch(size_t epoch) {
    epoch_ = epoch;
  }

  size_t epoch() const {
    return epoch_;
  }

 protected:
  size_t local_sample_count() {
    if (allow_duplicates_) {
      return (size_ + num_replicas_ - 1) / num_replicas_;
    } else {
      return size_ / num_replicas_;
    }
  }

  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  size_t size_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  size_t num_replicas_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  size_t rank_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  size_t epoch_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool allow_duplicates_;
};

/// Select samples randomly. The sampling order is shuffled at each `reset()`
/// call.
class TORCH_API DistributedRandomSampler : public DistributedSampler<> {
 public:
  DistributedRandomSampler(
      size_t size,
      size_t num_replicas = 1,
      size_t rank = 0,
      bool allow_duplicates = true);

  /// Resets the `DistributedRandomSampler` to a new set of indices.
  void reset(optional<size_t> new_size = nullopt) override;

  /// Returns the next batch of indices.
  optional<std::vector<size_t>> next(size_t batch_size) override;

  /// Serializes the `DistributedRandomSampler` to the `archive`.
  void save(serialize::OutputArchive& archive) const override;

  /// Deserializes the `DistributedRandomSampler` from the `archive`.
  void load(serialize::InputArchive& archive) override;

  /// Returns the current index of the `DistributedRandomSampler`.
  size_t index() const noexcept;

 private:
  void populate_indices();

  size_t begin_index_;
  size_t end_index_;
  size_t sample_index_;
  std::vector<size_t> all_indices_;
};

/// Select samples sequentially.
class TORCH_API DistributedSequentialSampler : public DistributedSampler<> {
 public:
  DistributedSequentialSampler(
      size_t size,
      size_t num_replicas = 1,
      size_t rank = 0,
      bool allow_duplicates = true);

  /// Resets the `DistributedSequentialSampler` to a new set of indices.
  void reset(optional<size_t> new_size = nullopt) override;

  /// Returns the next batch of indices.
  optional<std::vector<size_t>> next(size_t batch_size) override;

  /// Serializes the `DistributedSequentialSampler` to the `archive`.
  void save(serialize::OutputArchive& archive) const override;

  /// Deserializes the `DistributedSequentialSampler` from the `archive`.
  void load(serialize::InputArchive& archive) override;

  /// Returns the current index of the `DistributedSequentialSampler`.
  size_t index() const noexcept;

 private:
  void populate_indices();

  size_t begin_index_;
  size_t end_index_;
  size_t sample_index_;
  std::vector<size_t> all_indices_;
};

} // namespace samplers
} // namespace data
} // namespace torch
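
Illustrative usage sketch (not part of the diff): one `DistributedRandomSampler` per process in a two-replica job. The rank value is a placeholder that would normally come from the process group; calling `set_epoch` before `reset()` changes the shuffle between epochs.

// Drawing per-rank index batches from a DistributedRandomSampler.
#include <torch/torch.h>
#include <iostream>

int main() {
  const size_t dataset_size = 10;
  const size_t num_replicas = 2;
  const size_t rank = 0; // placeholder: would come from the process group

  torch::data::samplers::DistributedRandomSampler sampler(
      dataset_size, num_replicas, rank, /*allow_duplicates=*/true);

  for (size_t epoch = 0; epoch < 2; ++epoch) {
    sampler.set_epoch(epoch);
    sampler.reset();
    while (auto indices = sampler.next(/*batch_size=*/4)) {
      std::cout << "epoch " << epoch << ": got " << indices->size()
                << " indices\n";
    }
  }
}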
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/random.h
ADDED
@@ -0,0 +1,54 @@
#pragma once

#include <torch/csrc/Export.h>
#include <torch/data/samplers/base.h>
#include <torch/types.h>

#include <cstddef>
#include <vector>

namespace torch {
namespace serialize {
class OutputArchive;
class InputArchive;
} // namespace serialize
} // namespace torch

namespace torch {
namespace data {
namespace samplers {

/// A `Sampler` that returns random indices.
class TORCH_API RandomSampler : public Sampler<> {
 public:
  /// Constructs a `RandomSampler` with a size and dtype for the stored
  /// indices.
  ///
  /// The constructor will eagerly allocate all required indices, which is the
  /// sequence `0 ... size - 1`. `index_dtype` is the data type of the stored
  /// indices. You can change it to influence memory usage.
  explicit RandomSampler(int64_t size, Dtype index_dtype = torch::kInt64);

  ~RandomSampler() override;

  /// Resets the `RandomSampler` to a new set of indices.
  void reset(optional<size_t> new_size = nullopt) override;

  /// Returns the next batch of indices.
  optional<std::vector<size_t>> next(size_t batch_size) override;

  /// Serializes the `RandomSampler` to the `archive`.
  void save(serialize::OutputArchive& archive) const override;

  /// Deserializes the `RandomSampler` from the `archive`.
  void load(serialize::InputArchive& archive) override;

  /// Returns the current index of the `RandomSampler`.
  size_t index() const noexcept;

 private:
  at::Tensor indices_;
  int64_t index_ = 0;
};
} // namespace samplers
} // namespace data
} // namespace torch
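
Illustrative usage sketch (not part of the diff): drawing shuffled index batches directly from a `RandomSampler`.

// Exhausting a RandomSampler, then reshuffling with reset().
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::data::samplers::RandomSampler sampler(/*size=*/10);
  while (auto indices = sampler.next(/*batch_size=*/4)) {
    // Batches of at most 4 shuffled indices in [0, 10); the last batch is
    // smaller once the sampler runs out.
    std::cout << indices->size() << " indices\n";
  }
  sampler.reset(); // reshuffles for the next pass
}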
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/sequential.h
ADDED
@@ -0,0 +1,50 @@
#pragma once

#include <torch/csrc/Export.h>
#include <torch/data/samplers/base.h>
#include <torch/types.h>

#include <cstddef>
#include <vector>

namespace torch {
namespace serialize {
class OutputArchive;
class InputArchive;
} // namespace serialize
} // namespace torch

namespace torch {
namespace data {
namespace samplers {

/// A `Sampler` that returns indices sequentially.
class TORCH_API SequentialSampler : public Sampler<> {
 public:
  /// Creates a `SequentialSampler` that will return indices in the range
  /// `0...size - 1`.
  explicit SequentialSampler(size_t size);

  /// Resets the `SequentialSampler` to zero.
  void reset(optional<size_t> new_size = nullopt) override;

  /// Returns the next batch of indices.
  optional<std::vector<size_t>> next(size_t batch_size) override;

  /// Serializes the `SequentialSampler` to the `archive`.
  void save(serialize::OutputArchive& archive) const override;

  /// Deserializes the `SequentialSampler` from the `archive`.
  void load(serialize::InputArchive& archive) override;

  /// Returns the current index of the `SequentialSampler`.
  size_t index() const noexcept;

 private:
  size_t size_;
  size_t index_{0};
};

} // namespace samplers
} // namespace data
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/serialize.h
ADDED
@@ -0,0 +1,28 @@
#pragma once

#include <torch/data/samplers/base.h>
#include <torch/serialize/archive.h>

namespace torch {
namespace data {
namespace samplers {
/// Serializes a `Sampler` into an `OutputArchive`.
template <typename BatchRequest>
serialize::OutputArchive& operator<<(
    serialize::OutputArchive& archive,
    const Sampler<BatchRequest>& sampler) {
  sampler.save(archive);
  return archive;
}

/// Deserializes a `Sampler` from an `InputArchive`.
template <typename BatchRequest>
serialize::InputArchive& operator>>(
    serialize::InputArchive& archive,
    Sampler<BatchRequest>& sampler) {
  sampler.load(archive);
  return archive;
}
} // namespace samplers
} // namespace data
} // namespace torch
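
Illustrative usage sketch (not part of the diff): round-tripping a sampler's position through the archive operators declared above. The file name is a placeholder, and the default-constructed archives assume the libtorch serialize API.

// Saving and restoring a sampler's state.
#include <torch/torch.h>

int main() {
  torch::data::samplers::SequentialSampler sampler(/*size=*/100);
  sampler.next(/*batch_size=*/10); // advance the sampler a bit

  torch::serialize::OutputArchive out;
  out << sampler; // calls sampler.save(out)
  out.save_to("sampler.pt"); // placeholder file name

  torch::data::samplers::SequentialSampler restored(/*size=*/100);
  torch::serialize::InputArchive in;
  in.load_from("sampler.pt");
  in >> restored; // calls restored.load(in), resuming at index 10
}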
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers/stream.h
ADDED
@@ -0,0 +1,63 @@
#pragma once

#include <torch/csrc/Export.h>
#include <torch/data/samplers/base.h>
#include <torch/data/samplers/custom_batch_request.h>
#include <torch/types.h>

#include <cstddef>

namespace torch {
namespace serialize {
class InputArchive;
class OutputArchive;
} // namespace serialize
} // namespace torch

namespace torch {
namespace data {
namespace samplers {

/// A wrapper around a batch size value, which implements the
/// `CustomBatchRequest` interface.
struct TORCH_API BatchSize : public CustomBatchRequest {
  explicit BatchSize(size_t size);
  size_t size() const noexcept override;
  operator size_t() const noexcept;
  size_t size_;
};

/// A sampler for (potentially infinite) streams of data.
///
/// The major feature of the `StreamSampler` is that it does not return
/// particular indices, but instead only the number of elements to fetch from
/// the dataset. The dataset has to decide how to produce those elements.
class TORCH_API StreamSampler : public Sampler<BatchSize> {
 public:
  /// Constructs the `StreamSampler` with the number of individual examples
  /// that should be fetched until the sampler is exhausted.
  explicit StreamSampler(size_t epoch_size);

  /// Resets the internal state of the sampler.
  void reset(optional<size_t> new_size = nullopt) override;

  /// Returns a `BatchSize` object with the number of elements to fetch in the
  /// next batch. This number is the minimum of the supplied `batch_size` and
  /// the difference between the `epoch_size` and the current index. If the
  /// `epoch_size` has been reached, returns an empty optional.
  optional<BatchSize> next(size_t batch_size) override;

  /// Serializes the `StreamSampler` to the `archive`.
  void save(serialize::OutputArchive& archive) const override;

  /// Deserializes the `StreamSampler` from the `archive`.
  void load(serialize::InputArchive& archive) override;

 private:
  size_t examples_retrieved_so_far_ = 0;
  size_t epoch_size_;
};

} // namespace samplers
} // namespace data
} // namespace torch
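
Illustrative usage sketch (not part of the diff): a `StreamSampler` capping an epoch at 10 examples. Each `next()` call yields a `BatchSize` rather than concrete indices.

// Requesting element counts from a StreamSampler.
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::data::samplers::StreamSampler sampler(/*epoch_size=*/10);
  while (auto batch_size = sampler.next(/*batch_size=*/4)) {
    // Prints 4, 4, 2: the final request is clipped to the remaining budget.
    std::cout << "fetch " << batch_size->size() << " elements\n";
  }
}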
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/TensorDataContainer.h
ADDED
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/Dispatch.h>
|
4 |
+
#include <ATen/ScalarOps.h>
|
5 |
+
#include <ATen/core/Tensor.h>
|
6 |
+
#include <ATen/core/grad_mode.h>
|
7 |
+
|
8 |
+
#include <c10/util/irange.h>
|
9 |
+
|
10 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
11 |
+
#include <ATen/Functions.h>
|
12 |
+
#else
|
13 |
+
#include <ATen/ops/empty.h>
|
14 |
+
#include <ATen/ops/tensor.h>
|
15 |
+
#endif
|
16 |
+
|
17 |
+
#include <initializer_list>
|
18 |
+
|
19 |
+
namespace torch {
|
20 |
+
|
21 |
+
namespace detail {
|
22 |
+
|
23 |
+
enum class TensorDataContainerType { Scalar, InitList, Tensor };
|
24 |
+
|
25 |
+
struct TensorDataContainer;
|
26 |
+
|
27 |
+
inline std::ostream& operator<<(
|
28 |
+
std::ostream& stream,
|
29 |
+
const TensorDataContainer& tensor_data_container);
|
30 |
+
|
31 |
+
// FIXME: There is no `operator<<` overload for `at::kBFloat16` type,
|
32 |
+
// and we need to convert it to `float` type using `operator float()` function
|
33 |
+
// defined in `c10/util/BFloat16.h`.
|
34 |
+
// Tracking issue: https://github.com/pytorch/pytorch/issues/28845
|
35 |
+
inline std::ostream& operator<<(std::ostream& stream, c10::BFloat16 value) {
|
36 |
+
stream << static_cast<float>(value);
|
37 |
+
return stream;
|
38 |
+
}
|
39 |
+
|
40 |
+
inline c10::ScalarType compute_desired_dtype(c10::ScalarType scalar_type) {
|
41 |
+
if (scalar_type == at::kInt || scalar_type == at::kLong) {
|
42 |
+
// C++ `torch::tensor` with an integer type or an `at::ArrayRef` /
|
43 |
+
// `std::vector` / (nested) braced-init-list of integer types always
|
44 |
+
// produces a tensor of dtype `at::kLong` (aka. int64_t), matching Python
|
45 |
+
// `torch.tensor` behavior.
|
46 |
+
return at::kLong;
|
47 |
+
} else if (scalar_type == at::kFloat || scalar_type == at::kDouble) {
|
48 |
+
// C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` /
|
49 |
+
// `std::vector` / (nested) braced-init-list of floating-point types always
|
50 |
+
// produces a tensor of dtype `torch::get_default_dtype()`, matching Python
|
51 |
+
// `torch.tensor` behavior.
|
52 |
+
return at::typeMetaToScalarType(at::get_default_dtype());
|
53 |
+
} else {
|
54 |
+
return scalar_type;
|
55 |
+
}
|
56 |
+
}
|
57 |
+
|
58 |
+
// We use `TensorDataContainer` to support converting the following data
|
59 |
+
// container types into the equivalent Tensor:
|
60 |
+
//
|
61 |
+
// 1. Arbitrarily nested braced-init-list (e.g. `{{1, 2}, {3, 4}}`).
|
62 |
+
// 2. `at::ArrayRef` of supported tensor data types.
|
63 |
+
// 3. `std::vector` of supported tensor data types.
|
64 |
+
//
|
65 |
+
// At any time, a `TensorDataContainer` object represents one of the following:
|
66 |
+
//
|
67 |
+
// 1. A scalar with value `scalar()` and type `scalar_type()`.
|
68 |
+
// 2. A Tensor represented in `std::initializer_list<TensorDataContainer>` form,
|
69 |
+
// with value `init_list()`, Tensor scalar type `scalar_type()`, and Tensor
|
70 |
+
// sizes `sizes()`.
|
71 |
+
// 3. A Tensor represented in `at::Tensor` form, with value `tensor()`, scalar
|
72 |
+
// type `scalar_type()`,
|
73 |
+
// and Tensor sizes `sizes()`.
|
74 |
+
//
|
75 |
+
// All the infrastructure here is mostly to support converting an arbitrarily
|
76 |
+
// nested braced-init-list to the equivalent Tensor successfully. Consider the
|
77 |
+
// following example:
|
78 |
+
//
|
79 |
+
// `torch::tensor({{1}, {2}})`
|
80 |
+
//
|
81 |
+
// this will call into the `torch::tensor` function:
|
82 |
+
//
|
83 |
+
// `at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const
|
84 |
+
// at::TensorOptions& options = {})`
|
85 |
+
//
|
86 |
+
// the compiler will first try to convert `{{1}, {2}}` to `TensorDataContainer`
|
87 |
+
// type:
|
88 |
+
//
|
89 |
+
// `TensorDataContainer({{1}, {2}})`
|
90 |
+
//
|
91 |
+
// which matches to the
|
92 |
+
// `TensorDataContainer(std::initializer_list<TensorDataContainer>)`
|
93 |
+
// constructor, and in an attempt to convert `{1}` and `{2}` to
|
94 |
+
// `TensorDataContainer`, it calls the following:
|
95 |
+
//
|
96 |
+
// `TensorDataContainer({1})` (same call path happens for `{2}`, and we'll just
|
97 |
+
// focus on `{1}` here)
|
98 |
+
//
|
99 |
+
// At this point, theoretically there are two plausible ways for `{1}` to be
|
100 |
+
// matched to one of the constructors of `TensorDataContainer`:
|
101 |
+
//
|
102 |
+
// 1. It can be a list-initialization of a scalar value, thus matching
|
103 |
+
// `TensorDataContainer(int value)`.
|
104 |
+
// 2. It can be converted to `std::initializer_list<TensorDataContainer>`, thus
|
105 |
+
// matching
|
106 |
+
// `TensorDataContainer(std::initializer_list<TensorDataContainer>)`.
|
107 |
+
//
|
108 |
+
// How does the compiler decide which one to choose? According to
|
109 |
+
// `https://en.cppreference.com/w/cpp/language/list_initialization`,
|
110 |
+
// braced-init-list always prefers the constructor that takes
|
111 |
+
// `std::initializer_list`. Hence we happily move forward with constructor #2,
|
112 |
+
// and it calls the following:
|
113 |
+
//
|
114 |
+
// `TensorDataContainer(1)`
|
115 |
+
//
|
116 |
+
// Now it matches `TensorDataContainer(int value)`, which stores `1` as a scalar
|
117 |
+
// value. All is good.
|
118 |
+
struct TensorDataContainer {
|
119 |
+
// NOTE: For tensors with zero-size dimensions (e.g. `torch::tensor({{},
|
120 |
+
// {}})`), the innermost empty braced-init-list `{}` matches the default
|
121 |
+
// constructor of the innermost `TensorDataContainer`.
|
122 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
123 |
+
TensorDataContainer()
|
124 |
+
: sizes_({0}),
|
125 |
+
// NOTE: In Python, the dtype of tensors with zero-size dimensions (e.g.
|
126 |
+
// `torch.tensor([[], []])`) depends on the value of
|
127 |
+
// `torch.get_default_dtype()`, and we should do the same for the C++
|
128 |
+
// equivalent.
|
129 |
+
scalar_type_(at::typeMetaToScalarType(at::get_default_dtype())),
|
130 |
+
type_(TensorDataContainerType::InitList) {}
|
131 |
+
#define TENSOR(T, S) \
|
132 |
+
TensorDataContainer(T value) \
|
133 |
+
: sizes_(), \
|
134 |
+
scalar_type_(at::k##S), \
|
135 |
+
type_(TensorDataContainerType::Scalar), \
|
136 |
+
scalar_(value) {}
|
137 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
138 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
|
139 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
140 |
+
AT_FORALL_COMPLEX_TYPES(TENSOR)
|
141 |
+
#undef TENSOR
|
142 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
143 |
+
TensorDataContainer(std::initializer_list<TensorDataContainer> init_list)
|
144 |
+
: sizes_(),
|
145 |
+
scalar_type_(init_list.begin()->scalar_type()),
|
146 |
+
type_(TensorDataContainerType::InitList),
|
147 |
+
init_list_(init_list) {
|
148 |
+
const TensorDataContainer& first_elem = *(init_list.begin());
|
149 |
+
for (const auto& elem : init_list) {
|
150 |
+
TORCH_CHECK(
|
151 |
+
elem.sizes() == first_elem.sizes(),
|
152 |
+
"Expected all sub-lists to have sizes: ",
|
153 |
+
first_elem.sizes(),
|
154 |
+
" (e.g. ",
|
155 |
+
first_elem,
|
156 |
+
"), ",
|
157 |
+
"but got sub-list ",
|
158 |
+
elem,
|
159 |
+
" with sizes: ",
|
160 |
+
elem.sizes());
|
161 |
+
TORCH_CHECK(
|
162 |
+
elem.scalar_type() == first_elem.scalar_type(),
|
163 |
+
"Expected all elements of the tensor to have the same scalar type: ",
|
164 |
+
first_elem.scalar_type(),
|
165 |
+
", but got element of scalar type: ",
|
166 |
+
elem.scalar_type());
|
167 |
+
}
|
168 |
+
sizes_.reserve(first_elem.sizes().size() + 1);
|
169 |
+
sizes_.push_back(init_list.size());
|
170 |
+
sizes_.insert(
|
171 |
+
sizes_.end(), first_elem.sizes().begin(), first_elem.sizes().end());
|
172 |
+
}
|
173 |
+
|
174 |
+
#define TENSOR(T, S) \
|
175 |
+
TensorDataContainer(at::ArrayRef<T> values) \
|
176 |
+
: sizes_({(int64_t)values.size()}), \
|
177 |
+
scalar_type_(at::k##S), \
|
178 |
+
type_(TensorDataContainerType::Tensor) { \
|
179 |
+
at::AutoDispatchBelowAutograd mode; \
|
180 |
+
if (scalar_type_ == at::kBool) { \
|
181 |
+
tensor_ = at::tensor(values, at::TensorOptions().device(at::kCPU)); \
|
182 |
+
} else { \
|
183 |
+
tensor_ = at::tensor(values, at::dtype(scalar_type_).device(at::kCPU)); \
|
184 |
+
} \
|
185 |
+
}
|
186 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
187 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
|
188 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
189 |
+
AT_FORALL_COMPLEX_TYPES(TENSOR)
|
190 |
+
#undef TENSOR
|
191 |
+
|
192 |
+
// NOTE: We need to handle `std::vector` explicitly instead of relying on an
|
193 |
+
// implicit conversion to `at::ArrayRef`, otherwise the following error can be
|
194 |
+
// thrown when calling `torch::tensor(std::vector<int>({1, 2}))`:
|
195 |
+
// ```
|
196 |
+
// error: no matching function for call to 'tensor(const std::vector<int>&)'
|
197 |
+
// no known conversion for argument 1 from 'const std::vector<int>' to
|
198 |
+
// 'torch::detail::TensorDataContainer'
|
199 |
+
// ```
|
200 |
+
//
|
201 |
+
// NOTE: `torch::tensor(std::vector<bool>)` is not supported for now, because
|
202 |
+
// ArrayRef<bool> cannot be constructed from a std::vector<bool> bitfield.
|
203 |
+
#define TENSOR(T, S) \
|
204 |
+
TensorDataContainer(const std::vector<T>& values) \
|
205 |
+
: TensorDataContainer(at::ArrayRef<T>(values)) {}
|
206 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
207 |
+
AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TENSOR)
|
208 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
209 |
+
AT_FORALL_COMPLEX_TYPES(TENSOR)
|
210 |
+
#undef TENSOR
|
211 |
+
|
212 |
+
bool is_scalar() const {
|
213 |
+
return type_ == TensorDataContainerType::Scalar;
|
214 |
+
}
|
215 |
+
|
216 |
+
const c10::Scalar& scalar() const {
|
217 |
+
TORCH_CHECK(
|
218 |
+
is_scalar(),
|
219 |
+
"Can only call `scalar()` on a TensorDataContainer that has `is_scalar() == true`");
|
220 |
+
return scalar_;
|
221 |
+
}
|
222 |
+
|
223 |
+
bool is_init_list() const {
|
224 |
+
return type_ == TensorDataContainerType::InitList;
|
225 |
+
}
|
226 |
+
|
227 |
+
const std::initializer_list<TensorDataContainer>& init_list() const {
|
228 |
+
TORCH_CHECK(
|
229 |
+
is_init_list(),
|
230 |
+
"Can only call `init_list()` on a TensorDataContainer that has `is_init_list() == true`");
|
231 |
+
return init_list_;
|
232 |
+
}
|
233 |
+
|
234 |
+
bool is_tensor() const {
|
235 |
+
    return type_ == TensorDataContainerType::Tensor;
  }

  const at::Tensor& tensor() const {
    TORCH_CHECK(
        is_tensor(),
        "Can only call `tensor()` on a TensorDataContainer that has `is_tensor() == true`");
    return tensor_;
  }

  const std::vector<int64_t>& sizes() const {
    return sizes_;
  }

  const c10::ScalarType& scalar_type() const {
    return scalar_type_;
  }

  at::Tensor convert_to_tensor(at::TensorOptions options) const {
    if (!options.has_dtype()) {
      options = options.dtype(compute_desired_dtype(scalar_type_));
    }

    if (is_scalar()) {
      at::AutoDispatchBelowAutograd mode;
      return at::scalar_tensor(scalar_, options);
    } else if (is_init_list()) {
      // NOTE: Here we explicitly choose to initialize the tensor on CPU first,
      // fill each element of the tensor, and then move the tensor to the
      // desired device. For CUDA device, this approach only involves 1 CUDA
      // kernel launch, and is much faster than initializing the tensor on CUDA
      // first and then filling each element of it (which involves `N` CUDA
      // kernel launches where `N` is the number of the elements in the tensor).
      at::Tensor tensor = ([&]() {
        at::AutoDispatchBelowAutograd mode;
        return at::empty(sizes_, options.device(at::kCPU));
      })();
      fill_tensor(tensor);
      return tensor.to(options.device());
    } else if (is_tensor()) {
      auto output = tensor_.to(options);
      TORCH_CHECK(
          !tensor_.is_complex() || output.is_complex(),
          "can not do torch::tensor(complex, dtype=non-complex) because complex can not be casted to real number without loss of information");
      return output;
    } else {
      TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type");
    }
  }

  void pretty_print_recursive(std::ostream& stream) const {
    if (is_scalar()) {
      AT_DISPATCH_ALL_TYPES_AND3(
          at::kBool,
          at::kHalf,
          at::kBFloat16,
          scalar_type_,
          "TensorDataContainer_pretty_print_scalar",
          [&] { stream << scalar_.to<scalar_t>(); });
    } else if (is_init_list()) {
      stream << "{";
      for (const TensorDataContainer* it = init_list_.begin();
           it != init_list_.end();
           it++) {
        stream << *it;
        if (std::next(it) != init_list_.end())
          stream << ", ";
      }
      stream << "}";
    } else if (is_tensor()) {
      stream << "{";
      for (const auto i : c10::irange(tensor_.sizes()[0])) {
        AT_DISPATCH_ALL_TYPES_AND3(
            at::kBool,
            at::kHalf,
            at::kBFloat16,
            scalar_type_,
            "TensorDataContainer_pretty_print_tensor_item",
            [&] { stream << tensor_[i].item<scalar_t>(); });
        if (i != tensor_.sizes()[0] - 1)
          stream << ", ";
      }
      stream << "}";
    } else {
      TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type");
    }
  }

 private:
  void fill_tensor(at::Tensor& tensor) const {
    if (is_scalar()) {
      TORCH_INTERNAL_ASSERT(
          tensor.dim() == 0,
          "Expected a 0-dim Tensor, but got Tensor with dimensions: ",
          tensor.dim());
      at::NoGradGuard guard;
      tensor.fill_(scalar_);
    } else if (is_init_list()) {
      TORCH_INTERNAL_ASSERT(
          tensor.sizes()[0] == (int64_t)init_list_.size(),
          "Expected a Tensor with size ",
          init_list_.size(),
          " in its first dimension, but got Tensor with size ",
          tensor.sizes()[0],
          " in its first dimension");
      size_t index = 0;
      for (const auto& elem : init_list_) {
        at::Tensor slice = tensor[index];
        elem.fill_tensor(slice);
        index++;
      }
    } else if (is_tensor()) {
      TORCH_INTERNAL_ASSERT(
          false,
          "TensorDataContainer is already a Tensor type, `fill_tensor` should not be called");
    } else {
      TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type");
    }
  }

  std::vector<int64_t> sizes_;
  c10::ScalarType scalar_type_;
  TensorDataContainerType type_;
  c10::Scalar scalar_;
  std::initializer_list<TensorDataContainer> init_list_;
  at::Tensor tensor_;
};

inline std::ostream& operator<<(
    std::ostream& stream,
    const TensorDataContainer& tensor_data_container) {
  tensor_data_container.pretty_print_recursive(stream);
  return stream;
}

} // namespace detail

} // namespace torch
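A minimal usage sketch, not part of the header above: TensorDataContainer is the implicit-conversion type behind braced-init-list construction with the `torch::tensor` factory, so the following standalone program (assuming a standard LibTorch build where `<torch/torch.h>` is available) exercises the code paths shown here.

// Illustrative sketch: nested initializer lists go through TensorDataContainer,
// are filled on CPU, then moved to the requested device in one step (see NOTE above).
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::Tensor t = torch::tensor({{1.0, 2.0}, {3.0, 4.0}},
                                  torch::dtype(torch::kFloat32));
  std::cout << t.sizes() << "\n";  // prints [2, 2]
  return 0;
}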
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/static.h
ADDED
@@ -0,0 +1,65 @@
#pragma once

#include <torch/csrc/utils/variadic.h>
#include <torch/types.h>

#include <cstdint>
#include <type_traits>

namespace torch {
namespace nn {
class Module;
} // namespace nn
} // namespace torch

namespace torch {
namespace detail {
/// Detects if a type T has a forward() method.
template <typename T>
struct has_forward {
  // Declare two types with differing size.
  using yes = int8_t;
  using no = int16_t;

  // Here we declare two functions. The first is only enabled if `&U::forward`
  // is well-formed and returns the `yes` type. In C++, the ellipsis parameter
  // type (`...`) always puts the function at the bottom of overload resolution.
  // This is specified in the standard as: 1) A standard conversion sequence is
  // always better than a user-defined conversion sequence or an ellipsis
  // conversion sequence. 2) A user-defined conversion sequence is always better
  // than an ellipsis conversion sequence. This means that if the first overload
  // is viable, it will be preferred over the second as long as we pass any
  // convertible type. The type of `&U::forward` is a pointer type, so we can
  // pass e.g. 0.
  template <typename U>
  static yes test(decltype(&U::forward));
  template <typename U>
  static no test(...);

  // Finally we test statically whether the size of the type returned by the
  // selected overload is the size of the `yes` type.
  static constexpr bool value = (sizeof(test<T>(nullptr)) == sizeof(yes));
};

template <typename Head = void, typename... Tail>
constexpr bool check_not_lvalue_references() {
  return (!std::is_lvalue_reference<Head>::value ||
          std::is_const<typename std::remove_reference<Head>::type>::value) &&
      check_not_lvalue_references<Tail...>();
}

template <>
inline constexpr bool check_not_lvalue_references<void>() {
  return true;
}

/// A type trait whose `value` member is true if `M` derives from `Module`.
template <typename M>
using is_module =
    std::is_base_of<torch::nn::Module, typename std::decay<M>::type>;

template <typename M, typename T = void>
using enable_if_module_t =
    typename std::enable_if<is_module<M>::value, T>::type;
} // namespace detail
} // namespace torch
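A minimal usage sketch, not part of the header above: the two traits can be checked at compile time with `static_assert`. The `MyNet` class below is a hypothetical example module, assuming a LibTorch build where `<torch/torch.h>` is available.

// Illustrative sketch: compile-time checks built on has_forward and is_module.
#include <torch/torch.h>

struct MyNet : torch::nn::Module {
  torch::Tensor forward(torch::Tensor x) {
    return x * 2;
  }
};

static_assert(torch::detail::has_forward<MyNet>::value,
              "MyNet must expose a forward() method");
static_assert(torch::detail::is_module<MyNet>::value,
              "MyNet must derive from torch::nn::Module");

int main() {
  return 0;
}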
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/lr_scheduler.h
ADDED
@@ -0,0 +1,39 @@
#pragma once

#include <torch/optim/optimizer.h>

#include <torch/csrc/Export.h>

namespace torch {
namespace optim {

class TORCH_API LRScheduler {
 public:
  // This class needs to take a reference of an optimizer from outside such that
  // it can modify its learning rates; due to this the lifetime of said
  // optimizer must be maintained
  LRScheduler(torch::optim::Optimizer& optimizer);

  virtual ~LRScheduler() = default;

  void step();

 protected:
  // A vector of learning rates is calculated and returned from the specific
  // subclass. A vector is returned with each element being a separate learning
  // rate for each param group - although the normal use case would be to return
  // a vector of identical elements.
  virtual std::vector<double> get_lrs() = 0;

  // Get current learning rates from the optimizer
  std::vector<double> get_current_lrs() const;

  unsigned step_count_{};

 private:
  void set_optimizer_lrs(const std::vector<double>& learning_rates);

  torch::optim::Optimizer& optimizer_;
};
} // namespace optim
} // namespace torch
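A minimal sketch, not part of the header above, of how a custom scheduler would plug into this base class: the subclass implements `get_lrs()` and can read the current rates via the protected `get_current_lrs()`. The class name `MyExponentialLR` is hypothetical.

// Illustrative sketch: a scheduler that multiplies every param group's learning
// rate by a fixed gamma on each call to step().
#include <torch/torch.h>

class MyExponentialLR : public torch::optim::LRScheduler {
 public:
  MyExponentialLR(torch::optim::Optimizer& optimizer, double gamma)
      : LRScheduler(optimizer), gamma_(gamma) {}

 private:
  std::vector<double> get_lrs() override {
    std::vector<double> lrs = get_current_lrs();
    for (auto& lr : lrs) {
      lr *= gamma_;  // decay each param group's rate
    }
    return lrs;
  }

  double gamma_;
};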
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/reduce_on_plateau_scheduler.h
ADDED
@@ -0,0 +1,63 @@
#pragma once

#include <torch/optim/optimizer.h>

#include <torch/csrc/Export.h>

#include <string>

#include <cmath>

#include <iostream>

namespace torch {
namespace optim {

class TORCH_API ReduceLROnPlateauScheduler {
 public:
  enum SchedulerMode { min, max };
  enum ThresholdMode { rel, abs };
  ReduceLROnPlateauScheduler(
      Optimizer& optimizer,
      SchedulerMode mode = min,
      float factor = 0.1,
      int patience = 10,
      double threshold = 1e-4,
      ThresholdMode threshold_mode = rel,
      int cooldown = 0,
      const std::vector<float>& min_lr = std::vector<float>(),
      double eps = 1e-8,
      bool verbose = false);

  virtual ~ReduceLROnPlateauScheduler() = default;

  void step(float metric);

 private:
  void reset();
  void reduce_lr(int epoch);
  bool in_cooldown();
  bool is_better(float a);
  void init_is_better(
      SchedulerMode mode,
      double threshold,
      ThresholdMode threshold_mode);

  Optimizer& optimizer;
  SchedulerMode mode;
  float mode_worse;
  float factor;
  int patience;
  double threshold;
  ThresholdMode threshold_mode;
  int cooldown;
  int cooldown_counter;
  std::vector<float> min_lrs;
  double eps;
  float best;
  bool verbose;
  int last_epoch;
  int num_bad_epochs;
};
} // namespace optim
} // namespace torch
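A minimal usage sketch, not part of the header above: unlike the LRScheduler subclasses, this scheduler's `step()` takes a metric and only reduces the rate after the metric has stopped improving for `patience` steps. The model, optimizer, and loss values below are placeholders.

// Illustrative sketch: reduce the learning rate when a validation metric plateaus.
#include <torch/torch.h>

int main() {
  auto model = torch::nn::Linear(10, 1);
  torch::optim::SGD optimizer(model->parameters(), torch::optim::SGDOptions(0.1));
  torch::optim::ReduceLROnPlateauScheduler scheduler(
      optimizer,
      torch::optim::ReduceLROnPlateauScheduler::SchedulerMode::min,
      /*factor=*/0.5,
      /*patience=*/2);

  for (int epoch = 0; epoch < 10; ++epoch) {
    float val_loss = 1.0f;  // placeholder for a real validation loss
    scheduler.step(val_loss);  // halves the LR after `patience` non-improving epochs
  }
  return 0;
}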
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/step_lr.h
ADDED
@@ -0,0 +1,22 @@
#pragma once

#include <torch/optim/schedulers/lr_scheduler.h>

namespace torch {
namespace optim {

class TORCH_API StepLR : public LRScheduler {
 public:
  StepLR(
      torch::optim::Optimizer& optimizer,
      const unsigned step_size,
      const double gamma = 0.1);

 private:
  std::vector<double> get_lrs() override;

  const unsigned step_size_;
  const double gamma_;
};
} // namespace optim
} // namespace torch
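A minimal usage sketch, not part of the header above: StepLR decays every param group's learning rate by `gamma` each time `step_size` calls to `step()` have accumulated. The model and loop below are placeholders.

// Illustrative sketch: step-wise learning rate decay with StepLR.
#include <torch/torch.h>

int main() {
  auto model = torch::nn::Linear(4, 2);
  torch::optim::SGD optimizer(model->parameters(), torch::optim::SGDOptions(0.1));
  torch::optim::StepLR scheduler(optimizer, /*step_size=*/5, /*gamma=*/0.5);

  for (int epoch = 0; epoch < 20; ++epoch) {
    // ... run one training epoch with `optimizer` ...
    scheduler.step();  // LR: 0.1 -> 0.05 after 5 epochs -> 0.025 after 10, ...
  }
  return 0;
}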
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize/archive.h
ADDED
@@ -0,0 +1,4 @@
#pragma once

#include <torch/serialize/input-archive.h>
#include <torch/serialize/output-archive.h>
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize/input-archive.h
ADDED
@@ -0,0 +1,117 @@
#pragma once

#include <c10/core/Device.h>
#include <c10/util/Optional.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/api/module.h>
#include <torch/types.h>

#include <iosfwd>
#include <memory>
#include <string>
#include <utility>

namespace at {
class Tensor;
} // namespace at

namespace torch {
using at::Tensor;
namespace jit {
struct Module;
} // namespace jit
} // namespace torch

namespace torch {
namespace serialize {

/// A recursive representation of tensors that can be deserialized from a file
/// or stream. In most cases, users should not have to interact with this class,
/// and should instead use `torch::load`.
class TORCH_API InputArchive final {
 public:
  /// Default-constructs the `InputArchive`.
  InputArchive();

  // Move is allowed.
  InputArchive(InputArchive&&) = default;
  InputArchive& operator=(InputArchive&&) = default;

  // Copy is disallowed.
  InputArchive(InputArchive&) = delete;
  InputArchive& operator=(InputArchive&) = delete;

  ~InputArchive() = default;

  /// Reads an `IValue` associated with a given `key`.
  void read(const std::string& key, c10::IValue& ivalue);

  /// Reads an `IValue` associated with a given `key`. If there is no `IValue`
  /// associated with the `key`, this returns false, otherwise it returns true.
  bool try_read(const std::string& key, c10::IValue& ivalue);

  /// Reads a `tensor` associated with a given `key`. If there is no `tensor`
  /// associated with the `key`, this returns false, otherwise it returns true.
  /// If the tensor is expected to be a buffer (not differentiable), `is_buffer`
  /// must be `true`.
  bool try_read(const std::string& key, Tensor& tensor, bool is_buffer = false);

  /// Reads a `tensor` associated with a given `key`.
  /// If the tensor is expected to be a buffer (not differentiable), `is_buffer`
  /// must be `true`.
  void read(const std::string& key, Tensor& tensor, bool is_buffer = false);

  /// Reads an `InputArchive` associated with a given `key`. If there is no
  /// `InputArchive` associated with the `key`, this returns false, otherwise
  /// it returns true.
  bool try_read(const std::string& key, InputArchive& archive);

  /// Reads an `InputArchive` associated with a given `key`.
  /// The archive can thereafter be used for further deserialization of the
  /// nested data.
  void read(const std::string& key, InputArchive& archive);

  /// Loads the `InputArchive` from a serialized representation stored in the
  /// file at `filename`. Storages are remapped using the device option. If the
  /// device is not specified, the module is loaded to the original device.
  void load_from(
      const std::string& filename,
      c10::optional<torch::Device> device = c10::nullopt);

  /// Loads the `InputArchive` from a serialized representation stored in the
  /// given `stream`. Storages are remapped using the device option. If the
  /// device is not specified, the module is loaded to the original device.
  void load_from(
      std::istream& stream,
      c10::optional<torch::Device> device = c10::nullopt);

  // Loads given the specified flat array.
  void load_from(
      const char* data,
      size_t size,
      c10::optional<torch::Device> device = c10::nullopt);

  // Loads given the specified read and size functions.
  void load_from(
      const std::function<size_t(uint64_t pos, void* buf, size_t nbytes)>&
          read_func,
      const std::function<size_t(void)>& size_func,
      c10::optional<torch::Device> device = c10::nullopt);

  // Returns the vector of keys in the input archive.
  std::vector<std::string> keys();

  /// Forwards all arguments to `read()`.
  /// Useful for generic code that can be re-used for both `InputArchive` and
  /// `OutputArchive` (where `operator()` forwards to `write()`).
  template <typename... Ts>
  void operator()(Ts&&... ts) {
    read(std::forward<Ts>(ts)...);
  }

 private:
  jit::Module module_;
  std::string hierarchy_prefix_;
};
} // namespace serialize
} // namespace torch
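A minimal usage sketch, not part of the header above: low-level deserialization with InputArchive. The filename "checkpoint.pt" and the key "weight" are assumed examples.

// Illustrative sketch: load an archive from disk and read one tensor by key.
#include <torch/torch.h>

int main() {
  torch::serialize::InputArchive archive;
  archive.load_from("checkpoint.pt");  // optionally pass a torch::Device to remap storages

  torch::Tensor weight;
  if (archive.try_read("weight", weight)) {
    // the key existed; `weight` now holds the stored tensor
  }
  return 0;
}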
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize/output-archive.h
ADDED
@@ -0,0 +1,82 @@
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/api/module.h>

#include <iosfwd>
#include <memory>
#include <string>
#include <utility>

namespace at {
class Tensor;
} // namespace at

namespace torch {
using at::Tensor;
namespace jit {
struct Module;
} // namespace jit
} // namespace torch

namespace torch {
namespace serialize {
class TORCH_API OutputArchive final {
 public:
  explicit OutputArchive(std::shared_ptr<jit::CompilationUnit> cu);
  explicit OutputArchive()
      : cu_(std::make_shared<jit::CompilationUnit>()),
        module_("__torch__.Module", cu_) {}

  // Move is allowed.
  OutputArchive(OutputArchive&&) = default;
  OutputArchive& operator=(OutputArchive&&) = default;

  // Copy is disallowed.
  OutputArchive(OutputArchive&) = delete;
  OutputArchive& operator=(OutputArchive&) = delete;

  std::shared_ptr<jit::CompilationUnit> compilation_unit() const {
    return cu_;
  }

  /// Writes an `IValue` to the `OutputArchive`.
  void write(const std::string& key, const c10::IValue& ivalue);

  /// Writes a `(key, tensor)` pair to the `OutputArchive`, and marks it as
  /// being or not being a buffer (non-differentiable tensor).
  void write(
      const std::string& key,
      const Tensor& tensor,
      bool is_buffer = false);

  /// Writes a nested `OutputArchive` under the given `key` to this
  /// `OutputArchive`.
  void write(const std::string& key, OutputArchive& nested_archive);

  /// Saves the `OutputArchive` into a serialized representation in a file at
  /// `filename`.
  void save_to(const std::string& filename);

  /// Saves the `OutputArchive` into a serialized representation into the given
  /// `stream`.
  void save_to(std::ostream& stream);

  /// Saves the `OutputArchive` into a serialized representation using the
  /// given writer function.
  void save_to(const std::function<size_t(const void*, size_t)>& func);

  /// Forwards all arguments to `write()`.
  /// Useful for generic code that can be re-used for both `OutputArchive` and
  /// `InputArchive` (where `operator()` forwards to `read()`).
  template <typename... Ts>
  void operator()(Ts&&... ts) {
    write(std::forward<Ts>(ts)...);
  }

 private:
  std::shared_ptr<jit::CompilationUnit> cu_;
  jit::Module module_;
};
} // namespace serialize
} // namespace torch
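A minimal usage sketch, not part of the header above: low-level serialization with OutputArchive, including a nested archive. The keys and the filename are assumed examples.

// Illustrative sketch: write tensors (flat and nested) and save to a file.
#include <torch/torch.h>

int main() {
  torch::serialize::OutputArchive archive;
  archive.write("weight", torch::randn({3, 3}));

  torch::serialize::OutputArchive nested;
  nested.write("bias", torch::zeros({3}));
  archive.write("layer1", nested);  // nested archives mirror a module hierarchy

  archive.save_to("checkpoint.pt");
  return 0;
}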
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize/tensor.h
ADDED
@@ -0,0 +1,20 @@
#pragma once

#include <torch/serialize/archive.h>
#include <torch/types.h>

namespace torch {
inline serialize::OutputArchive& operator<<(
    serialize::OutputArchive& archive,
    const Tensor& tensor) {
  archive.write("0", tensor);
  return archive;
}

inline serialize::InputArchive& operator>>(
    serialize::InputArchive& archive,
    Tensor& tensor) {
  archive.read("0", tensor);
  return archive;
}
} // namespace torch
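A minimal usage sketch, not part of the header above: these stream operators round-trip a single tensor through an archive under the fixed key "0". The filename is an assumed example.

// Illustrative sketch: save and restore one tensor with the archive stream operators.
#include <torch/torch.h>

int main() {
  torch::serialize::OutputArchive out;
  out << torch::arange(6).reshape({2, 3});  // written under key "0"
  out.save_to("tensor.pt");

  torch::serialize::InputArchive in;
  in.load_from("tensor.pt");
  torch::Tensor restored;
  in >> restored;  // reads the tensor stored under key "0"
  return 0;
}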
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/FunctionsManual.h
ADDED
@@ -0,0 +1,1105 @@
#pragma once

// NB: Must be at the top of file to avoid including the deprecated "math.h".
// https://stackoverflow.com/questions/6563810/m-pi-works-with-math-h-but-not-with-cmath-in-visual-studio
#ifdef _MSC_VER
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <cmath>
#endif

#include <ATen/ATen.h>
#include <torch/csrc/autograd/generated/Functions.h>

namespace torch {
namespace autograd {
namespace generated {
namespace details {

extern const char* kCudnnDoubleBackwardMsg;

// A simple way to imperatively compute index ranges for slots
// that have been flattened
struct TORCH_API IndexRangeGenerator {
  IndexRange range(size_t range_size) {
    i += range_size;
    return {i - range_size, i};
  }
  size_t size() {
    return i;
  }

 private:
  size_t i = 0;
};

TORCH_API Tensor toNonOptFwGrad(const c10::optional<Tensor>& t);
TORCH_API Tensor toNonOptPrimal(const c10::optional<Tensor>& t);
TORCH_API Tensor toNonOptTensor(const c10::optional<Tensor>& t);

TORCH_API inline c10::optional<Tensor> wrap_opt_if(
    const Tensor& t,
    const bool cond) {
  using OptTensor = c10::optional<Tensor>;
  return cond ? OptTensor(t) : static_cast<OptTensor>(c10::nullopt);
}

TORCH_API Tensor
apply_loss_reduction(const Tensor& unreduced, int64_t reduction);
TORCH_API bool any_variable_defined(const variable_list& variables);
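// Illustrative sketch (comments only, not part of the original header): how
// IndexRangeGenerator is typically used when unpacking flattened gradient
// slots; the sizes below are just example numbers.
//
//   IndexRangeGenerator gen;
//   auto weight_range = gen.range(2);  // covers indices [0, 2)
//   auto bias_range = gen.range(3);    // covers indices [2, 5)
//   // gen.size() == 5, the total number of flattened slots handed out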
51 |
+
TORCH_API void copy_range(
|
52 |
+
variable_list& out,
|
53 |
+
IndexRange range,
|
54 |
+
const at::Tensor& t);
|
55 |
+
TORCH_API void copy_range(
|
56 |
+
variable_list& out,
|
57 |
+
IndexRange range,
|
58 |
+
at::ArrayRef<at::Tensor> t);
|
59 |
+
TORCH_API at::Tensor copysign_tensor_self_backward(
|
60 |
+
const Tensor& grad,
|
61 |
+
const Tensor& self,
|
62 |
+
const Tensor& result);
|
63 |
+
TORCH_API at::Tensor not_implemented(const char* name, const char* reason = "");
|
64 |
+
TORCH_API std::vector<Tensor> not_implemented_list(
|
65 |
+
const char* name,
|
66 |
+
const char* reason = "");
|
67 |
+
at::Tensor handle_r_to_c(ScalarType self_st, Tensor gradient_result);
|
68 |
+
at::Tensor maybe_multiply(const at::Tensor& t, const at::Scalar& s);
|
69 |
+
int64_t _safe_size(IntArrayRef sizes, IntArrayRef dim);
|
70 |
+
Tensor restore_reduced_dims(
|
71 |
+
const Tensor& output,
|
72 |
+
IntArrayRef dims,
|
73 |
+
bool keepdim);
|
74 |
+
Tensor scale_grad_by_count(
|
75 |
+
const Tensor& grad,
|
76 |
+
const Tensor& mask,
|
77 |
+
IntArrayRef dims);
|
78 |
+
at::Tensor norm_backward(
|
79 |
+
const at::Tensor& grad,
|
80 |
+
const at::Tensor& self,
|
81 |
+
const optional<at::Scalar>& p_,
|
82 |
+
const at::Tensor& norm);
|
83 |
+
at::Tensor norm_backward(
|
84 |
+
at::Tensor grad,
|
85 |
+
const at::Tensor& self,
|
86 |
+
const optional<at::Scalar>& p_,
|
87 |
+
at::Tensor norm,
|
88 |
+
at::IntArrayRef dim,
|
89 |
+
bool keepdim);
|
90 |
+
Tensor norm_jvp(
|
91 |
+
const Tensor& self_p,
|
92 |
+
const Tensor& self_t,
|
93 |
+
const optional<Scalar>& p_,
|
94 |
+
Tensor norm,
|
95 |
+
IntArrayRef dim,
|
96 |
+
bool keepdim);
|
97 |
+
Tensor norm_jvp(
|
98 |
+
const Tensor& grad,
|
99 |
+
const Tensor& self,
|
100 |
+
const optional<Scalar>& p_,
|
101 |
+
Tensor norm);
|
102 |
+
Tensor _nested_from_padded_backward(
|
103 |
+
const Tensor& grad,
|
104 |
+
const Tensor& input,
|
105 |
+
const bool do_transform_0213);
|
106 |
+
std::tuple<Tensor, Tensor, Tensor> linear_double_backward(
|
107 |
+
const variable_list& grads,
|
108 |
+
const Tensor& self,
|
109 |
+
const Tensor& grad_output,
|
110 |
+
const Tensor& weight);
|
111 |
+
Tensor linalg_vector_norm_jvp(
|
112 |
+
const Tensor& self_p,
|
113 |
+
const Tensor& self_t,
|
114 |
+
const Scalar& scalar_ord,
|
115 |
+
Tensor norm,
|
116 |
+
const at::OptionalIntArrayRef& opt_dim,
|
117 |
+
bool keepdim);
|
118 |
+
at::Tensor linalg_vector_norm_backward(
|
119 |
+
at::Tensor grad,
|
120 |
+
const at::Tensor& self,
|
121 |
+
const at::Scalar& ord,
|
122 |
+
at::Tensor norm,
|
123 |
+
const at::OptionalIntArrayRef& opt_dim,
|
124 |
+
bool keepdim);
|
125 |
+
at::Tensor pow_backward(
|
126 |
+
at::Tensor grad,
|
127 |
+
const at::Tensor& self,
|
128 |
+
const at::Scalar& exponent_);
|
129 |
+
at::Tensor pow_backward_self(
|
130 |
+
const at::Tensor& grad,
|
131 |
+
const at::Tensor& self,
|
132 |
+
const at::Tensor& exponent);
|
133 |
+
at::Tensor pow_backward_exponent(
|
134 |
+
const at::Tensor& grad,
|
135 |
+
const at::Tensor& self,
|
136 |
+
const at::Tensor& exponent,
|
137 |
+
const at::Tensor& result);
|
138 |
+
at::Tensor pow_backward_exponent(
|
139 |
+
const at::Tensor& grad,
|
140 |
+
const at::Scalar& base,
|
141 |
+
const at::Tensor& exponent,
|
142 |
+
const at::Tensor& result);
|
143 |
+
at::Tensor angle_backward(const at::Tensor& grad, const at::Tensor& self);
|
144 |
+
template <typename T>
|
145 |
+
at::Tensor mul_tensor_backward(const Tensor& grad, T other, ScalarType self_st);
|
146 |
+
template <typename T>
|
147 |
+
at::Tensor div_tensor_self_backward(
|
148 |
+
const Tensor& grad,
|
149 |
+
T other,
|
150 |
+
ScalarType self_st);
|
151 |
+
at::Tensor div_tensor_other_backward(
|
152 |
+
const Tensor& grad,
|
153 |
+
const Tensor& self,
|
154 |
+
const Tensor& other);
|
155 |
+
template <typename T>
|
156 |
+
at::Tensor div_tensor_self_backward(
|
157 |
+
const Tensor& grad,
|
158 |
+
T other,
|
159 |
+
ScalarType self_st,
|
160 |
+
const c10::optional<c10::string_view>& rounding_mode);
|
161 |
+
at::Tensor div_tensor_other_backward(
|
162 |
+
const Tensor& grad,
|
163 |
+
const Tensor& self,
|
164 |
+
const Tensor& other,
|
165 |
+
const c10::optional<c10::string_view>& rounding_mode);
|
166 |
+
at::Tensor mvlgamma_backward(
|
167 |
+
const at::Tensor& grad,
|
168 |
+
const at::Tensor& self,
|
169 |
+
int64_t p);
|
170 |
+
at::Tensor permute_backwards(const at::Tensor& grad, at::IntArrayRef fwd_dims);
|
171 |
+
at::Tensor rad2deg_backward(const at::Tensor& grad);
|
172 |
+
at::Tensor deg2rad_backward(const at::Tensor& grad);
|
173 |
+
at::Tensor unsqueeze_multiple(
|
174 |
+
const at::Tensor& t,
|
175 |
+
at::OptionalIntArrayRef opt_dim,
|
176 |
+
size_t n_dims);
|
177 |
+
at::Tensor sum_backward(
|
178 |
+
const at::Tensor& grad,
|
179 |
+
at::SymIntArrayRef sizes,
|
180 |
+
at::OptionalIntArrayRef opt_dims,
|
181 |
+
bool keepdim);
|
182 |
+
at::Tensor sum_backward(
|
183 |
+
const at::Tensor& grad,
|
184 |
+
c10::SymIntArrayRef sizes,
|
185 |
+
c10::IntArrayRef dims,
|
186 |
+
bool keepdim);
|
187 |
+
at::Tensor nansum_backward(
|
188 |
+
const at::Tensor& grad,
|
189 |
+
const at::Tensor& self,
|
190 |
+
at::OptionalIntArrayRef dims,
|
191 |
+
bool keepdim);
|
192 |
+
std::vector<int64_t> reverse_list(const at::IntArrayRef list);
|
193 |
+
std::vector<c10::SymInt> reverse_list_symint(const c10::SymIntArrayRef list);
|
194 |
+
at::Tensor reverse_dim(const at::Tensor& t, int64_t dim);
|
195 |
+
at::Tensor prod_safe_zeros_backward(
|
196 |
+
const at::Tensor& grad,
|
197 |
+
const at::Tensor& inp,
|
198 |
+
int64_t dim);
|
199 |
+
at::Tensor prod_backward(
|
200 |
+
const at::Tensor& grad,
|
201 |
+
const at::Tensor& input,
|
202 |
+
const at::Tensor& result);
|
203 |
+
at::Tensor prod_backward(
|
204 |
+
at::Tensor grad,
|
205 |
+
const at::Tensor& input,
|
206 |
+
at::Tensor result,
|
207 |
+
int64_t dim,
|
208 |
+
bool keepdim);
|
209 |
+
at::Tensor solve_jvp(
|
210 |
+
const Tensor& X,
|
211 |
+
const Tensor& A,
|
212 |
+
const Tensor& dA,
|
213 |
+
const Tensor& dB);
|
214 |
+
at::Tensor solve_backward_self(
|
215 |
+
const at::Tensor& grad,
|
216 |
+
const at::Tensor& self,
|
217 |
+
const at::Tensor& A);
|
218 |
+
at::Tensor solve_backward_A(
|
219 |
+
const at::Tensor& grad,
|
220 |
+
const at::Tensor& self,
|
221 |
+
const at::Tensor& A,
|
222 |
+
const at::Tensor& solution);
|
223 |
+
at::Tensor cumsum_backward(const at::Tensor& grad, int64_t dim);
|
224 |
+
at::Tensor logsumexp_backward(
|
225 |
+
at::Tensor grad,
|
226 |
+
const at::Tensor& self,
|
227 |
+
at::Tensor result,
|
228 |
+
at::IntArrayRef dim,
|
229 |
+
bool keepdim);
|
230 |
+
at::Tensor logsumexp_jvp(
|
231 |
+
const at::Tensor& self_p,
|
232 |
+
const at::Tensor& self_t,
|
233 |
+
IntArrayRef dim,
|
234 |
+
bool keepdim);
|
235 |
+
at::Tensor logcumsumexp_backward(
|
236 |
+
at::Tensor grad,
|
237 |
+
const at::Tensor& self,
|
238 |
+
at::Tensor result,
|
239 |
+
int64_t dim);
|
240 |
+
at::Tensor logcumsumexp_jvp(
|
241 |
+
const at::Tensor& self_p,
|
242 |
+
const at::Tensor& self_t,
|
243 |
+
int64_t dim);
|
244 |
+
at::Tensor unbind_backward(const variable_list& grads, int64_t dim);
|
245 |
+
at::Tensor unbind_backward_nested(
|
246 |
+
const variable_list& grads,
|
247 |
+
const Tensor& nt_sizes,
|
248 |
+
int64_t dim,
|
249 |
+
const at::TensorOptions& options);
|
250 |
+
at::Tensor unsqueeze_to(const at::Tensor& self, c10::SymIntArrayRef sym_sizes);
|
251 |
+
at::Tensor unsqueeze_to(
|
252 |
+
const at::Tensor& self,
|
253 |
+
int64_t dim,
|
254 |
+
c10::SymIntArrayRef sym_sizes);
|
255 |
+
at::Tensor unsqueeze_to(
|
256 |
+
const at::Tensor& self,
|
257 |
+
IntArrayRef dim,
|
258 |
+
c10::SymIntArrayRef sym_sizes);
|
259 |
+
std::vector<at::Tensor> cat_tensors_backward(
|
260 |
+
const at::Tensor& grad,
|
261 |
+
const std::vector<std::vector<c10::SymInt>>& sizes,
|
262 |
+
const std::vector<ScalarType>& dtypes,
|
263 |
+
int64_t dim);
|
264 |
+
std::vector<at::Tensor> stack_tensors_backward(
|
265 |
+
const at::Tensor& grad,
|
266 |
+
int64_t dim,
|
267 |
+
const std::vector<ScalarType>& dtypes);
|
268 |
+
std::vector<at::Tensor> block_diag_backward(
|
269 |
+
const at::Tensor& grad,
|
270 |
+
const std::vector<std::vector<int64_t>>& sizes,
|
271 |
+
const std::vector<ScalarType>& dtypes);
|
272 |
+
at::Tensor clamp_backward(
|
273 |
+
const at::Tensor& grad,
|
274 |
+
const at::Tensor& self,
|
275 |
+
const optional<at::Scalar>& min,
|
276 |
+
const optional<at::Scalar>& max);
|
277 |
+
at::Tensor clamp_backward(
|
278 |
+
const at::Tensor& grad,
|
279 |
+
const at::Tensor& self,
|
280 |
+
const at::Tensor& min,
|
281 |
+
const at::Tensor& max);
|
282 |
+
std::tuple<at::Tensor, at::Tensor> clamp_backward_min_max(
|
283 |
+
const at::Tensor& grad,
|
284 |
+
const at::Tensor& self,
|
285 |
+
const at::Tensor& min,
|
286 |
+
const at::Tensor& max,
|
287 |
+
const std::array<bool, 2>&);
|
288 |
+
at::Tensor clamp_jvp(
|
289 |
+
const Tensor& self_p,
|
290 |
+
const Tensor& self_t,
|
291 |
+
const Tensor& min_p,
|
292 |
+
const Tensor& min_t,
|
293 |
+
const Tensor& max_p,
|
294 |
+
const Tensor& max_t);
|
295 |
+
at::SymIntArrayRef strides_or_error(
|
296 |
+
const Tensor& input,
|
297 |
+
c10::string_view const& input_name);
|
298 |
+
at::Tensor mm_mat1_backward(
|
299 |
+
const Tensor& grad,
|
300 |
+
const Tensor& mat2,
|
301 |
+
at::SymIntArrayRef mat1_sizes,
|
302 |
+
at::SymIntArrayRef mat1_strides,
|
303 |
+
c10::Layout mat1_layout,
|
304 |
+
const Scalar& alpha);
|
305 |
+
at::Tensor mm_mat2_backward(
|
306 |
+
const at::Tensor& grad,
|
307 |
+
const at::Tensor& mat1,
|
308 |
+
at::SymIntArrayRef sizes,
|
309 |
+
at::SymIntArrayRef strides,
|
310 |
+
c10::Layout layout,
|
311 |
+
const at::Scalar& alpha);
|
312 |
+
at::Tensor mm_mat1_sparse_backward(
|
313 |
+
const at::Tensor& grad,
|
314 |
+
const at::Tensor& mat1,
|
315 |
+
const at::Tensor& mat2,
|
316 |
+
const at::Scalar& alpha);
|
317 |
+
std::tuple<Tensor, Tensor, Tensor> sparse_sampled_addmm_backward(
|
318 |
+
const Tensor& grad,
|
319 |
+
const Tensor& self,
|
320 |
+
const c10::optional<Tensor>& mat1,
|
321 |
+
const c10::optional<Tensor>& mat2,
|
322 |
+
const Scalar& alpha,
|
323 |
+
const Scalar& beta,
|
324 |
+
const std::array<bool, 3>& grad_input_mask);
|
325 |
+
at::Tensor sparse_mask_backward(
|
326 |
+
const at::Tensor& grad,
|
327 |
+
const at::Tensor& mask,
|
328 |
+
c10::Layout self_layout);
|
329 |
+
at::Tensor sparse_sparse_matmul_backward(
|
330 |
+
const at::Tensor& grad,
|
331 |
+
const at::Tensor& mat1,
|
332 |
+
const at::Tensor& mat2,
|
333 |
+
int64_t grad_order);
|
334 |
+
at::Tensor renorm_backward(
|
335 |
+
const at::Tensor& grad,
|
336 |
+
const at::Tensor& self,
|
337 |
+
const at::Scalar& p,
|
338 |
+
int64_t dim,
|
339 |
+
const at::Scalar& maxnorm);
|
340 |
+
at::Tensor renorm_jvp(
|
341 |
+
const at::Tensor& self_p,
|
342 |
+
const at::Tensor& self_t,
|
343 |
+
const at::Scalar& p,
|
344 |
+
int64_t dim,
|
345 |
+
const at::Scalar& maxnorm);
|
346 |
+
at::Tensor repeat_backward(
|
347 |
+
at::Tensor grad,
|
348 |
+
at::SymIntArrayRef repeats,
|
349 |
+
at::SymIntArrayRef input_shape);
|
350 |
+
at::Tensor _fused_dropout_backward(
|
351 |
+
const at::Tensor& grad,
|
352 |
+
const at::Tensor& mask,
|
353 |
+
double p1m);
|
354 |
+
at::Tensor infinitely_differentiable_native_dropout_backward(
|
355 |
+
const at::Tensor& grad,
|
356 |
+
const at::Tensor& mask,
|
357 |
+
double scale);
|
358 |
+
at::Tensor native_dropout_double_backward(
|
359 |
+
const at::Tensor& ggI,
|
360 |
+
const at::Tensor& grad,
|
361 |
+
const at::Tensor& mask,
|
362 |
+
double scale);
|
363 |
+
at::Tensor evenly_distribute_backward(
|
364 |
+
const at::Tensor& grad,
|
365 |
+
const at::Tensor& input,
|
366 |
+
const at::Tensor& value);
|
367 |
+
Tensor sgn_backward(const Tensor& x, const Tensor& gx, const Tensor& sgn);
|
368 |
+
Tensor masked_fill_backward(const Tensor& grad, const Tensor& mask);
|
369 |
+
at::Tensor var_backward(
|
370 |
+
at::Tensor grad,
|
371 |
+
const at::Tensor& self,
|
372 |
+
at::OptionalIntArrayRef dim,
|
373 |
+
const c10::optional<c10::Scalar>& correction,
|
374 |
+
bool keepdim);
|
375 |
+
at::Tensor var_jvp(
|
376 |
+
const at::Tensor& self_t,
|
377 |
+
const at::Tensor& self_p,
|
378 |
+
const at::Tensor& result,
|
379 |
+
at::OptionalIntArrayRef dim_opt,
|
380 |
+
const c10::optional<c10::Scalar>& correction,
|
381 |
+
bool keepdim);
|
382 |
+
at::Tensor std_backward(
|
383 |
+
const at::Tensor& result,
|
384 |
+
const at::Tensor& grad,
|
385 |
+
const at::Tensor& self,
|
386 |
+
at::OptionalIntArrayRef dim,
|
387 |
+
const c10::optional<c10::Scalar>& correction,
|
388 |
+
bool keepdim);
|
389 |
+
Tensor mean_backward(
|
390 |
+
const Tensor& grad,
|
391 |
+
c10::SymIntArrayRef shape,
|
392 |
+
at::OptionalIntArrayRef opt_dim,
|
393 |
+
c10::SymInt numel,
|
394 |
+
bool keepdim);
|
395 |
+
Tensor var_mean_backward(
|
396 |
+
const Tensor& gvar,
|
397 |
+
const Tensor& gmean,
|
398 |
+
const Tensor& self,
|
399 |
+
at::OptionalIntArrayRef dim_opt,
|
400 |
+
const c10::optional<c10::Scalar>& correction,
|
401 |
+
bool keepdim);
|
402 |
+
Tensor std_mean_backward(
|
403 |
+
const Tensor& gstd,
|
404 |
+
const Tensor& gmean,
|
405 |
+
const Tensor& self,
|
406 |
+
const Tensor& std,
|
407 |
+
at::OptionalIntArrayRef dim_opt,
|
408 |
+
const c10::optional<c10::Scalar>& correction,
|
409 |
+
bool keepdim);
|
410 |
+
at::Tensor cholesky_backward(
|
411 |
+
const at::Tensor& grad,
|
412 |
+
bool upper,
|
413 |
+
const at::Tensor& L);
|
414 |
+
at::Tensor cholesky_jvp(
|
415 |
+
const at::Tensor& input_tangent,
|
416 |
+
const at::Tensor& L,
|
417 |
+
bool upper);
|
418 |
+
at::Tensor cholesky_inverse_backward(
|
419 |
+
const at::Tensor& grad,
|
420 |
+
const at::Tensor& L,
|
421 |
+
bool upper,
|
422 |
+
const at::Tensor& inverse);
|
423 |
+
at::Tensor cholesky_inverse_jvp(
|
424 |
+
const at::Tensor& F,
|
425 |
+
const at::Tensor& dF,
|
426 |
+
const at::Tensor& X,
|
427 |
+
bool upper);
|
428 |
+
Tensor pinv_jvp(const Tensor& A, const Tensor& pinvA, const Tensor& dA);
|
429 |
+
Tensor pinv_backward(const Tensor& grad, const Tensor& pinvA, const Tensor& A);
|
430 |
+
at::Tensor split_with_sizes_backward(
|
431 |
+
const std::vector<torch::autograd::Variable>& grads,
|
432 |
+
c10::SymIntArrayRef split_sizes,
|
433 |
+
int64_t dim,
|
434 |
+
c10::SymIntArrayRef sizes,
|
435 |
+
const at::TensorOptions& options);
|
436 |
+
at::Tensor _nested_split_with_sizes_backward(
|
437 |
+
const std::vector<torch::autograd::Variable>& grads,
|
438 |
+
c10::SymIntArrayRef split_sizes,
|
439 |
+
int64_t dim,
|
440 |
+
const Tensor& nt_sizes,
|
441 |
+
const at::TensorOptions& options);
|
442 |
+
at::Tensor split_backward(
|
443 |
+
const std::vector<torch::autograd::Variable>& grads,
|
444 |
+
const c10::SymInt& split_size,
|
445 |
+
int64_t dim,
|
446 |
+
c10::SymIntArrayRef sizes,
|
447 |
+
const at::TensorOptions& options);
|
448 |
+
at::Tensor max_pool_double_backward(
|
449 |
+
const at::Tensor& grad,
|
450 |
+
const at::Tensor& indices,
|
451 |
+
int dim);
|
452 |
+
at::Tensor error_for_max_pool2d_double_backward();
|
453 |
+
at::Tensor glu_double_backward(
|
454 |
+
const at::Tensor& grad,
|
455 |
+
const at::Tensor& grad_output,
|
456 |
+
const at::Tensor& input,
|
457 |
+
int64_t dim);
|
458 |
+
at::Tensor glu_double_backward_grad_output(
|
459 |
+
const at::Tensor& grad,
|
460 |
+
const at::Tensor& input,
|
461 |
+
int64_t dim);
|
462 |
+
at::Tensor infinitely_differentiable_silu_backward(
|
463 |
+
const at::Tensor& grad_output,
|
464 |
+
const at::Tensor& input);
|
465 |
+
at::Tensor infinitely_differentiable_mish_backward(
|
466 |
+
const at::Tensor& grad_output,
|
467 |
+
const at::Tensor& input);
|
468 |
+
Tensor infinitely_differentiable_logit_backward(
|
469 |
+
const Tensor& grad,
|
470 |
+
const Tensor& self,
|
471 |
+
c10::optional<double> eps);
|
472 |
+
Tensor binary_cross_entropy_target_backward(
|
473 |
+
const Tensor& grad,
|
474 |
+
const Tensor& self,
|
475 |
+
const Tensor& target,
|
476 |
+
const c10::optional<Tensor>& weight,
|
477 |
+
int64_t reduction);
|
478 |
+
Tensor binary_cross_entropy_double_backward_target(
|
479 |
+
const Tensor& grad,
|
480 |
+
const Tensor& grad_output,
|
481 |
+
const Tensor& self,
|
482 |
+
const Tensor& target,
|
483 |
+
const c10::optional<Tensor>& weight,
|
484 |
+
int64_t reduction);
|
485 |
+
Tensor binary_cross_entropy_with_logits_backward(
|
486 |
+
const Tensor& grad,
|
487 |
+
const Tensor& input,
|
488 |
+
const Tensor& target,
|
489 |
+
const c10::optional<Tensor>& weight_opt,
|
490 |
+
const c10::optional<Tensor>& pos_weight_opt,
|
491 |
+
int64_t reduction);
|
492 |
+
at::Tensor binary_cross_entropy_with_logits_target_backward(
|
493 |
+
const at::Tensor& grad_output,
|
494 |
+
const at::Tensor& self,
|
495 |
+
const at::Tensor& target,
|
496 |
+
const c10::optional<at::Tensor>& weight,
|
497 |
+
const c10::optional<at::Tensor>& pos_weight,
|
498 |
+
int64_t reduction);
|
499 |
+
at::Tensor log_sigmoid_double_backward(
|
500 |
+
const at::Tensor& grad,
|
501 |
+
const at::Tensor& input);
|
502 |
+
at::Tensor softmax_double_backward(
|
503 |
+
const at::Tensor& grad,
|
504 |
+
const at::Tensor& grad_output,
|
505 |
+
int dim,
|
506 |
+
const at::Tensor& output);
|
507 |
+
at::Tensor binary_cross_entropy_double_backward(
|
508 |
+
const at::Tensor& grad_output,
|
509 |
+
const at::Tensor& grad,
|
510 |
+
const at::Tensor& input,
|
511 |
+
const at::Tensor& target,
|
512 |
+
const c10::optional<at::Tensor>& weight,
|
513 |
+
int64_t reduction);
|
514 |
+
at::Tensor binary_cross_entropy_double_backward_grad_output(
|
515 |
+
const at::Tensor& grad,
|
516 |
+
const at::Tensor& input,
|
517 |
+
const at::Tensor& target,
|
518 |
+
const c10::optional<at::Tensor>& weight,
|
519 |
+
int64_t reduction);
|
520 |
+
at::Tensor smooth_l1_loss_double_backward(
|
521 |
+
const at::Tensor& grad,
|
522 |
+
const at::Tensor& input,
|
523 |
+
const at::Tensor& target,
|
524 |
+
int64_t reduction,
|
525 |
+
double beta);
|
526 |
+
at::Tensor huber_loss_double_backward(
|
527 |
+
const at::Tensor& grad,
|
528 |
+
const at::Tensor& input,
|
529 |
+
const at::Tensor& target,
|
530 |
+
int64_t reduction,
|
531 |
+
double delta);
|
532 |
+
at::Tensor huber_loss_double_backward_grad_output(
|
533 |
+
const at::Tensor& grad,
|
534 |
+
const at::Tensor& grad_output,
|
535 |
+
const at::Tensor& input,
|
536 |
+
const at::Tensor& target,
|
537 |
+
int64_t reduction,
|
538 |
+
double delta);
|
539 |
+
at::Tensor mse_loss_double_backward(
|
540 |
+
const at::Tensor& grad,
|
541 |
+
const at::Tensor& input,
|
542 |
+
int64_t reduction);
|
543 |
+
at::Tensor soft_margin_loss_double_backward(
|
544 |
+
const at::Tensor& grad,
|
545 |
+
const at::Tensor& input,
|
546 |
+
const at::Tensor& target,
|
547 |
+
int64_t reduction);
|
548 |
+
at::Tensor soft_margin_loss_double_backward_grad_output(
|
549 |
+
const at::Tensor& grad,
|
550 |
+
const at::Tensor& grad_output,
|
551 |
+
const at::Tensor& input,
|
552 |
+
const at::Tensor& target,
|
553 |
+
int64_t reduction);
|
554 |
+
at::Tensor softplus_double_backward(
|
555 |
+
const at::Tensor& grad,
|
556 |
+
const at::Tensor& input,
|
557 |
+
const at::Scalar& beta,
|
558 |
+
const at::Scalar& threshold);
|
559 |
+
std::tuple<at::Tensor, at::Tensor> slogdet_jvp(
|
560 |
+
const at::Tensor& LU,
|
561 |
+
const at::Tensor& pivots,
|
562 |
+
const at::Tensor& dA,
|
563 |
+
const at::Tensor& sign,
|
564 |
+
const bool use_A_T);
|
565 |
+
at::Tensor slogdet_backward(
|
566 |
+
const at::Tensor& grad_sign,
|
567 |
+
const at::Tensor& grad_logabsdet,
|
568 |
+
const at::Tensor& A,
|
569 |
+
const at::Tensor& signdet,
|
570 |
+
const at::Tensor& LU,
|
571 |
+
const at::Tensor& pivots);
|
572 |
+
at::Tensor log1p_backward(const at::Tensor& grad, const at::Tensor& self);
|
573 |
+
at::Tensor sinc_backward(const at::Tensor& grad, const at::Tensor& self);
|
574 |
+
at::Tensor sparse_constructor_values_backward(
|
575 |
+
const at::Tensor& sparse_grad_out,
|
576 |
+
const at::Tensor& indices);
|
577 |
+
at::Tensor embedding_dense_double_backward_symint(
|
578 |
+
const at::Tensor& grad,
|
579 |
+
const at::Tensor& indices,
|
580 |
+
const c10::SymInt& padding_idx);
|
581 |
+
at::Tensor index_backward(
|
582 |
+
at::Tensor zeros_like_self,
|
583 |
+
const torch::List<c10::optional<Tensor>>& indices,
|
584 |
+
const at::Tensor& grad);
|
585 |
+
at::Tensor _cudnn_ctc_loss_backward(
|
586 |
+
const at::Tensor& grad_out,
|
587 |
+
const at::Tensor& loss,
|
588 |
+
const at::Tensor& raw_grad,
|
589 |
+
bool zero_infinity);
|
590 |
+
at::Tensor elu_double_backward(
|
591 |
+
const Tensor& grad,
|
592 |
+
const Tensor& grad_output,
|
593 |
+
const Scalar& alpha,
|
594 |
+
const Scalar& scale,
|
595 |
+
const Scalar& input_scale,
|
596 |
+
bool is_result,
|
597 |
+
const Tensor& self_or_result);
|
598 |
+
|
599 |
+
Tensor svd_backward(
|
600 |
+
const Tensor& gU,
|
601 |
+
const Tensor& gS,
|
602 |
+
const Tensor& gVh,
|
603 |
+
const Tensor& U,
|
604 |
+
const Tensor& S,
|
605 |
+
const Tensor& Vh);
|
606 |
+
|
607 |
+
std::tuple<Tensor, Tensor, Tensor> linalg_svd_jvp(
|
608 |
+
const Tensor& dA,
|
609 |
+
const Tensor& U,
|
610 |
+
const Tensor& S,
|
611 |
+
const Tensor& Vh,
|
612 |
+
const bool full_matrices);
|
613 |
+
Tensor slice_backward_wrapper(
|
614 |
+
const at::Tensor& grad,
|
615 |
+
const c10::SymIntArrayRef& input_sizes,
|
616 |
+
int64_t dim,
|
617 |
+
c10::optional<c10::SymInt> start,
|
618 |
+
c10::optional<c10::SymInt> end,
|
619 |
+
c10::SymInt step);
|
620 |
+
std::tuple<Tensor, Tensor> linalg_eig_jvp(
|
621 |
+
const Tensor& dA,
|
622 |
+
const Tensor& L,
|
623 |
+
const Tensor& V,
|
624 |
+
const bool is_hermitian);
|
625 |
+
Tensor linalg_eig_backward(
|
626 |
+
const Tensor& gL,
|
627 |
+
const Tensor& gV,
|
628 |
+
const Tensor& L,
|
629 |
+
const Tensor& V,
|
630 |
+
const bool is_hermitian,
|
631 |
+
const bool symeig_eigenvectors = true);
|
632 |
+
Tensor linalg_lstsq_jvp(
|
633 |
+
const Tensor& A,
|
634 |
+
const Tensor& B,
|
635 |
+
const Tensor& dA,
|
636 |
+
const Tensor& dB);
|
637 |
+
std::tuple<Tensor, Tensor> triangular_solve_backward(
|
638 |
+
const Tensor& grad_x,
|
639 |
+
const Tensor& grad_m,
|
640 |
+
const Tensor& b,
|
641 |
+
const Tensor& a,
|
642 |
+
const Tensor& x,
|
643 |
+
const bool upper,
|
644 |
+
const bool transpose,
|
645 |
+
const bool unitriangular,
|
646 |
+
std::array<bool, 2> output_mask);
|
647 |
+
Tensor triangular_solve_jvp(
|
648 |
+
const Tensor& X,
|
649 |
+
const Tensor& A,
|
650 |
+
const Tensor& dA,
|
651 |
+
const Tensor& dB,
|
652 |
+
const bool upper,
|
653 |
+
const bool transpose,
|
654 |
+
const bool unitriangular);
|
655 |
+
Tensor linalg_solve_triangular_forward_AD(
|
656 |
+
const Tensor& A_t,
|
657 |
+
const Tensor& B_t,
|
658 |
+
const Tensor& A,
|
659 |
+
const Tensor& X,
|
660 |
+
const bool upper,
|
661 |
+
const bool left,
|
662 |
+
const bool unitriangular);
|
663 |
+
std::tuple<Tensor, Tensor> linalg_solve_triangular_backward(
|
664 |
+
const Tensor& grad,
|
665 |
+
const Tensor& A,
|
666 |
+
const Tensor& X,
|
667 |
+
const bool upper,
|
668 |
+
const bool left,
|
669 |
+
const bool unitriangular,
|
670 |
+
std::array<bool, 2> output_mask);
|
671 |
+
std::tuple<Tensor, Tensor, Tensor> _trilinear_backward(
|
672 |
+
const Tensor& grad_out,
|
673 |
+
const c10::optional<Tensor>& i1,
|
674 |
+
const c10::optional<Tensor>& i2,
|
675 |
+
const c10::optional<Tensor>& i3,
|
676 |
+
IntArrayRef expand1,
|
677 |
+
IntArrayRef expand2,
|
678 |
+
IntArrayRef expand3,
|
679 |
+
IntArrayRef sumdim,
|
680 |
+
std::array<bool, 3> grad_mask);
|
681 |
+
std::tuple<Tensor, Tensor> linalg_qr_jvp(
|
682 |
+
const Tensor& dA,
|
683 |
+
const Tensor& Q,
|
684 |
+
const Tensor& R,
|
685 |
+
const c10::string_view mode);
|
686 |
+
Tensor linalg_qr_backward(
|
687 |
+
const Tensor& gQ,
|
688 |
+
const Tensor& gR,
|
689 |
+
const Tensor& Q,
|
690 |
+
const Tensor& R,
|
691 |
+
const c10::string_view mode);
|
692 |
+
Tensor linalg_matrix_exp_differential(
|
693 |
+
const Tensor& self,
|
694 |
+
const Tensor& grad,
|
695 |
+
bool adjoint);
|
696 |
+
std::tuple<Tensor, Tensor, Tensor> batchnorm_double_backward(
|
697 |
+
const Tensor& input,
|
698 |
+
const c10::optional<Tensor>& gamma,
|
699 |
+
const Tensor& ggI,
|
700 |
+
const Tensor& ggG,
|
701 |
+
const Tensor& ggB,
|
702 |
+
const Tensor& gO,
|
703 |
+
const c10::optional<Tensor>& running_mean,
|
704 |
+
const c10::optional<Tensor>& running_var,
|
705 |
+
bool training,
|
706 |
+
double eps,
|
707 |
+
const c10::optional<Tensor>& save_mean,
|
708 |
+
const c10::optional<Tensor>& save_invstd,
|
709 |
+
std::array<bool, 3> output_mask);
|
710 |
+
std::tuple<Tensor, Tensor> _euclidean_dist_backward(
|
711 |
+
const Tensor& grad,
|
712 |
+
const Tensor& x1,
|
713 |
+
const Tensor& x2,
|
714 |
+
const Tensor& res);
|
715 |
+
Tensor fft_backward(
|
716 |
+
const Tensor& self,
|
717 |
+
const Tensor& grad,
|
718 |
+
int64_t signal_ndim,
|
719 |
+
bool complex_input,
|
720 |
+
bool complex_output,
|
721 |
+
bool inverse,
|
722 |
+
IntArrayRef checked_signal_sizes,
|
723 |
+
int64_t normalization,
|
724 |
+
bool onesided,
|
725 |
+
IntArrayRef output_sizes);
|
726 |
+
Tensor fft_r2c_backward(
|
727 |
+
const Tensor& grad,
|
728 |
+
at::IntArrayRef dim,
|
729 |
+
int64_t normalization,
|
730 |
+
bool onesided,
|
731 |
+
const c10::SymInt& last_dim_size);
|
732 |
+
Tensor fft_c2r_backward(
|
733 |
+
const Tensor& grad,
|
734 |
+
IntArrayRef dim,
|
735 |
+
int64_t normalization);
|
736 |
+
Tensor constant_pad_nd_backward(const Tensor& grad, c10::SymIntArrayRef pad);
|
737 |
+
std::tuple<Tensor, Tensor> cholesky_solve_backward(
|
738 |
+
const Tensor& grad_x,
|
739 |
+
const Tensor& self,
|
740 |
+
const Tensor& input2,
|
741 |
+
const Tensor& result,
|
742 |
+
const bool upper,
|
743 |
+
std::array<bool, 2> output_mask);
|
744 |
+
Tensor cholesky_solve_jvp(
|
745 |
+
const Tensor& X,
|
746 |
+
const Tensor& U,
|
747 |
+
const Tensor& dU,
|
748 |
+
const Tensor& dB,
|
749 |
+
const bool upper);
|
750 |
+
std::tuple<Tensor, Tensor, Tensor>
|
751 |
+
infinitely_differentiable_native_group_norm_backward(
|
752 |
+
const Tensor& dY,
|
753 |
+
const Tensor& dmean,
|
754 |
+
const Tensor& drstd,
|
755 |
+
const Tensor& X,
|
756 |
+
const Tensor& mean,
|
757 |
+
const Tensor& rstd,
|
758 |
+
const c10::optional<Tensor>& gamma,
|
759 |
+
c10::SymInt N,
|
760 |
+
const c10::SymInt& C,
|
761 |
+
c10::SymInt HxW,
|
762 |
+
int64_t group,
|
763 |
+
double eps,
|
764 |
+
std::array<bool, 3> grad_input_mask);
|
765 |
+
Tensor gelu_double_backward(
|
766 |
+
const Tensor& ggI,
|
767 |
+
const Tensor& gO,
|
768 |
+
const Tensor& input,
|
769 |
+
c10::string_view approximate);
|
770 |
+
Tensor as_strided_backward(
|
771 |
+
Tensor grad,
|
772 |
+
const TensorGeometry& input_geometry,
|
773 |
+
c10::SymIntArrayRef sizes,
|
774 |
+
c10::SymIntArrayRef strides,
|
775 |
+
const optional<c10::SymInt>& storage_offset_);
|
776 |
+
Tensor as_strided_scatter_backward(
|
777 |
+
const Tensor& grad,
|
778 |
+
const TensorGeometry& input_geometry,
|
779 |
+
const TensorGeometry& src_geometry,
|
780 |
+
c10::SymIntArrayRef sizes,
|
781 |
+
c10::SymIntArrayRef strides,
|
782 |
+
optional<c10::SymInt> storage_offset);
|
783 |
+
std::tuple<Tensor, Tensor> atan2_backward(
|
784 |
+
const Tensor& grad,
|
785 |
+
const Tensor& self,
|
786 |
+
const Tensor& other,
|
787 |
+
std::array<bool, 2> output_mask);
|
788 |
+
Tensor amaxamin_jvp(
|
789 |
+
const Tensor& x,
|
790 |
+
const Tensor& dx,
|
791 |
+
const Tensor& result,
|
792 |
+
IntArrayRef dim,
|
793 |
+
bool keepdim);
|
794 |
+
std::tuple<Tensor, Tensor, Tensor> layer_norm_double_backward(
|
795 |
+
const Tensor& input,
|
796 |
+
const c10::optional<Tensor>& gamma,
|
797 |
+
const Tensor& ggI,
|
798 |
+
const Tensor& ggG,
|
799 |
+
const Tensor& ggB,
|
800 |
+
    const Tensor& gO,
    const Tensor& save_mean,
    const Tensor& save_invstd,
    c10::SymIntArrayRef normalized_shape,
    std::array<bool, 3> output_mask);

std::tuple<Tensor, Tensor> householder_product_backward(
    const Tensor& grad,
    const Tensor& result,
    const Tensor& input,
    const Tensor& tau,
    const bool flip_order = false);
Tensor householder_product_jvp(
    const Tensor& dV,
    const Tensor& dtau,
    const Tensor& prod,
    const Tensor& V,
    const Tensor& tau);
std::tuple<Tensor, Tensor, Tensor> ormqr_backward(
    const Tensor& grad,
    const Tensor& result,
    const Tensor& self,
    const Tensor& tau,
    const Tensor& other,
    bool left,
    bool transpose,
    std::array<bool, 3> grad_output_mask);
std::tuple<Tensor, Tensor> polar_backward(
    const Tensor& grad,
    const Tensor& result);
Tensor i1_backward(
    const Tensor& grad,
    const Tensor& self,
    const Tensor& result);
Tensor i1e_backward(
    const Tensor& grad,
    const Tensor& self,
    const Tensor& result);
Tensor linalg_lu_solve_LU(
    const Tensor& grad,
    const Tensor& LU,
    const Tensor& pivots,
    const Tensor& X,
    const bool left,
    const bool adjoint);
Tensor linalg_lu_solve_jvp(
    const Tensor& X,
    const Tensor& LU,
    const Tensor& pivots,
    const Tensor& dLU,
    const Tensor& dB,
    const bool left,
    const bool adjoint);
std::tuple<Tensor, Tensor> linalg_solve_backward(
    const Tensor& gX,
    const Tensor& X,
    const Tensor& A,
    const Tensor& LU,
    const Tensor& pivots,
    const bool left,
    const bool B_requires_grad);
Tensor linalg_solve_jvp(
    const Tensor& dA,
    const Tensor& dB,
    const Tensor& X,
    const Tensor& LU,
    const Tensor& pivots,
    const bool left,
    const bool use_A_T);
Tensor lu_unpack_backward(
    const Tensor& L_grad,
    const Tensor& U_grad,
    const c10::SymInt& m,
    const c10::SymInt& n);

Tensor linalg_det_backward(
    const Tensor& grad,
    const Tensor& det,
    const Tensor& A,
    const Tensor& LU,
    const Tensor& pivots);
Tensor linalg_det_jvp(
    const Tensor& dA,
    const Tensor& det,
    const Tensor& LU,
    const Tensor& pivots,
    const bool use_A_T);
std::tuple<Tensor, Tensor> linalg_lstsq_backward(
    const Tensor& grad,
    const Tensor& A,
    const Tensor& B_,
    const std::array<bool, 2>& grad_input_mask);
Tensor linalg_lu_backward(
    const Tensor& L_grad,
    const Tensor& U_grad,
    const Tensor& P,
    const Tensor& L,
    const Tensor& U,
    const bool pivot);

std::tuple<Tensor, Tensor> linalg_lu_jvp(
    const Tensor& dA,
    const Tensor& P,
    const Tensor& L,
    const Tensor& U,
    const bool pivot);

Tensor lu_factor_ex_backward(
    const Tensor& grad,
    const Tensor& LU,
    const Tensor& pivs,
    const bool pivot);
Tensor lu_factor_ex_jvp(
    const Tensor& dX,
    const Tensor& LU,
    const Tensor& pivs,
    const bool pivot);

Tensor batch_norm_jvp(
    const Tensor& input_p,
    const Tensor& input_t,
    const Tensor& weight_p,
    const Tensor& weight_t,
    const Tensor& bias_p,
    const Tensor& bias_t,
    const c10::optional<Tensor>& running_mean,
    const c10::optional<Tensor>& running_var,
    const Tensor& saved_mean,
    const Tensor& saved_invstd,
    bool train,
    double eps);

Tensor layer_norm_jvp(
    const Tensor& input_p,
    const Tensor& input_t,
    const Tensor& weight_p,
    const Tensor& weight_t,
    const Tensor& bias_p,
    const Tensor& bias_t,
    const Tensor& saved_mean,
    const Tensor& saved_invstd,
    c10::SymIntArrayRef normalized_shape);

Tensor group_norm_jvp(
    const Tensor& input_p,
    const Tensor& input_t,
    const Tensor& weight_p,
    const Tensor& weight_t,
    const Tensor& bias_p,
    const Tensor& bias_t,
    const Tensor& saved_mean,
    const Tensor& saved_invstd,
    int64_t groups);
Tensor group_norm_mean_jvp(
    const Tensor& input_t,
    const Tensor& mean_p,
    int64_t groups);
Tensor group_norm_invstd_jvp(
    const Tensor& input_p,
    const Tensor& input_t,
    const Tensor& mean_p,
    const Tensor& invstd_p,
    int64_t groups);

Tensor convolution_jvp(
    const Tensor& input_p,
    const Tensor& input_t,
    const Tensor& weight_p,
    const Tensor& weight_t,
    const Tensor& bias_p,
    const Tensor& bias_t,
    at::SymIntArrayRef stride,
    at::SymIntArrayRef padding,
    at::SymIntArrayRef dilation,
    bool transposed,
    at::SymIntArrayRef output_padding,
    const c10::SymInt& groups);

Tensor _convolution_jvp(
    const Tensor& input_p,
    const Tensor& input_t,
    const Tensor& weight_p,
    const Tensor& weight_t,
    const Tensor& bias_p,
    const Tensor& bias_t,
    at::SymIntArrayRef stride,
    at::SymIntArrayRef padding,
    at::SymIntArrayRef dilation,
    bool transposed,
    at::SymIntArrayRef output_padding,
    const c10::SymInt& groups,
    bool benchmark,
    bool deterministic,
    bool cudnn_enabled,
    bool allow_tf32);

Tensor convolution_backward_jvp_grad_bias(
    const Tensor& grad_out_t,
    const Tensor& grad_bias);

Tensor cat_jvp(const at::ITensorListRef& tensors, int64_t dim);
Tensor block_diag_jvp(at::TensorList tensors);
Tensor stack_jvp(at::TensorList tensors, int64_t dim);
Tensor cumprod_jvp(
    const Tensor& self_t,
    const Tensor& self_p,
    const Tensor& result,
    int dim);
Tensor gather_with_keepdimed_indices(
    const Tensor& input,
    int64_t dim,
    const Tensor& indices,
    bool keepdim);
Tensor evenly_read_jvp(
    const Tensor& fw_grad,
    const Tensor& input,
    const Tensor& value);
Tensor warn_backwards(const Tensor& grad_output);

std::tuple<Tensor, Tensor> _cudnn_convolution_backward(
    const at::Tensor& self,
    const at::Tensor& grad_output,
    const at::Tensor& weight,
    at::SymIntArrayRef padding,
    at::SymIntArrayRef output_padding,
    at::SymIntArrayRef stride,
    at::SymIntArrayRef dilation,
    bool transposed,
    c10::SymInt groups,
    ::std::array<bool, 2> output_mask);

Tensor scatter_reduce_jvp(
    const Tensor& self_p,
    const Tensor& self_t,
    int dim,
    const Tensor& index,
    const Tensor& src_p,
    const Tensor& src_t,
    c10::string_view reduce,
    bool include_self,
    const Tensor& result);

std::tuple<Tensor, Tensor> scatter_reduce_backward(
    const Tensor& grad,
    const Tensor& self,
    int dim,
    const Tensor& index,
    const Tensor& src,
    c10::string_view reduce,
    bool include_self,
    const Tensor& result);

Tensor _to_copy_backward(
    const Tensor& grad,
    const c10::TensorOptions& self_options);

std::tuple<Tensor, Tensor> index_reduce_backward(
    const Tensor& grad,
    const Tensor& self,
    int dim,
    const Tensor& index,
    const Tensor& source,
    c10::string_view reduce,
    bool include_self,
    const Tensor& result);

Tensor take_backward(
    const Tensor& grad,
    const Tensor& self,
    const Tensor& indices);

Tensor to_sparse_backward(
    const Tensor& grad,
    const c10::Layout self_layout,
    const c10::OptionalArrayRef<c10::SymInt>& self_blocksize);

std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor>
mkldnn_rnn_layer_differentiable_backward(
    const Tensor& input,
    const Tensor& weight0,
    const Tensor& weight1,
    const Tensor& weight2,
    const Tensor& weight3,
    const Tensor& hx_,
    const Tensor& cx_tmp,
    const Tensor& output,
    const Tensor& hy_,
    const Tensor& cy_,
    const c10::optional<Tensor>& grad_output_r_opt,
    const c10::optional<Tensor>& grad_hy_r_opt,
    const c10::optional<Tensor>& grad_cy_r_opt,
    bool reverse,
    int64_t mode,
    int64_t hidden_size,
    int64_t num_layers,
    bool has_biases,
    bool train,
    bool bidirectional,
    at::IntArrayRef batch_sizes,
    bool batch_first,
    const at::Tensor& workspace);

} // namespace details
} // namespace generated
} // namespace autograd
} // namespace torch

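These declarations follow a common shape: each `*_backward` helper takes the incoming gradient plus whatever the forward pass saved and returns the gradient(s) of the inputs, while each `*_jvp` helper takes primals (`*_p`) and tangents (`*_t`) and returns the forward-mode tangent of the output. A minimal sketch of that pattern for a hypothetical op `y = x * alpha` (the op and both helpers are invented for illustration, not part of this header):

```cpp
#include <ATen/ATen.h>

// Reverse mode: given dL/dy and the saved scalar, return dL/dx.
at::Tensor my_scale_backward(const at::Tensor& grad, double alpha) {
  return grad * alpha;
}

// Forward mode: given the input tangent dx, return the output tangent dy.
at::Tensor my_scale_jvp(const at::Tensor& dx, double alpha) {
  return dx * alpha;
}
```
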
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/InferenceMode.h
ADDED
@@ -0,0 +1,12 @@
#pragma once

#include <c10/core/InferenceMode.h>
#include <torch/csrc/Export.h>

namespace torch {
namespace autograd {

using InferenceMode = c10::InferenceMode;

}
} // namespace torch

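`torch::autograd::InferenceMode` is simply an alias for `c10::InferenceMode`, the RAII guard that disables autograd tracking for the code it scopes. A minimal usage sketch (assuming the LibTorch C++ frontend is available):

```cpp
#include <torch/torch.h>

torch::Tensor predict(const torch::Tensor& input, const torch::Tensor& weight) {
  // While the guard is alive, newly created tensors are inference tensors
  // and no autograd graph is recorded for this computation.
  c10::InferenceMode guard;
  return torch::matmul(input, weight);
}
```
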
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/VariableTypeUtils.h
ADDED
@@ -0,0 +1,532 @@
#pragma once

#include <c10/util/irange.h>

#include <ATen/core/boxing/KernelFunction.h>
#include <ATen/core/dispatch/Dispatcher.h>

#include <torch/csrc/autograd/edge.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/functions/basic_ops.h>
#include <torch/csrc/autograd/functions/tensor.h>
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/autograd/saved_variable.h>
#include <torch/csrc/autograd/variable.h>

#include <torch/csrc/autograd/functions/utils.h>
#include <torch/csrc/autograd/jit_decomp_interface.h>
#include <torch/csrc/utils/variadic.h>

#include <array>
#include <cstddef>
#include <functional>
#include <initializer_list>
#include <memory>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#ifdef _MSC_VER
#ifdef Type
#undef Type
#endif
#endif

namespace torch {
namespace autograd {

// The requires_grad argument is used to know if the inplace operation needs
// gradient to be set up for it.
// In particular, we can have tensor.requires_grad() != requires_grad when
// writing a Tensor that requires gradients inplace into a Tensor that does not
// require gradients: a = torch.rand(2); b = torch.rand(2, requires_grad=True);
// a.copy_(b)
inline void check_inplace(const at::Tensor& tensor, bool requires_grad) {
  if (requires_grad && GradMode::is_enabled()) {
    auto diff_view_meta = impl::get_view_autograd_meta(tensor);
    if (diff_view_meta && diff_view_meta->has_bw_view()) {
      // This can throw or warn
      handle_view_on_rebase(diff_view_meta);
      if (tensor.requires_grad() && tensor._base().is_leaf()) {
        TORCH_CHECK(
            false,
            "a view of a leaf Variable that requires grad is being used in an in-place operation.");
      }
    }
    if (tensor.requires_grad() && tensor.is_leaf()) {
      TORCH_CHECK(
          false,
          "a leaf Variable that requires grad is being used in an in-place operation.");
    }
  }
}

inline void check_inplace(at::ITensorListRef tensors, bool requires_grad) {
  for (const auto& tensor : tensors) {
    check_inplace(tensor, requires_grad);
  }
}

inline void throw_error_out_requires_grad(const char* name) {
  AT_ERROR(
      name,
      "(): functions with out=... arguments don't support automatic differentiation, "
      "but one of the arguments requires grad.");
}

inline void throw_error_for_complex_autograd(
    const at::Tensor& tensor,
    const char* name) {
  if (tensor.requires_grad()) {
    TORCH_CHECK(
        !tensor.is_complex(),
        name,
        " does not support automatic differentiation for outputs with complex dtype.");
  }
}

inline void throw_error_if_base_and_tensor_are_same(
    const at::Tensor& base,
    const at::Tensor& tensor) {
  TORCH_CHECK(
      base.unsafeGetTensorImpl() != tensor.unsafeGetTensorImpl(),
      "View operation returned a tensor that is the same as the input base tensor. This "
      "is no longer allowed; you must explicitly create a new tensor (e.g., using .detach()). "
      "As a user, you could have made a mistake implementing __torch_dispatch__ or a Python "
      "operator decomposition or meta registration; if that's not the case, please "
      "report a bug to PyTorch or the backend you are using.");
}

inline void throw_error_for_complex_autograd(
    at::ITensorListRef tensorlist,
    const char* name) {
  for (const auto& tensor : tensorlist) {
    throw_error_for_complex_autograd(tensor, name);
  }
}

// TODO: Blegh, bare references

inline void rebase_history(Variable& var, std::shared_ptr<Node> grad_fn) {
  if (grad_fn && var.defined()) {
    grad_fn->add_input_metadata(var);
    impl::rebase_history(var, {std::move(grad_fn), 0});
  }
}

inline void rebase_history(
    std::vector<Variable>&& vars,
    std::shared_ptr<Node> grad_fn) {
  if (grad_fn) {
    for (auto& var : vars) {
      if (var.defined()) {
        auto output_nr = grad_fn->add_input_metadata(var);
        impl::rebase_history(var, {grad_fn, output_nr});
      } else {
        grad_fn->add_input_metadata(Node::undefined_input());
      }
    }
  }
}

inline void increment_version(const at::Tensor& t) {
  impl::bump_version(t);
}

struct Flatten : IterArgs<Flatten> {
  Flatten(variable_list& out) : out(out) {}
  variable_list& out;
  void operator()(const at::Tensor& x) {
    out.emplace_back(x);
  }
  void operator()(const c10::optional<at::Tensor>& x) {
    if (x.has_value())
      out.emplace_back(x.value());
  }
  void operator()(at::ArrayRef<at::Tensor> xs) {
    out.insert(out.end(), xs.begin(), xs.end());
  }
};

template <typename... Args>
inline variable_list flatten_tensor_args(Args&&... args) {
  variable_list out;
  out.reserve(count_tensors(std::forward<Args>(args)...));
  Flatten(out).apply(std::forward<Args>(args)...);
  return out; // RVO
}

// See NOTE [ Autograd View Variables ] for details.
inline at::Tensor as_view(
    const at::Tensor& base,
    const at::Tensor& tensor,
    bool is_bw_differentiable,
    bool is_fw_differentiable,
    std::function<at::Tensor(const at::Tensor&)> view_func = nullptr,
    CreationMeta creation_meta = CreationMeta::DEFAULT,
    bool allow_tensor_metadata_change = true) {
  // Note [View of inference tensor]
  // For inference tensors this code can only be hit outside InferenceMode
  // since ADInplaceOrView is in the default_included_set.
  // If Inplace and View were separate dispatch keys we could just put Inplace
  // in the default_included_set, so that view ops on inference tensors don't
  // have to go through as_view even outside InferenceMode.
  if (base.is_inference())
    return tensor;

  auto diff_view_meta = torch::autograd::impl::get_view_autograd_meta(base);

  // To speed up the most common case, we specially handle when both the forward
  // and backward view infos are the same, and so a single shared ViewInfo can
  // be used for both of them.
  if ((!diff_view_meta || diff_view_meta->shared_view_info()) &&
      is_bw_differentiable && is_fw_differentiable) {
    throw_error_if_base_and_tensor_are_same(base, tensor);
    if (diff_view_meta) {
      creation_meta = propagate_creation_meta(
          diff_view_meta->get_creation_meta(), creation_meta);
      return make_variable_differentiable_view(
          tensor,
          diff_view_meta->get_backward_view().chain(
              base, tensor, std::move(view_func)),
          c10::nullopt,
          /*shared_view_info*/ true,
          creation_meta,
          allow_tensor_metadata_change);
    } else {
      return make_variable_differentiable_view(
          tensor,
          ViewInfo(base, std::move(view_func)),
          c10::nullopt,
          /*shared_view_info*/ true,
          creation_meta,
          allow_tensor_metadata_change);
    }
  }

  // If they cannot be shared, create the required view infos
  c10::optional<ViewInfo> new_bw_info;
  c10::optional<ViewInfo> new_fw_info;

  if (is_bw_differentiable) {
    if (diff_view_meta && diff_view_meta->has_bw_view()) {
      const auto& base_bw_info = diff_view_meta->get_backward_view();
      new_bw_info = base_bw_info.chain(base, tensor, view_func);
    } else {
      new_bw_info = ViewInfo(base, view_func);
    }
  } else {
    TORCH_CHECK(
        creation_meta == CreationMeta::DEFAULT,
        "Non-backward differentiable views must have creation_meta=CreationMeta::DEFAULT");
  }

  if (is_fw_differentiable) {
    // Check if base is a forward differentiable view
    if (diff_view_meta && diff_view_meta->has_fw_view()) {
      const auto& base_fw_info = diff_view_meta->get_forward_view();
      new_fw_info = base_fw_info.chain(base, tensor, std::move(view_func));
    } else {
      new_fw_info = ViewInfo(base, std::move(view_func));
    }
  }

  if (is_fw_differentiable || is_bw_differentiable) {
    if (diff_view_meta && diff_view_meta->has_bw_view()) {
      creation_meta = propagate_creation_meta(
          diff_view_meta->get_creation_meta(), creation_meta);
    }
    throw_error_if_base_and_tensor_are_same(base, tensor);
    return make_variable_differentiable_view(
        tensor,
        std::move(new_bw_info),
        std::move(new_fw_info),
        /*shared_view_info*/ false,
        creation_meta,
        allow_tensor_metadata_change);
  } else {
    return make_variable_non_differentiable_view(
        base, tensor, allow_tensor_metadata_change);
  }
}

// See NOTE [ Autograd View Variables ] for details.
inline std::vector<at::Tensor> as_view(
    const at::Tensor& base,
    std::vector<at::Tensor>& tensors,
    bool is_bw_differentiable,
    bool is_fw_differentiable,
    CreationMeta creation_meta = CreationMeta::DEFAULT) {
  // See Note [View of inference tensor]
  if (base.is_inference())
    return tensors;

  const auto diff_view_meta =
      torch::autograd::impl::get_view_autograd_meta(base);

  // Special case when view info can be shared for forward and backward
  // differentiable views
  if ((!diff_view_meta || diff_view_meta->shared_view_info()) &&
      is_bw_differentiable && is_fw_differentiable) {
    c10::optional<ViewInfo> new_shared_info;
    if (diff_view_meta) {
      // TODO: fix fb internal use-case so that it doesn't trigger this internal
      // assert when the base is not a view. For now, we only do the same
      // (wrong) thing as the old code, which is to only check when the input is
      // a backward differentiable view
      if (diff_view_meta->has_bw_view()) {
        TORCH_INTERNAL_ASSERT(
            creation_meta == CreationMeta::NO_GRAD_MODE ||
                creation_meta == CreationMeta::INFERENCE_MODE ||
                creation_meta == CreationMeta::MULTI_OUTPUT_NODE,
            "Functions that result multiple view must have a creation meta reflecting this behavior or more restrictive.");
      }
      creation_meta = propagate_creation_meta(
          diff_view_meta->get_creation_meta(), creation_meta);
      const auto& base_bw_info = diff_view_meta->get_backward_view();
      new_shared_info = ViewInfo(base_bw_info.base_, /* view_func */ nullptr);
    } else {
      new_shared_info = ViewInfo(base, /* view_func */ nullptr);
    }

    for (at::Tensor& tensor : tensors) {
      if (is_fw_differentiable || is_bw_differentiable) {
        tensor = make_variable_differentiable_view(
            tensor,
            new_shared_info,
            c10::nullopt,
            /*shared_view_info*/ true,
            creation_meta);
      } else {
        tensor = make_variable_non_differentiable_view(base, tensor);
      }
    }
    return tensors;
  }

  c10::optional<ViewInfo> new_bw_info = c10::nullopt;
  c10::optional<ViewInfo> new_fw_info = c10::nullopt;

  if (is_bw_differentiable) {
    if (diff_view_meta && diff_view_meta->has_bw_view()) {
      const auto& base_bw_info = diff_view_meta->get_backward_view();
      // TODO: fix fb internal use-case so that it doesn't trigger this internal
      // assert when the base is not a view. In this code, the assert should be
      // outside of the if statement.
      TORCH_INTERNAL_ASSERT(
          creation_meta == CreationMeta::NO_GRAD_MODE ||
              creation_meta == CreationMeta::INFERENCE_MODE ||
              creation_meta == CreationMeta::MULTI_OUTPUT_NODE,
          "Functions that result multiple view must have a creation meta reflecting this behavior or more restrictive.");
      // It is ok to create a ViewInfo where only the base is correct in this
      // case as inplace operations on such views are not allowed
      new_bw_info = ViewInfo(base_bw_info.base_, /* view_func */ nullptr);
    } else {
      new_bw_info = ViewInfo(base, /* view_func */ nullptr);
    }
  } else {
    TORCH_CHECK(
        creation_meta == CreationMeta::DEFAULT,
        "Non-backward differentiable views must have creation_meta=CreationMeta::DEFAULT");
  }
  if (is_fw_differentiable) {
    // Check if base is a forward differentiable view
    if (diff_view_meta && diff_view_meta->has_fw_view()) {
      const auto& base_fw_info = diff_view_meta->get_forward_view();
      TORCH_INTERNAL_ASSERT(
          creation_meta == CreationMeta::NO_GRAD_MODE ||
              creation_meta == CreationMeta::INFERENCE_MODE ||
              creation_meta == CreationMeta::MULTI_OUTPUT_NODE,
          "Functions that result multiple view must have a creation meta reflecting this behavior or more restrictive.");
      // It is ok to create a ViewInfo where only the base is correct in this
      // case as inplace operations on such views are not allowed
      new_fw_info = ViewInfo(base_fw_info.base_, /* view_func */ nullptr);
    } else {
      new_fw_info = ViewInfo(base, /* view_func */ nullptr);
    }
  }

  if ((is_fw_differentiable || is_bw_differentiable) && base.is_view()) {
    // is_view() => diff_view_meta
    creation_meta = propagate_creation_meta(
        diff_view_meta->get_creation_meta(), creation_meta);
  }

  for (at::Tensor& tensor : tensors) {
    if (is_fw_differentiable || is_bw_differentiable) {
      tensor = make_variable_differentiable_view(
          tensor,
          new_bw_info,
          new_fw_info,
          /*shared_view_info*/ false,
          creation_meta);
    } else {
      tensor = make_variable_non_differentiable_view(base, tensor);
    }
  }
  return tensors;
}

inline void check_no_requires_grad(
    const at::Tensor& tensor,
    const char* name,
    const char* fn_name = "",
    bool check_grad_mode = true) {
  TORCH_CHECK(
      !(tensor.defined() && tensor.requires_grad()) ||
          !(check_grad_mode && GradMode::is_enabled()),
      "The function '",
      fn_name,
      "' is not differentiable with respect to argument '",
      name,
      "'. This input cannot have requires_grad True.");
}

inline void check_no_requires_grad(
    const c10::optional<at::Tensor>& tensor,
    const char* name,
    const char* fn_name = "") {
  if (tensor.has_value()) {
    check_no_requires_grad(*tensor, name, fn_name);
  }
}

inline void check_no_requires_grad(
    at::ITensorListRef tensors,
    const char* name,
    const char* fn_name = "") {
  // GradMode check is expensive, so check it only once for TensorLists
  if (!GradMode::is_enabled()) {
    return;
  }
  for (auto& tensor : tensors) {
    check_no_requires_grad(tensor, name, fn_name, /*check_grad_mode*/ false);
  }
}

inline void check_no_requires_grad(
    const c10::List<c10::optional<at::Tensor>>& tensors,
    const char* name,
    const char* fn_name = "") {
  // GradMode check is expensive, so check it only once for TensorLists
  if (!GradMode::is_enabled()) {
    return;
  }
  for (c10::optional<at::Tensor> tensor : tensors) {
    if (tensor.has_value()) {
      check_no_requires_grad(*tensor, name, fn_name, /*check_grad_mode*/ false);
    }
  }
}

// Assumed that saved tensor lists are never inplace outputs
inline std::vector<SavedVariable> make_saved_variable_list(
    at::ITensorListRef tensors,
    const bool is_output = false) {
  return fmap(tensors, [&is_output](const at::Tensor& tensor) -> SavedVariable {
    return SavedVariable{tensor, is_output /* is output */};
  });
}

// Assumed that saved tensor lists are never inplace outputs
inline std::vector<SavedVariable> make_saved_variable_list(
    const c10::List<c10::optional<at::Tensor>>& tensors,
    const bool is_output = false) {
  return fmap(
      tensors,
      [&is_output](const c10::optional<at::Tensor>& tensor) -> SavedVariable {
        if (tensor.has_value()) {
          return SavedVariable{*tensor, is_output /* is output */};
        } else {
          return SavedVariable{at::Tensor(), is_output /* is output */};
        }
      });
}

inline std::vector<std::vector<int64_t>> to_args_sizes(
    at::ITensorListRef tensors) {
  std::vector<std::vector<int64_t>> args_sizes(tensors.size());
  size_t i = 0;
  for (const auto& t : tensors) {
    args_sizes[i++] = t.sizes().vec();
  }
  return args_sizes;
}

inline std::vector<std::vector<c10::SymInt>> to_args_sizes_symint(
    at::ITensorListRef tensors) {
  std::vector<std::vector<c10::SymInt>> args_sizes(tensors.size());
  size_t i = 0;
  for (const auto& t : tensors) {
    args_sizes[i++] = t.sym_sizes().vec();
  }
  return args_sizes;
}

inline std::vector<c10::ScalarType> to_args_scalartypes(
    at::ITensorListRef tensors) {
  std::vector<c10::ScalarType> args_scalartypes(tensors.size());
  size_t i = 0;
  for (const auto& t : tensors) {
    args_scalartypes[i++] = t.scalar_type();
  }
  return args_scalartypes;
}

namespace impl {

namespace {

// If run_jit_decomposition were not a member function, we would be able
// to pass this as a template parameter to c10::BoxedKernel::makeFromFunction.
// However, member functions cannot be passed this way - instead we wrap our
// call in this functor so it can be passed to c10::BoxedKernel::makeFromFunctor
class WrapperFunctor final : public c10::OperatorKernel {
 public:
  WrapperFunctor(JitDecompInterface* impl) : impl_(impl){};

  void operator()(
      const c10::OperatorHandle& op,
      c10::DispatchKeySet ks,
      torch::jit::Stack* stack) {
    impl_->run_jit_decomposition(op, stack);
  }
  JitDecompInterface* impl_;
};

} // namespace

template <class Return, class... Args>
Return run_jit_decomposition_with_args_for_jvp(
    c10::string_view name,
    const c10::OperatorHandle& opHandle,
    c10::DispatchKeySet dispatchKeySet,
    Args&&... args) {
  // see NOTE: [Jit Decomposition Interface]
  JitDecompInterface* impl = getJitDecompImpl();

  TORCH_CHECK_NOT_IMPLEMENTED(
      impl && impl->has_jit_decomposition(opHandle.schema()),
      "Trying to use forward AD with ",
      name,
      " that does not support it because it has not been implemented yet.\nPlease file an issue "
      "to PyTorch at https://github.com/pytorch/pytorch/issues/new?template=feature-request.yml "
      "so that we can prioritize its implementation.\n"
      "Note that forward AD support for some operators require PyTorch to be built with "
      "TorchScript and for JIT to be enabled. "
      "If the environment var PYTORCH_JIT=0 is set or if the library is not built with TorchScript, "
      "some operators may no longer be used with forward AD.");

  return c10::KernelFunction::makeFromBoxedKernel(
             c10::BoxedKernel::makeFromFunctor(
                 std::make_unique<WrapperFunctor>(impl)))
      .call<Return, Args...>(
          opHandle, dispatchKeySet, std::forward<Args>(args)...);
}

} // namespace impl

} // namespace autograd
} // namespace torch

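The `check_inplace` helpers above are what produce the familiar "leaf Variable that requires grad is being used in an in-place operation" error. A minimal sketch of user code that would trip, and then avoid, that check (assuming LibTorch):

```cpp
#include <torch/torch.h>

void leaf_inplace_example() {
  auto w = torch::ones({2, 2}, torch::requires_grad());
  // w is a leaf that requires grad; mutating it in-place while GradMode is
  // enabled is rejected by check_inplace():
  // w.add_(1.0);  // error: "a leaf Variable that requires grad is being
  //               //         used in an in-place operation."
  {
    torch::NoGradGuard no_grad;
    w.add_(1.0);  // fine: GradMode is disabled inside this scope
  }
}
```
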
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/anomaly_mode.h
ADDED
@@ -0,0 +1,73 @@
#pragma once

#include <torch/csrc/Export.h>
#include <memory>
#include <string>

namespace torch {
namespace autograd {

// forward declaration of Node from function.h
struct Node;

struct TORCH_API AnomalyMode {
  static bool is_enabled() {
    return _enabled;
  }
  static bool should_check_nan() {
    return _check_nan;
  }
  static void set_enabled(bool enabled, bool check_nan = true) {
    _enabled = enabled;
    _check_nan = check_nan;
  }

 private:
  static bool _enabled;
  static bool _check_nan;
};

/// A RAII guard that enables Anomaly Detection Mode.
///
/// Anomaly detection mode is useful for debugging problems happening
/// in the backward, such as unexpectedly modified tensors or NaNs
/// occurring in the backward.
///
/// The enabling of anomaly mode is global - as soon as there is one
/// such guard, it is enabled for all computation and threads. It also
/// comes with a significant performance penalty.
///
/// Example:
/// @code
/// auto x = torch::tensor({1.}, torch::requires_grad());
/// {
///   torch::autograd::DetectAnomalyGuard detect_anomaly;
///   auto x = torch::tensor({5.0}, torch::requires_grad());
///   auto y = x * x;
///   auto z = y * y;
///   y += 1;
///   z.backward();
/// }
/// @endcode
class TORCH_API DetectAnomalyGuard {
 public:
  DetectAnomalyGuard(bool check_nan = true);
  ~DetectAnomalyGuard();

 private:
  bool prev_check_nan_;
};

struct TORCH_API AnomalyMetadata {
  virtual ~AnomalyMetadata();
  virtual void store_stack();
  virtual void print_stack(const std::string& current_node_name);
  virtual void assign_parent(const std::shared_ptr<Node>& parent_node);

 private:
  std::string traceback_;
  std::shared_ptr<Node> parent_;
};

} // namespace autograd
} // namespace torch

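Besides the RAII guard shown in the doc comment above, anomaly detection can also be toggled directly through `AnomalyMode::set_enabled`, which stays in effect until it is explicitly turned off. A small sketch using only the declarations in this header:

```cpp
#include <torch/csrc/autograd/anomaly_mode.h>

void enable_anomaly_checks() {
  using torch::autograd::AnomalyMode;
  // Enable anomaly detection (and NaN checking) for subsequent backward passes.
  AnomalyMode::set_enabled(/*enabled=*/true, /*check_nan=*/true);
  // ... run the backward pass being debugged, then optionally turn it off:
  AnomalyMode::set_enabled(/*enabled=*/false);
}
```
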
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd.h
ADDED
@@ -0,0 +1,106 @@
#pragma once

#include <torch/csrc/autograd/variable.h>

namespace torch {
namespace autograd {

/// Computes the sum of gradients of given tensors with respect to graph leaves.
///
/// The graph is differentiated using the chain rule. If any of ``tensors``
/// are non-scalar (i.e. their data has more than one element) and require
/// gradient, then the Jacobian-vector product would be computed, in this case
/// the function additionally requires specifying `grad_tensors`. It should be a
/// sequence of matching length, that contains the "vector" in the
/// Jacobian-vector product, usually the gradient of the differentiated function
/// w.r.t. corresponding tensors
/// (`torch::Tensor()` is an acceptable value for all tensors that don't need
/// gradient tensors).
///
/// This function accumulates gradients in the leaves - you might need to zero
/// them before calling it.
///
/// \param tensors Tensors of which the derivative will be computed.
/// \param grad_tensors The "vector" in the Jacobian-vector product, usually
///     gradients w.r.t. each element of corresponding tensors. `torch::Tensor()`
///     values can be specified for scalar Tensors or ones that don't require
///     grad. If a `torch::Tensor()` value would be acceptable for all
///     grad_tensors, then this argument is optional.
/// \param retain_graph If `false`, the graph used to compute the grad will be
///     freed. Note that in nearly all cases setting this option to `true` is
///     not needed and often can be worked around in a much more efficient way.
///     Defaults to the value of `create_graph`.
/// \param create_graph If `true`, graph of the derivative will be constructed,
///     allowing the computation of higher order derivative products. Defaults
///     to `false`.
/// \param inputs Inputs w.r.t. which the gradient will be accumulated into
///     `at::Tensor::grad`. All other Tensors will be ignored. If not provided,
///     the gradient is accumulated into all the leaf Tensors that were used to
///     compute param `tensors`.
// When inputs are provided and a given input is not a leaf,
// the current implementation will call its grad_fn (even though it is not
// strictly needed to get this gradients). It is an implementation detail
// on which the user should not rely. See
// https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for
// more details.
TORCH_API void backward(
    const variable_list& tensors,
    const variable_list& grad_tensors = {},
    c10::optional<bool> retain_graph = c10::nullopt,
    bool create_graph = false,
    const variable_list& inputs = {});

/// Computes and returns the sum of gradients of outputs with respect to the
/// inputs.
///
/// ``grad_outputs`` should be a sequence of length matching ``output``
/// containing the "vector" in Jacobian-vector product, usually the pre-computed
/// gradients w.r.t. each of the outputs. If an output doesn't require_grad,
/// then the gradient can be ``torch::Tensor()``.
///
/// \param outputs outputs of the differentiated function.
/// \param inputs Inputs w.r.t. which the gradient will be
///     returned (and not accumulated into ``at::Tensor::grad``).
/// \param grad_outputs The "vector" in the Jacobian-vector product.
///     Usually gradients w.r.t. each output. `torch::Tensor()` values can be
///     specified for scalar Tensors or ones that don't require grad. If a
///     `torch::Tensor()` value would be acceptable for all grad_tensors, then
///     this argument is optional. Default: `{}`.
/// \param retain_graph If ``false``, the graph used to compute the grad
///     will be freed. Note that in nearly all cases setting this option to
///     ``true`` is not needed and often can be worked around in a much more
///     efficient way. Defaults to the value of ``create_graph``.
/// \param create_graph If ``true``, graph of the derivative will
///     be constructed, allowing the computation of higher order derivative
///     products. Default: ``false``.
/// \param allow_unused If ``false``, specifying inputs that were not
///     used when computing outputs (and therefore their grad is always zero)
///     is an error. Defaults to ``false``.
TORCH_API variable_list grad(
    const variable_list& outputs,
    const variable_list& inputs,
    const variable_list& grad_outputs = {},
    c10::optional<bool> retain_graph = c10::nullopt,
    bool create_graph = false,
    bool allow_unused = false);

namespace forward_ad {

/// Creates a new dual level and returns its index. This level index should then
/// be used to call into the other functions below. This API supports entering a
/// new level before the previous one is exited. We call them nested forward AD
/// levels. These can be used to compute higher order derivatives.
TORCH_API uint64_t enter_dual_level();

/// Exits the given level. This will clear up all the gradients from this level
/// and all dual Tensors that had gradients for this level will become regular
/// Tensors again. This function can only be used to exit the innermost nesting
/// level and so exiting must happen in reverse order compared to the entering
/// that was done with the function above.
TORCH_API void exit_dual_level(uint64_t level);

} // namespace forward_ad
} // namespace autograd
} // namespace torch

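A short sketch of how these free functions are called from C++, mirroring `torch.autograd.backward` and `torch.autograd.grad` in Python (assuming LibTorch):

```cpp
#include <torch/torch.h>

void backward_and_grad_example() {
  auto x = torch::randn({3}, torch::requires_grad());
  auto y = (x * x).sum();

  // Accumulates d(y)/d(x) into x.grad().
  torch::autograd::backward({y});

  // Returns the gradients instead of accumulating them.
  auto z = (x * 2).sum();
  auto grads = torch::autograd::grad({z}, {x});
  // grads[0] holds d(z)/d(x), i.e. a tensor of 2s.
}
```
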
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/autograd_not_implemented_fallback.h
ADDED
@@ -0,0 +1,34 @@
#pragma once

#include <torch/library.h>

namespace torch {
namespace autograd {

// Default DispatchKey::Autograd fallback for built-in operators.
// Can be registered for custom operators.
TORCH_API torch::CppFunction autogradNotImplementedFallback();

// Default DispatchKey::AdInplaceOrView fallback for built-in operators
// Can be registered for custom operators.
TORCH_API torch::CppFunction autogradNotImplementedInplaceOrViewFallback();

// Default DispatchKey::Autograd fallback for all other operators (i.e. custom
// operators)
TORCH_API torch::CppFunction basicAutogradNotImplementedFallback();

enum class AutogradFallbackMode {
  Nothing, // Fallback is a redispatch
  Warn, // Fallback raises a warning if backward is called
  Error, // Fallback raises an error if backward is called
};

// Change the behavior of "basicAutogradNotImplementedFallback"
// In Python this is:
// - torch._C._set_autograd_fallback_mode(str) -> None
// - torch._C._get_autograd_fallback_mode() -> str
TORCH_API void setAutogradFallbackMode(AutogradFallbackMode mode);
TORCH_API AutogradFallbackMode getAutogradFallbackMode();

} // namespace autograd
} // namespace torch

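These fallbacks are typically installed for a custom operator library so that running a custom op under autograd errors or warns instead of silently dropping gradients. A sketch of the usual registration pattern (the `myops` namespace is illustrative):

```cpp
#include <torch/library.h>
#include <torch/csrc/autograd/autograd_not_implemented_fallback.h>

// Route every operator in the (illustrative) "myops" namespace that has no
// explicit Autograd kernel through the not-implemented fallback.
TORCH_LIBRARY_IMPL(myops, Autograd, m) {
  m.fallback(torch::autograd::autogradNotImplementedFallback());
}
```
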
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/cpp_hook.h
ADDED
@@ -0,0 +1,31 @@
#pragma once
#include <torch/csrc/autograd/function_hook.h>
#include <functional>
#include <memory>

namespace torch {
namespace autograd {

using hooks_list =
    std::vector<std::function<at::TensorBase(const at::TensorBase&)>>;

struct CppFunctionTensorPreHook : public FunctionPreHook {
  CppFunctionTensorPreHook(std::shared_ptr<hooks_list> hooks, size_t value_idx);
  variable_list operator()(const variable_list& values) override;

  std::shared_ptr<hooks_list> hooks_;
  size_t value_idx_;
};

struct CppFunctionSingleTensorPreHook : public FunctionPreHook {
  CppFunctionSingleTensorPreHook(
      std::function<at::TensorBase(const at::TensorBase&)> hook,
      size_t value_idx);
  variable_list operator()(const variable_list& values) override;

  std::function<at::TensorBase(const at::TensorBase&)> hook_;
  size_t value_idx_;
};

} // namespace autograd
} // namespace torch

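These pre-hook types back the C++ tensor hook API; user code normally reaches them through `Tensor::register_hook`, which stores the callable in a `hooks_list`. A minimal usage sketch (assuming LibTorch):

```cpp
#include <torch/torch.h>

void hook_example() {
  auto x = torch::ones({2}, torch::requires_grad());
  // The hook runs when the gradient for x is computed; returning a tensor
  // replaces the gradient that gets accumulated into x.grad().
  x.register_hook([](torch::Tensor grad) { return grad * 2; });
  (x * 3).sum().backward();
  // x.grad() is now {6, 6} instead of {3, 3}.
}
```
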
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/custom_function.h
ADDED
@@ -0,0 +1,438 @@
#pragma once

#include <ATen/core/ivalue.h>
#include <c10/core/SymInt.h>
#include <c10/util/flat_hash_map.h>
#include <c10/util/irange.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/variable.h>
#include <vector>

namespace torch {
namespace autograd {

using optional_variable_list = std::vector<c10::optional<Variable>>;
using _jvp_fn_t = std::function<variable_list(variable_list, variable_list)>;
using _view_as_self_fn_t = std::function<at::Tensor(at::Tensor)>;

TORCH_API std::vector<c10::optional<Variable>> _wrap_outputs(
    const variable_list& input_vars,
    const std::unordered_set<at::TensorImpl*>& non_differentiable,
    const std::unordered_set<at::TensorImpl*>& dirty_inputs,
    const at::ArrayRef<c10::optional<Variable>> raw_outputs,
    const std::shared_ptr<Node>& cdata,
    const _jvp_fn_t& jvp_user_function,
    const std::unordered_set<at::TensorImpl*>& to_save_if_setup_context,
    const _view_as_self_fn_t& view_as_self_fn);

TORCH_API void check_variable_result(
    const at::TensorBase& original,
    const at::TensorBase& result,
    const std::string& hook_name);

// Get the return type of the forward function of the custom Function class X
template <typename X, typename... Args>
using forward_t = decltype(X::forward(nullptr, std::declval<Args>()...));

/// To use custom autograd operations, implement a Function subclass with
/// static forward and backward functions:
///
/// `forward` can take as many arguments as you want and should return either a
/// variable list or a Variable. Use of any direct Variable arguments will be
/// registered in the graph but no vectors/sets or any other data structures
/// will be traversed. You can use c10::optional<Tensor> as one of the arguments
/// and it will be registered as a variable in the graph if the argument has a
/// value. It should take a pointer to `torch::autograd::AutogradContext` as the
/// first argument. Variables can be saved in the `ctx` using
/// `ctx->save_for_backward`
/// (see `torch::autograd::AutogradContext::save_for_backward`) and other data
/// can be saved in the `ctx->saved_data` map
/// (see `torch::autograd::AutogradContext::saved_data`)
/// in the form of `<std::string, at::IValue>` pairs.
///
/// `backward` should take a pointer to `torch::autograd::AutogradContext`
/// and a variable list containing as many Variables as there were outputs from
/// `forward` as arguments. It should return as many Variables as there were
/// inputs with each of them containing the gradient w.r.t. its corresponding
/// input. Variables saved in `forward` can be accessed with
/// `ctx->get_saved_variables` (see
/// `torch::autograd::AutogradContext::get_saved_variables`) and other saved
/// data can be accessed from `ctx->saved_data`.
///
/// For example:
/// ```
/// class MyFunction : public Function<MyFunction> {
///   public:
///   static variable_list forward(AutogradContext *ctx, int n, Variable var) {
///      // Save data for backward in context
///      ctx->saved_data["n"] = n;
///      var.mul_(2);
///      // Mark var as modified by inplace operation
///      ctx->mark_dirty({var});
///      return {var};
///   }
///
///   static variable_list backward(AutogradContext *ctx, variable_list
///   grad_output) {
///      // Use data saved in forward
///      auto n = ctx->saved_data["n"].toInt();
///      return {grad_output[0]*n};
///   }
/// };
/// ```
///
/// To use `MyFunction`:
/// ```
/// Variable x;
/// auto y = MyFunction::apply(6, x);
/// // Example backward call
/// y[0].sum().backward();
/// ```
template <class T>
struct TORCH_API Function {
  // We need to use a different template parameter than T here because T will
  // inherit from Function, and when Function<T> is instantiated, T::forward
  // is not declared yet.
  // The enable_if check is to ensure that the user doesn't explicitly provide
  // the parameter X.
  template <typename X = T, typename... Args>
  static auto apply(Args&&... args)
      -> std::enable_if_t<std::is_same<X, T>::value, forward_t<X, Args...>>;
};

/// Context to save information during `forward` that can be accessed in
/// `backward` in custom autograd operations (see `torch::autograd::Function`
/// for details).
struct TORCH_API AutogradContext {
  AutogradContext() = default;
  AutogradContext(const AutogradContext& other) = delete;
  AutogradContext& operator=(const AutogradContext& other) = delete;

  /// Can be used to save non-variable data for `backward`.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  ska::flat_hash_map<std::string, at::IValue> saved_data;

  /// Saves the list of variables for a future call to `backward`. This
  /// should be called at most once from inside of `forward`.
  void save_for_backward(variable_list to_save);
  /// Marks variables in the list as modified in an in-place operation. This
  /// should be called at most once from inside of `forward` and all arguments
  /// should be inputs.
  void mark_dirty(const variable_list& inputs);
  /// Marks outputs in the list as not requiring gradients. This should be
  /// called at most once from inside of `forward` and all arguments should be
  /// outputs.
  void mark_non_differentiable(const variable_list& outputs);
  // Sets whether undefined output grad tensors should be expanded to tensors
  // full of zeros before calling backward function. Default value is true.
  void set_materialize_grads(bool value);

  /// Get the list of variables that were saved in `forward` using
  /// `save_for_backward()`. Before returning them to the user, a check is made
  /// to ensure that they were not modified by any in-place operations.
  variable_list get_saved_variables() const;
  const std::unordered_set<at::TensorImpl*>& get_and_bump_dirty() const;
  const std::unordered_set<at::TensorImpl*>& get_non_differentiable() const;

  /// Expose the Node's `task_should_compute_output` method to the cpp
  /// custom autograd Function as `needs_input_grad`.
  bool needs_input_grad(size_t output_edge_index) const;
  bool needs_input_grad(std::initializer_list<IndexRange> idxs) const;

 private:
  std::unordered_set<at::TensorImpl*> non_differentiable_;
  std::unordered_set<at::TensorImpl*> dirty_inputs_;
  std::vector<torch::autograd::SavedVariable> saved_variables_;
  variable_list to_save_;
  bool materialize_grads_{true};

  // The CppNode in the autograd graph that owns this AutogradContext. We need a
  // weak_ptr to avoid a refcycle. Since grad_fn_ owns this AutogradContext, it
  // will always be alive when we want to use it.
  std::weak_ptr<Node> grad_fn_;
  bool has_freed_buffers_{false};

  void save_variables();

  template <class T>
  friend struct CppNode;
};

struct TORCH_API VariableInfo {
  explicit VariableInfo();
  explicit VariableInfo(const Variable& var);

  Variable zeros(at::OptionalDeviceGuard& device_guard) const;

  at::Layout layout = at::Layout::Strided;
  at::Device device = at::kCPU;
  at::ScalarType scalar_type = at::kFloat;
  std::vector<c10::SymInt> size;
  bool requires_grad;
  bool is_empty;
};

// CppNode<T> is the Node in the autograd graph that represents the user defined
// backward function for Function<T>. Calls to CppNode::apply are forwarded to
// T::backward().
template <class T>
struct CppNode : public Node {
  variable_list apply(variable_list&& inputs) override;
  AutogradContext ctx_;
  std::vector<bool> is_variable_input_;
  std::vector<VariableInfo> input_info_;
  std::vector<VariableInfo> output_info_;

  void release_variables() override;

  void set_ctx_grad_fn(const std::shared_ptr<Node>& node);
  void save_variables_to_ctx();
};

struct ExtractVariables : IterArgs<ExtractVariables> {
  std::vector<bool>& is_var_;
  variable_list& list_;
  ExtractVariables(std::vector<bool>& is_var, variable_list& list)
      : is_var_(is_var), list_(list) {}
  void operator()(const c10::optional<at::Tensor>& x) {
    // NOLINTNEXTLINE(bugprone-branch-clone)
    if (x.has_value() && x.value().defined()) {
      is_var_.push_back(true);
      list_.emplace_back(x.value());
    } else {
      is_var_.push_back(false);
    }
  }
  void operator()(const at::Tensor& x) {
    is_var_.push_back(true);
    list_.emplace_back(x);
  }
  void operator()(const at::TensorList& list) {
    for (const at::Tensor& x : list) {
      is_var_.push_back(true);
      list_.emplace_back(x);
    }
  }
  template <typename T>
  void operator()(const T& x) {
    is_var_.push_back(false);
  }
};

template <typename... Args>
inline void extract_vars(
    std::vector<bool>& is_var,
    variable_list& list,
    Args&&... args) {
  ExtractVariables(is_var, list).apply(std::forward<Args>(args)...);
}

template <typename T>
typename std::enable_if<std::is_same<T, variable_list>::value, T>::type
to_output_type(std::vector<c10::optional<Variable>>& output_list) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  variable_list result;
  std::transform(
      output_list.begin(),
      output_list.end(),
      std::back_inserter(result),
      [](const c10::optional<Variable>& var) { return *var; });
  return result;
}

template <typename T>
typename std::enable_if<std::is_same<T, Variable>::value, T>::type
to_output_type(std::vector<c10::optional<Variable>>& output_list) {
  return *output_list[0];
}

inline std::vector<c10::optional<Variable>> to_optional(Variable& output) {
  return std::vector<c10::optional<Variable>>{output};
}

inline std::vector<c10::optional<Variable>> to_optional(variable_list& output) {
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  std::vector<c10::optional<Variable>> result;
  std::transform(
      output.begin(),
      output.end(),
      std::back_inserter(result),
      [](const Variable& var) { return var; });
  return result;
}

template <class T>
template <typename X, typename... Args>
auto Function<T>::apply(Args&&... args)
    -> std::enable_if_t<std::is_same<X, T>::value, forward_t<X, Args...>> {
  const auto& functorch_tls = at::functorch::functorchTLSAccessor();
  if (functorch_tls) {
    // Function support for functorch is handled in Python.
    // Here we are dealing with a (C++) Function, which is not supported.
    // Let's raise an error instead of being silently incorrect.
    functorch_tls->checkSupportsCppAutogradFunction();
  }

  std::shared_ptr<CppNode<T>> node(new CppNode<T>(), deleteNode);
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  variable_list input_vars;

  const size_t num_inputs = sizeof...(Args);
  input_vars.reserve(num_inputs);
  node->is_variable_input_.reserve(num_inputs);
  // TODO Add tracing here
  extract_vars(node->is_variable_input_, input_vars, args...);

  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  bool is_executable =
      GradMode::is_enabled() && any_variable_requires_grad(input_vars);
  auto next_edges =
      (is_executable ? collect_next_edges(input_vars) : edge_list());
  node->set_ctx_grad_fn(node);
  node->set_next_edges(std::move(next_edges));
  node->clear_input_metadata();

  node->input_info_.reserve(input_vars.size());
  for (auto& var : input_vars) {
    node->input_info_.emplace_back(var);
  }

  using forward_return_t = forward_t<X, Args...>;
  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
  forward_return_t outputs;
  {
    AutoGradMode grad_mode(false);
    outputs = T::forward(&node->ctx_, std::forward<Args>(args)...);
  }

  _jvp_fn_t jvp_fn = [](const variable_list& inputs,
                        const variable_list& gI) -> variable_list {
    TORCH_CHECK(
        false,
        "jvp is not implemented for the c++ API of custom Function yet.",
        "Please open a feature request on GitHub if you need this.");
  };

  auto view_as_self_fn = [](const at::Tensor& x) -> at::Tensor {
    return x.view_as(x);
  };

  auto wrapped_outputs = _wrap_outputs(
      input_vars,
      node->ctx_.get_non_differentiable(),
      node->ctx_.get_and_bump_dirty(),
      to_optional(outputs),
      is_executable ? node : nullptr,
      jvp_fn,
      {},
      view_as_self_fn);

  node->output_info_.reserve(wrapped_outputs.size());
  for (auto& output : wrapped_outputs) {
    if (is_executable && output.has_value()) {
      node->output_info_.emplace_back(output.value());
    } else if (is_executable) {
      node->output_info_.emplace_back();
    }
  }

  if (is_executable) {
    node->save_variables_to_ctx();
  }

  // wrapped_outputs will be a variable_list so, convert it to the correct
|
344 |
+
// return type. Only Variable and variable_list are accepted as return types.
|
345 |
+
return to_output_type<forward_return_t>(wrapped_outputs);
|
346 |
+
}
|
347 |
+
|
348 |
+
// The logic here is the same as PyNode::apply, so changes to it should be done
|
349 |
+
// in both the places
|
350 |
+
template <class T>
|
351 |
+
variable_list CppNode<T>::apply(variable_list&& inputs) {
|
352 |
+
at::OptionalDeviceGuard _device_guard;
|
353 |
+
|
354 |
+
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
|
355 |
+
int num_inputs = inputs.size();
|
356 |
+
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
|
357 |
+
variable_list backward_inputs;
|
358 |
+
backward_inputs.reserve(num_inputs);
|
359 |
+
for (const auto i : c10::irange(num_inputs)) {
|
360 |
+
if (inputs[i].defined() || !ctx_.materialize_grads_) {
|
361 |
+
backward_inputs.emplace_back(inputs[i]);
|
362 |
+
} else {
|
363 |
+
backward_inputs.emplace_back(output_info_[i].zeros(_device_guard));
|
364 |
+
}
|
365 |
+
}
|
366 |
+
|
367 |
+
// Acquire lock to here protect thread safety on custom C++ Autograd Node
|
368 |
+
// This is needed for the custom Autograd Node since we don't know if the
|
369 |
+
// user defined Node will write to the shared data during backward.
|
370 |
+
// see Note [Thread Safety on Autograd Node]
|
371 |
+
std::lock_guard<std::mutex> lock(mutex_);
|
372 |
+
|
373 |
+
auto outputs = T::backward(&ctx_, backward_inputs);
|
374 |
+
|
375 |
+
const auto num_forward_inputs =
|
376 |
+
static_cast<int64_t>(is_variable_input_.size());
|
377 |
+
auto num_outputs = static_cast<int64_t>(outputs.size());
|
378 |
+
// Returning too many results is ok, but only as long as they're all
|
379 |
+
// undefined. Truncate the result vector in that case.
|
380 |
+
if (num_outputs > num_forward_inputs) {
|
381 |
+
bool all_undef = true;
|
382 |
+
for (const auto i : c10::irange(num_forward_inputs, num_outputs)) {
|
383 |
+
all_undef &= (!outputs[i].defined());
|
384 |
+
}
|
385 |
+
if (all_undef) {
|
386 |
+
outputs.resize(num_forward_inputs);
|
387 |
+
num_outputs = num_forward_inputs;
|
388 |
+
}
|
389 |
+
}
|
390 |
+
|
391 |
+
if (num_outputs != num_forward_inputs) {
|
392 |
+
std::string msg("function ");
|
393 |
+
msg += name() + " returned an incorrect number of gradients (expected ";
|
394 |
+
msg += c10::to_string(num_forward_inputs) + ", got ";
|
395 |
+
msg += c10::to_string(num_outputs) + ")";
|
396 |
+
throw std::runtime_error(msg);
|
397 |
+
}
|
398 |
+
|
399 |
+
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
|
400 |
+
variable_list results;
|
401 |
+
results.reserve(num_outputs);
|
402 |
+
for (const auto i : c10::irange(num_outputs)) {
|
403 |
+
if (!is_variable_input_[i]) {
|
404 |
+
if (outputs[i].defined()) {
|
405 |
+
std::string msg("function ");
|
406 |
+
msg += name() +
|
407 |
+
" returned a gradient different that is defined at position ";
|
408 |
+
msg += c10::to_string(i + 1) +
|
409 |
+
", but the corresponding forward input was not a Variable";
|
410 |
+
throw std::runtime_error(msg);
|
411 |
+
}
|
412 |
+
continue;
|
413 |
+
}
|
414 |
+
results.emplace_back(outputs[i]);
|
415 |
+
}
|
416 |
+
return results;
|
417 |
+
}
|
418 |
+
|
419 |
+
template <class T>
|
420 |
+
void CppNode<T>::release_variables() {
|
421 |
+
// lock to ensure thread safety, see [Thread Safety on Autograd Node]
|
422 |
+
std::lock_guard<std::mutex> lock(mutex_);
|
423 |
+
ctx_.saved_variables_.clear();
|
424 |
+
ctx_.has_freed_buffers_ = true;
|
425 |
+
}
|
426 |
+
|
427 |
+
template <class T>
|
428 |
+
void CppNode<T>::save_variables_to_ctx() {
|
429 |
+
ctx_.save_variables();
|
430 |
+
}
|
431 |
+
|
432 |
+
template <class T>
|
433 |
+
void CppNode<T>::set_ctx_grad_fn(const std::shared_ptr<Node>& node) {
|
434 |
+
ctx_.grad_fn_ = node;
|
435 |
+
}
|
436 |
+
|
437 |
+
} // namespace autograd
|
438 |
+
} // namespace torch
|
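For orientation, the header above is the template machinery behind user-defined C++ autograd functions: Function<T>::apply runs T::forward under AutoGradMode(false), wraps the outputs, and installs a CppNode<T> whose apply later dispatches to T::backward. A minimal usage sketch; the Square op and shapes are illustrative, only the torch::autograd::Function / AutogradContext API comes from this header and the public docs:

#include <torch/torch.h>

using torch::autograd::AutogradContext;
using torch::autograd::Function;
using torch::autograd::variable_list;

// Illustrative custom op: y = x * x, dy/dx = 2 * x * grad_out.
struct Square : public Function<Square> {
  static torch::Tensor forward(AutogradContext* ctx, torch::Tensor x) {
    ctx->save_for_backward({x});  // saved into the CppNode's context when executable
    return x * x;
  }
  static variable_list backward(AutogradContext* ctx, variable_list grad_output) {
    auto saved = ctx->get_saved_variables();
    return {2 * saved[0] * grad_output[0]};  // one gradient per forward input
  }
};

// Usage: Square::apply(x) routes through Function<T>::apply above, and
// x.grad() is filled in by CppNode<Square>::apply calling Square::backward.
//   auto x = torch::randn({3}, torch::requires_grad());
//   Square::apply(x).sum().backward();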
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/edge.h
ADDED
@@ -0,0 +1,58 @@
#pragma once

#include <cstdint>
#include <functional>
#include <memory>

#include <c10/util/hash.h>

namespace torch {
namespace autograd {

struct Node;

/// Represents a particular input of a function.
struct Edge {
  Edge() noexcept : function(nullptr), input_nr(0) {}

  Edge(std::shared_ptr<Node> function_, uint32_t input_nr_) noexcept
      : function(std::move(function_)), input_nr(input_nr_) {}

  /// Convenience method to test if an edge is valid.
  bool is_valid() const noexcept {
    return function != nullptr;
  }

  // Required for use in associative containers.
  bool operator==(const Edge& other) const noexcept {
    return this->function == other.function && this->input_nr == other.input_nr;
  }

  bool operator!=(const Edge& other) const noexcept {
    return !(*this == other);
  }

  /// The function this `Edge` points to.
  std::shared_ptr<Node> function;

  /// The identifier of a particular input to the function.
  uint32_t input_nr;
};
} // namespace autograd
} // namespace torch

// The idiomatic way of enabling use of a custom type as the key of hash
// containers in C++11. This method removes the requirement of having to pass
// a custom hasher to std::unordered_{map, set}.
// See http://en.cppreference.com/w/cpp/utility/hash for more information.
namespace std {
template <>
struct hash<torch::autograd::Edge> {
  // These type aliases are required by the standard.
  using argument_type = torch::autograd::Edge;
  using return_type = size_t;
  return_type operator()(const argument_type& edge) const noexcept {
    return c10::get_hash(edge.function, edge.input_nr);
  }
};
} // namespace std
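Thanks to the std::hash<torch::autograd::Edge> specialization above, Edge can be used directly as a key in unordered containers. A small sketch under that assumption (the helper name mark_seen is illustrative, and Node construction is elided since Node is abstract):

#include <torch/csrc/autograd/edge.h>
#include <memory>
#include <unordered_set>

using torch::autograd::Edge;
using torch::autograd::Node;

// Deduplicate (function, input_nr) pairs without passing a custom hasher.
std::unordered_set<Edge> seen_edges;

void mark_seen(const std::shared_ptr<Node>& fn, uint32_t input_nr) {
  seen_edges.insert(Edge(fn, input_nr));  // hashed via c10::get_hash(function, input_nr)
}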
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/engine.h
ADDED
@@ -0,0 +1,295 @@
#pragma once

// Engine implements backpropagation from output variables and their gradients
// to "root" variables (variables created by the user with requires_grad=True).

#include <ATen/Tensor.h>
#include <ATen/ThreadLocalState.h>
#include <ATen/core/ivalue.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/anomaly_mode.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/functions/basic_ops.h>
#include <torch/csrc/autograd/graph_task.h>
#include <torch/csrc/autograd/input_buffer.h>
#include <torch/csrc/autograd/saved_variable_hooks.h>
#include <torch/csrc/autograd/utils/warnings.h>

#include <c10/util/CallOnce.h>

#include <deque>
#include <exception>
#include <functional>
#include <memory>
#include <queue>
#include <thread>
#include <unordered_map>
#include <utility>
#include <vector>

namespace torch {
namespace autograd {
struct ReadyQueue;
}
} // namespace torch

namespace torch {
namespace autograd {

// Maximum reentrant backward depth before switching to a new thread.
// This limit is based on TSAN's deadlock detector, which fails if a program
// holds more than 65 locks in one thread at once. As we hold a mutex in every
// one of our custom C++ autograd Nodes, we would like to avoid TSAN complaints
// about this when doing reentrant backwards.
// For reference, see https://github.com/google/sanitizers/issues/950
static constexpr int MAX_DEPTH = 60;

void set_device(int device);
TORCH_API void validate_outputs(
    const edge_list& edges,
    variable_list& grads,
    const std::function<std::string(const std::string&)>& format_error);

struct NodeTask {
  std::weak_ptr<GraphTask> base_;
  std::shared_ptr<Node> fn_;
  // This buffer serves as an implicit "addition" node for all of the
  // gradients flowing here. Once all the dependencies are finished, we
  // use the contents of this buffer to run the function.
  InputBuffer inputs_;
  // When a worker receives a task with isShutdownTask = true, it will immediately
  // exit. The engine sends a shutdown task to every queue upon its destruction.
  bool isShutdownTask_;

  int getReentrantDepth() const;

  NodeTask(
      std::weak_ptr<GraphTask> base,
      std::shared_ptr<Node> fn,
      InputBuffer inputs,
      bool isShutdownTask = false)
      : base_(std::move(base)),
        fn_(std::move(fn)),
        inputs_(std::move(inputs)),
        isShutdownTask_(isShutdownTask) {}
};

// Guard that sets and restores checkpoint_valid
class CheckpointValidGuard {
 public:
  explicit CheckpointValidGuard(
      const std::shared_ptr<const GraphTask>& graph_task);
  ~CheckpointValidGuard();

 private:
  bool prev_checkpoint_valid_state;
};

struct ReadyQueue {
 private:
  // Returns true when t2 should be (weakly) BEFORE t1 in the queue.
  // Shutdown tasks come first, and empty NodeTasks come next.
  struct CompareNodeTaskTime {
    bool operator()(NodeTask const& t1, NodeTask const& t2) {
      // NOLINTNEXTLINE(bugprone-branch-clone)
      if (t2.isShutdownTask_) {
        return true;
      } else if (!t1.fn_ || t1.isShutdownTask_) {
        return false;
      } else if (!t2.fn_) {
        return true;
      } else if (t1.getReentrantDepth() == t2.getReentrantDepth()) {
        return t1.fn_->sequence_nr() < t2.fn_->sequence_nr();
      } else {
        return t1.getReentrantDepth() < t2.getReentrantDepth();
      }
    }
  };

  // To notify threads waiting on the ReadyQueue of available tasks on the heap_
  std::condition_variable not_empty_;
  // To protect reads and writes to heap_
  mutable std::mutex mutex_;

  std::priority_queue<NodeTask, std::vector<NodeTask>, CompareNodeTaskTime>
      heap_;

 public:
  // incrementOutstandingTasks indicates whether or not we should increment
  // 'outstanding_tasks_' for the associated GraphTask. This should mostly
  // always be true and is only set false in certain cases (see docs for
  // DistEngine.execute_graph_task_until_ready_queue_empty)
  void push(NodeTask item, bool incrementOutstandingTasks = true);
  void pushShutdownTask();
  NodeTask pop();
  bool empty() const;
  size_t size() const;
};

// A single instance of this struct should be created through the whole process
// lifetime. The worker thread creation logic and Engine's destructor rely on
// this.
struct TORCH_API Engine {
  /// Returns a reference to a static `Engine` instance.
  static Engine& get_default_engine();

  static Engine& get_base_engine();

  // compiled_autograd needs to live in a different .so file so that it
  // can have python symbols, so we add a layer of indirection
  // see [Note: Compiled Autograd]
  typedef variable_list (*compiled_autograd_fn)(
      const std::shared_ptr<Node>& graph_root,
      GraphTask& graph_task,
      bool accumulate_grad,
      const edge_list& outputs);
  static void set_compiled_autograd(compiled_autograd_fn fn);

  Engine(const Engine&) = delete;
  Engine(Engine&&) = delete;
  virtual ~Engine();

  // Given a list of (Node, input number) pairs computes the value of the graph
  // by following next_edge references.
  virtual variable_list execute(
      const edge_list& roots,
      const variable_list& inputs,
      bool keep_graph,
      bool create_graph,
      bool accumulate_grad,
      const edge_list& outputs = {});

  // Given a pre-populated GraphTask and GraphRoot, computes the backward pass
  // for the graph.
  //
  // NB: This API should only be used by internal autograd specific
  // machinery and shouldn't be exposed to users in any way.
  virtual c10::intrusive_ptr<at::ivalue::Future> execute_with_graph_task(
      const std::shared_ptr<GraphTask>& graph_task,
      std::shared_ptr<Node> graph_root,
      InputBuffer&& input_buffer);

  virtual std::unique_ptr<AnomalyMetadata> make_anomaly_metadata() {
    return std::make_unique<AnomalyMetadata>();
  }

  virtual std::unique_ptr<SavedVariableHooks> get_default_saved_variable_hooks() {
    return nullptr;
  }

  // We pass cpu_ready_queue to evaluate_function, so that it knows
  // the correct ready queue to push to after a NodeTask is ready
  void evaluate_function(
      std::shared_ptr<GraphTask>& graph_task,
      Node* func,
      InputBuffer& inputs,
      const std::shared_ptr<ReadyQueue>& cpu_ready_queue);

  void initialize_device_threads_pool();
  virtual void thread_on_exception(
      std::shared_ptr<GraphTask> graph_task,
      const std::shared_ptr<Node>& fn,
      std::exception& e);

  void queue_callback(std::function<void()> callback);

  bool is_checkpoint_valid();

  // Should be called after fork to notify that worker threads are gone
  void release_workers();

  // Must be called by subclass before destructing to avoid a data-race-on-vptr.
  void stop();

  // Initializes a device thread for the autograd engine.
  virtual void thread_init(
      int device,
      const std::shared_ptr<ReadyQueue>& ready_queue,
      bool should_increment = true);

 protected:
  Engine();
  void compute_dependencies(Node* root, GraphTask& task, uint64_t min_topo_nr);

  // initialize the thread local ready queue with the ready queue that is
  // created elsewhere (i.e. thread_init, Engine::execute, etc), or create a new
  // ready queue if ready_queue is not provided.
  void init_local_ready_queue(
      std::shared_ptr<ReadyQueue> ready_queue = nullptr);

  std::shared_ptr<ReadyQueue> ready_queue(
      std::shared_ptr<ReadyQueue> cpu_ready_queue,
      at::Device device);
  std::shared_ptr<ReadyQueue> ready_queue_by_index(
      std::shared_ptr<ReadyQueue> cpu_ready_queue,
      int device_index);
  // start device threads (CUDA, XLA, etc.) in Engine,
  // note that it does NOT start the CPU thread.
  void start_device_threads();
  void increment_non_reentrant_thread_count();
  void decrement_non_reentrant_thread_count();
  virtual void thread_main(const std::shared_ptr<GraphTask>& task);
  void reentrant_thread_init();
  void add_thread_pool_task(const std::weak_ptr<GraphTask>& graph_task);

  // Ensures device_ready_queues_ are initialized only once
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  c10::once_flag start_device_threads_flag_;
  // Safe to read device_ready_queues_ without synchronization after
  // initialization
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::vector<std::shared_ptr<ReadyQueue>> device_ready_queues_;

  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::vector<std::function<void()>> final_callbacks_;
  // To protect reads and writes to final_callbacks_
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::mutex post_callbacks_lock_;

  // How many nested reentrant calls are allowed until a new thread is used
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  int max_recursion_depth_;

  struct ThreadPoolShared {
    // Data structures used by the threads for executing reentrant backwards
    // tasks. See Note [Reentrant backwards]
    // Number of available threads for processing new GraphTasks.
    unsigned int num_workers_{0};
    // The threads will wait on work_ to be notified of GraphTasks
    std::condition_variable work_;
    // To protect reads and writes to graphtask_queue_ and num_workers_
    // and for synchronizing creating new threads when needed
    std::mutex mutex_;
    // Workers will process the GraphTasks added to this queue. A GraphTask is
    // allocated inside Engine::execute and lives for the duration of execute
    std::queue<std::weak_ptr<GraphTask>> graphtasks_queue_;

    ThreadPoolShared() = default;
  };

  // Temporary workaround until shutting down threads is done
  // We need shared ownership of all these objects because the threads are
  // leaked when Engine shuts down, so there may be threads waiting on work_ for
  // the graphtasks_queue_ to be nonempty.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::shared_ptr<ThreadPoolShared> thread_pool_shared_;

 private:
  // Number of non-reentrant threads
  std::atomic<uint32_t> non_reentrant_device_thread_count_;
  // Destructor will wait for non-reentrant threads to finish
  std::condition_variable non_reentrant_device_thread_condvar_;
  std::mutex non_reentrant_device_thread_mutex_;
  // stop() must be called before the destruction path goes down to the base
  // class, in order to avoid a data-race-on-vptr. Use this boolean to guard
  // whether stop() has already been called, so we can call this in every
  // destructor of the class hierarchy.
  bool stopped_{false};
};

// allow python_engine to override the default engine when it loads
using EngineStub = Engine& (*)();
TORCH_API void set_default_engine_stub(EngineStub stub);

} // namespace autograd
} // namespace torch
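One detail worth calling out in the header above: CompareNodeTaskTime is the "less-than" for std::priority_queue, whose top() is the comparator-greatest element, so answering "should t2 come before t1?" makes shutdown tasks pop first, then tasks at greater reentrant depth, then higher sequence_nr. A standalone sketch of just that std::priority_queue semantic with plain ints (not engine code, only the standard-library behavior being relied on):

#include <cassert>
#include <queue>
#include <vector>

// comp(a, b) == true means "b is ranked above a":
// std::priority_queue::top() returns the comp-greatest element.
struct PopLargerFirst {
  bool operator()(int a, int b) const { return a < b; }
};

int main() {
  std::priority_queue<int, std::vector<int>, PopLargerFirst> q;
  q.push(1);
  q.push(3);
  q.push(2);
  assert(q.top() == 3);  // 3 pops first, mirroring how shutdown/deeper tasks win in ReadyQueue
  return 0;
}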
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/forward_grad.h
ADDED
@@ -0,0 +1,212 @@
#pragma once

#include <ATen/core/Tensor.h>
#include <unordered_set>

namespace torch {
namespace autograd {

// [ Using ForwardGrad ]
// ForwardGrad needs to be a shared_ptr to satisfy constraints of its inner
// design. But this shared_ptr must be uniquely associated with the object that
// stores it (as of writing, either AutogradMeta or SavedVariable). This object
// is called the "owning object" in the discussions below. This owning object
// must call `ForwardGrad::clear()` when it is destroyed to ensure that the
// ForwardGrad is properly de-allocated.

struct ForwardGrad;

// This file contains two classes that are used to store forward AD gradients
// and ensure that they are scoped properly. Because forward AD runs
// concurrently with the evaluation of the function, we need a mechanism to
// separate different forward AD invocations and be able to compute the right
// gradients. We model such invocations as levels here. The particular scoping
// issue mentioned above has two main drivers:
// - Ensure that we can conveniently use forward AD within a high level API
//   without leaking the forward AD states outside.
// - Ensure that we can keep the level that we expose to the user API simple
//   (an integer that represents the nesting depth) while avoiding confusion
//   when the level index is re-used.

// The important external APIs from this file are:
// - ForwardADLevel::get_next_idx() that can be used to enter a new level and
//   get its index
// - ForwardADLevel::release_idx() that can be used to exit a given level.
// - ForwardGrad() can be used to store a given forward gradient that will
//   handle the level tracking automatically.

// The basic implementation strategy is as follows:
// Every tensor has a ForwardGrad, maintaining a map from levels to tangents.
// ForwardGrad is responsible for registering itself to the appropriate
// ForwardADLevel when a new tangent is added to it via ForwardGrad::set_value
// and to un-register itself from this same level if that tangent is removed via
// ForwardGrad::reset. The ForwardADLevel is created when a new level is entered
// via ForwardADLevel::get_next_idx. A reference to the new ForwardADLevel is
// stored into a global (for the whole process) vector that ensures it can be
// accessed via ForwardADLevel::get_by_idx. This reference is deleted when the
// index is released by the user when calling ForwardADLevel::release_idx. When
// it is destructed, the ForwardADLevel is responsible for clearing all the
// tangents for its level stored in all the ForwardGrads that registered with it.
//
// This process-wide level design, compared to a thread local one, allows us to
// use a very simple user-facing handle for the level (an int) while enabling
// cross-thread forward AD. The only required synchronization for the user is
// when entering and exiting the levels. Some discussion on alternative designs
// is in https://github.com/pytorch/pytorch/pull/49097#discussion_r543716453 and
// can be refined in the future.

// Correctness of concurrency:
// Each class uses its own lock when reading or modifying internal storages.
// This allows in particular to safely remove tangents from ForwardGrad when the
// ForwardADLevel is being exited. We ensure no deadlock by ensuring that a
// method never calls into another class's method while the local class's lock
// is held except in one single case: calling from ForwardADLevel's destructor
// into ForwardGrad::reset with update_level=false.

// The lifetime of these objects is as follows:
// The ForwardADLevel can be in three states:
// - Initialized: where one of its references is held by the global vector
//   and there may be more references held by temporary variables in
//   ForwardGrad's methods.
// - About to be destructed: where "release_idx" has been called and the
//   only reason for the ForwardADLevel not to be destructed right away is
//   that some methods in ForwardGrad have an owning reference to it. This is
//   done so that a ForwardADLevel can never be destructed when a ForwardGrad
//   is registered with it and in the process of adding something to its
//   internal state.
// - Being destructed: here the ForwardADLevel is not referenced anymore
//   and can safely reset all of the ForwardGrads. Note that we can have more
//   than one reset being called here (which is ok) but we are guaranteed that
//   there is at least one.
// The ForwardGrad is simpler as there is no intermediary state and no special
// destructor for it. The logic to unregister it from the different ForwardADLevels
// is done when the owning object (AutogradMeta or SavedVariable) is being
// destroyed.

// Other considered design:
// To avoid having the ForwardGrad::clear, we considered storing weak_ptrs inside
// the ForwardADLevel. While this would work, it would mean that the set inside
// the ForwardADLevel would only grow unless we do an expensive linear scan to
// remove all the dangling weak pointers. Hence this approach was not used.

// Data structures in this file are optimized for this maximum number of levels.
// The number of levels corresponds to the degree of the gradient being
// computed using forward AD and we don't expect more than second order
// gradients to be common.
#define EXPECTED_MAX_LEVEL 2

struct TORCH_API ForwardADLevel {
  ForwardADLevel(uint64_t idx) : idx_(idx) {}
  ~ForwardADLevel();

  static uint64_t get_next_idx();
  static void release_idx(uint64_t idx);
  static std::shared_ptr<ForwardADLevel> get_by_idx(uint64_t idx);
  static std::shared_ptr<ForwardADLevel> try_get_by_idx(uint64_t idx);

  void erase(const std::shared_ptr<ForwardGrad>& grad) {
    std::lock_guard<std::mutex> lock(mutex_);
    grads_.erase(grad);
  }

  void insert(const std::shared_ptr<ForwardGrad>& grad) {
    std::lock_guard<std::mutex> lock(mutex_);
    grads_.insert(grad);
  }

 private:
  std::unordered_set<std::shared_ptr<ForwardGrad>> grads_;
  std::mutex mutex_;
  uint64_t idx_;
};

struct TORCH_API ForwardGrad : std::enable_shared_from_this<ForwardGrad> {
  ForwardGrad() = default;

  // This function must only be called when AutogradMeta or SavedVariable is
  // being destructed as it ensures that:
  // - The only (potential) other references to this ForwardGrad are the
  //   different levels it is registered to
  // - No other thread will try to call `set_value` or `value` ever from now on
  // - Any of the ForwardADLevels that this ForwardGrad is registered with
  //   might call `reset` at any point during this function
  void clear() {
    c10::SmallVector<uint64_t, EXPECTED_MAX_LEVEL> levels_idx;

    {
      std::lock_guard<std::mutex> lock(mutex_);
      for (auto& c : content_) {
        levels_idx.push_back(c.first);
      }
    }

    for (auto l_idx : levels_idx) {
      // Use "try" version here as another thread might have deleted this
      // level before we got here
      // This is an owning reference as we want to keep the level alive
      // until we successfully unregister ourselves
      auto level = ForwardADLevel::try_get_by_idx(l_idx);
      if (level) {
        level->erase(shared_from_this());
      }
    }
  }

  void set_value(const at::Tensor& value, uint64_t level) {
    // Owning reference to ensure the forward_level is not destroyed
    // while we are updating our internal state
    auto forward_level = ForwardADLevel::get_by_idx(level);
    forward_level->insert(shared_from_this());

    std::lock_guard<std::mutex> lock(mutex_);
    content_.insert({level, value});
  }

  // This function removes the tangent for a given level from this ForwardGrad
  // Use the update_level flag to disable notifying the level about this reset
  // This flag is most notably used by the ForwardADLevel destructor.
  void reset(uint64_t level, bool update_level = true) {
    if (update_level) {
      ForwardADLevel::get_by_idx(level)->erase(shared_from_this());
    }

    std::unique_lock<std::mutex> lock(mutex_);
    const auto& it = content_.find(level);
    TORCH_INTERNAL_ASSERT(
        it != content_.end(), "Resetting a non-existent level.");
    // Keep the Tensor alive until we have released the lock
    // This is needed as we can be in a case where this function is called by
    // the ForwardADLevel destructor
    auto t = (*it).second;
    content_.erase(level);
    lock.unlock();
  }

  const at::Tensor& value(uint64_t level) const;

  bool contains(uint64_t level) {
    std::lock_guard<std::mutex> lock(mutex_);
    return content_.count(level) > 0;
  }

  bool empty() const {
    return content_.empty();
  }

  static const at::Tensor& undef_grad();

 private:
  // TODO(albanD): replace this with a SmallVector
  std::unordered_map<uint64_t, at::Tensor> content_;
  mutable std::mutex mutex_;
};

} // namespace autograd
} // namespace torch
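The level/tangent registration described in the comments above can be exercised directly with these declarations; a hedged sketch of the internal API only (user code would normally go through the public forward-AD entry points rather than these structs, and the function name below is illustrative):

#include <torch/csrc/autograd/forward_grad.h>
#include <ATen/ATen.h>
#include <memory>

using torch::autograd::ForwardADLevel;
using torch::autograd::ForwardGrad;

void forward_grad_roundtrip() {
  uint64_t level = ForwardADLevel::get_next_idx();  // enter a new forward-AD level
  auto fgrad = std::make_shared<ForwardGrad>();
  fgrad->set_value(at::ones({2, 2}), level);        // registers fgrad with the level, stores the tangent
  bool present = fgrad->contains(level);            // true while the tangent is set
  (void)present;
  ForwardADLevel::release_idx(level);               // exiting the level resets the tangents it tracked
  fgrad->clear();                                   // what the owning object does on its own destruction
}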
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function.h
ADDED
@@ -0,0 +1,761 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <torch/csrc/autograd/anomaly_mode.h>
|
4 |
+
#include <torch/csrc/autograd/edge.h>
|
5 |
+
#include <torch/csrc/autograd/grad_mode.h>
|
6 |
+
#include <torch/csrc/autograd/graph_task.h>
|
7 |
+
#include <torch/csrc/autograd/input_metadata.h>
|
8 |
+
#include <torch/csrc/autograd/saved_variable.h>
|
9 |
+
#include <torch/csrc/autograd/variable.h>
|
10 |
+
#include <torch/csrc/utils/python_stub.h>
|
11 |
+
#include <torch/csrc/utils/variadic.h>
|
12 |
+
|
13 |
+
#include <ATen/SequenceNumber.h>
|
14 |
+
#include <ATen/core/Tensor.h>
|
15 |
+
#include <ATen/record_function.h>
|
16 |
+
#include <c10/util/Exception.h>
|
17 |
+
#include <c10/util/irange.h>
|
18 |
+
|
19 |
+
#include <algorithm>
|
20 |
+
#include <cstdint>
|
21 |
+
#include <initializer_list>
|
22 |
+
#include <memory>
|
23 |
+
#include <string>
|
24 |
+
#include <utility>
|
25 |
+
#include <vector>
|
26 |
+
|
27 |
+
namespace torch {
|
28 |
+
namespace autograd {
|
29 |
+
|
30 |
+
struct Edge;
|
31 |
+
struct FunctionPostHook;
|
32 |
+
struct FunctionPreHook;
|
33 |
+
|
34 |
+
using tensor_list = std::vector<at::Tensor>;
|
35 |
+
using variable_list = std::vector<Variable>;
|
36 |
+
using edge_list = std::vector<Edge>;
|
37 |
+
using saved_variable_list = std::vector<SavedVariable>;
|
38 |
+
using IndexRange = std::pair<size_t, size_t>;
|
39 |
+
using torch::dynamo::autograd::CompiledNodeArgs;
|
40 |
+
using torch::dynamo::autograd::SwapSavedVariables;
|
41 |
+
|
42 |
+
// Custom deleter to prevent stack overflows.
|
43 |
+
TORCH_API void deleteNode(Node* function);
|
44 |
+
|
45 |
+
// Guard that sets and restores the evaluating node
|
46 |
+
class NodeGuard {
|
47 |
+
public:
|
48 |
+
explicit NodeGuard(std::shared_ptr<Node> node);
|
49 |
+
~NodeGuard();
|
50 |
+
|
51 |
+
private:
|
52 |
+
std::shared_ptr<Node> last_evaluating_node_;
|
53 |
+
};
|
54 |
+
|
55 |
+
// Return the Node currently being evaluated (if any)
|
56 |
+
// This is only set during the backward pass while a Node is being
|
57 |
+
// executed.
|
58 |
+
TORCH_API std::shared_ptr<Node> get_current_node();
|
59 |
+
|
60 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
61 |
+
// Node
|
62 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
63 |
+
// A `Node` is an abstract class that represents an operation taking zero
|
64 |
+
// or more input `Variable`s and producing zero or more output `Variable`s. All
|
65 |
+
// functions in PyTorch's autograd machinery derive from this class and
|
66 |
+
// override its `apply` method. Instances of such subclasses will then be
|
67 |
+
// invokeable via the call operator.
|
68 |
+
//
|
69 |
+
// Nodes in the Autograd Graph
|
70 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
71 |
+
// When viewing the autograd system as a graph, `Node`s are the vertices or
|
72 |
+
// nodes, connected to each other via (directed) `Edge`s, which themselves are
|
73 |
+
// represented via (`Node`, input_nr) pairs. `Variable`s are the outputs to
|
74 |
+
// and inputs of `Node`s, and travel between these edges during execution
|
75 |
+
// of the graph. When two or more `Edge`s (from different sources) point at the
|
76 |
+
// same input to a `Node`, the values produced along all of these edges are
|
77 |
+
// implicitly summed prior to being forwarded to the target `Node`.
|
78 |
+
//
|
79 |
+
// Hierarchy
|
80 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
81 |
+
// Subclasses usually represent differentiable functions as well as their
|
82 |
+
// gradient operators. Note, however, that due to the very general definition
|
83 |
+
// of a `Node` taking *zero* or more inputs and producing *zero* or more
|
84 |
+
// outputs, uses of `Node`s are flexible and extend beyond purely
|
85 |
+
// mathematical operations. For example, the `AccumulateGrad` function is a
|
86 |
+
// *sink*: it takes one input, but produces no outputs, instead accumulating
|
87 |
+
// the input as a side effect. At the other extreme, the `GraphRoot` function
|
88 |
+
// receives no inputs from other functions, but produces multiple outputs.
|
89 |
+
//
|
90 |
+
// Interface
|
91 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
92 |
+
// The most important method on `Node` is the call operator, which takes in
|
93 |
+
// a list of variables and produces a list of variables. The precise size of
|
94 |
+
// these lists can be determined with `num_inputs()` and `num_outputs()`.
|
95 |
+
// `Node`s are stitched together via their `next_edge` interface, which let
|
96 |
+
// you manipulate the set of outgoing edges of a `Node`. You can add an
|
97 |
+
// edge with `add_next_edge()`, retrieve an edge with `next_edge(index)` and
|
98 |
+
// iterate over them via the `next_edges()` method. Other methods exist for
|
99 |
+
// integration with the JIT and other parts of PyTorch. Every `Node` has a
|
100 |
+
// *sequence number* that increases monotonically in the order of `Node`
|
101 |
+
// construction. It can be retrieved via the `sequence_nr()` method. Note that
|
102 |
+
// this sequence number is *thread local*. This means that when `Node`s
|
103 |
+
// `A`, `B` and `C` are created consecutively in the same thread, their
|
104 |
+
// sequence numbers will be ordered `A` < `B` < `C`. If, however, `A` and `B`
|
105 |
+
// are created in one thread and `C` is created in a new thread, there are *no
|
106 |
+
// guarantees* w.r.t. the ordering of `C` relative to `A` or `B`.
|
107 |
+
// See NOTE [ Sequence Number] for more details on the usages of sequence
|
108 |
+
// number.
|
109 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
110 |
+
struct TORCH_API Node : std::enable_shared_from_this<Node> {
|
111 |
+
public:
|
112 |
+
/// Construct a new `Node` with the given `next_edges`
|
113 |
+
explicit Node(uint64_t sequence_nr, edge_list&& next_edges = edge_list())
|
114 |
+
: sequence_nr_(sequence_nr), next_edges_(std::move(next_edges)) {
|
115 |
+
for (const Edge& edge : next_edges_) {
|
116 |
+
update_topological_nr(edge);
|
117 |
+
}
|
118 |
+
|
119 |
+
if (AnomalyMode::is_enabled()) {
|
120 |
+
metadata()->store_stack();
|
121 |
+
|
122 |
+
// If anomaly mode is enabled and graph is constructed, then assign the
|
123 |
+
// currently evaluating node as the parent of this node.
|
124 |
+
// A parent is a Node where this Node is created.
|
125 |
+
// We are tracking the parents to track multiple backward operations.
|
126 |
+
assign_parent();
|
127 |
+
}
|
128 |
+
|
129 |
+
// Store the thread_id of the forward operator.
|
130 |
+
// See NOTE [ Sequence Numbers ]
|
131 |
+
thread_id_ = at::RecordFunction::currentThreadId();
|
132 |
+
}
|
133 |
+
|
134 |
+
explicit Node(edge_list&& next_edges = edge_list())
|
135 |
+
: Node(
|
136 |
+
/*sequence_nr=*/at::sequence_number::get_and_increment(),
|
137 |
+
std::move(next_edges)) {}
|
138 |
+
|
139 |
+
/// Nodes are neither copyable nor moveable.
|
140 |
+
Node(const Node& other) = delete;
|
141 |
+
Node(Node&& other) = delete;
|
142 |
+
Node& operator=(const Node& other) = delete;
|
143 |
+
Node& operator=(Node&& other) = delete;
|
144 |
+
virtual ~Node() = default;
|
145 |
+
|
146 |
+
std::shared_ptr<Node> getptr() {
|
147 |
+
return shared_from_this();
|
148 |
+
}
|
149 |
+
/// Evaluates the function on the given inputs and returns the result of the
|
150 |
+
/// function call.
|
151 |
+
variable_list operator()(variable_list&& inputs) {
|
152 |
+
// In the first iteration of named tensors, autograd ignores names and
|
153 |
+
// operates on unnamed tensors. In the long term, autograd should
|
154 |
+
// probably operate with names.
|
155 |
+
at::NoNamesGuard no_names_guard;
|
156 |
+
|
157 |
+
#ifdef USE_ROCM
|
158 |
+
// Keep track of backward pass for rocblas.
|
159 |
+
at::ROCmBackwardPassGuard in_backward;
|
160 |
+
#endif
|
161 |
+
|
162 |
+
auto step_callbacks =
|
163 |
+
at::getStepCallbacksUnlessEmpty(at::RecordScope::BACKWARD_FUNCTION);
|
164 |
+
if (C10_UNLIKELY(step_callbacks.has_value())) {
|
165 |
+
at::RecordFunction guard(std::move(*step_callbacks));
|
166 |
+
// Using sequence number and thread id to correlate with
|
167 |
+
// the forward pass function
|
168 |
+
guard.setForwardThreadId(thread_id_);
|
169 |
+
if (guard.needsInputs()) {
|
170 |
+
std::vector<c10::IValue> inputs_vec(inputs.begin(), inputs.end());
|
171 |
+
guard.before(
|
172 |
+
name(),
|
173 |
+
c10::ArrayRef<const c10::IValue>(
|
174 |
+
inputs_vec.data(), inputs_vec.size()),
|
175 |
+
static_cast<int64_t>(sequence_nr()));
|
176 |
+
} else {
|
177 |
+
guard.before(name(), static_cast<int64_t>(sequence_nr()));
|
178 |
+
}
|
179 |
+
return apply(std::move(inputs));
|
180 |
+
} else {
|
181 |
+
return apply(std::move(inputs));
|
182 |
+
}
|
183 |
+
}
|
184 |
+
|
185 |
+
// Graph Connectivity API
|
186 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
187 |
+
|
188 |
+
// Inputs. NOTE: inputs of the grad_fn correspond to Tensor outputs of the
|
189 |
+
// forward function.
|
190 |
+
|
191 |
+
// Marker for expected undefined input
|
192 |
+
struct undefined_input {};
|
193 |
+
|
194 |
+
/// Adds the type and shape metadata for a new input. Returns the index of
|
195 |
+
/// of the new input.
|
196 |
+
uint32_t add_input_metadata(
|
197 |
+
const at::TensorOptions& options,
|
198 |
+
c10::SymIntArrayRef shape,
|
199 |
+
bool is_tensor_subclass,
|
200 |
+
bool is_nested) noexcept {
|
201 |
+
uint32_t input_nr = input_metadata_.size();
|
202 |
+
auto meta_shape = MetadataShape{std::in_place_type<SymIntSmallVec>, shape};
|
203 |
+
input_metadata_.emplace_back(
|
204 |
+
options, meta_shape, is_tensor_subclass, is_nested);
|
205 |
+
return input_nr;
|
206 |
+
}
|
207 |
+
|
208 |
+
uint32_t add_input_metadata(const at::Tensor& t) noexcept {
|
209 |
+
uint32_t input_nr = input_metadata_.size();
|
210 |
+
input_metadata_.emplace_back(t);
|
211 |
+
return input_nr;
|
212 |
+
}
|
213 |
+
|
214 |
+
/// Adds a placeholder for an input that will not be used.
|
215 |
+
uint32_t add_input_metadata(undefined_input u) noexcept {
|
216 |
+
uint32_t input_nr = input_metadata_.size();
|
217 |
+
input_metadata_.emplace_back();
|
218 |
+
return input_nr;
|
219 |
+
}
|
220 |
+
|
221 |
+
uint32_t num_inputs() const noexcept {
|
222 |
+
return input_metadata_.size();
|
223 |
+
}
|
224 |
+
|
225 |
+
const InputMetadata& input_metadata(size_t index) const {
|
226 |
+
return input_metadata_[index];
|
227 |
+
}
|
228 |
+
|
229 |
+
// Danger: not thread safe, caller must protect with lock
|
230 |
+
InputMetadata& mutable_input_metadata(size_t index) {
|
231 |
+
return input_metadata_[index];
|
232 |
+
}
|
233 |
+
|
234 |
+
/**
|
235 |
+
* Note: Function Streams
|
236 |
+
* A function's stream (for a given device type) is the stream of the first
|
237 |
+
* element of its input buffer on a device of that type.
|
238 |
+
*
|
239 |
+
* If all elements are on the same device they MUST share a stream. If
|
240 |
+
* elements are on different devices (across multiple GPUs, for example)
|
241 |
+
* they may have different streams.
|
242 |
+
*/
|
243 |
+
c10::optional<c10::Stream> stream(const c10::DeviceType device_type) {
|
244 |
+
for (const auto& metadata : input_metadata_) {
|
245 |
+
if (metadata.device().type() == device_type)
|
246 |
+
return metadata.stream();
|
247 |
+
}
|
248 |
+
|
249 |
+
return c10::nullopt;
|
250 |
+
}
|
251 |
+
|
252 |
+
void clear_input_metadata() {
|
253 |
+
input_metadata_.clear();
|
254 |
+
}
|
255 |
+
|
256 |
+
// Outputs ("Next Edges")
|
257 |
+
|
258 |
+
void update_topological_nr(const Edge& edge) {
|
259 |
+
TORCH_INTERNAL_ASSERT(
|
260 |
+
!has_parent_,
|
261 |
+
"Cannot update a node's topological_nr after it already has a parent."
|
262 |
+
" If we allow this, we can no longer guarantee that a parent's"
|
263 |
+
" topo_nr is always greater than those of all its children")
|
264 |
+
Node* node = edge.function.get();
|
265 |
+
if (node) {
|
266 |
+
auto topo_nr = node->topological_nr();
|
267 |
+
if (topological_nr_ <= topo_nr) {
|
268 |
+
topological_nr_ = topo_nr + 1;
|
269 |
+
}
|
270 |
+
}
|
271 |
+
}
|
272 |
+
|
273 |
+
void set_next_edge(size_t index, Edge edge) {
|
274 |
+
update_topological_nr(edge);
|
275 |
+
next_edges_[index] = std::move(edge);
|
276 |
+
}
|
277 |
+
|
278 |
+
void add_next_edge(Edge edge) {
|
279 |
+
update_topological_nr(edge);
|
280 |
+
next_edges_.emplace_back(std::move(edge));
|
281 |
+
}
|
282 |
+
|
283 |
+
void set_next_edges(edge_list&& next_edges) {
|
284 |
+
next_edges_ = std::move(next_edges);
|
285 |
+
for (const auto& next_edge : next_edges_) {
|
286 |
+
update_topological_nr(next_edge);
|
287 |
+
}
|
288 |
+
}
|
289 |
+
|
290 |
+
const Edge& next_edge(size_t index) const noexcept {
|
291 |
+
return next_edges_[index];
|
292 |
+
}
|
293 |
+
|
294 |
+
const edge_list& next_edges() const noexcept {
|
295 |
+
return next_edges_;
|
296 |
+
}
|
297 |
+
|
298 |
+
edge_list& next_edges() noexcept {
|
299 |
+
return next_edges_;
|
300 |
+
}
|
301 |
+
|
302 |
+
uint32_t num_outputs() const noexcept {
|
303 |
+
return next_edges_.size();
|
304 |
+
}
|
305 |
+
|
306 |
+
// Miscellaneous Methods
|
307 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
308 |
+
|
309 |
+
/// NOTE [ Sequence Number]
|
310 |
+
///
|
311 |
+
/// The sequence_nr has two main usages in autograd:
|
312 |
+
///
|
313 |
+
/// 1) Helps determine the node's execution priority in the engine.
|
314 |
+
/// All else being equal, nodes with higher priority numbers are executed
|
315 |
+
/// first. Thus, nodes corresponding to ops executed later are the first to
|
316 |
+
/// be executed in the backward pass. One caveat is that we prioritize
|
317 |
+
/// AccumulateGrad nodes by explicitly setting its sequence_nr to be
|
318 |
+
/// UINT64_MAX.
|
319 |
+
/// 2) The sequence number of this `Node` is paired with with thread_id it was
|
320 |
+
/// created in
|
321 |
+
/// as a unique identifier by the profiler to annotate recorded events.
|
322 |
+
/// The purpose of this is to help users (and possibly programs)
|
323 |
+
/// interpreting the profiler's output to correlate backward nodes with its
|
324 |
+
/// forward ops. We need both sequence_nr and thread_id to identify a node
|
325 |
+
/// because sequence_nr is thread_local, i.e., starts counting up from zero
|
326 |
+
/// in a new thread
|
327 |
+
uint64_t sequence_nr() const noexcept {
|
328 |
+
return sequence_nr_;
|
329 |
+
}
|
330 |
+
|
331 |
+
void set_sequence_nr(uint64_t sequence_nr) {
|
332 |
+
sequence_nr_ = sequence_nr;
|
333 |
+
}
|
334 |
+
|
335 |
+
// NOTE [ Topological Number ]
|
336 |
+
//
|
337 |
+
// topological_nr is used to prune branches in the DAG during autograd
|
338 |
+
// discovery as maintaining topological_nr helps us check in O(1) if there
|
339 |
+
// does NOT exist a directed path between two nodes.
|
340 |
+
//
|
341 |
+
// The topological order number of this `Node` representing the length of the
|
342 |
+
// longest possible path from this Node to any leaf node. If you are leaf
|
343 |
+
// node, aka AccumulateGrad, this will be zero. This value has the property
|
344 |
+
// that For every pair of nodes X, Y in G, existence of a directed path from X
|
345 |
+
// to Y implies topo_nr(X) > topo_nr(Y). The converse is not true, however, so
|
346 |
+
// we cannot prove existence of a path from X to Y, only non-existence.
|
347 |
+
//
|
348 |
+
// One assumption we make when using topo_nr is that once a node
|
349 |
+
// has been used, i.e., has a parent node, its own topo_nr does not change
|
350 |
+
// we have added some checks with the `has_parent_` field to enforce this.
|
351 |
+
//
|
352 |
+
// What NOT to do:
|
353 |
+
//
|
354 |
+
// 1) 2 -> 1 -> 0 In this diagram we label nodes with their
|
355 |
+
// topo_nr.
|
356 |
+
// 2 -> 1 -> 0 We have two simple graphs that can each
|
357 |
+
// arise from
|
358 |
+
// `t.exp().exp()`, for example.
|
359 |
+
// 2) 2 -> 1 -> 0
|
360 |
+
// /
|
361 |
+
// 2 -> 1 -> 0 We add 2 as a next edge to 1 even though 1
|
362 |
+
// already
|
363 |
+
// has a parent.
|
364 |
+
// 3) 2 -> 1 -> 0
|
365 |
+
// /
|
366 |
+
// 2 -> 3 -> 0 2 < 3, yet there exists a path from 2 to 3!
|
367 |
+
//
|
368 |
+
uint64_t topological_nr() const noexcept {
|
369 |
+
has_parent_ = true;
|
370 |
+
return topological_nr_;
|
371 |
+
}
|
372 |
+
|
373 |
+
// assigning a node as a parent to this node
|
374 |
+
void assign_parent();
|
375 |
+
|
376 |
+
/// Id of the thread that created Node
|
377 |
+
uint64_t thread_id() const noexcept {
|
378 |
+
return thread_id_;
|
379 |
+
}
|
380 |
+
|
381 |
+
/// Returns the name of the dynamic type of the function, for debugging.
|
382 |
+
virtual std::string name() const;
|
383 |
+
|
384 |
+
/// The difference between functions `should_compute_output` and
|
385 |
+
/// `task_should_compute_output`:
|
386 |
+
/// - `should_compute_output` should only be used during graph construction
|
387 |
+
/// and takes into account only requires_grad information
|
388 |
+
/// - `task_should_compute_output` should only be called during the backward
|
389 |
+
/// pass (unless called directly through grad_fn) and takes into account the
|
390 |
+
/// current graph task. Specifically, the autograd engine trims unnecessary
|
391 |
+
/// edges when `inputs` are specified, and during backward untrimmed nodes
|
392 |
+
/// left on the graph can/should check `task_should_compute_output` to see if
|
393 |
+
/// any outgoing edges have been trimmed by the engine. If that is the case,
|
394 |
+
/// gradient computation wrt those edges can be omitted.
|
395 |
+
///
|
396 |
+
/// Returns true if the particular output edge is active, and that particular
|
397 |
+
/// output of this function should be computed.
|
398 |
+
bool should_compute_output(size_t output_edge_index) const {
|
399 |
+
TORCH_CHECK(output_edge_index < num_outputs(), "Index out of range");
|
400 |
+
return next_edges_[output_edge_index].is_valid();
|
401 |
+
}
|
402 |
+
|
403 |
+
/// Returns true if any of the output edges in any of the ranges are active.
|
404 |
+
bool should_compute_output(std::initializer_list<IndexRange> idxs) const {
|
405 |
+
return std::any_of(idxs.begin(), idxs.end(), [this](IndexRange range) {
|
406 |
+
for (const auto i : c10::irange(range.first, range.second)) {
|
407 |
+
if (should_compute_output(i))
|
408 |
+
return true;
|
409 |
+
}
|
410 |
+
return false;
|
411 |
+
});
|
412 |
+
}
|
413 |
+
|
414 |
+
/// Same as the above `should_compute_output` function but will also
|
415 |
+
/// check whether this edge is needed within the current graph task.
|
416 |
+
bool task_should_compute_output(size_t output_edge_index) const {
|
417 |
+
TORCH_CHECK(output_edge_index < num_outputs(), "Index out of range");
|
418 |
+
const auto& next = next_edges_[output_edge_index];
|
419 |
+
if (next.is_valid()) {
|
420 |
+
const auto exec_info = get_current_graph_task_exec_info();
|
421 |
+
if (exec_info && !exec_info->empty()) {
|
422 |
+
auto it = exec_info->find(next.function.get());
|
423 |
+
if (it == exec_info->end() || !it->second.should_execute()) {
|
424 |
+
return false; // this edge is not needed for the current graph_task
|
425 |
+
}
|
426 |
+
}
|
427 |
+
return true;
|
428 |
+
}
|
429 |
+
return false;
|
430 |
+
}
|
431 |
+
|
432 |
+
/// Returns true if any of the output edges in any of the ranges are active
|
433 |
+
/// and should be computed in the current graph task.
|
434 |
+
bool task_should_compute_output(
|
435 |
+
std::initializer_list<IndexRange> idxs) const {
|
436 |
+
return std::any_of(idxs.begin(), idxs.end(), [this](IndexRange range) {
|
437 |
+
for (const auto i : c10::irange(range.first, range.second)) {
|
438 |
+
if (task_should_compute_output(i))
|
439 |
+
return true;
|
440 |
+
}
|
441 |
+
return false;
|
442 |
+
});
|
443 |
+
}
|
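To make the intended call pattern concrete, here is a hedged sketch of how a backward node typically guards each output edge; the node type, the gradient formula, and the single-output layout are invented for illustration:

// Hypothetical backward node; only the guard pattern matters here.
struct MulByTwoBackward : public torch::autograd::Node {
  torch::autograd::variable_list apply(
      torch::autograd::variable_list&& grads) override {
    torch::autograd::variable_list grad_inputs(num_outputs());
    // Skip the work entirely if the engine trimmed this edge for the
    // current graph task (e.g. because `inputs=` was passed to backward()).
    if (task_should_compute_output(0)) {
      grad_inputs[0] = grads[0] * 2;
    }
    return grad_inputs;
  }
};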
444 |
+
|
445 |
+
/// Returns the `PyObject` stored for this `Node` (for Python
|
446 |
+
/// interaction).
|
447 |
+
PyObject* pyobj() const noexcept {
|
448 |
+
return pyobj_;
|
449 |
+
}
|
450 |
+
|
451 |
+
/// Sets the `PyObject` stored for this `Node` (for Python interaction).
|
452 |
+
void set_pyobj(PyObject* pyobj) noexcept {
|
453 |
+
pyobj_ = pyobj;
|
454 |
+
}
|
455 |
+
|
456 |
+
/// Returns the anomaly metadata stored for this `Node`.
|
457 |
+
/// If none exist, creates a new empty one.
|
458 |
+
AnomalyMetadata* metadata() noexcept;
|
459 |
+
|
460 |
+
// Hook API
|
461 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
462 |
+
|
463 |
+
uintptr_t add_post_hook(std::unique_ptr<FunctionPostHook>&& post_hook) {
|
464 |
+
post_hooks_.emplace_back(std::move(post_hook));
|
465 |
+
// Use the raw pointer as the unique key to identify this hook. This key
|
466 |
+
// can then be used in del_post_hook(key) to remove this hook.
|
467 |
+
return reinterpret_cast<std::uintptr_t>(post_hooks_.back().get());
|
468 |
+
}
|
469 |
+
|
470 |
+
const std::vector<std::unique_ptr<FunctionPostHook>>& post_hooks()
|
471 |
+
const noexcept {
|
472 |
+
return post_hooks_;
|
473 |
+
}
|
474 |
+
|
475 |
+
// delete a post hook matching the key
|
476 |
+
bool del_post_hook(const uintptr_t& key) {
|
477 |
+
for (auto it = post_hooks_.begin(); it != post_hooks_.end(); ++it) {
|
478 |
+
if (key == reinterpret_cast<std::uintptr_t>(it->get())) {
|
479 |
+
post_hooks_.erase(it);
|
480 |
+
return true;
|
481 |
+
}
|
482 |
+
}
|
483 |
+
return false;
|
484 |
+
}
|
485 |
+
|
486 |
+
std::vector<std::unique_ptr<FunctionPostHook>>& post_hooks() noexcept {
|
487 |
+
return post_hooks_;
|
488 |
+
}
|
489 |
+
|
490 |
+
void add_pre_hook(std::unique_ptr<FunctionPreHook>&& pre_hook) {
|
491 |
+
pre_hooks_.emplace_back(std::move(pre_hook));
|
492 |
+
}
|
493 |
+
|
494 |
+
void add_tensor_pre_hook(std::unique_ptr<FunctionPreHook>&& pre_hook) {
|
495 |
+
tensor_pre_hooks_.emplace_back(std::move(pre_hook));
|
496 |
+
}
|
497 |
+
|
498 |
+
void add_retains_grad_hook(
|
499 |
+
std::unique_ptr<FunctionPreHook>&& pre_hook,
|
500 |
+
size_t output_idx) {
|
501 |
+
retains_grad_hooks_[output_idx] = std::move(pre_hook);
|
502 |
+
}
|
503 |
+
|
504 |
+
std::unique_ptr<FunctionPreHook> pop_retains_grad_hook(size_t output_idx) {
|
505 |
+
auto ret = std::move(retains_grad_hooks_[output_idx]);
|
506 |
+
retains_grad_hooks_.erase(output_idx);
|
507 |
+
return ret;
|
508 |
+
}
|
509 |
+
|
510 |
+
const std::vector<std::unique_ptr<FunctionPreHook>>& pre_hooks()
|
511 |
+
const noexcept {
|
512 |
+
return pre_hooks_;
|
513 |
+
}
|
514 |
+
|
515 |
+
std::vector<std::unique_ptr<FunctionPreHook>>& pre_hooks() noexcept {
|
516 |
+
return pre_hooks_;
|
517 |
+
}
|
518 |
+
|
519 |
+
virtual std::vector<std::unique_ptr<FunctionPreHook>>&
|
520 |
+
tensor_pre_hooks() noexcept {
|
521 |
+
return tensor_pre_hooks_;
|
522 |
+
}
|
523 |
+
|
524 |
+
virtual std::unique_ptr<PostAccumulateGradHook>&
|
525 |
+
tensor_post_acc_grad_hooks() noexcept {
|
526 |
+
static std::unique_ptr<PostAccumulateGradHook> empty = nullptr;
|
527 |
+
return empty;
|
528 |
+
}
|
529 |
+
|
530 |
+
std::unordered_map<size_t, std::unique_ptr<FunctionPreHook>>&
|
531 |
+
retains_grad_hooks() noexcept {
|
532 |
+
return retains_grad_hooks_;
|
533 |
+
}
|
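As a usage sketch (not taken from this header), registering a C++ post hook and removing it again via the returned key could look like the following; `LoggingPostHook` and `attach_then_detach` are invented names, and `<memory>` is assumed to be included:

struct LoggingPostHook : torch::autograd::FunctionPostHook {
  torch::autograd::variable_list operator()(
      const torch::autograd::variable_list& grad_inputs,
      const torch::autograd::variable_list& grad_outputs) override {
    // Pass gradients through unchanged; a real hook could inspect or log them.
    return grad_inputs;
  }
};

void attach_then_detach(const std::shared_ptr<torch::autograd::Node>& fn) {
  const auto key = fn->add_post_hook(std::make_unique<LoggingPostHook>());
  // ... run backward() while the hook is installed ...
  fn->del_post_hook(key);  // remove it again using the key returned above
}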
534 |
+
|
535 |
+
// Customization Points for Subclasses
|
536 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
537 |
+
|
538 |
+
/// Releases saved variables if the operation won't be reused.
|
539 |
+
virtual void release_variables() {}
|
540 |
+
|
541 |
+
/// Called before an apply if `release_variables()` is going to be called.
|
542 |
+
/// Allows larger ops like `InterpreterAutogradFunction` to incrementally
|
543 |
+
/// release variables as they run.
|
544 |
+
virtual void will_release_variables() {}
|
545 |
+
|
546 |
+
/// Returns true if this function is traceable. An op is traceable if all
|
547 |
+
/// operations happening within `apply()` are performed on autograd
|
548 |
+
/// `Variables` (i.e. apply mostly instantiates and applies other functions).
|
549 |
+
virtual bool is_traceable() {
|
550 |
+
return false;
|
551 |
+
}
|
552 |
+
|
553 |
+
/// A `Node` is said to pass state transparently to backward, if the
|
554 |
+
/// state consists only of (Saved)Variables and only non-variable objects
|
555 |
+
/// that parameterize the operation in some way that defines the graph
|
556 |
+
/// structure AND the backward function is traceable. In particular,
|
557 |
+
/// parametrization MUST NOT depend on the data of any `Variable`.
|
558 |
+
/// TODO: it might be possible to handle cases where backward is
|
559 |
+
/// non-traceable but state passing could be considered transparent. This
|
560 |
+
/// will probably depend on saved_variable_list being mutable.
|
561 |
+
/// NOTE: this value matters only if is_traceable() returns false.
|
562 |
+
virtual bool passes_state_transparently() {
|
563 |
+
return false;
|
564 |
+
}
|
565 |
+
|
566 |
+
// see [Note: Compiled Autograd]
|
567 |
+
// Used by compiled autograd to
|
568 |
+
// 1) Extract tensors/symint args
|
569 |
+
// 2) Collect node information for specialization and caching
|
570 |
+
// Implementations in subclasses should call args.collect() with all node
|
571 |
+
// attrs. These functions are only called during backward.
|
572 |
+
virtual void compiled_args(CompiledNodeArgs& args) {
|
573 |
+
throw std::runtime_error(
|
574 |
+
std::string("compiled_args not implemented: ") + name());
|
575 |
+
}
|
576 |
+
|
577 |
+
// Used by compiled autograd to call apply() with different saved tensors
|
578 |
+
// Implementations should call saved.before() on all attrs, then apply(), then
|
579 |
+
// saved.after() on all attrs in the same order.
|
580 |
+
virtual variable_list apply_with_saved(
|
581 |
+
const variable_list& inputs,
|
582 |
+
SwapSavedVariables& saved) {
|
583 |
+
throw std::runtime_error(
|
584 |
+
std::string("apply_with_saved not implemented: ") + name());
|
585 |
+
}
|
586 |
+
|
587 |
+
protected:
|
588 |
+
/// Performs the `Node`'s actual operation.
|
589 |
+
virtual variable_list apply(variable_list&& inputs) = 0;
|
590 |
+
|
591 |
+
/// Calls `apply()`, but instruments it with tracing machinery.
|
592 |
+
variable_list traced_apply(variable_list inputs);
|
593 |
+
|
594 |
+
// Sequence number used to correlate backward nodes with forward ops in the
|
595 |
+
// profiler and provide determinism in the engine.
|
596 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
|
597 |
+
uint64_t sequence_nr_;
|
598 |
+
|
599 |
+
// See NOTE [ Topological Number ]
|
600 |
+
uint64_t topological_nr_ = 0;
|
601 |
+
|
602 |
+
// Tracks whether this node has been added as the next_edge of another node
|
603 |
+
// via set_next_edge(s), which always calls topological_nr() of all its
|
604 |
+
// children. See NOTE [ Topological Number ] for why we need this.
|
605 |
+
mutable bool has_parent_ = false;
|
606 |
+
|
607 |
+
// Id of the thread that created the instance
|
608 |
+
uint64_t thread_id_ = 0;
|
609 |
+
|
610 |
+
// Note [Thread Safety on Autograd Node]
|
611 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
612 |
+
// The Autograd Engine lets the owning thread that calls Engine::execute drive
|
613 |
+
// the GraphTask execution. There might be cases where part of the GraphTask is
|
614 |
+
// shared across different `backward()` or `grad()` calls, i.e. new threads are
|
615 |
+
// forked in the middle of the forward pass and `backward()` is called
|
616 |
+
// separately from different threads. We need to protect NodeTask to prevent
|
617 |
+
// data races on reads and writes of shared variables.
|
618 |
+
//
|
619 |
+
// NB: This is only needed for Autograd Nodes that run on CPU; technically,
|
620 |
+
// "CUDA", "XLA" nodes don't need locking because device threads are always
|
621 |
+
// single-threaded.
|
622 |
+
//
|
623 |
+
// Here we add a thread mutex to help protect the Node's thread safety, so
|
624 |
+
// that different threads cannot race the shared data when executing the same
|
625 |
+
// NodeTask from multiple CPU threads. It IS the user/developer responsibility
|
626 |
+
// to take advantage of this mutex to protect the thread safety of their
|
627 |
+
// autograd Node. The general strategy of thread safety on autograd Node:
|
628 |
+
//
|
629 |
+
// 1. Users should lock the mutex during Node::release_variables() if the Node
|
630 |
+
// needs
|
631 |
+
// to release the variables on the fly. This ensures that when we release
|
632 |
+
// saved_variables from one thread, no other thread can release the saved
|
633 |
+
// variables concurrently.
|
634 |
+
// 2. Users should lock the mutex during Node::apply(). This ensures that
|
635 |
+
// writes
|
636 |
+
// to shared variables do not race across threads (e.g. AccumulateGrad and
|
637 |
+
// custom C++ autograd Nodes that write to shared variables).
|
638 |
+
// 3. Items 1 and 2 work together so that while we release saved
|
639 |
+
// variables
|
640 |
+
// from one thread, no other thread can call Node::apply(); this ensures
|
641 |
+
// that the variable references held by other threads aren't dangling.
|
642 |
+
// 4. If the Node doesn't release any variables and has no shared data
|
643 |
+
// reads/writes
|
644 |
+
// in the Node, i.e. it is purely functional, the user doesn't need to lock
|
645 |
+
// the mutex.
|
646 |
+
//
|
647 |
+
// This protects the thread safety of the autograd Node itself, but it does
|
648 |
+
// not protect the thread safety of the Node's pre/post C++ hooks (Python
|
649 |
+
// hooks are automatically thread safe); we rely on the user to write
|
650 |
+
// thread-safe C++ hooks if they want the hooks to be applied correctly in a
|
651 |
+
// multithreaded environment.
|
652 |
+
std::mutex mutex_;
|
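Below is a hedged sketch of how a custom C++ node might follow the locking strategy above; the class, its saved tensor, and the gradient formula are invented for illustration, and `<mutex>` is assumed to be included:

struct SharedStateBackward : torch::autograd::Node {
  at::Tensor saved_;  // shared state read by apply() and cleared by release_variables()

  torch::autograd::variable_list apply(
      torch::autograd::variable_list&& grads) override {
    std::lock_guard<std::mutex> lock(mutex_);  // item 2: guard reads of shared state
    return {grads[0] * saved_};
  }

  void release_variables() override {
    std::lock_guard<std::mutex> lock(mutex_);  // item 1: guard the on-the-fly release
    saved_.reset();
  }
};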
653 |
+
|
654 |
+
edge_list next_edges_;
|
655 |
+
PyObject* pyobj_ = nullptr; // weak reference
|
656 |
+
std::unique_ptr<AnomalyMetadata> anomaly_metadata_ = nullptr;
|
657 |
+
|
658 |
+
// NOTE [Hooks ordering]
|
659 |
+
// We have 3 separate fields for pre hooks registered to the autograd nodes
|
660 |
+
// because the conditions under which they execute are different, and we
|
661 |
+
// want more fine-grained control over the order in which different types
|
662 |
+
// of hooks are executed.
|
663 |
+
// - pre_hooks are only executed when the node itself is executed
|
664 |
+
// - tensor_pre_hooks are executed as long as the engine traverses the node,
|
665 |
+
// even if that node won't be executed.
|
666 |
+
// - retains_grad_hooks are like tensor_pre_hooks except they are always
|
667 |
+
// ordered after all other tensor pre hooks
|
668 |
+
std::vector<std::unique_ptr<FunctionPreHook>> pre_hooks_;
|
669 |
+
std::vector<std::unique_ptr<FunctionPreHook>> tensor_pre_hooks_;
|
670 |
+
std::unordered_map<size_t, std::unique_ptr<FunctionPreHook>>
|
671 |
+
retains_grad_hooks_;
|
672 |
+
std::vector<std::unique_ptr<FunctionPostHook>> post_hooks_;
|
673 |
+
at::SmallVector<InputMetadata, 2> input_metadata_;
|
674 |
+
};
|
675 |
+
|
676 |
+
/// See Node::is_traceable() for definition.
|
677 |
+
struct TraceableFunction : public Node {
|
678 |
+
using Node::Node;
|
679 |
+
bool is_traceable() final {
|
680 |
+
return true;
|
681 |
+
}
|
682 |
+
};
|
683 |
+
|
684 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
685 |
+
// Associated Free Nodes
|
686 |
+
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
687 |
+
|
688 |
+
namespace detail {
|
689 |
+
// Implementation of `collect_next_edges` (see below).
|
690 |
+
struct MakeNextFunctionList : IterArgs<MakeNextFunctionList> {
|
691 |
+
edge_list next_edges;
|
692 |
+
using IterArgs<MakeNextFunctionList>::operator();
|
693 |
+
void operator()(const Variable& variable) {
|
694 |
+
if (variable.defined()) {
|
695 |
+
next_edges.emplace_back(impl::gradient_edge(variable));
|
696 |
+
} else {
|
697 |
+
next_edges.emplace_back();
|
698 |
+
}
|
699 |
+
}
|
700 |
+
void operator()(const Variable* variable) {
|
701 |
+
operator()(*variable);
|
702 |
+
}
|
703 |
+
void operator()(const c10::optional<Variable>& variable) {
|
704 |
+
if (variable.has_value()) {
|
705 |
+
operator()(*variable);
|
706 |
+
} else {
|
707 |
+
next_edges.emplace_back();
|
708 |
+
}
|
709 |
+
}
|
710 |
+
};
|
711 |
+
} // namespace detail
|
712 |
+
|
713 |
+
/// Create an `Edge` between the given `variable` and the `function`, which is
|
714 |
+
/// assumed to be the gradient function of this variable (i.e. the function
|
715 |
+
/// through which this variable is backpropagated during the backward pass).
|
716 |
+
/// This sets the `grad_fn` property of the `variable`. This function assumes
|
717 |
+
/// that the `Variable` is a new input to the gradient function and its
|
718 |
+
/// `input_nr` thus equal to `function->num_inputs()`. Additionally, it
|
719 |
+
/// increments the `Node`'s number of inputs by one. Approximately
|
720 |
+
/// equivalent to `variable.set_gradient_edge(function,
|
721 |
+
/// function->add_input_metadata(variable.dispatch_type(), variable.sizes()))`.
|
722 |
+
/// If you don't want the `Node`'s `num_inputs` to be incremented, use
|
723 |
+
/// `set_gradient_edge` directly.
|
724 |
+
inline void create_gradient_edge(
|
725 |
+
Variable& variable,
|
726 |
+
std::shared_ptr<Node> function) {
|
727 |
+
// Copy before move.
|
728 |
+
const auto input_nr = function->add_input_metadata(variable);
|
729 |
+
impl::set_gradient_edge(variable, {std::move(function), input_nr});
|
730 |
+
}
|
731 |
+
|
732 |
+
/// Return true if any of the variables in the list require a gradient.
|
733 |
+
inline bool any_variable_requires_grad(const variable_list& variables) {
|
734 |
+
return std::any_of(
|
735 |
+
variables.begin(), variables.end(), [](const Variable& variable) {
|
736 |
+
return variable.defined() && variable.requires_grad();
|
737 |
+
});
|
738 |
+
}
|
739 |
+
|
740 |
+
/// Return the next edges of all the given variables, or tuples of variables.
|
741 |
+
template <typename... Variables>
|
742 |
+
edge_list collect_next_edges(Variables&&... variables) {
|
743 |
+
detail::MakeNextFunctionList make;
|
744 |
+
make.apply(std::forward<Variables>(variables)...);
|
745 |
+
return std::move(make.next_edges);
|
746 |
+
}
|
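For context, a minimal sketch of how these helpers are typically combined when wiring a custom backward node into the graph; the function and variable names are illustrative, and it assumes `Node::set_next_edges()` from earlier in this header:

void wire_backward_node(
    const at::Tensor& input,
    at::Tensor& output,
    const std::shared_ptr<torch::autograd::Node>& grad_fn) {
  if (torch::autograd::any_variable_requires_grad({input})) {
    // Point the node at the gradient edges of its inputs...
    grad_fn->set_next_edges(torch::autograd::collect_next_edges(input));
    // ...and install it as the grad_fn of the output.
    torch::autograd::create_gradient_edge(output, grad_fn);
  }
}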
747 |
+
|
748 |
+
struct TypeAndSize {
|
749 |
+
TypeAndSize() : options(at::TensorOptions()) {}
|
750 |
+
/* implicit */
|
751 |
+
TypeAndSize(const at::Tensor& t)
|
752 |
+
: sym_sizes(t.sym_sizes().vec()), options(t.options()) {}
|
753 |
+
|
754 |
+
at::Tensor zeros();
|
755 |
+
|
756 |
+
std::vector<c10::SymInt> sym_sizes;
|
757 |
+
at::TensorOptions options;
|
758 |
+
};
|
759 |
+
|
760 |
+
} // namespace autograd
|
761 |
+
} // namespace torch
|
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/function_hook.h
ADDED
@@ -0,0 +1,66 @@
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <ATen/Tensor.h>
|
4 |
+
#include <torch/csrc/Export.h>
|
5 |
+
#include <string>
|
6 |
+
#include <vector>
|
7 |
+
|
8 |
+
namespace torch::dynamo::autograd {
|
9 |
+
class CompiledNodeArgs;
|
10 |
+
class SwapSavedVariables;
|
11 |
+
} // namespace torch::dynamo::autograd
|
12 |
+
|
13 |
+
// A hook that's called on gradients
|
14 |
+
|
15 |
+
namespace torch {
|
16 |
+
namespace autograd {
|
17 |
+
|
18 |
+
using Variable = at::Tensor;
|
19 |
+
using variable_list = std::vector<Variable>;
|
20 |
+
|
21 |
+
struct TORCH_API FunctionPreHook {
|
22 |
+
virtual ~FunctionPreHook() = default;
|
23 |
+
virtual variable_list operator()(const variable_list& grads) = 0;
|
24 |
+
// only implemented for python hooks, registers hook with compiled autograd
|
25 |
+
virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
|
26 |
+
throw std::runtime_error(
|
27 |
+
std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
|
28 |
+
typeid(*this).name());
|
29 |
+
}
|
30 |
+
};
|
31 |
+
|
32 |
+
struct TORCH_API FunctionPostHook {
|
33 |
+
virtual ~FunctionPostHook() = default;
|
34 |
+
virtual variable_list operator()(
|
35 |
+
const variable_list& outputs /* grad_inputs */,
|
36 |
+
const variable_list& inputs /* grad_outputs */) = 0;
|
37 |
+
// only implemented for python hooks, registers hook with compiled autograd
|
38 |
+
virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
|
39 |
+
throw std::runtime_error(
|
40 |
+
std::string("compiled_args nyi, see [Note: Compiled Autograd] ") +
|
41 |
+
typeid(*this).name());
|
42 |
+
}
|
43 |
+
};
|
44 |
+
|
45 |
+
struct TORCH_API PostAccumulateGradHook {
|
46 |
+
virtual ~PostAccumulateGradHook() = default;
|
47 |
+
virtual void operator()(const Variable& tensor) = 0;
|
48 |
+
// only implemented for python hooks on nodes, registers hook with compiled
|
49 |
+
// autograd
|
50 |
+
virtual void compiled_args(torch::dynamo::autograd::CompiledNodeArgs& args) {
|
51 |
+
throw std::runtime_error(
|
52 |
+
std::string("not yet implemented for compiled autograd: ") +
|
53 |
+
typeid(*this).name());
|
54 |
+
}
|
55 |
+
|
56 |
+
virtual void apply_with_saved(
|
57 |
+
Variable&,
|
58 |
+
torch::dynamo::autograd::SwapSavedVariables&) {
|
59 |
+
throw std::runtime_error(
|
60 |
+
std::string("not yet implemented for compiled autograd: ") +
|
61 |
+
typeid(*this).name());
|
62 |
+
}
|
63 |
+
};
|
64 |
+
|
65 |
+
} // namespace autograd
|
66 |
+
} // namespace torch
|
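To illustrate the hook interfaces above, a minimal pre-hook that scales incoming gradients might be written like this (a sketch only; `ScaleGradPreHook` and its factor are invented):

struct ScaleGradPreHook : torch::autograd::FunctionPreHook {
  double factor;
  explicit ScaleGradPreHook(double f) : factor(f) {}

  torch::autograd::variable_list operator()(
      const torch::autograd::variable_list& grads) override {
    torch::autograd::variable_list scaled;
    scaled.reserve(grads.size());
    for (const auto& g : grads) {
      // Leave undefined gradients untouched; scale the rest.
      scaled.push_back(g.defined() ? g * factor : g);
    }
    return scaled;
  }
};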
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/Functions.h
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/python_functions.h
ADDED
@@ -0,0 +1,25 @@
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <Python.h>
|
4 |
+
|
5 |
+
// @generated from ../tools/autograd/templates/python_functions.h
|
6 |
+
|
7 |
+
// Python bindings for automatically generated autograd functions
|
8 |
+
|
9 |
+
namespace torch { namespace autograd { namespace generated {
|
10 |
+
|
11 |
+
void initialize_autogenerated_functions_0(PyObject* module);
|
12 |
+
void initialize_autogenerated_functions_1(PyObject* module);
|
13 |
+
void initialize_autogenerated_functions_2(PyObject* module);
|
14 |
+
void initialize_autogenerated_functions_3(PyObject* module);
|
15 |
+
void initialize_autogenerated_functions_4(PyObject* module);
|
16 |
+
|
17 |
+
inline void initialize_autogenerated_functions(PyObject* module) {
|
18 |
+
initialize_autogenerated_functions_0(module);
|
19 |
+
initialize_autogenerated_functions_1(module);
|
20 |
+
initialize_autogenerated_functions_2(module);
|
21 |
+
initialize_autogenerated_functions_3(module);
|
22 |
+
initialize_autogenerated_functions_4(module);
|
23 |
+
}
|
24 |
+
|
25 |
+
}}} // namespace torch::autograd::generated
|
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/generated/variable_factories.h
ADDED
@@ -0,0 +1,728 @@
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// @generated from ../tools/autograd/templates/variable_factories.h
|
4 |
+
|
5 |
+
#include <ATen/core/Tensor.h>
|
6 |
+
#include <ATen/TracerMode.h>
|
7 |
+
#include <ATen/core/grad_mode.h>
|
8 |
+
#include <c10/util/ArrayRef.h>
|
9 |
+
#include <c10/core/MemoryFormat.h>
|
10 |
+
#include <torch/csrc/api/include/torch/detail/TensorDataContainer.h>
|
11 |
+
#include <torch/csrc/autograd/variable.h>
|
12 |
+
|
13 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
14 |
+
#include <ATen/Functions.h>
|
15 |
+
#else
|
16 |
+
#include <ATen/ops/from_blob.h>
|
17 |
+
#include <ATen/ops/_make_dep_token.h>
|
18 |
+
#include <ATen/ops/_cudnn_init_dropout_state.h>
|
19 |
+
#include <ATen/ops/arange.h>
|
20 |
+
#include <ATen/ops/arange.h>
|
21 |
+
#include <ATen/ops/arange.h>
|
22 |
+
#include <ATen/ops/bartlett_window.h>
|
23 |
+
#include <ATen/ops/bartlett_window.h>
|
24 |
+
#include <ATen/ops/blackman_window.h>
|
25 |
+
#include <ATen/ops/blackman_window.h>
|
26 |
+
#include <ATen/ops/empty.h>
|
27 |
+
#include <ATen/ops/empty.h>
|
28 |
+
#include <ATen/ops/empty_permuted.h>
|
29 |
+
#include <ATen/ops/_empty_affine_quantized.h>
|
30 |
+
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
|
31 |
+
#include <ATen/ops/empty_quantized.h>
|
32 |
+
#include <ATen/ops/empty_like.h>
|
33 |
+
#include <ATen/ops/empty_strided.h>
|
34 |
+
#include <ATen/ops/eye.h>
|
35 |
+
#include <ATen/ops/eye.h>
|
36 |
+
#include <ATen/ops/full.h>
|
37 |
+
#include <ATen/ops/full.h>
|
38 |
+
#include <ATen/ops/full_like.h>
|
39 |
+
#include <ATen/ops/from_file.h>
|
40 |
+
#include <ATen/ops/hann_window.h>
|
41 |
+
#include <ATen/ops/hann_window.h>
|
42 |
+
#include <ATen/ops/hamming_window.h>
|
43 |
+
#include <ATen/ops/hamming_window.h>
|
44 |
+
#include <ATen/ops/hamming_window.h>
|
45 |
+
#include <ATen/ops/hamming_window.h>
|
46 |
+
#include <ATen/ops/kaiser_window.h>
|
47 |
+
#include <ATen/ops/kaiser_window.h>
|
48 |
+
#include <ATen/ops/kaiser_window.h>
|
49 |
+
#include <ATen/ops/linspace.h>
|
50 |
+
#include <ATen/ops/linspace.h>
|
51 |
+
#include <ATen/ops/linspace.h>
|
52 |
+
#include <ATen/ops/linspace.h>
|
53 |
+
#include <ATen/ops/logspace.h>
|
54 |
+
#include <ATen/ops/logspace.h>
|
55 |
+
#include <ATen/ops/logspace.h>
|
56 |
+
#include <ATen/ops/logspace.h>
|
57 |
+
#include <ATen/ops/ones.h>
|
58 |
+
#include <ATen/ops/ones.h>
|
59 |
+
#include <ATen/ops/ones_like.h>
|
60 |
+
#include <ATen/ops/scalar_tensor.h>
|
61 |
+
#include <ATen/ops/rand.h>
|
62 |
+
#include <ATen/ops/rand.h>
|
63 |
+
#include <ATen/ops/rand.h>
|
64 |
+
#include <ATen/ops/rand.h>
|
65 |
+
#include <ATen/ops/rand_like.h>
|
66 |
+
#include <ATen/ops/randint.h>
|
67 |
+
#include <ATen/ops/randint.h>
|
68 |
+
#include <ATen/ops/randint.h>
|
69 |
+
#include <ATen/ops/randint.h>
|
70 |
+
#include <ATen/ops/randint_like.h>
|
71 |
+
#include <ATen/ops/randint_like.h>
|
72 |
+
#include <ATen/ops/randn.h>
|
73 |
+
#include <ATen/ops/randn.h>
|
74 |
+
#include <ATen/ops/randn.h>
|
75 |
+
#include <ATen/ops/randn.h>
|
76 |
+
#include <ATen/ops/randn_like.h>
|
77 |
+
#include <ATen/ops/randperm.h>
|
78 |
+
#include <ATen/ops/randperm.h>
|
79 |
+
#include <ATen/ops/range.h>
|
80 |
+
#include <ATen/ops/range.h>
|
81 |
+
#include <ATen/ops/zeros.h>
|
82 |
+
#include <ATen/ops/_efficientzerotensor.h>
|
83 |
+
#include <ATen/ops/zeros.h>
|
84 |
+
#include <ATen/ops/zeros_like.h>
|
85 |
+
#include <ATen/ops/sparse_compressed_tensor.h>
|
86 |
+
#include <ATen/ops/sparse_csr_tensor.h>
|
87 |
+
#include <ATen/ops/sparse_csc_tensor.h>
|
88 |
+
#include <ATen/ops/sparse_bsr_tensor.h>
|
89 |
+
#include <ATen/ops/sparse_bsc_tensor.h>
|
90 |
+
#include <ATen/ops/sparse_compressed_tensor.h>
|
91 |
+
#include <ATen/ops/sparse_csr_tensor.h>
|
92 |
+
#include <ATen/ops/sparse_csc_tensor.h>
|
93 |
+
#include <ATen/ops/sparse_bsr_tensor.h>
|
94 |
+
#include <ATen/ops/sparse_bsc_tensor.h>
|
95 |
+
#include <ATen/ops/_sparse_compressed_tensor_unsafe.h>
|
96 |
+
#include <ATen/ops/_sparse_csr_tensor_unsafe.h>
|
97 |
+
#include <ATen/ops/_sparse_csc_tensor_unsafe.h>
|
98 |
+
#include <ATen/ops/_sparse_bsr_tensor_unsafe.h>
|
99 |
+
#include <ATen/ops/_sparse_bsc_tensor_unsafe.h>
|
100 |
+
#include <ATen/ops/sparse_coo_tensor.h>
|
101 |
+
#include <ATen/ops/sparse_coo_tensor.h>
|
102 |
+
#include <ATen/ops/sparse_coo_tensor.h>
|
103 |
+
#include <ATen/ops/_sparse_coo_tensor_unsafe.h>
|
104 |
+
#include <ATen/ops/_sparse_coo_tensor_with_dims.h>
|
105 |
+
#include <ATen/ops/_sparse_coo_tensor_with_dims_and_tensors.h>
|
106 |
+
#include <ATen/ops/_to_copy.h>
|
107 |
+
#include <ATen/ops/tril_indices.h>
|
108 |
+
#include <ATen/ops/triu_indices.h>
|
109 |
+
#include <ATen/ops/normal.h>
|
110 |
+
#include <ATen/ops/fft_fftfreq.h>
|
111 |
+
#include <ATen/ops/fft_rfftfreq.h>
|
112 |
+
#endif
|
113 |
+
|
114 |
+
#include <functional>
|
115 |
+
#include <initializer_list>
|
116 |
+
#include <utility>
|
117 |
+
|
118 |
+
namespace torch {
|
119 |
+
|
120 |
+
/// NOTE: Currently `torch::tensor(...)` doesn't support mixed data types
|
121 |
+
/// (i.e. `torch::tensor({{bool, 2.0}})` doesn't work). We might be able to
|
122 |
+
/// support it in the future by iterating over all sub-lists to find
|
123 |
+
/// the largest data type that can represent all of the elements, or by using
|
124 |
+
/// variadic templates.
|
125 |
+
///
|
126 |
+
/// NOTE: C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / `std::vector` /
|
127 |
+
/// (nested) braced-init-list of floating-point types always produces a tensor of dtype
|
128 |
+
/// `torch::get_default_dtype()`, matching Python `torch.tensor` behavior.
|
129 |
+
///
|
130 |
+
/// NOTE: C++ `torch::tensor` with an integer type or an `at::ArrayRef` / `std::vector` /
|
131 |
+
/// (nested) braced-init-list of integer types always produces a tensor of dtype `at::kLong`
|
132 |
+
/// (aka. int64_t), matching Python `torch.tensor` behavior.
|
133 |
+
///
|
134 |
+
/// NOTE: The following dtypes are not supported by `torch::tensor` currently:
|
135 |
+
/// - `unsigned int`
|
136 |
+
/// - `unsigned long int`
|
137 |
+
/// - `unsigned long long int`
|
138 |
+
/// - `long long int`
|
139 |
+
inline at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const at::TensorOptions& options = {}) {
|
140 |
+
return autograd::make_variable(
|
141 |
+
// note: we remove the requires_grad setting from the TensorOptions because
|
142 |
+
// it is ignored anyways (and we actually have an assertion that it isn't set
|
143 |
+
// which would fail otherwise). We handle requires_grad explicitly here
|
144 |
+
// instead of passing it through to the kernel.
|
145 |
+
tensor_data_container.convert_to_tensor(options.requires_grad(c10::nullopt)),
|
146 |
+
options.requires_grad());
|
147 |
+
}
|
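A short usage sketch of the dtype rules spelled out in the notes above (assumes the usual user-facing `#include <torch/torch.h>`):

void tensor_dtype_examples() {
  auto a = torch::tensor({1.0, 2.0, 3.0});  // floating-point list -> torch::get_default_dtype()
  auto b = torch::tensor({1, 2, 3});        // integer list -> at::kLong
  auto c = torch::tensor({1, 2, 3}, torch::dtype(torch::kInt32));  // explicit dtype override
}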
148 |
+
|
149 |
+
/// A generic deleter function.
|
150 |
+
using Deleter = std::function<void(void*)>;
|
151 |
+
using at::MemoryFormat;
|
152 |
+
|
153 |
+
/// Exposes the given `data` as a `Tensor` without taking ownership of the
|
154 |
+
/// original data. `sizes` should specify the shape of the tensor, `strides` the
|
155 |
+
/// stride in each dimension. The `deleter` function (a
|
156 |
+
/// `std::function<void(void*)>`) will be called on the `data` when the Tensor
|
157 |
+
/// data would normally be deallocated. The `TensorOptions` specify additional
|
158 |
+
/// configuration options for the returned tensor, such as what type to
|
159 |
+
/// interpret the `data` as.
|
160 |
+
inline at::Tensor from_blob(
|
161 |
+
void* data,
|
162 |
+
at::IntArrayRef sizes,
|
163 |
+
at::IntArrayRef strides,
|
164 |
+
const Deleter& deleter,
|
165 |
+
const at::TensorOptions& options = at::TensorOptions()) {
|
166 |
+
at::Tensor tensor = ([&]() {
|
167 |
+
at::AutoDispatchBelowAutograd guard; // TODO: remove
|
168 |
+
at::tracer::impl::NoTracerDispatchMode tracer_guard;
|
169 |
+
return at::from_blob(data, sizes, strides, deleter, options.requires_grad(c10::nullopt));
|
170 |
+
})();
|
171 |
+
return autograd::make_variable(tensor, options.requires_grad());
|
172 |
+
}
|
173 |
+
|
174 |
+
/// Exposes the given `data` as a `Tensor` without taking ownership of the
|
175 |
+
/// original data. `sizes` should specify the shape of the tensor, `strides` the
|
176 |
+
/// stride in each dimension. The `TensorOptions`
|
177 |
+
/// specify additional configuration options for the returned tensor, such as
|
178 |
+
/// what type to interpret the `data` as.
|
179 |
+
inline at::Tensor from_blob(
|
180 |
+
void* data,
|
181 |
+
at::IntArrayRef sizes,
|
182 |
+
at::IntArrayRef strides,
|
183 |
+
const at::TensorOptions& options = at::TensorOptions()) {
|
184 |
+
at::Tensor tensor = ([&]() {
|
185 |
+
at::AutoDispatchBelowAutograd guard; // TODO: remove
|
186 |
+
at::tracer::impl::NoTracerDispatchMode tracer_guard;
|
187 |
+
return at::from_blob(data, sizes, strides, options.requires_grad(c10::nullopt));
|
188 |
+
})();
|
189 |
+
return autograd::make_variable(tensor, options.requires_grad());
|
190 |
+
}
|
191 |
+
|
192 |
+
/// Exposes the given `data` as a `Tensor` without taking ownership of the
|
193 |
+
/// original data. `sizes` should specify the shape of the tensor. The `deleter`
|
194 |
+
/// (a `std::function<void(void*)>`) function will be called on the `data` when
|
195 |
+
/// the Tensor data would normally be deallocated. The `TensorOptions` specify
|
196 |
+
/// additional configuration options for the returned tensor, such as what type
|
197 |
+
/// to interpret the `data` as.
|
198 |
+
inline at::Tensor from_blob(
|
199 |
+
void* data,
|
200 |
+
at::IntArrayRef sizes,
|
201 |
+
const Deleter& deleter,
|
202 |
+
const at::TensorOptions& options = at::TensorOptions()) {
|
203 |
+
at::Tensor tensor = ([&]() {
|
204 |
+
at::AutoDispatchBelowAutograd guard; // TODO: remove
|
205 |
+
at::tracer::impl::NoTracerDispatchMode tracer_guard;
|
206 |
+
return at::from_blob(data, sizes, deleter, options.requires_grad(c10::nullopt));
|
207 |
+
})();
|
208 |
+
return autograd::make_variable(tensor, options.requires_grad());
|
209 |
+
}
|
210 |
+
|
211 |
+
/// Exposes the given `data` as a `Tensor` without taking ownership of the
|
212 |
+
/// original data. `sizes` should specify the shape of the tensor. The
|
213 |
+
/// `TensorOptions` specify additional configuration options for the returned
|
214 |
+
/// tensor, such as what type to interpret the `data` as.
|
215 |
+
inline at::Tensor from_blob(
|
216 |
+
void* data,
|
217 |
+
at::IntArrayRef sizes,
|
218 |
+
const at::TensorOptions& options = at::TensorOptions()) {
|
219 |
+
at::Tensor tensor = ([&]() {
|
220 |
+
at::AutoDispatchBelowAutograd guard; // TODO: remove
|
221 |
+
at::tracer::impl::NoTracerDispatchMode tracer_guard;
|
222 |
+
return at::from_blob(data, sizes, options.requires_grad(c10::nullopt));
|
223 |
+
})();
|
224 |
+
return autograd::make_variable(tensor, options.requires_grad());
|
225 |
+
}
|
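A usage sketch for the deleter-taking overload above; the heap buffer and sizes are invented for illustration, and `<vector>` plus `#include <torch/torch.h>` are assumed:

at::Tensor wrap_heap_buffer() {
  auto* data = new std::vector<float>(6, 1.0f);
  // View the 6 floats as a 2x3 float tensor; the deleter frees the vector
  // once the tensor's storage is released.
  return torch::from_blob(
      data->data(),
      /*sizes=*/{2, 3},
      [data](void*) { delete data; },
      torch::TensorOptions().dtype(torch::kFloat32));
}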
226 |
+
|
227 |
+
inline at::Tensor _make_dep_token(at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
|
228 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
229 |
+
return autograd::make_variable(at::_make_dep_token(at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
|
230 |
+
}
|
231 |
+
inline at::Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, at::TensorOptions options) {
|
232 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
233 |
+
return autograd::make_variable(at::_cudnn_init_dropout_state(dropout, train, dropout_seed, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
234 |
+
}
|
235 |
+
inline at::Tensor arange(const at::Scalar & end, at::TensorOptions options = {}) {
|
236 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
237 |
+
return autograd::make_variable(at::arange(end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
238 |
+
}
|
239 |
+
inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) {
|
240 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
241 |
+
return autograd::make_variable(at::arange(start, end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
242 |
+
}
|
243 |
+
inline at::Tensor arange(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::TensorOptions options = {}) {
|
244 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
245 |
+
return autograd::make_variable(at::arange(start, end, step, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
246 |
+
}
|
247 |
+
inline at::Tensor bartlett_window(int64_t window_length, at::TensorOptions options = {}) {
|
248 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
249 |
+
return autograd::make_variable(at::bartlett_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
250 |
+
}
|
251 |
+
inline at::Tensor bartlett_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
|
252 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
253 |
+
return autograd::make_variable(at::bartlett_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
254 |
+
}
|
255 |
+
inline at::Tensor blackman_window(int64_t window_length, at::TensorOptions options = {}) {
|
256 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
257 |
+
return autograd::make_variable(at::blackman_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
258 |
+
}
|
259 |
+
inline at::Tensor blackman_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
|
260 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
261 |
+
return autograd::make_variable(at::blackman_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
262 |
+
}
|
263 |
+
inline at::Tensor empty(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
|
264 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
265 |
+
return autograd::make_variable(at::empty(size, names, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
|
266 |
+
}
|
267 |
+
inline at::Tensor empty(at::IntArrayRef size, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
|
268 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
269 |
+
return autograd::make_variable(at::empty(size, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
|
270 |
+
}
|
271 |
+
inline at::Tensor empty_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
|
272 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
273 |
+
return autograd::make_variable(at::empty_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
|
274 |
+
}
|
275 |
+
inline at::Tensor empty_permuted(at::IntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options = {}) {
|
276 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
277 |
+
return autograd::make_variable(at::empty_permuted(size, physical_layout, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
278 |
+
}
|
279 |
+
inline at::Tensor empty_permuted_symint(c10::SymIntArrayRef size, at::IntArrayRef physical_layout, at::TensorOptions options = {}) {
|
280 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
281 |
+
return autograd::make_variable(at::empty_permuted_symint(size, physical_layout, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
282 |
+
}
|
283 |
+
inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
|
284 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
285 |
+
return autograd::make_variable(at::_empty_affine_quantized(size, at::TensorOptions(options).requires_grad(c10::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad());
|
286 |
+
}
|
287 |
+
inline at::Tensor _empty_affine_quantized_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}, double scale = 1, int64_t zero_point = 0, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
|
288 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
289 |
+
return autograd::make_variable(at::_empty_affine_quantized_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt), scale, zero_point, memory_format), /*requires_grad=*/options.requires_grad());
|
290 |
+
}
|
291 |
+
inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
|
292 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
293 |
+
return autograd::make_variable(at::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
|
294 |
+
}
|
295 |
+
inline at::Tensor _empty_per_channel_affine_quantized_symint(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = MemoryFormat::Contiguous) {
|
296 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
297 |
+
return autograd::make_variable(at::_empty_per_channel_affine_quantized_symint(size, scales, zero_points, axis, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
|
298 |
+
}
|
299 |
+
inline at::Tensor empty_quantized(at::IntArrayRef size, const at::Tensor & qtensor, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
|
300 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
301 |
+
return autograd::make_variable(at::empty_quantized(size, qtensor, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
|
302 |
+
}
|
303 |
+
inline at::Tensor empty_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
|
304 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
305 |
+
return autograd::make_variable(at::empty_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
|
306 |
+
}
|
307 |
+
inline at::Tensor empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options = {}) {
|
308 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
309 |
+
return autograd::make_variable(at::empty_strided(size, stride, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
310 |
+
}
|
311 |
+
inline at::Tensor empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options = {}) {
|
312 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
313 |
+
return autograd::make_variable(at::empty_strided_symint(size, stride, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
314 |
+
}
|
315 |
+
inline at::Tensor eye(int64_t n, at::TensorOptions options = {}) {
|
316 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
317 |
+
return autograd::make_variable(at::eye(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
318 |
+
}
|
319 |
+
inline at::Tensor eye_symint(c10::SymInt n, at::TensorOptions options = {}) {
|
320 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
321 |
+
return autograd::make_variable(at::eye_symint(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
322 |
+
}
|
323 |
+
inline at::Tensor eye(int64_t n, int64_t m, at::TensorOptions options = {}) {
|
324 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
325 |
+
return autograd::make_variable(at::eye(n, m, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
326 |
+
}
|
327 |
+
inline at::Tensor eye_symint(c10::SymInt n, c10::SymInt m, at::TensorOptions options = {}) {
|
328 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
329 |
+
return autograd::make_variable(at::eye_symint(n, m, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
330 |
+
}
|
331 |
+
inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
|
332 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
333 |
+
return autograd::make_variable(at::full(size, fill_value, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
334 |
+
}
|
335 |
+
inline at::Tensor full(at::IntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) {
|
336 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
337 |
+
return autograd::make_variable(at::full(size, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
338 |
+
}
|
339 |
+
inline at::Tensor full_symint(c10::SymIntArrayRef size, const at::Scalar & fill_value, at::TensorOptions options = {}) {
|
340 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
341 |
+
return autograd::make_variable(at::full_symint(size, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
342 |
+
}
|
343 |
+
inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
|
344 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
345 |
+
return autograd::make_variable(at::full_like(self, fill_value, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
|
346 |
+
}
|
347 |
+
inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared = c10::nullopt, c10::optional<int64_t> size = 0, at::TensorOptions options = {}) {
|
348 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
349 |
+
return autograd::make_variable(at::from_file(filename, shared, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
350 |
+
}
|
351 |
+
inline at::Tensor hann_window(int64_t window_length, at::TensorOptions options = {}) {
|
352 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
353 |
+
return autograd::make_variable(at::hann_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
354 |
+
}
|
355 |
+
inline at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
|
356 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
357 |
+
return autograd::make_variable(at::hann_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
358 |
+
}
|
359 |
+
inline at::Tensor hamming_window(int64_t window_length, at::TensorOptions options = {}) {
|
360 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
361 |
+
return autograd::make_variable(at::hamming_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
362 |
+
}
|
363 |
+
inline at::Tensor hamming_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
|
364 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
365 |
+
return autograd::make_variable(at::hamming_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
366 |
+
}
|
367 |
+
inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, at::TensorOptions options = {}) {
|
368 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
369 |
+
return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
370 |
+
}
|
371 |
+
inline at::Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, at::TensorOptions options = {}) {
|
372 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
373 |
+
return autograd::make_variable(at::hamming_window(window_length, periodic, alpha, beta, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
374 |
+
}
|
375 |
+
inline at::Tensor kaiser_window(int64_t window_length, at::TensorOptions options = {}) {
|
376 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
377 |
+
return autograd::make_variable(at::kaiser_window(window_length, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
378 |
+
}
|
379 |
+
inline at::Tensor kaiser_window(int64_t window_length, bool periodic, at::TensorOptions options = {}) {
|
380 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
381 |
+
return autograd::make_variable(at::kaiser_window(window_length, periodic, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
382 |
+
}
|
383 |
+
inline at::Tensor kaiser_window(int64_t window_length, bool periodic, double beta, at::TensorOptions options = {}) {
|
384 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
385 |
+
return autograd::make_variable(at::kaiser_window(window_length, periodic, beta, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
386 |
+
}
|
387 |
+
inline at::Tensor linspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) {
|
388 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
389 |
+
return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
390 |
+
}
|
391 |
+
inline at::Tensor linspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, at::TensorOptions options = {}) {
|
392 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
393 |
+
return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
394 |
+
}
|
395 |
+
inline at::Tensor linspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, at::TensorOptions options = {}) {
|
396 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
397 |
+
return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
398 |
+
}
|
399 |
+
inline at::Tensor linspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, at::TensorOptions options = {}) {
|
400 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
401 |
+
return autograd::make_variable(at::linspace(start, end, steps, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
402 |
+
}
|
403 |
+
inline at::Tensor logspace(const at::Scalar & start, const at::Scalar & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
|
404 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
405 |
+
return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
406 |
+
}
|
407 |
+
inline at::Tensor logspace(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
|
408 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
409 |
+
return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
|
410 |
+
}
|
411 |
+
inline at::Tensor logspace(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
|
412 |
+
at::AutoDispatchBelowADInplaceOrView guard;
|
413 |
+
  return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor logspace(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base = 10.0, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::logspace(start, end, steps, base, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::ones(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones(at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::ones(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::ones_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor ones_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::ones_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor scalar_tensor(const at::Scalar & s, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::scalar_tensor(s, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand_symint(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand_symint(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand_symint(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor rand_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::rand_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint(high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_symint(high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint(high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_symint(high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint(low, high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_symint(low, high, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint(int64_t low, int64_t high, at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint(low, high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_symint(c10::SymInt low, c10::SymInt high, c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_symint(low, high, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_like(const at::Tensor & self, int64_t high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_like(self, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_like_symint(self, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_like(const at::Tensor & self, int64_t low, int64_t high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_like(self, low, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randint_like_symint(const at::Tensor & self, c10::SymInt low, c10::SymInt high, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randint_like_symint(self, low, high, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn_symint(size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn_symint(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn(at::IntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional<at::Generator> generator, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn_symint(size, generator, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randn_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randn_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randperm(int64_t n, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randperm(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randperm_symint(c10::SymInt n, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randperm_symint(n, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randperm(int64_t n, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randperm(n, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor randperm_symint(c10::SymInt n, c10::optional<at::Generator> generator, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::randperm_symint(n, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step = 1, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::range(start, end, step, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor range(const at::Scalar & start, const at::Scalar & end, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::range(start, end, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::zeros(size, names, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _efficientzerotensor(at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_efficientzerotensor(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _efficientzerotensor_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_efficientzerotensor_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros(at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::zeros(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::zeros_symint(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor zeros_like(const at::Tensor & self, at::TensorOptions options = {}, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::zeros_like(self, at::TensorOptions(options).requires_grad(c10::nullopt), memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_compressed_tensor(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_compressed_tensor(compressed_indices, plain_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_csr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_csc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_bsr_tensor(crow_indices, col_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_bsc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_bsc_tensor(ccol_indices, row_indices, values, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_compressed_tensor_unsafe(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_compressed_tensor_unsafe(compressed_indices, plain_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_csr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_csc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_csc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_bsr_tensor_unsafe(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_bsr_tensor_unsafe(crow_indices, col_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_bsc_tensor_unsafe(ccol_indices, row_indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_coo_tensor(at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_coo_tensor(size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_coo_tensor(indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor sparse_coo_tensor(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::sparse_coo_tensor(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_coo_tensor_unsafe(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options = {}, c10::optional<bool> is_coalesced = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_coo_tensor_unsafe_symint(indices, values, size, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, at::TensorOptions options) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, at::IntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional<bool> is_coalesced = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _sparse_coo_tensor_with_dims_and_tensors_symint(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, at::TensorOptions options, c10::optional<bool> is_coalesced = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_sparse_coo_tensor_with_dims_and_tensors_symint(sparse_dim, dense_dim, size, indices, values, at::TensorOptions(options).requires_grad(c10::nullopt), is_coalesced), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor _to_copy(const at::Tensor & self, at::TensorOptions options = {}, bool non_blocking = false, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::_to_copy(self, at::TensorOptions(options).requires_grad(c10::nullopt), non_blocking, memory_format), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor tril_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::tril_indices(row, col, offset, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor triu_indices(int64_t row, int64_t col, int64_t offset = 0, at::TensorOptions options = at::kLong) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::triu_indices(row, col, offset, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor normal(double mean, double std, at::IntArrayRef size, c10::optional<at::Generator> generator = c10::nullopt, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::normal(mean, std, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor normal_symint(double mean, double std, c10::SymIntArrayRef size, c10::optional<at::Generator> generator = c10::nullopt, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::normal_symint(mean, std, size, generator, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor fft_fftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::fft_fftfreq(n, d, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}
inline at::Tensor fft_rfftfreq(int64_t n, double d = 1.0, at::TensorOptions options = {}) {
  at::AutoDispatchBelowADInplaceOrView guard;
  return autograd::make_variable(at::fft_rfftfreq(n, d, at::TensorOptions(options).requires_grad(c10::nullopt)), /*requires_grad=*/options.requires_grad());
}

} // namespace torch
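
Every wrapper above follows the same pattern: run the ATen factory with the autograd dispatch keys disabled, then re-wrap the result as a Variable whose requires_grad flag comes from the caller's TensorOptions. A minimal usage sketch (illustrative only, not part of the generated header), assuming a standard libtorch setup:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // requires_grad(true) is honored by the wrapper, not by the raw ATen op,
  // so `w` comes back as a leaf Variable that tracks gradients.
  auto w = torch::randn(
      {3, 3}, torch::TensorOptions().dtype(torch::kFloat).requires_grad(true));
  auto loss = (w * w).sum();
  loss.backward();
  std::cout << w.grad() << std::endl;  // d(loss)/dw == 2 * w
  return 0;
}
```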
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/grad_mode.h
ADDED
@@ -0,0 +1,13 @@
#pragma once

#include <ATen/core/grad_mode.h>
#include <torch/csrc/Export.h>

namespace torch {
namespace autograd {

using GradMode = at::GradMode;
using AutoGradMode = at::AutoGradMode;

} // namespace autograd
} // namespace torch
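
This header only re-exports the ATen grad-mode utilities into torch::autograd. A hedged sketch of how the aliases are typically used (illustrative, not part of the header):

```cpp
#include <torch/torch.h>

void inference_step(const torch::Tensor& input, torch::nn::Linear layer) {
  // AutoGradMode is an RAII guard: gradients are disabled for this scope and
  // the previous mode is restored when the guard is destroyed.
  torch::autograd::AutoGradMode disable_grad(/*enabled=*/false);
  TORCH_CHECK(!torch::autograd::GradMode::is_enabled());
  auto out = layer->forward(input);  // no grad_fn is recorded for `out`
}
```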
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/graph_task.h
ADDED
@@ -0,0 +1,243 @@
#pragma once
#include <ATen/ThreadLocalState.h>
#include <ATen/core/Tensor.h>
#include <c10/util/ThreadLocal.h>
#include <torch/csrc/autograd/input_buffer.h>
#include <torch/csrc/autograd/utils/warnings.h>
#include <vector>

namespace torch {
namespace autograd {

using edge_list = std::vector<Edge>;
struct ReadyQueue;

static constexpr int NO_DEVICE = -2;
static constexpr int CPU_DEVICE = -1;

namespace {
std::atomic<uint64_t> graph_task_id{0};
}

// GraphTask holds metadata needed for a single execution of backward()
struct GraphTask : std::enable_shared_from_this<GraphTask> {
  std::atomic<uint64_t> outstanding_tasks_{0};
  // Indicates if an error occurred while executing any task. When this is
  // true, it signals all threads to stop executing.
  std::atomic_bool has_error_{false};
  std::atomic_bool future_completed_{false};
  // It is safe to read keep_graph_ without synchronization
  bool keep_graph_;

  // To protect reads/writes to not_ready_, dependencies_, captured_vars_,
  // has_error_, future_result_, cpu_ready_queue_, and leaf_streams.
  std::mutex mutex_;
  std::unordered_map<Node*, InputBuffer> not_ready_;
  std::unordered_map<Node*, int> dependencies_;

  // Records the nodes that are in the graph
  std::unordered_set<Node*> nodes_in_graph_;
  c10::SmallVector<Node*, 4> graph_roots_;
  // Note [Exec info]
  // Exec info is created for each GraphTask, which allows filtering paths on
  // the graph that are not needed. Its semantics are a bit involved. If it's
  // empty, it means the task is run in a "default" mode, which means that all
  // next_edges we encounter should get executed. If it's not empty, only
  // functions that have an entry and this entry has needed == True should be
  // executed. exec_info is only empty when the graph is executed via
  // .backward() and the inputs parameter is not passed. Otherwise, when
  // executed through .grad(), or when inputs arg is specified for .backward(),
  // exec_info will be non-empty.
  //
  struct ExecInfo {
    struct Capture {
      Capture(const Capture&) = delete;
      Capture(Capture&&) = default;

      Capture(int input_idx, int output_idx)
          : input_idx_(input_idx), output_idx_(output_idx) {}
      int input_idx_; // within Node inputs
      int output_idx_; // within the output vector of a GraphTask

      // This hook will be executed after a grad is captured. The captured
      // grad will be replaced by the return value of the hook.
      struct GradCaptureHook {
        virtual ~GradCaptureHook() = default;
        virtual at::Tensor operator()(const at::Tensor& grad) = 0;
      };
      // NOTE [Deprecated capture hooks]
      //
      // The current status of capture hooks is that we continue to support
      // the single usage of it by distributed in the dist_engine. If anyone
      // else needs to use it for other purposes, they should file an issue.
      //
      // Capture hooks were originally created because there did not exist
      // any way to register pre/post hooks to grad_fn in a way such that it
      // would still be executed even if that is the grad_fn of a Tensor
      // passed as input= of .grad. As far as I know, only dist_engine uses
      // this hook.
      //
      // However, there are other alternatives today like tensor hooks that can
      // replace the usage that originally motivated its creation. Also,
      // capture hooks are an outlier in terms of the types of hook that
      // autograd offers in how it is registered and behaves, e.g. it is a hook
      // registered not to the graph, but to a particular graph_task! This makes
      // it a burden to maintain.
      //
      // It would be very nice to clean up/do a migration from pre/post
      // hooks used in distributed to use tensor hooks, but for now we just
      // mark this method as deprecated to prevent additional usage.
      //
      // If you still think you really need to capture hooks, please file an
      // issue (and tag autograd).
      const std::vector<std::unique_ptr<GradCaptureHook>>&
      DO_NOT_USE_DEPRECATED_get_capture_hooks() const {
        return hooks_;
      }
      // See NOTE [deprecated capture hooks]
      void DO_NOT_USE_DEPRECATED_register_capture_hook(
          std::unique_ptr<GradCaptureHook> hook) {
        hooks_.push_back(std::move(hook));
      }

     private:
      // The hooks will be called one by one in the order as they were added.
      // The input grad of a hook will be the output of its preceding hook. The
      // first hook will take the captured grad as the input. The output of the
      // last hook will replace the captured grad.
      std::vector<std::unique_ptr<GradCaptureHook>> hooks_;
    };

    bool should_execute() const {
      return needed_ || captures_;
    }

    bool needed_ = false;
    std::unique_ptr<std::vector<Capture>> captures_;
  };
  // exec_info_ is safe to read without synchronization
  std::unordered_map<Node*, ExecInfo> exec_info_;
  // Captured variables are grads captured that we return to the user. After
  // execution of the GraphTask is completed, the captured_vars_ are moved
  // out of the GraphTask and are no longer valid.
  std::vector<Variable> captured_vars_;

  // Note: this field is not ready to be used until the proper
  // `thread_locals_.set_grad_mode()` call in the constructor.
  at::ThreadLocalState thread_locals_ = at::ThreadLocalState();

  std::unordered_set<c10::Stream> leaf_streams;

  // Per-device current streams of the execute() that called this GraphTask.
  // These will be synced with leaf_streams in exec_post_processing.
  std::vector<c10::optional<c10::Stream>> caller_current_streams_;

  // Collects caller_current_streams_
  void stash_current_streams();

  void init_to_execute(
      Node& graph_root,
      const edge_list& outputs,
      bool accumulate_grad,
      uint64_t min_topo_nr);

  // The value of worker_device in the thread that created this task.
  // See Note [Reentrant backwards]
  // Safe to read owner_ and reentrant_depth_ without synchronization
  int owner_;
  // The number of parent graph tasks for this graph task
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const int reentrant_depth_;

  bool can_checkpoint() const {
    return exec_info_.empty();
  }

  // check if the GraphTask is completed or not
  bool completed();
  // mark the graph task as completed and trigger post processing
  void mark_as_completed_and_run_post_processing();

  // Set an appropriate exception on this graph_task which was encountered while
  // running the provided function.
  void set_exception(std::exception_ptr eptr, const std::shared_ptr<Node>& fn);

  // Set an appropriate exception on this graph_task which was encountered while
  // running the provided function. But doesn't signal completion on
  // 'future_result_' right away. The user needs to explicitly mark
  // 'future_result_' completed with an appropriate exception.
  void set_exception_without_signal(const std::shared_ptr<Node>& fn);

  // Whether or not to stop execution for this GraphTask when an error is
  // encountered. When set to true, this would cause Engine::execute() to throw
  // an exception as soon as the autograd engine receives an exception.
  bool exit_on_error_;

  // CPU threads are dedicated to processing CPU work for the backward they
  // invoked. So any given graph task maintains its own cpu_ready_queue_ where
  // you should send work for it to be done. We memoize the cpu_ready_queue_ per
  // GraphTask so that we know which ready queue we should push to if we are on
  // a device thread (i.e. GPU) but the next NodeTask should be run on CPU.
  std::shared_ptr<ReadyQueue> cpu_ready_queue_;

  // Future representing the completion of the graph task. Notified when all
  // tasks are done.
  c10::intrusive_ptr<at::ivalue::Future> future_result_;

  // Final callbacks installed during execution of this GraphTask
  std::vector<std::function<void()>> final_callbacks_;
  // To protect reads and writes to final_callbacks_. Intentionally not reusing
  // mutex_ as the two are protecting different data structures.
  std::mutex final_callbacks_lock_;

  utils::DelayWarningHandler warning_handler_;

  uint64_t id_;

  GraphTask(
      bool keep_graph,
      bool grad_mode,
      int reentrant_depth,
      std::shared_ptr<ReadyQueue> cpu_ready_queue,
      c10::SmallVector<Node*, 4> graph_roots,
      bool exit_on_error = false)
      : keep_graph_(keep_graph),
        graph_roots_(std::move(graph_roots)),
        owner_(NO_DEVICE),
        reentrant_depth_(reentrant_depth),
        exit_on_error_(exit_on_error),
        cpu_ready_queue_(std::move(cpu_ready_queue)),
        future_result_(c10::make_intrusive<at::ivalue::Future>(
            c10::ListType::create(c10::TensorType::get()))),
        id_(graph_task_id.fetch_add(1, std::memory_order_relaxed)) {
    thread_locals_.set_grad_mode(grad_mode);
  }

 private:
  // run GraphTask post processing
  void exec_post_processing();
};

// The guard that sets and restores current_graph_task.
class GraphTaskGuard {
 public:
  explicit GraphTaskGuard(std::shared_ptr<GraphTask> graph_task);
  ~GraphTaskGuard();

  void restore_current_graph_task();

 private:
  std::shared_ptr<GraphTask> last_graph_task_;
};

TORCH_API const std::unordered_map<Node*, GraphTask::ExecInfo>*
get_current_graph_task_exec_info();
TORCH_API const std::unordered_set<Node*>*
get_current_graph_task_nodes_in_graph();
TORCH_API bool get_current_graph_task_keep_graph();
TORCH_API std::vector<Node*> get_current_graph_task_execution_order();
TORCH_API int get_current_graph_task_id();
void add_node_to_current_graph_task_exec_info(Node* fn);

} // namespace autograd
} // namespace torch
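
Per Note [Exec info], exec_info_ becomes non-empty when gradients are requested only for specific inputs, which is what the user-facing torch::autograd::grad() call does. A hedged sketch of that user-facing path (illustrative, not part of the header):

```cpp
#include <torch/torch.h>

// Calling torch::autograd::grad() with explicit inputs is the user-facing path
// that makes the engine build a non-empty exec_info_ for the GraphTask, so only
// nodes on the path from the outputs to `x` are executed.
torch::Tensor grad_wrt_x(const torch::Tensor& x, const torch::Tensor& y) {
  // Assumes x.requires_grad() == true.
  auto out = (x * y).sum();
  auto grads = torch::autograd::grad(/*outputs=*/{out}, /*inputs=*/{x});
  return grads[0];  // equals y
}
```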
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_buffer.h
ADDED
@@ -0,0 +1,48 @@
#pragma once

// The InputBuffer class accumulates a list of Variables for use by a
// function. It implements logic to avoid modifying the passed
// values in-place (adding an input twice will accumulate the result).
// This behaviour is needed and used only in backward graphs.

#include <memory>
#include <utility>
#include <vector>

#include <c10/core/Stream.h>
#include <c10/util/Optional.h>
#include <torch/csrc/autograd/variable.h>

namespace torch {
namespace autograd {

struct InputBuffer {
  explicit InputBuffer(size_t size) : buffer(size) {}
  InputBuffer(const InputBuffer& other) = delete;
  InputBuffer(InputBuffer&& other) = default;
  explicit InputBuffer(variable_list&& inputs) : buffer(std::move(inputs)){};
  InputBuffer& operator=(InputBuffer&& other) = default;

  // Accumulates the variable at a specified index.
  // The optional CUDA streams determine which stream the accumulation
  // is run on and how the addition is synchronized.
  TORCH_API void add(
      size_t pos,
      Variable&& var,
      const c10::optional<c10::Stream>& opt_producer_stream,
      const c10::optional<c10::Stream>& opt_consumer_stream);

  at::Device device() const;

  Variable operator[](size_t pos) {
    return buffer[pos];
  }

  // Returns the inputs as a list of variables. Destroys given InputBuffer.
  static std::vector<Variable> variables(InputBuffer&& g);

  std::vector<Variable> buffer;
};

} // namespace autograd
} // namespace torch
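
The accumulate-instead-of-overwrite behaviour described in the comment is visible from user code whenever the same tensor feeds a node more than once. A hedged sketch (illustrative, not part of the header):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // `x` is consumed twice by the add node; during backward the two incoming
  // gradients are accumulated (conceptually via an InputBuffer) rather than
  // overwriting each other, so x.grad() ends up as 1 + 1 = 2 per element.
  auto x = torch::ones({2}, torch::requires_grad());
  auto y = x + x;
  y.sum().backward();
  std::cout << x.grad() << std::endl;
  return 0;
}
```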
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/input_metadata.h
ADDED
@@ -0,0 +1,113 @@
#pragma once

#include <ATen/ExpandUtils.h>
#include <ATen/NestedTensorImpl.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/core/Stream.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/util/DimVector.h>
#include <c10/util/Exception.h>
#include <c10/util/SmallVector.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/zeros.h>
#endif

#include <cstdint>
#include <utility>

namespace torch {
namespace autograd {

using SymIntSmallVec = c10::SmallVector<c10::SymInt, c10::kDimVectorStaticSize>;
using MetadataShape = std::variant<SymIntSmallVec, at::Tensor>;

/**
 * Records TensorOptions, shape of the tensor, whether or not the Python
 * dispatch key is set (tensor subclass), and, where applicable, the stream the
 * corresponding operation took place on.
 *
 * If is_valid() is false, then the corresponding input is not used and may be
 * an undefined tensor.
 */
struct TORCH_API InputMetadata {
  InputMetadata() = default;
  InputMetadata(
      const at::TensorOptions& options,
      MetadataShape input_shape,
      bool is_tensor_subclass,
      bool is_nested);
  InputMetadata(const at::Tensor& t);

  const at::TensorOptions& options() const {
    return options_;
  }

  caffe2::TypeMeta dtype() const {
    return options_.dtype();
  }

  at::Device device() const {
    return options_.device();
  }

  at::Layout layout() const {
    return options_.layout();
  }

  c10::Stream stream() const {
    return stream_;
  }

  bool is_tensor_subclass() const {
    return is_tensor_subclass_;
  }

  at::Tensor zeros_like() const;

  bool is_same_shape(const at::Tensor& grad) const;

  bool is_expandable_to_shape(const at::Tensor& grad) const;

  at::Tensor reduce_grad(at::Tensor& grad) const;

  std::stringstream incompatible_shape_error_message(
      const size_t index,
      const at::Tensor& grad) const;

  bool was_default_constructed() const {
    return was_default_constructed_;
  }

  bool is_cpp_nested_tensor() const;

  bool is_nested_tensor() const {
    return is_nested_;
  }

  c10::SymIntArrayRef shape_as_dim_vector() const;

  // Danger: not thread safe, caller must protect with lock
  SymIntSmallVec& mutable_shape_as_dim_vector();

 private:
  at::Tensor shape_as_tensor() const;
  bool is_nestedness_same(const at::Tensor& grad) const;
  bool maybe_expandable_to(const at::Tensor& grad) const;

  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const at::TensorOptions options_;
  MetadataShape shape_;
  c10::Stream stream_ = c10::Stream(c10::Stream::Default::DEFAULT, device());
  bool is_tensor_subclass_ = false;
  bool is_nested_ = false;
  bool was_default_constructed_ = true;
};
} // namespace autograd
} // namespace torch
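
InputMetadata is what lets the engine validate and reduce incoming gradients against the recorded shape and options of each input. A hedged sketch of the user-visible effect (illustrative; the exact exception type and message wording are assumptions):

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  auto x = torch::randn({2, 3}, torch::requires_grad());
  auto y = x * 2;
  try {
    // The engine checks the explicit gradient against the metadata recorded
    // for `y` (shape {2, 3}); a {4}-shaped gradient should be rejected.
    y.backward(torch::ones({4}));
  } catch (const std::exception& e) {
    std::cout << "shape mismatch rejected: " << e.what() << std::endl;
  }
  return 0;
}
```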
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/jit_decomp_interface.h
ADDED
@@ -0,0 +1,54 @@
#pragma once

#include <ATen/core/Tensor.h>
#include <ATen/core/function_schema.h>
#include <c10/macros/Export.h>

// NOTE: [Jit Decomposition Interface]
//
// For some context of why we need this at all, see NOTE: [forward-mode AD
// decompositions mechanism]
//
// Introducing that mechanism from the NOTE is problematic because:
// - it relies on TorchScript, so now VariableTypeX.cpp depends on TorchScript.
// - there exist internal builds like lite_trainer, which depend on VariableType
//   but do not depend on TorchScript.
//
// For internal builds like lite_trainer builds to pass, and for OSS builds that
// do depend on TorchScript to still support the forward AD decomp mechanism, we
// implement a PImpl pattern to avoid a static dependency in favor of a dynamic
// one:
// - during static initialization time, if the library is built with TorchScript,
//   setJitDecompImpl is called in decomposition_registry.cpp, setting a global
//   ptr to the impl
// - when the program is run, if getJitDecompImpl returns a non-null ptr, we can
//   carry on normally; otherwise we gracefully error out
//
// For extra context, see VariableHooksInterface.h, where a similar technique
// is used

namespace torch {
namespace autograd {
namespace impl {

struct TORCH_API JitDecompInterface {
  virtual ~JitDecompInterface() = default;
  virtual bool has_jit_decomposition(
      const c10::FunctionSchema& schema) const = 0;
  virtual void run_jit_decomposition(
      const c10::OperatorHandle& op,
      jit::Stack* stack) const = 0;
};

TORCH_API void setJitDecompImpl(JitDecompInterface* impl);
TORCH_API JitDecompInterface* getJitDecompImpl();

struct TORCH_API JitDecompRegisterer {
  explicit JitDecompRegisterer(JitDecompInterface* impl) {
    setJitDecompImpl(impl);
  }
};

} // namespace impl
} // namespace autograd
} // namespace torch
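
A hedged sketch of the registration pattern the header declares, using a hypothetical stub class (a real implementation lives in decomposition_registry.cpp and dispatches to TorchScript-backed decompositions; the class name and includes below are assumptions):

```cpp
#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/core/stack.h>
#include <c10/util/Exception.h>
#include <torch/csrc/autograd/jit_decomp_interface.h>

namespace {

// Hypothetical stub illustrating the PImpl registration hook declared above.
class NoopJitDecomp final : public torch::autograd::impl::JitDecompInterface {
 public:
  bool has_jit_decomposition(
      const c10::FunctionSchema& /*schema*/) const override {
    return false;  // report that no decomposition is available
  }
  void run_jit_decomposition(
      const c10::OperatorHandle& /*op*/,
      torch::jit::Stack* /*stack*/) const override {
    TORCH_INTERNAL_ASSERT(false, "no decompositions registered");
  }
};

NoopJitDecomp noop_impl;
// Static registration: mirrors what a TorchScript-enabled build does at
// static initialization time via setJitDecompImpl.
torch::autograd::impl::JitDecompRegisterer register_noop(&noop_impl);

} // namespace
```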
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler.h
ADDED
@@ -0,0 +1,4 @@
#pragma once

#include <torch/csrc/autograd/profiler_kineto.h>
#include <torch/csrc/autograd/profiler_legacy.h>
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_kineto.h
ADDED
@@ -0,0 +1,192 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <string>
|
4 |
+
#include <vector>
|
5 |
+
|
6 |
+
#include <torch/csrc/profiler/api.h>
|
7 |
+
#include <torch/csrc/profiler/events.h>
|
8 |
+
#include <torch/csrc/profiler/stubs/base.h>
|
9 |
+
#include <torch/csrc/profiler/util.h>
|
10 |
+
|
11 |
+
namespace torch {
|
12 |
+
namespace profiler {
|
13 |
+
namespace impl {
|
14 |
+
struct Result;
|
15 |
+
namespace kineto {
|
16 |
+
struct ActivityTraceWrapper;
|
17 |
+
} // namespace kineto
|
18 |
+
} // namespace impl
|
19 |
+
} // namespace profiler
|
20 |
+
namespace autograd {
|
21 |
+
namespace profiler {
|
22 |
+
using experimental_event_t = std::shared_ptr<torch::profiler::impl::Result>;
|
23 |
+
using extra_meta_t = std::unordered_map<std::string, std::string>;
|
24 |
+
|
25 |
+
struct TORCH_API KinetoEvent {
|
26 |
+
KinetoEvent(
|
27 |
+
const std::shared_ptr<const torch::profiler::impl::Result>&,
|
28 |
+
const bool verbose);
|
29 |
+
|
30 |
+
uint64_t startThreadId() const;
|
31 |
+
uint64_t endThreadId() const;
|
32 |
+
uint8_t activityType() const;
|
33 |
+
uint64_t fwdThreadId() const;
|
34 |
+
bool hasShapes() const;
|
35 |
+
const c10::ArrayRef<std::vector<int64_t>> shapes() const;
|
36 |
+
bool hasTypes() const;
|
37 |
+
const c10::ArrayRef<std::string> dtypes() const;
|
38 |
+
bool hasConcreteInputs() const;
|
39 |
+
const c10::ArrayRef<c10::IValue> concreteInputs() const;
|
40 |
+
uint64_t flops() const;
|
41 |
+
int64_t sequenceNr() const;
|
42 |
+
bool hasStack() const;
|
43 |
+
const c10::ArrayRef<std::string> stack() const;
|
44 |
+
uint8_t scope() const;
|
45 |
+
bool hasModuleHierarchy() const;
|
46 |
+
const c10::ArrayRef<std::string> moduleHierarchy() const;
|
47 |
+
int64_t debugHandle() const;
|
48 |
+
std::string name() const;
|
49 |
+
c10::DeviceType deviceType() const;
|
50 |
+
uint8_t deviceIndex() const;
|
51 |
+
int64_t nBytes() const;
|
52 |
+
uint64_t startUs() const;
|
53 |
+
uint64_t durationUs() const;
|
54 |
+
bool isAsync() const;
|
55 |
+
uint64_t correlationId() const;
|
56 |
+
uint64_t linkedCorrelationId() const;
|
57 |
+
  int64_t deviceResourceId() const;
  std::string backend() const;
  bool isPythonFunction() const;
  int64_t cudaElapsedUs() const;
  int64_t privateuse1ElapsedUs() const;
  void getPerfEventCounters(torch::profiler::perf_counters_t&) const;
  extra_meta_t extraMeta() const;

 private:
  torch::profiler::impl::ProfilerVoidEventStub fallbackStart() const;
  torch::profiler::impl::ProfilerVoidEventStub fallbackEnd() const;

  std::shared_ptr<const torch::profiler::impl::Result> result_;
  std::vector<std::string> python_stack_;

  // Copy fields from result so we can return ArrayRefs.
  std::vector<std::vector<int64_t>> shapes_;
  std::vector<std::string> dtypes_;
  std::vector<c10::IValue> concrete_inputs_;
};

// Consolidating events returned directly from Kineto
// with events manually created by us (e.g. start/stop marks,
// memory allocation events)
struct TORCH_API ProfilerResult {
  ProfilerResult();
  ProfilerResult(
      uint64_t start_time,
      std::vector<KinetoEvent> events,
      std::unique_ptr<torch::profiler::impl::kineto::ActivityTraceWrapper>&&
          trace,
      std::vector<experimental_event_t>&& event_tree);
  ~ProfilerResult();

  uint64_t trace_start_us() const {
    return trace_start_us_;
  }

  const std::vector<KinetoEvent>& events() const {
    return events_;
  }

  const std::vector<experimental_event_t>& event_tree() const {
    return event_tree_;
  }

  void save(const std::string& path);

 private:
  uint64_t trace_start_us_ = 0;
  std::vector<KinetoEvent> events_;
  std::unique_ptr<torch::profiler::impl::kineto::ActivityTraceWrapper> trace_;
  std::vector<experimental_event_t> event_tree_;
};

/*
 * This API is used by backends to record latency of events that
 * happened in the backend but were not visible to pytorch runtime.
 * For example, if part of the model is lowered to a dsp backend, then
 * the execution of that part of the model is delegated to the backend.
 * When backend finishes execution it has an option to provide profiling
 * information (latency only at the moment) corresponding to different operators
 * that were executed in the backend.
 * When such events are recorded by backend using this API, the event
 * records will be collected by active kineto profiler. If no kineto profiler
 * is active then the event is ignored.
 * This provides us with a way to generate all the profiling information
 * for a model regardless of where model (or part of it) executed.
 * @param start_time_us: start time in us of the event
 * @param end_time_us: end time in us of the event
 * @param debug_handle: debug handle to correlate this event/op with
 *   model level module/source information
 * @param scope: scope of the event, e.g. LITE_INTERPRETER, RECORD_FN etc.
 * @param event_name: name of the event, e.g. op name
 * @param backend_name: name of the backend where the event took place.
 */
TORCH_API void reportBackendEventToActiveKinetoProfiler(
    const int64_t start_time_us,
    const int64_t end_time_us,
    const int64_t debug_handle,
    const at::RecordScope scope,
    const std::string& event_name,
    const std::string& backend_name);

TORCH_API void enableProfiler(
    const torch::profiler::impl::ProfilerConfig& config,
    const std::set<torch::profiler::impl::ActivityType>& activities,
    const std::unordered_set<at::RecordScope>& scopes = {});

/*
 * Same as enableProfiler but with callback to do post-processing of
 * KinetoEvents.
 * enableProfilerWithEventPostProcess enables profiler to capture
 * specified activities, with specified RecordFunction scope, if any.
 * Additionally, it takes a functor that does in-place post processing of
 * events, e.g. populate stack trace or module hierarchy information lazily
 * using debug_handle.
 * Example usage is with lite interpreter that has recording scope of
 * LITE_INTERPRETER. In this case lite interpreter runtime, records debug
 * handles in RecordFunction, along with other information. Debug handles are
 * eventually passed down to KinetoEvent and recorded as part of the event.
 * KinetoEdgeCPUProfiler, in torch/csrc/jit/mobile/profiler_edge.cpp, enables
 * profiler using post-processing callback, via
 * enableProfilerWithEventPostProcess, that takes these debug handles and
 * generates stack trace and module hierarchy information, once profiling is
 * done.
 */
using post_process_t = std::function<void(
    /*debug_handle */ int64_t,
    /*jit_stack */ std::vector<std::string>&,
    /*jit_modules */ std::vector<std::string>&)>;
TORCH_API void enableProfilerWithEventPostProcess(
    const torch::profiler::impl::ProfilerConfig& config,
    const std::set<torch::profiler::impl::ActivityType>& activities,
    post_process_t&& cb,
    const std::unordered_set<at::RecordScope>& scopes = {});

TORCH_API std::unique_ptr<ProfilerResult> disableProfiler();

TORCH_API void prepareProfiler(
    const torch::profiler::impl::ProfilerConfig& config,
    const std::set<torch::profiler::impl::ActivityType>& activities);

} // namespace profiler
} // namespace autograd

namespace profiler {
namespace impl {

// Experimental.
TORCH_API void _reportVulkanEventToProfiler(vulkan_id_t id);

} // namespace impl
} // namespace profiler

} // namespace torch
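The Kineto path above is driven by prepareProfiler/enableProfiler/disableProfiler, with reportBackendEventToActiveKinetoProfiler available to delegated backends while a profile is active. A minimal usage sketch follows; it assumes the declarations in this header are in scope, that the caller builds a Kineto-mode ProfilerConfig elsewhere (see torch/csrc/profiler/api.h), and that the timestamps, debug handle, event name, and output file name are purely illustrative.

// Sketch only: `cfg` is assumed to be a Kineto-mode ProfilerConfig built by the caller.
void profileWithBackendEvents(const torch::profiler::impl::ProfilerConfig& cfg) {
  using namespace torch::autograd::profiler;
  const std::set<torch::profiler::impl::ActivityType> activities{
      torch::profiler::impl::ActivityType::CPU};

  prepareProfiler(cfg, activities);
  enableProfiler(cfg, activities);

  // ... run the model here; a delegated backend may attribute its own latency:
  reportBackendEventToActiveKinetoProfiler(
      /*start_time_us=*/1000, // illustrative timestamps
      /*end_time_us=*/2500,
      /*debug_handle=*/42, // illustrative handle
      at::RecordScope::LITE_INTERPRETER,
      /*event_name=*/"example_backend_op",
      /*backend_name=*/"example_dsp");

  std::unique_ptr<ProfilerResult> result = disableProfiler();
  result->save("trace.json"); // illustrative output path for the collected trace
}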
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_legacy.h
ADDED
@@ -0,0 +1,417 @@
#pragma once

#include <cstdint>
#include <forward_list>
#include <iostream>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>

#include <torch/csrc/Export.h>
#include <torch/csrc/profiler/api.h>
#include <torch/csrc/profiler/stubs/base.h>
#include <torch/csrc/profiler/util.h>

namespace torch {
namespace autograd {

struct Node;

namespace profiler {

enum class C10_API_ENUM EventKind : uint16_t {
  Mark,
  PushRange,
  PopRange,
  MemoryAlloc,
};

// To be deprecated, once we switch to Kineto profiling
struct TORCH_API LegacyEvent {
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  LegacyEvent(
      EventKind kind,
      at::StringView name,
      uint16_t thread_id,
      bool record_cuda,
      at::RecordFunctionHandle handle = 0,
      std::vector<std::vector<int64_t>>&& shapes = {},
      int node_id = -1,
      bool is_async = false)
      : name_(std::move(name)),
        kind_(kind),
        thread_id_(thread_id),
        handle_(handle),
        shapes_(shapes),
        node_id_(node_id),
        is_async_(is_async) {
    record(record_cuda);
  }

  // Constructor to be used in conjunction with LegacyEvent::fromIValue.
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  LegacyEvent(
      EventKind kind,
      at::StringView name,
      uint16_t thread_id,
      at::RecordFunctionHandle handle,
      std::vector<std::vector<int64_t>>&& shapes,
      int node_id,
      bool is_remote,
      int64_t cpu_memory_usage,
      int64_t cpu_ns,
      bool cuda_recorded,
      int64_t cuda_memory_usage = 0,
      int device = -1,
      double cuda_us = -1)
      : cpu_ns_(cpu_ns),
        name_(std::move(name)),
        kind_(kind),
        thread_id_(thread_id),
        handle_(handle),
        shapes_(shapes),
        cpu_memory_usage_(cpu_memory_usage),
        cuda_memory_usage_(cuda_memory_usage),
        device_(device),
        node_id_(node_id),
        is_remote_(is_remote),
        cuda_us_(cuda_us) {
    // Sanity check values that were deserialized
    TORCH_INTERNAL_ASSERT(cpu_ns_ > 0);
    if (cuda_recorded) {
      TORCH_INTERNAL_ASSERT(device_ >= 0);
      TORCH_INTERNAL_ASSERT(cuda_us_ >= 0);
    }
  }

  // Returns IValues corresponding to event structure, to be used for
  // serialization.
  at::IValue toIValue() const;

  // Reconstructs an event from IValues given by toIValue.
  static LegacyEvent fromIValue(const at::IValue& eventIValue);

  void record(bool record_cuda);

  std::string kindStr() const {
    switch (kind_) {
      case EventKind::Mark:
        return "mark";
      case EventKind::PushRange:
        return "push";
      case EventKind::PopRange:
        return "pop";
      case EventKind::MemoryAlloc:
        return "memory_alloc";
    }
    throw std::runtime_error("unknown event kind");
  }

  EventKind kind() const {
    return kind_;
  }

  const char* name() const {
    return name_.str();
  }

  uint64_t threadId() const {
    return thread_id_;
  }

  std::vector<std::vector<int64_t>> shapes() const {
    return shapes_;
  }

  double cpuElapsedUs(const LegacyEvent& e) const {
    // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers)
    return static_cast<double>(e.cpu_ns_ - cpu_ns_) / (1000.0);
  }

  void setCpuUs(int64_t cpu_us) {
    cpu_ns_ = static_cast<double>(cpu_us) * 1000.0;
  }

  double cpuUs() const {
    return static_cast<double>(cpu_ns_) / (1000.0);
  }

  double cudaElapsedUs(const LegacyEvent& e) const;

  bool hasCuda() const {
    return cuda_event != nullptr || (isRemote() && device_ != -1);
  }

  int device() const {
    return device_;
  }

  void updateMemoryStats(int64_t alloc_size, c10::Device device) {
    if (device.is_cuda() || device.type() == c10::DeviceType::HIP) {
      cuda_memory_usage_ = alloc_size;
    } else if (
        device.is_cpu() || device.type() == c10::DeviceType::MKLDNN ||
        device.type() == c10::DeviceType::IDEEP) {
      cpu_memory_usage_ = alloc_size;
    } else {
      LOG(WARNING) << "Unsupported memory profiling device: " << device;
    }
  }

  int64_t cpuMemoryUsage() const {
    return cpu_memory_usage_;
  }

  int64_t cudaMemoryUsage() const {
    return cuda_memory_usage_;
  }

  at::RecordFunctionHandle handle() const {
    return handle_;
  }

  // Node ID corresponding to this event.
  int nodeId() const {
    return node_id_;
  }

  // Set Node ID on this event.
  void setNodeId(int node_id) {
    node_id_ = node_id;
  }

  void setName(at::StringView newName_) {
    name_ = std::move(newName_);
  }

  bool isRemote() const {
    return is_remote_;
  }

  void setCudaUs(int64_t cuda_us) {
    cuda_us_ = cuda_us;
  }

  void setSequenceNr(int64_t sequence_nr) {
    sequence_nr_ = sequence_nr;
  }

  int64_t sequenceNr() const {
    return sequence_nr_;
  }

  void setCorrelationId(uint64_t correlation_id) {
    correlation_id_ = correlation_id;
  }

  uint64_t correlationId() const {
    return correlation_id_;
  }

  const std::vector<std::string>& stack() const {
    return stack_;
  }

  void setStack(const std::vector<std::string>& stack) {
    stack_ = stack;
  }

  uint64_t fwdThreadId() const {
    return fwd_thread_id_;
  }

  void setFwdThreadId(uint64_t fwd_thread_id) {
    fwd_thread_id_ = fwd_thread_id;
  }

  uint8_t scope() const {
    return scope_;
  }

  void setScope(uint8_t scope) {
    scope_ = scope;
  }

  const std::unordered_map<std::string, c10::IValue>& extraArgs() const {
    return extra_args_;
  }

  void setExtraArgs(std::unordered_map<std::string, c10::IValue>&& save_args) {
    extra_args_ = std::move(save_args);
  }

  uint64_t flops() {
    return flops_;
  }

  bool isAsync() {
    return is_async_;
  }

  void setFlops(uint64_t flops) {
    flops_ = flops;
  }

 private:
  // signed to allow for negative intervals, initialized for safety.
  int64_t cpu_ns_ = 0;
  at::StringView name_;
  EventKind kind_;
  uint64_t thread_id_;
  uint64_t fwd_thread_id_;
  at::RecordFunctionHandle handle_{0};
  std::vector<std::vector<int64_t>> shapes_;
  int64_t cpu_memory_usage_ = 0;
  int64_t cuda_memory_usage_ = 0;
  int device_ = -1;
  torch::profiler::impl::ProfilerVoidEventStub cuda_event = nullptr;
  int node_id_ = 0;
  bool is_remote_ = false;
  int64_t cuda_us_ = -1;
  int64_t sequence_nr_ = -1;
  bool is_async_ = false;

  std::vector<std::string> stack_;
  uint8_t scope_;
  uint64_t correlation_id_;
  // Extra arguments for computing op flops
  std::unordered_map<std::string, c10::IValue> extra_args_;
  uint64_t flops_ = 0;
};

// a linked-list of fixed sized vectors, to avoid
// a std::vector resize from taking a large amount of time inside
// a profiling event
struct RangeEventList {
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,modernize-use-equals-default)
  RangeEventList() {
    events_.reserve(kReservedCapacity);
  }

  template <typename... Args>
  void record(Args&&... args) {
    std::lock_guard<std::mutex> guard(mutex_);
    events_.emplace_back(std::forward<Args>(args)...);
  }

  std::vector<LegacyEvent> consolidate() {
    std::lock_guard<std::mutex> lock(mutex_);
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    std::vector<LegacyEvent> result;
    result.insert(
        result.begin(),
        std::make_move_iterator(events_.begin()),
        std::make_move_iterator(events_.end()));
    events_.erase(events_.begin(), events_.end());
    return result;
  }

  size_t size() {
    std::lock_guard<std::mutex> lock(mutex_);
    return events_.size();
  }

 private:
  // This mutex is used to serialize access when different threads are writing
  // to the same instance of RangeEventList.
  std::mutex mutex_;
  std::vector<LegacyEvent> events_;

  static const size_t kReservedCapacity = 1024;
};

// A struct to control settings of disableProfiler options.
struct TORCH_API ProfilerDisableOptions {
  ProfilerDisableOptions() = default;
  ProfilerDisableOptions(bool shouldCleanupTLSState, bool shouldConsolidate)
      : cleanupTLSState(shouldCleanupTLSState),
        consolidate(shouldConsolidate) {}
  // Whether we should clean up profiler states that are thread local, such as
  // ThreadLocalDebugInfo and thread local RecordFunction callbacks.
  bool cleanupTLSState = true;
  // Whether we should consolidate all currently recorded profiled events. If
  // false, will not consolidate and other threads can continue to write to the
  // event lists.
  bool consolidate = true;
};

// NOTE: profiler mode is thread local, with automatic propagation
// across thread boundary (e.g. at::launch tasks)
TORCH_API void enableProfilerLegacy(
    const torch::profiler::impl::ProfilerConfig&);
using thread_event_lists = std::vector<std::vector<LegacyEvent>>;
TORCH_API thread_event_lists disableProfilerLegacy(
    c10::optional<ProfilerDisableOptions> profilerDisableOptions =
        c10::nullopt);

// adds profiledEvents to the current thread local recorded events. Each event
// will be marked with node ID given by fromNodeId.
TORCH_API void addEventList(std::vector<LegacyEvent>&& profiledEvents);
// Writes profiled events to a stream.
TORCH_API void writeProfilerEventsToStream(
    std::ostream& out,
    const std::vector<LegacyEvent*>& events);

// Usage:
// {
//   RecordProfile guard("filename.trace");
//   // code you want to profile
// }
// Then open filename.trace in chrome://tracing
struct TORCH_API RecordProfile {
  RecordProfile(std::ostream& out);
  RecordProfile(const std::string& filename);

  ~RecordProfile();

 private:
  void init();
  std::unique_ptr<std::ofstream> file_;
  std::ostream& out_;
  void processEvents(const std::vector<LegacyEvent*>& events);
};

// A guard that enables the legacy profiler, taking in an optional callback to
// process the results Usage:
// {
//   TLSLegacyProfilerGuard g([](thread_event_lists profilerResults) {
//     // process profilerResults
//   });
//   Code to profile
// }
struct TORCH_API TLSLegacyProfilerGuard {
  explicit TLSLegacyProfilerGuard(
      const torch::profiler::impl::ProfilerConfig& cfg,
      c10::optional<std::function<void(const thread_event_lists&)>>
          resultCallback = c10::nullopt,
      c10::optional<ProfilerDisableOptions> profilerDisableOptions =
          c10::nullopt)
      : cb_(std::move(resultCallback)),
        // NOLINTNEXTLINE(performance-move-const-arg)
        profilerDisableOptions_(std::move(profilerDisableOptions)) {
    enableProfilerLegacy(cfg);
  }
  ~TLSLegacyProfilerGuard() {
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    thread_event_lists event_lists =
        disableProfilerLegacy(profilerDisableOptions_);
    if (cb_) {
      try {
        (*cb_)(event_lists);
      } catch (const std::exception& e) {
        LOG(ERROR) << "Got error processing profiler events: " << e.what();
      }
    }
  }

 private:
  c10::optional<std::function<void(const thread_event_lists&)>> cb_;
  const c10::optional<ProfilerDisableOptions> profilerDisableOptions_;
};

} // namespace profiler
} // namespace autograd
} // namespace torch
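For the legacy profiler declared above, TLSLegacyProfilerGuard pairs enableProfilerLegacy with disableProfilerLegacy and hands the consolidated per-thread event lists to an optional callback when it goes out of scope. A minimal sketch follows; it assumes the caller supplies a legacy-mode ProfilerConfig, and the printing in the callback is illustrative only.

// Sketch only: `cfg` is assumed to be a legacy (CPU) ProfilerConfig built by the caller.
void runUnderLegacyProfiler(const torch::profiler::impl::ProfilerConfig& cfg) {
  using namespace torch::autograd::profiler;
  TLSLegacyProfilerGuard guard(cfg, [](const thread_event_lists& lists) {
    // One std::vector<LegacyEvent> per thread that recorded events.
    for (const auto& per_thread_events : lists) {
      for (const auto& evt : per_thread_events) {
        std::cout << evt.kindStr() << ": " << evt.name() << '\n';
      }
    }
  });
  // ... code to profile; events are consolidated and the callback runs
  // when `guard` is destroyed at the end of this scope.
}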
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/profiler_python.h
ADDED
@@ -0,0 +1,13 @@
#pragma once

namespace torch {
namespace autograd {
namespace profiler {
namespace python_tracer {

void init();

}
} // namespace profiler
} // namespace autograd
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_anomaly_mode.h
ADDED
@@ -0,0 +1,43 @@
#pragma once

#include <pybind11/pybind11.h>
#include <torch/csrc/autograd/anomaly_mode.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pybind.h>

namespace torch {
namespace autograd {

struct PyAnomalyMetadata : public AnomalyMetadata {
  static constexpr const char* ANOMALY_TRACE_KEY = "traceback_";
  static constexpr const char* ANOMALY_PARENT_KEY = "parent_";

  PyAnomalyMetadata() {
    pybind11::gil_scoped_acquire gil;
    dict_ = PyDict_New();
  }
  ~PyAnomalyMetadata() override {
    // If python is already dead, leak the wrapped python objects
    if (Py_IsInitialized()) {
      pybind11::gil_scoped_acquire gil;
      Py_DECREF(dict_);
    }
  }
  void store_stack() override;
  void print_stack(const std::string& current_node_name) override;
  void assign_parent(const std::shared_ptr<Node>& parent_node) override;

  PyObject* dict() {
    return dict_;
  }

 private:
  PyObject* dict_;
};
void _print_stack(
    PyObject* trace_stack,
    const std::string& current_node_name,
    bool is_parent);

} // namespace autograd
} // namespace torch
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_autograd.h
ADDED
@@ -0,0 +1,19 @@
#ifndef THP_AUTOGRAD_H
#define THP_AUTOGRAD_H

PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused);
void THPAutograd_initFunctions();

namespace torch {
namespace autograd {

PyMethodDef* python_functions();

}
} // namespace torch

#include <torch/csrc/autograd/python_engine.h>
#include <torch/csrc/autograd/python_function.h>
#include <torch/csrc/autograd/python_variable.h>

#endif
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_cpp_function.h
ADDED
@@ -0,0 +1,107 @@
#pragma once

#include <torch/csrc/python_headers.h>
#include <memory>
#include <typeinfo>

#include <torch/csrc/Exceptions.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/utils/object_ptr.h>

namespace torch {
namespace autograd {

struct THPCppFunction {
  PyObject_HEAD std::shared_ptr<Node> cdata;
};

template <typename Ctor>
PyObject* CppFunction_pynew(
    PyTypeObject* type,
    PyObject* args,
    PyObject* kwds) {
  THPObjectPtr obj(type->tp_alloc(type, 0));
  if (!obj)
    return nullptr;
  THPCppFunction* f = (THPCppFunction*)obj.get();
  HANDLE_TH_ERRORS
  new (&f->cdata) std::shared_ptr<Node>(Ctor()(args));
  END_HANDLE_TH_ERRORS
  if (!f->cdata) {
    return nullptr;
  }
  return obj.release();
}

#define THP_FUNCTION_DEFAULT_METHODS \
  {(char*)"_register_hook_dict", \
   THPCppFunction_register_hook_dict, \
   METH_O, \
   nullptr}, \
      {(char*)"register_hook", THPCppFunction_register_hook, METH_O, nullptr}, \
      {(char*)"register_prehook", \
       THPCppFunction_register_prehook, \
       METH_O, \
       nullptr}, \
      {(char*)"name", THPCppFunction_name, METH_NOARGS, nullptr}, \
      {(char*)"_sequence_nr", \
       THPCppFunction_sequence_nr, \
       METH_NOARGS, \
       nullptr}, \
  { \
    (char*)"_set_sequence_nr", THPCppFunction_set_sequence_nr, METH_O, nullptr \
  }

#define THP_FUNCTION_DEFAULT_PROPERTIES \
  {(char*)"next_functions", \
   THPCppFunction_next_functions, \
   nullptr, \
   nullptr, \
   nullptr}, \
      {(char*)"requires_grad", \
       THPCppFunction_requires_grad, \
       nullptr, \
       nullptr, \
       nullptr}, \
  { \
    (char*)"metadata", THPCppFunction_metadata, nullptr, nullptr, nullptr \
  }

PyObject* THPCppFunction_next_functions(PyObject* self, void* _unused);
PyObject* THPCppFunction_metadata(PyObject* self, void* _unused);
PyObject* THPCppFunction_requires_grad(PyObject* self, void* _unused);
PyObject* THPCppFunction_register_hook_dict(PyObject* self, PyObject* _var);
PyObject* THPCppFunction_register_hook(PyObject* self, PyObject* hook);
PyObject* THPCppFunction_register_prehook(PyObject* self, PyObject* hook);

PyObject* THPCppFunction_name(PyObject* self, PyObject* noargs);
PyObject* THPCppFunction_sequence_nr(PyObject* self, PyObject* noargs);

PyTypeObject* _initFunctionPyTypeObject(
    PyTypeObject& type,
    const char* name,
    PyGetSetDef* function_properties,
    PyMethodDef* function_methods);

PyObject* registerFunctionHook(Node& fn, PyObject* hook);

PyObject* registerFunctionPreHook(Node& fn, PyObject* hook);

template <typename Ctor>
PyTypeObject* createForwardFunctionPyTypeObject(
    PyTypeObject& type,
    const char* name,
    PyGetSetDef* function_properties = nullptr,
    PyMethodDef* function_methods = nullptr) {
  type.tp_new = &CppFunction_pynew<Ctor>;
  return _initFunctionPyTypeObject(
      type, name, function_properties, function_methods);
}

void registerCppFunction(const std::type_info& type, PyTypeObject* pytype);
PyObject* functionToPyObject(const std::shared_ptr<Node>& cdata);

bool THPCppFunction_Check(PyObject* obj);

} // namespace autograd
} // namespace torch
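CppFunction_pynew and createForwardFunctionPyTypeObject above expect a Ctor functor that is invoked with the Python args tuple and must return a std::shared_ptr<Node>. The sketch below illustrates that contract with a hypothetical MyNodeCtor and MyNodeType; both names are illustrative, and the actual node construction is omitted.

// Sketch only: `MyNodeCtor` and `MyNodeType` are hypothetical names for illustration.
struct MyNodeCtor {
  // Called by CppFunction_pynew<MyNodeCtor> with the Python args tuple.
  std::shared_ptr<torch::autograd::Node> operator()(PyObject* args) {
    // Parse `args` and build the C++ autograd node here (omitted).
    // Returning nullptr makes CppFunction_pynew report construction failure.
    return nullptr;
  }
};

// At registration time (e.g. module init), one would bind the type roughly as:
//   static PyTypeObject MyNodeType;  // zero-initialized at namespace scope
//   torch::autograd::createForwardFunctionPyTypeObject(MyNodeType, "MyNode");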
env-llmeval/lib/python3.10/site-packages/torch/include/torch/csrc/autograd/python_engine.h
ADDED
@@ -0,0 +1,48 @@
#pragma once

#include <torch/csrc/python_headers.h>

#include <torch/csrc/autograd/engine.h>
#include <torch/csrc/autograd/function.h>

bool THPEngine_initModule(PyObject* module);

namespace torch {
namespace autograd {
namespace python {

struct PythonEngine : public Engine {
  static Engine& get_python_engine();
  ~PythonEngine() override;
  void thread_init(
      int device,
      const std::shared_ptr<ReadyQueue>& ready_queue,
      bool should_increment) override;
  void thread_on_exception(
      std::shared_ptr<GraphTask> graph_task,
      const std::shared_ptr<Node>& fn,
      std::exception& e) override;
  variable_list execute(
      const edge_list& roots,
      const variable_list& inputs,
      bool keep_graph,
      bool create_graph,
      bool accumulate_grad,
      const edge_list& outputs = {}) override;

  c10::intrusive_ptr<at::ivalue::Future> execute_with_graph_task(
      const std::shared_ptr<GraphTask>& graph_task,
      std::shared_ptr<Node> graph_root,
      InputBuffer&& input_buffer) override;

  std::unique_ptr<AnomalyMetadata> make_anomaly_metadata() override;
  std::unique_ptr<SavedVariableHooks> get_default_saved_variable_hooks()
      override;

 private:
  PythonEngine();
};

} // namespace python
} // namespace autograd
} // namespace torch
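PythonEngine::get_python_engine() returns the process-wide engine as an Engine&, so callers can drive a backward pass through the execute() overload declared above. A hedged sketch, assuming the caller has already assembled the root edges and matching gradient tensors:

// Sketch only: `roots` and `grads` are assumed to be prepared by the caller
// (e.g. grad_fn edges of output variables and gradients of matching shape).
torch::autograd::variable_list runBackwardWithPythonEngine(
    const torch::autograd::edge_list& roots,
    const torch::autograd::variable_list& grads) {
  torch::autograd::Engine& engine =
      torch::autograd::python::PythonEngine::get_python_engine();
  return engine.execute(
      roots,
      grads,
      /*keep_graph=*/false,
      /*create_graph=*/false,
      /*accumulate_grad=*/true);
}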