applied-ai-018 committed
Commit 3e63766 · verified · 1 Parent(s): 49cb9b3

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step20/zero/20.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  4. ckpts/universal/global_step20/zero/20.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  5. ckpts/universal/global_step20/zero/20.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  6. ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  7. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/all.h +24 -0
  8. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/arg.h +23 -0
  9. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h +5 -0
  10. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/cuda.h +30 -0
  11. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data.h +14 -0
  12. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader_options.h +65 -0
  13. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/base.h +103 -0
  14. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/chunk.h +529 -0
  15. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/tensor.h +38 -0
  16. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/data_shuttle.h +87 -0
  17. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/queue.h +84 -0
  18. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/sequencers.h +113 -0
  19. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/example.h +55 -0
  20. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/iterator.h +178 -0
  21. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers.h +9 -0
  22. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms.h +7 -0
  23. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/base.h +53 -0
  24. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/collate.h +35 -0
  25. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/lambda.h +56 -0
  26. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/tensor.h +77 -0
  27. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/worker_exception.h +38 -0
  28. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/TensorDataContainer.h +372 -0
  29. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/static.h +65 -0
  30. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/enum.h +212 -0
  31. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/expanding_array.h +182 -0
  32. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/fft.h +389 -0
  33. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/imethod.h +53 -0
  34. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/jit.h +36 -0
  35. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/linalg.h +1065 -0
  36. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/mps.h +44 -0
  37. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nested.h +95 -0
  38. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn.h +10 -0
  39. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim.h +12 -0
  40. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adagrad.h +109 -0
  41. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adam.h +92 -0
  42. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adamw.h +92 -0
  43. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/lbfgs.h +103 -0
  44. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/optimizer.h +217 -0
  45. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/rmsprop.h +95 -0
  46. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/lr_scheduler.h +39 -0
  47. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/reduce_on_plateau_scheduler.h +63 -0
  48. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/step_lr.h +22 -0
  49. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/serialize.h +309 -0
  50. venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/sgd.h +91 -0
ckpts/universal/global_step20/zero/10.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91402e3f03055441b2170afc1e881f342a64c2e3eceb74c8594ae5947afb82de
+ size 50332843
ckpts/universal/global_step20/zero/18.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ba9de269acc5483b4a43632294b2fe88df84245fec84fa6446e9079224c47c3
+ size 33555612
ckpts/universal/global_step20/zero/20.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02783e23e70f84fe9fd6cdcaf784dda9d81905db87227920811f32def790e14c
+ size 33555612
ckpts/universal/global_step20/zero/20.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e97f0b8385822bff8b631b72a612fb5dbc064a3a1376081ff6115ecb4f910dc
+ size 33555627
ckpts/universal/global_step20/zero/20.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:931bd0822c656b733cf4a74eed39a6e6c64b05cc52153b3422e378ab560ba8dc
+ size 33555533
ckpts/universal/global_step20/zero/26.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4341825af43d0b0c9f716f3fcb5fbb576aa8cbbc5d2d18f06130ea83d3f4ed3d
+ size 33555612
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/all.h ADDED
@@ -0,0 +1,24 @@
+ #pragma once
+
+ #if !defined(_MSC_VER) && __cplusplus < 201703L
+ #error C++17 or later compatible compiler is required to use PyTorch.
+ #endif
+
+ #include <torch/autograd.h>
+ #include <torch/cuda.h>
+ #include <torch/data.h>
+ #include <torch/enum.h>
+ #include <torch/fft.h>
+ #include <torch/jit.h>
+ #include <torch/linalg.h>
+ #include <torch/mps.h>
+ #include <torch/nested.h>
+ #include <torch/nn.h>
+ #include <torch/optim.h>
+ #include <torch/serialize.h>
+ #include <torch/sparse.h>
+ #include <torch/special.h>
+ #include <torch/types.h>
+ #include <torch/utils.h>
+ #include <torch/version.h>
+ #include <torch/xpu.h>
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/arg.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <utility>
+
+ #define TORCH_ARG(T, name) \
+  public: \
+   inline auto name(const T& new_##name) -> decltype(*this) { /* NOLINT */ \
+     this->name##_ = new_##name; \
+     return *this; \
+   } \
+   inline auto name(T&& new_##name) -> decltype(*this) { /* NOLINT */ \
+     this->name##_ = std::move(new_##name); \
+     return *this; \
+   } \
+   inline const T& name() const noexcept { /* NOLINT */ \
+     return this->name##_; \
+   } \
+   inline T& name() noexcept { /* NOLINT */ \
+     return this->name##_; \
+   } \
+   \
+  private: \
+   T name##_ /* NOLINT */
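For orientation, a minimal usage sketch of the macro above (the options struct and values below are hypothetical, not part of this commit): TORCH_ARG expands to a chaining setter pair, a getter pair, and a trailing private member named `name##_`.

#include <torch/arg.h>
#include <cstdint>

// Hypothetical options struct; TORCH_ARG generates kernel_size_/bias_ members.
struct ConvOptionsSketch {
  TORCH_ARG(int64_t, kernel_size) = 3;
  TORCH_ARG(bool, bias) = true;
};

int main() {
  ConvOptionsSketch o;
  o.kernel_size(5).bias(false);         // setters return *this, so they chain
  return o.kernel_size() == 5 ? 0 : 1;  // no-argument overload reads back
}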
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/autograd.h ADDED
@@ -0,0 +1,5 @@
+ #pragma once
+
+ #include <torch/csrc/autograd/autograd.h>
+ #include <torch/csrc/autograd/autograd_not_implemented_fallback.h>
+ #include <torch/csrc/autograd/custom_function.h>
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/cuda.h ADDED
@@ -0,0 +1,30 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+
+ #include <cstddef>
+ #include <cstdint>
+
+ namespace torch {
+ namespace cuda {
+
+ /// Returns the number of CUDA devices available.
+ size_t TORCH_API device_count();
+
+ /// Returns true if at least one CUDA device is available.
+ bool TORCH_API is_available();
+
+ /// Returns true if CUDA is available, and CuDNN is available.
+ bool TORCH_API cudnn_is_available();
+
+ /// Sets the seed for the current GPU.
+ void TORCH_API manual_seed(uint64_t seed);
+
+ /// Sets the seed for all available GPUs.
+ void TORCH_API manual_seed_all(uint64_t seed);
+
+ /// Waits for all kernels in all streams on a CUDA device to complete.
+ void TORCH_API synchronize(int64_t device_index = -1);
+
+ } // namespace cuda
+ } // namespace torch
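A quick sketch of calling these declarations, assuming a program linked against libtorch (values are arbitrary):

#include <torch/cuda.h>
#include <iostream>

int main() {
  if (torch::cuda::is_available()) {
    std::cout << torch::cuda::device_count() << " CUDA device(s) found\n";
    torch::cuda::manual_seed_all(42);  // seed every visible GPU
    torch::cuda::synchronize(0);       // wait for all streams on device 0
  } else {
    std::cout << "CPU only; cudnn_is_available() would also be false\n";
  }
}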
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data.h ADDED
@@ -0,0 +1,14 @@
+ #pragma once
+
+ #include <torch/data/dataloader.h>
+ #include <torch/data/datasets.h>
+ #include <torch/data/samplers.h>
+ #include <torch/data/transforms.h>
+
+ // Some "exports".
+ namespace torch {
+ namespace data {
+ using datasets::BatchDataset;
+ using datasets::Dataset;
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/dataloader_options.h ADDED
@@ -0,0 +1,65 @@
+ #pragma once
+
+ #include <torch/arg.h>
+ #include <torch/types.h>
+
+ #include <chrono>
+ #include <cstddef>
+
+ namespace torch {
+ namespace data {
+
+ /// Options to configure a `DataLoader`.
+ struct DataLoaderOptions {
+   DataLoaderOptions() = default;
+   /* implicit */ DataLoaderOptions(size_t batch_size)
+       : batch_size_(batch_size) {}
+
+   /// The size of each batch to fetch.
+   TORCH_ARG(size_t, batch_size) = 1;
+
+   /// The number of worker threads to launch. If zero, the main thread will
+   /// synchronously perform the data loading.
+   TORCH_ARG(size_t, workers) = 0;
+
+   /// The maximum number of jobs to enqueue for fetching by worker threads.
+   /// Defaults to two times the number of worker threads.
+   TORCH_ARG(optional<size_t>, max_jobs);
+
+   /// An optional limit on the time to wait for the next batch.
+   TORCH_ARG(optional<std::chrono::milliseconds>, timeout);
+
+   /// Whether to enforce ordering of batches when multiple are loaded
+   /// asynchronously by worker threads. Set to `false` for better performance
+   /// if you do not care about determinism.
+   TORCH_ARG(bool, enforce_ordering) = true;
+
+   /// Whether to omit the last batch if it contains less than `batch_size`
+   /// examples.
+   TORCH_ARG(bool, drop_last) = false;
+ };
+
+ /// Like `DataLoaderOptions`, but without any unconfigured state.
+ /// `DataLoaderOptions` has some options that depend on other options
+ /// (`max_jobs` => `2 * workers`). In the spirit of properly using the C++ type
+ /// system, `DataLoaderOptions` allows only setting values. To access values,
+ /// you must create a `FullDataLoaderOptions` from a `DataLoaderOptions`
+ /// instance, which will do any necessary coalescing.
+ struct FullDataLoaderOptions {
+   explicit FullDataLoaderOptions(DataLoaderOptions options)
+       : batch_size(options.batch_size()),
+         workers(options.workers()),
+         max_jobs(options.max_jobs().value_or(2 * workers)),
+         timeout(options.timeout()),
+         enforce_ordering(options.enforce_ordering()),
+         drop_last(options.drop_last()) {}
+
+   size_t batch_size;
+   size_t workers;
+   size_t max_jobs;
+   optional<std::chrono::milliseconds> timeout;
+   bool enforce_ordering;
+   bool drop_last;
+ };
+ } // namespace data
+ } // namespace torch
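To make the coalescing concrete, a minimal sketch (values are arbitrary; `max_jobs` is deliberately left unset so the default kicks in):

#include <torch/torch.h>
#include <cassert>

int main() {
  // Fluent setters come from TORCH_ARG.
  auto options = torch::data::DataLoaderOptions()
                     .batch_size(32)
                     .workers(4)
                     .enforce_ordering(false);
  // FullDataLoaderOptions coalesces unset values: max_jobs => 2 * workers.
  torch::data::FullDataLoaderOptions full(options);
  assert(full.batch_size == 32);
  assert(full.max_jobs == 8);
}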
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/base.h ADDED
@@ -0,0 +1,103 @@
+ #pragma once
+
+ #include <torch/data/example.h>
+ #include <torch/types.h>
+
+ #include <c10/util/ArrayRef.h>
+
+ #include <cstddef>
+ #include <cstdint>
+ #include <type_traits>
+ #include <utility>
+ #include <vector>
+
+ namespace torch {
+ namespace data {
+ namespace datasets {
+ template <typename S, typename T>
+ class MapDataset;
+ template <typename D, typename T>
+ MapDataset<D, T> map(D, T); // NOLINT
+ } // namespace datasets
+ } // namespace data
+ } // namespace torch
+
+ namespace torch {
+ namespace data {
+ namespace datasets {
+ namespace detail {
+ template <typename T>
+ struct is_optional : std::false_type {};
+ template <typename T>
+ struct is_optional<optional<T>> : std::true_type {};
+ } // namespace detail
+
+ /// A dataset that can yield data only in batches.
+ template <
+     typename Self,
+     typename Batch = std::vector<Example<>>,
+     typename BatchRequest = ArrayRef<size_t>>
+ class BatchDataset {
+  public:
+   using SelfType = Self;
+   using BatchType = Batch;
+   using BatchRequestType = BatchRequest;
+   constexpr static bool is_stateful = detail::is_optional<BatchType>::value;
+
+   virtual ~BatchDataset() = default;
+
+   /// Returns a batch of data given an index.
+   virtual Batch get_batch(BatchRequest request) = 0;
+
+   /// Returns the size of the dataset, or an empty optional if it is unsized.
+   virtual optional<size_t> size() const = 0;
+
+   /// Creates a `MapDataset` that applies the given `transform` to this dataset.
+   template <typename TransformType>
+   MapDataset<Self, TransformType> map(TransformType transform) & {
+     return datasets::map(static_cast<Self&>(*this), std::move(transform));
+   }
+
+   /// Creates a `MapDataset` that applies the given `transform` to this dataset.
+   template <typename TransformType>
+   MapDataset<Self, TransformType> map(TransformType transform) && {
+     return datasets::map(
+         std::move(static_cast<Self&>(*this)), std::move(transform));
+   }
+ };
+
+ /// A dataset that can yield data in batches, or as individual examples.
+ ///
+ /// A `Dataset` is a `BatchDataset`, because it supports random access and
+ /// therefore batched access is implemented (by default) by calling the random
+ /// access indexing function for each index in the requested batch of indices.
+ /// This can be customized.
+ template <typename Self, typename SingleExample = Example<>>
+ class Dataset : public BatchDataset<Self, std::vector<SingleExample>> {
+  public:
+   using ExampleType = SingleExample;
+
+   /// Returns the example at the given index.
+   virtual ExampleType get(size_t index) = 0;
+
+   /// Returns a batch of data.
+   /// The default implementation calls `get()` for every requested index
+   /// in the batch.
+   std::vector<ExampleType> get_batch(ArrayRef<size_t> indices) override {
+     std::vector<ExampleType> batch;
+     batch.reserve(indices.size());
+     for (const auto i : indices) {
+       batch.push_back(get(i));
+     }
+     return batch;
+   }
+ };
+
+ /// A `StreamDataset` represents a dataset that is a potentially infinite
+ /// stream. It takes as batch index only a number, which is the batch size, and
+ /// yields that many elements from the stream.
+ template <typename Self, typename Batch = std::vector<Example<>>>
+ using StreamDataset = BatchDataset<Self, Batch, /*BatchRequest=*/size_t>;
+ } // namespace datasets
+ } // namespace data
+ } // namespace torch
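As a usage sketch of the `Dataset` interface above (the dataset name and its contents are hypothetical, not part of this commit):

#include <torch/torch.h>

// A toy random-access dataset mapping index -> (x, x^2).
struct SquaresDataset : torch::data::datasets::Dataset<SquaresDataset> {
  torch::data::Example<> get(size_t index) override {
    auto x = torch::tensor(static_cast<float>(index));
    return {x, x * x};  // {data, target}
  }
  torch::optional<size_t> size() const override {
    return 8;
  }
};

int main() {
  SquaresDataset dataset;
  // The default get_batch() calls get() once per requested index.
  auto batch = dataset.get_batch({0, 3, 7});
  // map() chains a transform, e.g. stacking examples into a single tensor.
  auto stacked = SquaresDataset().map(torch::data::transforms::Stack<>());
}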
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/chunk.h ADDED
@@ -0,0 +1,529 @@
+ #pragma once
+
+ #include <c10/util/irange.h>
+ #include <torch/arg.h>
+ #include <torch/data/datasets/stateful.h>
+ #include <torch/data/samplers.h>
+ #include <queue>
+ #include <thread>
+
+ #include <torch/serialize.h>
+
+ namespace torch {
+ namespace data {
+ namespace datasets {
+
+ /// Interface for chunk reader, which performs data chunking and reading of
+ /// entire chunks.
+ ///
+ /// A chunk could be an entire file, such as an audio data file or an image,
+ /// or part of a file in the case of a large text-file split based on seek
+ /// positions.
+ template <
+     typename ExampleType_,
+     typename ChunkType_ = std::vector<ExampleType_>>
+ class ChunkDataReader {
+  public:
+   virtual ~ChunkDataReader() = default;
+
+   using ChunkType = ChunkType_;
+   using ExampleType = ExampleType_;
+
+   /// Reads an entire chunk.
+   virtual ChunkType read_chunk(size_t chunk_index) = 0;
+
+   /// Returns the number of chunks available in this reader.
+   virtual size_t chunk_count() = 0;
+
+   /// This will clear any internal state associated with this reader.
+   virtual void reset() = 0;
+ };
+
+ namespace detail {
+ /// BatchDataBuffer manages a queue of UnwrappedBatchData. After a new chunk is
+ /// loaded, BatchDataBuffer splits it into small batches and pushes them into
+ /// the queue. When get_batch is called from the data loader, it pops cached
+ /// batches and returns them. If the cache is empty, it either waits to load
+ /// more chunks or returns null if all chunks are loaded.
+ template <
+     typename UnwrappedBatch,
+     typename ExampleSampler = samplers::RandomSampler>
+ class BatchDataBuffer {
+  public:
+   using UnwrappedBatchType = UnwrappedBatch;
+   using BatchType = torch::optional<UnwrappedBatchType>;
+   using BatchRequestType = typename ExampleSampler::BatchRequestType;
+
+   BatchDataBuffer(
+       size_t batch_size,
+       ExampleSampler& example_sampler,
+       size_t queue_capacity)
+       : batch_size_(batch_size),
+         example_sampler_(example_sampler),
+         queue_capacity_(queue_capacity) {}
+
+   /// Returns batch data from the queue. Called from the ChunkDataset main
+   /// thread.
+   BatchType get_batch() {
+     std::unique_lock<std::mutex> lock(queue_mutex_);
+     cv_read_.wait(lock, [this] {
+       // wait till there is available data in the queue or if all chunks are
+       // loaded (i.e. the dataset is exhausted for this epoch)
+       return (
+           this->total_example_count_in_queue_ >= batch_size_ || this->stop_);
+     });
+     if (batch_queue_.empty()) {
+       AT_ASSERT(stop_);
+       // All batches have been retrieved. Return an empty batch.
+       return nullopt;
+     }
+
+     UnwrappedBatchData batch = std::move(batch_queue_.front());
+     batch_queue_.pop();
+     if (batch.exception) {
+       throw WorkerException(batch.exception);
+     }
+
+     total_example_count_in_queue_ -= batch.batch_data.size();
+     lock.unlock();
+     cv_write_.notify_all();
+
+     return batch.batch_data;
+   }
+
+   /// Pushes preloaded chunks to the batch queue. Called from the ChunkDataset
+   /// worker threads.
+   void add_chunk_data(UnwrappedBatchType data) {
+     std::unique_lock<std::mutex> lock(queue_mutex_);
+     cv_write_.wait(lock, [this] {
+       // stop loading if we have preloaded enough data.
+       return this->total_example_count_in_queue_ < this->queue_capacity_ ||
+           this->stop_;
+     });
+     if (stop_) {
+       // When stop_ is true, it means no further chunk loading is necessary.
+       // Return without any further processing.
+       return;
+     }
+
+     auto data_size = data.size();
+     auto remaining_size = data_size;
+     example_sampler_.reset(data_size);
+
+     auto fill_batch = [&](size_t example_count, UnwrappedBatchType& batch) {
+       auto batch_example_indices = this->example_sampler_.next(example_count);
+       AT_ASSERT(
+           batch_example_indices &&
+           batch_example_indices.value().size() == example_count);
+       BatchRequestType& indices = batch_example_indices.value();
+       for (size_t i : indices) {
+         TORCH_CHECK(i < data_size, "Index out of range");
+         batch.emplace_back(std::move(data[i]));
+       }
+       remaining_size -= example_count;
+     };
+
+     if (!batch_queue_.empty()) {
+       // if the queue has existing data, and the last batch doesn't have
+       // enough examples to fill a batch_size batch, add more examples to this
+       // batch first.
+       auto& batch = batch_queue_.back();
+       size_t current_count = batch.batch_data.size();
+       if (current_count < batch_size_) {
+         auto example_count =
+             std::min(remaining_size, batch_size_ - current_count);
+         fill_batch(example_count, batch.batch_data);
+       }
+     }
+
+     // If we still have data remaining after filling the last pushed batch,
+     // add it to the queue too.
+     // NOLINTNEXTLINE(bugprone-infinite-loop)
+     while (remaining_size > 0) {
+       UnwrappedBatchType current_batch;
+
+       // Allocate the batch memory ahead of time.
+       current_batch.reserve(batch_size_);
+
+       auto example_count = std::min(remaining_size, batch_size_);
+       fill_batch(example_count, current_batch);
+       batch_queue_.emplace(std::move(current_batch));
+     }
+     total_example_count_in_queue_ += data_size;
+     lock.unlock();
+     cv_read_.notify_all();
+   }
+
+   /// Pushes exceptions thrown during preloading into the batch queue. Called
+   /// from the ChunkDataset worker threads.
+   void add_chunk_data(std::exception_ptr e_ptr) {
+     std::unique_lock<std::mutex> lock(queue_mutex_);
+     cv_write_.wait(lock, [this] {
+       // stop loading if we have preloaded enough data.
+       return (
+           this->total_example_count_in_queue_ < this->queue_capacity_ ||
+           this->stop_);
+     });
+     if (stop_) {
+       // When stop_ is true, the current thread needs to be torn down and the
+       // batch buffer will be discarded, so there is no need to enqueue any
+       // new exceptions.
+       return;
+     }
+
+     batch_queue_.emplace(e_ptr);
+     lock.unlock();
+     cv_read_.notify_all();
+   }
+
+   void stop() {
+     {
+       // Hold the lock before changing stop_ to prevent a race condition which
+       // can cause a deadlock. To be more specific, the condition variable
+       // cv_write_ waits on the predicate stop_ in add_chunk_data(). The wait
+       // happens in two steps: 1) while still holding the lock, check if the
+       // predicate is true; 2) if it is true, proceed, otherwise release the
+       // lock and wait until notified. Without holding the lock, cv_write_'s
+       // notification can happen in between steps 1) and 2). In that case,
+       // cv_write_ is not yet waiting, so the notification is lost and
+       // cv_write_ will sleep forever. By taking the lock before changing the
+       // predicate stop_, it is ensured that updating and evaluating stop_
+       // always happen in a synchronized way.
+       std::lock_guard<std::mutex> lock(queue_mutex_);
+       stop_ = true;
+     }
+
+     // notify all writers, wake them from wait to exit current method.
+     cv_write_.notify_all();
+     // notify all readers too.
+     cv_read_.notify_all();
+   }
+   /// The batch size is needed to create batches from the chunk data. Similar
+   /// to the regular dataloader, where batches are created with prefetches,
+   /// BatchDataBuffer performs the batch creation using the provided batch
+   /// size.
+   size_t batch_size_ = 0;
+
+   /// count of total examples stored in the queue
+   size_t total_example_count_in_queue_ = 0;
+
+   /// struct that contains a raw unwrapped batch unit. An unwrapped batch unit
+   /// is the raw data without the 'optional' wrapper. It can be a collection
+   /// of images, utterances, etc.
+   struct UnwrappedBatchData {
+     explicit UnwrappedBatchData(UnwrappedBatchType data)
+         : batch_data(std::move(data)) {}
+
+     // NOLINTNEXTLINE(modernize-pass-by-value)
+     explicit UnwrappedBatchData(std::exception_ptr e) : exception(e) {}
+
+     /// batch data to return
+     UnwrappedBatchType batch_data;
+
+     /// exception pointer which captures any abnormal exceptions while
+     /// creating the batch.
+     std::exception_ptr exception;
+   };
+
+   /// local cache to store example batches from loaded chunks
+   std::queue<UnwrappedBatchData> batch_queue_;
+
+   // sync batch_queue_ update.
+   std::mutex queue_mutex_;
+
+   std::condition_variable cv_read_;
+   std::condition_variable cv_write_;
+
+   ExampleSampler& example_sampler_;
+
+   // configurable maximum number of elements the queue can hold at one time.
+   size_t queue_capacity_;
+
+   // When set to true, it wakes the writer threads from the wait and exits the
+   // current function call. This is needed when ChunkDataSet.Reset is called
+   // while the previous epoch is not exhausted yet. When ChunkDataset is
+   // waiting for its preloader to finish previous work before tearing down the
+   // thread, the preloader could still be waiting for the condition variable,
+   // thus causing the program to hang. This boolean is used to break this
+   // waiting condition.
+   bool stop_ = false;
+ };
+ } // namespace detail
+
+ /// Options to configure a `ChunkDataset`.
+ struct ChunkDatasetOptions {
+   ChunkDatasetOptions() = delete;
+   ChunkDatasetOptions(
+       size_t preloader_count,
+       size_t batch_size,
+       size_t cache_size = 2048,
+       size_t cross_chunk_shuffle_count = 1)
+       : preloader_count_(preloader_count),
+         batch_size_(batch_size),
+         cache_size_(cache_size),
+         cross_chunk_shuffle_count_(cross_chunk_shuffle_count) {
+     TORCH_CHECK(
+         preloader_count_ > 0,
+         "Preloader count is 0. At least one preloader needs to be specified.");
+     TORCH_CHECK(
+         batch_size_ > 0,
+         "Batch size is 0. A positive batch size needs to be specified.");
+     TORCH_CHECK(
+         cache_size_ > 0,
+         "Cache size is 0. A positive cache size needs to be specified.");
+     TORCH_CHECK(
+         cache_size_ >= batch_size_,
+         "Cache size is less than batch size. Cache needs to be large enough to "
+         "hold at least one batch.");
+     TORCH_CHECK(
+         cross_chunk_shuffle_count_ > 0,
+         "cross_chunk_shuffle_count needs to be greater than 0.");
+   }
+
+   /// The number of worker threads to preload chunk data.
+   TORCH_ARG(size_t, preloader_count);
+
+   /// The size of each batch.
+   TORCH_ARG(size_t, batch_size);
+
+   /// The capacity of the queue for batch caching.
+   TORCH_ARG(size_t, cache_size) = 2048;
+
+   // The number of chunks to perform cross-chunk shuffling. Defaults to 1,
+   // meaning no cross-chunk shuffling. When it is equal to n (n > 1), n random
+   // chunks will be loaded at once and example shuffling will be performed
+   // across all those n chunks.
+   // Note: Usually the default config (1 chunk shuffle + example shuffle) is
+   // good enough to generate randomly distributed data. Use this parameter
+   // only if you know cross-shuffle is needed in your case. Also there is a
+   // performance penalty when this value is greater than 1, as we need to do
+   // an extra merge between multiple chunks before performing example
+   // sampling.
+   TORCH_ARG(size_t, cross_chunk_shuffle_count) = 1;
+ };
+
+ /// A stateful dataset that supports hierarchical sampling and prefetching of
+ /// entire chunks.
+ ///
+ /// Unlike a regular dataset, a chunk dataset requires two samplers to operate
+ /// and keeps an internal state. `ChunkSampler` selects which chunk to load
+ /// next, while the `ExampleSampler` determines the order of Examples that are
+ /// returned in each `get_batch` call. The hierarchical sampling approach used
+ /// here is inspired by this paper:
+ /// http://martin.zinkevich.org/publications/nips2010.pdf
+ template <
+     typename ChunkReader,
+     typename ChunkSampler = samplers::RandomSampler,
+     typename ExampleSampler = samplers::RandomSampler>
+ class ChunkDataset final
+     : public StatefulDataset<
+           ChunkDataset<ChunkReader, ChunkSampler, ExampleSampler>,
+           typename ChunkReader::BatchType,
+           size_t> {
+  public:
+   using BatchType = torch::optional<typename ChunkReader::BatchType>;
+   using UnwrappedBatchType = typename ChunkReader::BatchType;
+   using BatchRequestType = size_t;
+   using ChunkSamplerType = ChunkSampler;
+   using ExampleSamplerType = ExampleSampler;
+
+   ChunkDataset(
+       ChunkReader chunk_reader,
+       ChunkSampler chunk_sampler,
+       ExampleSampler example_sampler,
+       ChunkDatasetOptions options,
+       std::function<void(UnwrappedBatchType&)> preprocessing_policy =
+           std::function<void(UnwrappedBatchType&)>())
+       : chunk_reader_(std::move(chunk_reader)),
+         chunk_sampler_(std::move(chunk_sampler)),
+         example_sampler_(std::move(example_sampler)),
+         options_(std::move(options)),
+         preprocessing_policy_(std::move(preprocessing_policy)),
+         quit_worker_(false),
+         running_preloaders_(0),
+         load_checkpoint_(false) {}
+
+   ~ChunkDataset() override {
+     // stop batch buffer first.
+     if (batch_buffer_) {
+       batch_buffer_->stop();
+     }
+     free_workers();
+   }
+
+   /// Default get_batch method of BatchDataset. This method returns
+   /// Example batches created from the preloaded chunks. The implementation
+   /// is dataset agnostic and does not need overriding in different chunk
+   /// datasets.
+   BatchType get_batch(size_t batch_size) override {
+     TORCH_CHECK(
+         batch_buffer_ != nullptr,
+         "Dataset needs to call reset() before calling get_batch().");
+
+     TORCH_CHECK(
+         batch_size == options_.batch_size(),
+         "The requested batch size does not match with the initialized batch size.\n"
+         " The requested batch size is ",
+         batch_size,
+         ", while the dataset is created with batch size equal to ",
+         options_.batch_size());
+     return batch_buffer_->get_batch();
+   }
+
+   /// Helper method around get_batch, as `batch_size` is not strictly
+   /// necessary.
+   BatchType get_batch() {
+     return get_batch(options_.batch_size());
+   }
+
+   /// This will clear any internal state and start the internal prefetching
+   /// mechanism for the chunk dataset.
+   void reset() override {
+     // We need this to support partial data reads via the dataloader iterator.
+     if (batch_buffer_) {
+       batch_buffer_->stop();
+     }
+     // free workers from the previous reset if there are any.
+     free_workers();
+     preload_threads_.clear();
+
+     if (!load_checkpoint_) {
+       chunk_reader_.reset();
+       chunk_sampler_.reset(chunk_reader_.chunk_count());
+       load_checkpoint_ = false;
+     }
+
+     // Throw out any existing cached batch in the buffer and re-create a new
+     // chunk buffer.
+     batch_buffer_ = std::make_unique<
+         detail::BatchDataBuffer<UnwrappedBatchType, ExampleSamplerType>>(
+         options_.batch_size(), example_sampler_, options_.cache_size());
+
+     // create new workers for this new epoch.
+     quit_worker_ = false;
+
+     AT_ASSERT(running_preloaders_ == 0);
+     running_preloaders_ = options_.preloader_count();
+     for (const auto i : c10::irange(options_.preloader_count())) {
+       preload_threads_.emplace_back([this, i]() { this->preloader(i); });
+     }
+   }
+
+   /// size is not used for chunk dataset.
+   optional<size_t> size() const override {
+     return torch::nullopt;
+   }
+
+   // Provides a reference to the chunk sampler. Used mainly in distributed
+   // data loading to set the epoch number for the sampler.
+   ChunkSamplerType& chunk_sampler() {
+     return chunk_sampler_;
+   }
+
+   void save(serialize::OutputArchive& archive) const override {
+     std::lock_guard<std::mutex> lock(chunk_index_guard_);
+     chunk_sampler_.save(archive);
+   }
+
+   void load(serialize::InputArchive& archive) override {
+     std::lock_guard<std::mutex> lock(chunk_index_guard_);
+     chunk_sampler_.load(archive);
+     load_checkpoint_ = true;
+   }
+
+  private:
+   /// Runs on a worker thread to preload chunk data.
+   void preloader(size_t id) {
+     while (!quit_worker_.load()) {
+       try {
+         std::vector<size_t> chunk_idx;
+         {
+           std::lock_guard<std::mutex> lock(chunk_index_guard_);
+           if (auto chunk_sampler_result = chunk_sampler_.next(
+                   this->options_.cross_chunk_shuffle_count())) {
+             chunk_idx = chunk_sampler_result.value();
+           } else {
+             break;
+           }
+         }
+         UnwrappedBatchType data = chunk_reader_.read_chunk(chunk_idx[0]);
+         for (const auto i : c10::irange(1, chunk_idx.size())) {
+           auto chunk_data = chunk_reader_.read_chunk(chunk_idx[i]);
+           std::move(
+               chunk_data.begin(), chunk_data.end(), std::back_inserter(data));
+         }
+         if (preprocessing_policy_) {
+           preprocessing_policy_(data);
+         }
+         if (!data.empty()) { // skip empty chunks.
+           batch_buffer_->add_chunk_data(std::move(data));
+         }
+       } catch (...) {
+         batch_buffer_->add_chunk_data(std::current_exception());
+       }
+     }
+     AT_ASSERT(running_preloaders_.load() > 0);
+     --running_preloaders_;
+     if (running_preloaders_.load() == 0) {
+       // all preloaders are completed, so we can notify the batch_buffer.
+       batch_buffer_->stop();
+     }
+   }
+
+   /// Blocks the current thread until the workers finish execution and exit.
+   void free_workers() {
+     if (!quit_worker_.load()) {
+       quit_worker_ = true;
+       for (auto& worker_thread : preload_threads_) {
+         worker_thread.join();
+       }
+     }
+   }
+
+  private:
+   // Templated class that defines what a chunk is and how to read chunk data.
+   // When a chunk is returned by chunk_reader_, ChunkDataset splits it into
+   // batches and caches them in batch_buffer_.
+   ChunkReader chunk_reader_;
+
+   // chunk sampler to shuffle different chunks
+   ChunkSamplerType chunk_sampler_;
+
+   // example sampler to shuffle examples in a specific chunk
+   ExampleSamplerType example_sampler_;
+
+   // batch data buffer which holds chunk data from the preloading threads.
+   std::shared_ptr<
+       detail::BatchDataBuffer<UnwrappedBatchType, ExampleSamplerType>>
+       batch_buffer_;
+
+   // worker thread pool
+   std::vector<std::thread> preload_threads_;
+
+   /// The options the Dataset was configured with.
+   const ChunkDatasetOptions options_;
+
+   // function pointer wrapper to apply custom processing over chunk data. This
+   // is considered an advanced parameter for developers who want to apply a
+   // pre-process to the chunk data before sampling into minibatches.
+   // Unlike the collate function, this policy is applied at the chunk level,
+   // instead of the minibatch level. When a chunk of data is loaded (multiple
+   // chunks if cross_chunk_shuffle_count_ is greater than 1), this policy is
+   // applied to the full loaded data. It is useful if developers want to
+   // perform pre-processing (like bucketing) on the chunk data before the
+   // example sampler samples the data. By default it's an empty pointer and no
+   // action will be taken.
+   std::function<void(UnwrappedBatchType&)> preprocessing_policy_;
+
+   // indicates whether the worker threads can be torn down
+   std::atomic<bool> quit_worker_;
+
+   // keeps track of running preloaders to notify the batch buffer. A value of
+   // 0 indicates that the chunk loading is completed.
+   std::atomic<size_t> running_preloaders_;
+
+   // mutex to synchronize chunk sampler next() calls.
+   mutable std::mutex chunk_index_guard_;
+
+   // boolean value to indicate whether we need to load the checkpoint for
+   // chunk_sampler_.
+   bool load_checkpoint_;
+ };
+ } // namespace datasets
+ } // namespace data
+ } // namespace torch
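A minimal sketch of implementing the `ChunkDataReader` interface above (the reader name and data are hypothetical; a real reader would pull chunks from files):

#include <torch/torch.h>
#include <numeric>

// Four chunks of 100 consecutive ints each.
class IntChunkReader : public torch::data::datasets::ChunkDataReader<int> {
 public:
  ChunkType read_chunk(size_t chunk_index) override {
    ChunkType chunk(100);
    std::iota(chunk.begin(), chunk.end(), static_cast<int>(chunk_index) * 100);
    return chunk;
  }
  size_t chunk_count() override {
    return 4;
  }
  void reset() override {}  // no internal state to clear in this sketch
};

Paired with two samplers and a `ChunkDatasetOptions` instance, a reader like this would drive the `ChunkDataset` and `BatchDataBuffer` machinery described above.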
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/datasets/tensor.h ADDED
@@ -0,0 +1,38 @@
+ #pragma once
+
+ #include <torch/data/datasets/base.h>
+ #include <torch/data/example.h>
+ #include <torch/types.h>
+
+ #include <cstddef>
+ #include <vector>
+
+ namespace torch {
+ namespace data {
+ namespace datasets {
+
+ /// A dataset of tensors.
+ /// Stores a single tensor internally, which is then indexed inside `get()`.
+ struct TensorDataset : public Dataset<TensorDataset, TensorExample> {
+   /// Creates a `TensorDataset` from a vector of tensors.
+   explicit TensorDataset(const std::vector<Tensor>& tensors)
+       : TensorDataset(torch::stack(tensors)) {}
+
+   explicit TensorDataset(torch::Tensor tensor) : tensor(std::move(tensor)) {}
+
+   /// Returns a single `TensorExample`.
+   TensorExample get(size_t index) override {
+     return tensor[index];
+   }
+
+   /// Returns the number of tensors in the dataset.
+   optional<size_t> size() const override {
+     return tensor.size(0);
+   }
+
+   Tensor tensor;
+ };
+
+ } // namespace datasets
+ } // namespace data
+ } // namespace torch
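For example, a short sketch (shapes are arbitrary):

#include <torch/torch.h>
#include <cassert>

int main() {
  // 10 rows of 4 features; get(i) returns row i as a TensorExample.
  auto dataset = torch::data::datasets::TensorDataset(torch::randn({10, 4}));
  assert(dataset.size().value() == 10);
  torch::Tensor row = dataset.get(3);  // TensorExample converts to its tensor
}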
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/data_shuttle.h ADDED
@@ -0,0 +1,87 @@
+ #pragma once
+
+ #include <torch/data/detail/queue.h>
+ #include <torch/types.h>
+
+ #include <c10/util/Exception.h>
+ #include <c10/util/Optional.h>
+
+ #include <chrono>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+ namespace detail {
+
+ /// Encapsulates the full life cycle of DataLoader jobs.
+ ///
+ /// When a new job is enqueued to the `DataShuttle`, a counter for in-flight
+ /// jobs is bumped. This job is said to be "in-flight" until its result is
+ /// popped. Worker threads dequeue jobs as soon as they are available. When a
+ /// worker finishes a job, it enqueues the result. Only when the main thread
+ /// dequeues a result is the count of in-flight jobs decremented. When the main
+ /// thread attempts to dequeue a job but no jobs are in-flight, that means the
+ /// epoch is complete and `pop_result` returns an empty optional.
+ template <typename Job, typename Result>
+ class DataShuttle {
+  public:
+   /// Pushes a new job. Called by the main thread.
+   void push_job(Job job) {
+     new_jobs_.push(std::move(job));
+     ++in_flight_jobs_;
+   }
+
+   /// Pushes the result of a job. Called by worker threads.
+   void push_result(Result result) {
+     results_.push(std::move(result));
+   }
+
+   /// Returns the next job, blocking until there is one available. Called by
+   /// worker threads.
+   Job pop_job() {
+     return new_jobs_.pop();
+   }
+
+   /// Returns the result of a job, or nullopt if all jobs were exhausted.
+   /// Called by the main thread.
+   optional<Result> pop_result(
+       optional<std::chrono::milliseconds> timeout = nullopt) {
+     if (in_flight_jobs_ > 0) {
+       auto result = results_.pop(timeout);
+       --in_flight_jobs_;
+       return result;
+     }
+     return nullopt;
+   }
+
+   /// Discards any jobs that are not yet in flight, and waits for all in-flight
+   /// jobs to finish, discarding their result.
+   void drain() {
+     // Clear all inputs so that no further jobs are scheduled.
+     auto number_cleared = new_jobs_.clear();
+     in_flight_jobs_ -= number_cleared;
+     // Remove any outstanding results.
+     while (in_flight_jobs_ > 0) {
+       pop_result();
+     }
+   }
+
+   /// Returns the number of jobs that are still in progress.
+   /// When this number is zero, an epoch is finished.
+   size_t in_flight_jobs() const noexcept {
+     return in_flight_jobs_;
+   }
+
+  private:
+   /// The queue for jobs that are not yet in flight.
+   Queue<Job> new_jobs_;
+   /// The number of in-flight jobs.
+   /// NOTE: Not atomic because only manipulated by the main thread.
+   size_t in_flight_jobs_ = 0;
+   /// The queue for results of finished jobs.
+   Queue<Result> results_;
+ };
+
+ } // namespace detail
+ } // namespace data
+ } // namespace torch
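A single-threaded sketch of the life cycle described above (in the real DataLoader, pop_job/push_result run on worker threads; this is an internal detail namespace, so the usage below is purely illustrative):

#include <torch/data/detail/data_shuttle.h>
#include <cassert>

int main() {
  torch::data::detail::DataShuttle<int, int> shuttle;
  shuttle.push_job(7);                 // main thread: counts as in-flight
  int job = shuttle.pop_job();         // worker side: take the job
  shuttle.push_result(job * 2);        // worker side: publish its result
  auto result = shuttle.pop_result();  // main thread: decrements in-flight
  assert(result.value() == 14);
  assert(shuttle.in_flight_jobs() == 0);
  assert(!shuttle.pop_result());       // nullopt: the "epoch" is finished
}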
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/queue.h ADDED
@@ -0,0 +1,84 @@
+ #pragma once
+
+ #include <torch/types.h>
+
+ #include <c10/util/Exception.h>
+
+ #include <chrono>
+ #include <condition_variable>
+ #include <cstddef>
+ #include <mutex>
+ #include <queue>
+
+ namespace torch {
+ namespace data {
+ namespace detail {
+
+ /// A basic locked, blocking MPMC queue.
+ ///
+ /// Every `push` and `pop` is guarded by a mutex. A condition variable is used
+ /// to communicate insertion of new elements, such that waiting threads will be
+ /// woken up if they are currently waiting inside a call to `pop()`.
+ ///
+ /// Note that this data structure is written specifically for use with the
+ /// `DataLoader`. Its behavior is tailored to this use case and may not be
+ /// applicable to more general uses.
+ template <typename T>
+ class Queue {
+  public:
+   /// Pushes a new value to the back of the `Queue` and notifies one thread on
+   /// the waiting side about this event.
+   void push(T value) {
+     {
+       std::lock_guard<std::mutex> lock(mutex_);
+       queue_.push(std::move(value));
+     }
+     cv_.notify_one();
+   }
+
+   /// Blocks until at least one element is ready to be popped from the front
+   /// of the queue. An optional `timeout` in milliseconds can be used to limit
+   /// the time spent waiting for an element. If the wait times out, an
+   /// exception is raised.
+   T pop(optional<std::chrono::milliseconds> timeout = nullopt) {
+     std::unique_lock<std::mutex> lock(mutex_);
+     if (timeout) {
+       if (!cv_.wait_for(
+               lock, *timeout, [this] { return !this->queue_.empty(); })) {
+         // clang-format off
+         AT_ERROR(
+             "Timeout in DataLoader queue while waiting for next batch"
+             " (timeout was ", timeout->count(), " ms)");
+         // clang-format on
+       }
+     } else {
+       cv_.wait(lock, [this] { return !this->queue_.empty(); });
+     }
+     AT_ASSERT(!queue_.empty());
+     T value = queue_.front();
+     queue_.pop();
+     lock.unlock();
+     return value;
+   }
+
+   /// Empties the queue and returns the number of elements that were present
+   /// at the start of the function. No threads are notified about this event
+   /// as it is assumed to be used to drain the queue during shutdown of a
+   /// `DataLoader`.
+   size_t clear() {
+     std::lock_guard<std::mutex> lock(this->mutex_);
+     const auto size = queue_.size();
+     while (!queue_.empty()) {
+       queue_.pop();
+     }
+     return size;
+   }
+
+  private:
+   std::queue<T> queue_;
+   std::mutex mutex_;
+   std::condition_variable cv_;
+ };
+ } // namespace detail
+ } // namespace data
+ } // namespace torch
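A small sketch of the blocking semantics (hypothetical values; in the DataLoader the producer is a worker thread):

#include <torch/data/detail/queue.h>
#include <cassert>
#include <thread>

int main() {
  torch::data::detail::Queue<int> queue;
  std::thread producer([&] { queue.push(42); });
  // Blocks until the producer pushes; with the timeout set, a wait longer
  // than one second would raise an exception instead of hanging.
  int value = queue.pop(std::chrono::milliseconds(1000));
  producer.join();
  assert(value == 42);
  assert(queue.clear() == 0);  // drain; returns how many elements were left
}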
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/detail/sequencers.h ADDED
@@ -0,0 +1,113 @@
+ #pragma once
+
+ #include <torch/types.h>
+
+ #include <algorithm>
+ #include <cstddef>
+ #include <vector>
+
+ namespace torch {
+ namespace data {
+ namespace detail {
+ namespace sequencers {
+ namespace detail {
+ template <typename Result>
+ bool buffer_contains_result(const std::vector<optional<Result>>& buffer) {
+   return std::any_of(
+       buffer.begin(), buffer.end(), [](const optional<Result>& result) {
+         return result.has_value();
+       });
+ }
+ } // namespace detail
+
+ /// A `Sequencer` accepts a function that yields the next result of a
+ /// `DataLoader` and then has the opportunity to influence the order in which
+ /// these results are returned. The `NoSequencer` does not enforce any
+ /// sequencing and returns any result directly. The `OrderedSequencer` instead
+ /// buffers results internally to return them in order of their sequence
+ /// number.
+ template <typename Result>
+ struct Sequencer {
+   using ResultProducer = std::function<optional<Result>()>;
+   virtual ~Sequencer() = default;
+   virtual optional<Result> next(ResultProducer next_result) = 0;
+ };
+
+ /// A `Sequencer` that does not enforce any ordering. It is effectively the
+ /// identity function.
+ template <typename Result>
+ struct NoSequencer final : public Sequencer<Result> {
+   using typename Sequencer<Result>::ResultProducer;
+   optional<Result> next(ResultProducer next_result) override {
+     return next_result();
+   }
+ };
+
+ /// A `Sequencer` that buffers results and returns them in order of their
+ /// sequence number. The `OrderedSequencer` maintains an internal,
+ /// monotonically incrementing counter for the next sequence number it
+ /// expects. If it receives a result with a higher sequence number, it will
+ /// buffer it for later (when the sequence number reaches that of this
+ /// result). Otherwise, if the sequence numbers match, the result is returned.
+ ///
+ /// Implementation note: The `OrderedSequencer` is implemented with a
+ /// fixed-size buffer. Let `m` be the maximum number of jobs in the data
+ /// loader's queue and `s` be the current sequence number. Assume `m` jobs are
+ /// scheduled in the `DataLoader`. Any new result is stored at index
+ /// `job.sqn mod m` in the `OrderedSequencer`. Why are we sure sequence
+ /// numbers of new jobs will not collide with sequence numbers of buffered
+ /// jobs? The `OrderedSequencer` will not return from `next()` until it
+ /// receives the result with sqn `s`. This means no new jobs can be scheduled
+ /// in the `DataLoader` in the meantime, which enforces that as long as sqn
+ /// `s` has not been received, `s + m` (which would cause a collision in the
+ /// fixed-size buffer) will not yet be scheduled.
+ template <typename Result>
+ struct OrderedSequencer : public Sequencer<Result> {
+   using typename Sequencer<Result>::ResultProducer;
+
+   /// Constructs the `OrderedSequencer` with the maximum number of results it
+   /// will ever hold at one point in time.
+   explicit OrderedSequencer(size_t max_jobs) : buffer_(max_jobs) {}
+
+   /// Buffers results until the next one in the expected order is received.
+   optional<Result> next(ResultProducer next_result) override {
+     // If we already have the result for the next sqn, return it.
+     if (auto& maybe_result = buffer(next_sequence_number_)) {
+       auto result = std::move(*maybe_result);
+       buffer(next_sequence_number_++).reset();
+       return result;
+     }
+     // Otherwise wait for the next result.
+     while (true) {
+       auto result = next_result();
+       if (!result) {
+         AT_ASSERT(!detail::buffer_contains_result(buffer_));
+         break;
+       }
+       // If it was not nullopt and the sequence numbers match, return it
+       // directly and bump the sequence number.
+       if (result->sequence_number == next_sequence_number_) {
+         ++next_sequence_number_;
+         return result;
+       }
+       // Stash the result for later.
+       AT_ASSERT(!buffer(result->sequence_number).has_value());
+       buffer(result->sequence_number) = std::move(result);
+     }
+     // The result was an empty optional, so we are done with this epoch.
+     return nullopt;
+   }
+
+   /// Accesses the buffer at the `index` modulo the buffer size.
+   optional<Result>& buffer(size_t index) {
+     return buffer_.at(index % buffer_.size());
+   }
+
+   /// The monotonically increasing sequence number we expect.
+   size_t next_sequence_number_ = 0;
+
+   /// A fixed-size buffer (after construction).
+   std::vector<optional<Result>> buffer_;
+ };
+ } // namespace sequencers
+ } // namespace detail
+ } // namespace data
+ } // namespace torch
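To make the reordering concrete, a sketch with a hypothetical result type (as far as the template is concerned, any struct with a `sequence_number` member should do):

#include <torch/data/detail/sequencers.h>
#include <cassert>

struct TaggedResult {
  size_t sequence_number;
  int payload;
};

int main() {
  torch::data::detail::sequencers::OrderedSequencer<TaggedResult> sequencer(
      /*max_jobs=*/4);
  size_t calls = 0;
  // Produce results out of order: sqn 1 arrives before sqn 0.
  auto producer = [&]() -> torch::optional<TaggedResult> {
    if (calls++ == 0) return TaggedResult{1, 100};
    return TaggedResult{0, 200};
  };
  auto first = sequencer.next(producer);   // sqn 1 is buffered; sqn 0 returned
  assert(first->sequence_number == 0);
  auto second = sequencer.next(producer);  // buffered sqn 1, producer not called
  assert(second->sequence_number == 1);
}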
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/example.h ADDED
@@ -0,0 +1,55 @@
+ #pragma once
+
+ #include <torch/types.h>
+
+ namespace torch {
+ namespace data {
+
+ /// An `Example` from a dataset.
+ ///
+ /// A dataset consists of data and an associated target (label).
+ template <typename Data = at::Tensor, typename Target = at::Tensor>
+ struct Example {
+   using DataType = Data;
+   using TargetType = Target;
+
+   Example() = default;
+   Example(Data data, Target target)
+       : data(std::move(data)), target(std::move(target)) {}
+
+   Data data;
+   Target target;
+ };
+
+ namespace example {
+ using NoTarget = void;
+ } // namespace example
+
+ /// A specialization for `Example` that does not have a target.
+ ///
+ /// This class exists so that code can be written for a templated `Example`
+ /// type, and work both for labeled and unlabeled datasets.
+ template <typename Data>
+ struct Example<Data, example::NoTarget> {
+   using DataType = Data;
+   using TargetType = example::NoTarget;
+
+   Example() = default;
+   /* implicit */ Example(Data data) : data(std::move(data)) {}
+
+   // When a DataLoader returns an Example like this, that example should be
+   // implicitly convertible to the underlying data type.
+
+   operator Data&() {
+     return data;
+   }
+   operator const Data&() const {
+     return data;
+   }
+
+   Data data;
+ };
+
+ using TensorExample = Example<at::Tensor, example::NoTarget>;
+ } // namespace data
+ } // namespace torch
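A short sketch of both forms (tensors are arbitrary):

#include <torch/torch.h>

int main() {
  // Labeled example: data plus target.
  torch::data::Example<> labeled(torch::ones({2}), torch::zeros({1}));
  // Unlabeled specialization: implicitly convertible to its data tensor.
  torch::data::TensorExample unlabeled(torch::arange(4));
  torch::Tensor data = unlabeled;  // via operator Data&()
}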
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/iterator.h ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ #pragma once
+
+ #include <torch/csrc/utils/variadic.h>
+ #include <torch/types.h>
+
+ #include <c10/util/Exception.h>
+
+ #include <functional>
+ #include <iterator>
+ #include <memory>
+ #include <type_traits>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+ namespace detail {
+ // For increased safety and more separated logic, this implementation of
+ // `Iterator` consists of a `ValidIterator` and a `SentinelIterator`. A
+ // `ValidIterator` yields new batches until the `DataLoader` is exhausted. While
+ // the `DataLoader` is not exhausted, `ValidIterator`s compare equal if they are
+ // the same object. When the `ValidIterator` becomes exhausted, it compares
+ // equal to the `SentinelIterator`, but not before. Half of the code here
+ // exists to implement double dispatch for that comparison. God damnit, C++.
+
+ template <typename Batch>
+ struct ValidIterator;
+
+ template <typename Batch>
+ struct SentinelIterator;
+
+ /// Base class for the `ValidIterator` and `SentinelIterator`.
+ template <typename Batch>
+ struct IteratorImpl {
+   virtual ~IteratorImpl() = default;
+   virtual void next() = 0;
+   virtual Batch& get() = 0;
+   virtual bool operator==(const IteratorImpl& other) const = 0;
+   virtual bool operator==(const ValidIterator<Batch>& other) const = 0;
+   virtual bool operator==(const SentinelIterator<Batch>& other) const = 0;
+ };
+
+ template <typename Batch>
+ struct ValidIterator : public IteratorImpl<Batch> {
+   using BatchProducer = std::function<optional<Batch>()>;
+
+   explicit ValidIterator(BatchProducer next_batch)
+       : next_batch_(std::move(next_batch)) {}
+
+   /// Fetches the next batch.
+   void next() override {
+     // If we didn't get the very first batch yet, get it now.
+     lazy_initialize();
+     TORCH_CHECK(
+         batch_.has_value(), "Attempted to increment iterator past the end");
+     // Increment to the next batch.
+     batch_ = next_batch_();
+   }
+
+   /// Returns the current batch. The precondition for this operation to not
+   /// throw an exception is that it has been compared to the `SentinelIterator`
+   /// and did not compare equal.
+   Batch& get() override {
+     // If we didn't get the very first batch yet, get it now.
+     lazy_initialize();
+     TORCH_CHECK(
+         batch_.has_value(),
+         "Attempted to dereference iterator that was past the end");
+     return batch_.value();
+   }
+
+   /// Does double dispatch.
+   bool operator==(const IteratorImpl<Batch>& other) const override {
+     return other == *this;
+   }
+
+   /// A `ValidIterator` is equal to the `SentinelIterator` iff the
+   /// `ValidIterator` has reached the end of the dataloader.
+   bool operator==(const SentinelIterator<Batch>& /* unused */) const override {
+     lazy_initialize();
+     return !batch_;
+   }
+
+   /// Returns true if the memory address of `other` equals that of `this`.
+   bool operator==(const ValidIterator<Batch>& other) const override {
+     return &other == this;
+   }
+
+   /// Gets the very first batch if it has not yet been fetched.
+   void lazy_initialize() const {
+     if (!initialized_) {
+       batch_ = next_batch_();
+       initialized_ = true;
+     }
+   }
+
+   BatchProducer next_batch_;
+   mutable optional<Batch> batch_;
+   mutable bool initialized_ = false;
+ };
+
+ template <typename Batch>
+ struct SentinelIterator : public IteratorImpl<Batch> {
+   void next() override {
+     AT_ERROR(
+         "Incrementing the DataLoader's past-the-end iterator is not allowed");
+   }
+
+   Batch& get() override {
+     AT_ERROR(
+         "Dereferencing the DataLoader's past-the-end iterator is not allowed");
+   }
+
+   /// Does double dispatch.
+   bool operator==(const IteratorImpl<Batch>& other) const override {
+     return other == *this;
+   }
+
+   /// Calls the comparison operator between `ValidIterator` and
+   /// `SentinelIterator`.
+   bool operator==(const ValidIterator<Batch>& other) const override {
+     return other == *this;
+   }
+
+   /// Sentinel iterators always compare equal.
+   bool operator==(const SentinelIterator<Batch>& other) const override {
+     return true;
+   }
+ };
+ } // namespace detail
+
+ template <typename Batch>
+ class Iterator {
+  public:
+   // Type aliases to make the class recognized as a proper iterator.
+   using difference_type = std::ptrdiff_t;
+   using value_type = Batch;
+   using pointer = Batch*;
+   using reference = Batch&;
+   using iterator_category = std::input_iterator_tag;
+
+   explicit Iterator(std::unique_ptr<detail::IteratorImpl<Batch>> impl)
+       : impl_(std::move(impl)) {}
+
+   /// Increments the iterator.
+   /// Only permitted for valid iterators (not past the end).
+   Iterator& operator++() {
+     impl_->next();
+     return *this;
+   }
+
+   /// Returns the current batch.
+   /// Only permitted for valid iterators (not past the end).
+   Batch& operator*() {
+     return impl_->get();
+   }
+
+   /// Returns a pointer to the current batch.
+   /// Only permitted for valid iterators (not past the end).
+   Batch* operator->() {
+     return &impl_->get();
+   }
+
+   /// Compares two iterators for equality.
+   bool operator==(const Iterator& other) const {
+     return *impl_ == *other.impl_;
+   }
+
+   /// Compares two iterators for inequality.
+   bool operator!=(const Iterator& other) const {
+     return !(*this == other);
+   }
+
+  private:
+   /// Points either to a `ValidIterator` or to a `SentinelIterator`.
+   std::shared_ptr<detail::IteratorImpl<Batch>> impl_;
+ };
+ } // namespace data
+ } // namespace torch
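For context, a minimal sketch of how this `Iterator` surfaces through a `DataLoader` (the MNIST path and batch size are illustrative; `make_data_loader`, the MNIST dataset, and `Stack` come from elsewhere in this same torch::data API):

    auto dataset = torch::data::datasets::MNIST("./mnist")
                       .map(torch::data::transforms::Stack<>());
    auto loader = torch::data::make_data_loader(std::move(dataset),
                                                /*batch_size=*/64);
    // begin() hands out a ValidIterator, end() a SentinelIterator; the loop
    // terminates when the two compare equal, i.e. when the epoch is exhausted.
    for (auto it = loader->begin(); it != loader->end(); ++it) {
      torch::data::Example<>& batch = *it; // ValidIterator::get()
    }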
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/samplers.h ADDED
@@ -0,0 +1,9 @@
+ #pragma once
+
+ #include <torch/data/samplers/base.h>
+ #include <torch/data/samplers/custom_batch_request.h>
+ #include <torch/data/samplers/distributed.h>
+ #include <torch/data/samplers/random.h>
+ #include <torch/data/samplers/sequential.h>
+ #include <torch/data/samplers/serialize.h>
+ #include <torch/data/samplers/stream.h>
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms.h ADDED
@@ -0,0 +1,7 @@
+ #pragma once
+
+ #include <torch/data/transforms/base.h>
+ #include <torch/data/transforms/collate.h>
+ #include <torch/data/transforms/lambda.h>
+ #include <torch/data/transforms/stack.h>
+ #include <torch/data/transforms/tensor.h>
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/base.h ADDED
@@ -0,0 +1,53 @@
+ #pragma once
+
+ #include <torch/types.h>
+
+ #include <utility>
+ #include <vector>
+
+ namespace torch {
+ namespace data {
+ namespace transforms {
+
+ /// A transformation of a batch to a new batch.
+ template <typename InputBatch, typename OutputBatch>
+ class BatchTransform {
+  public:
+   using InputBatchType = InputBatch;
+   using OutputBatchType = OutputBatch;
+
+   virtual ~BatchTransform() = default;
+
+   /// Applies the transformation to the given `input_batch`.
+   virtual OutputBatch apply_batch(InputBatch input_batch) = 0;
+ };
+
+ /// A transformation of individual input examples to individual output
+ /// examples.
+ ///
+ /// Just as a `Dataset` is a `BatchDataset`, a `Transform` is a
+ /// `BatchTransform` that can operate on the level of individual examples
+ /// rather than entire batches. The batch-level transform is implemented (by
+ /// default) in terms of the example-level transform, though this can be
+ /// customized.
+ template <typename Input, typename Output>
+ class Transform
+     : public BatchTransform<std::vector<Input>, std::vector<Output>> {
+  public:
+   using InputType = Input;
+   using OutputType = Output;
+
+   /// Applies the transformation to the given `input`.
+   virtual OutputType apply(InputType input) = 0;
+
+   /// Applies the transformation over the entire `input_batch`.
+   std::vector<Output> apply_batch(std::vector<Input> input_batch) override {
+     std::vector<Output> output_batch;
+     output_batch.reserve(input_batch.size());
+     for (auto&& input : input_batch) {
+       output_batch.push_back(apply(std::move(input)));
+     }
+     return output_batch;
+   }
+ };
+ } // namespace transforms
+ } // namespace data
+ } // namespace torch
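A minimal sketch of an example-level `Transform` subclass (the `DoubleData` name and behavior are hypothetical): `apply` handles one example, and the inherited `apply_batch` maps it over the whole batch vector.

    // Hypothetical transform: doubles the data tensor of each example.
    struct DoubleData
        : torch::data::transforms::Transform<torch::data::Example<>,
                                             torch::data::Example<>> {
      torch::data::Example<> apply(torch::data::Example<> input) override {
        input.data = input.data * 2;
        return input;
      }
    };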
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/collate.h ADDED
@@ -0,0 +1,35 @@
+ #pragma once
+
+ #include <torch/data/example.h>
+ #include <torch/data/transforms/lambda.h>
+
+ #include <vector>
+
+ namespace torch {
+ namespace data {
+ namespace transforms {
+
+ /// A `Collation` is a transform that reduces a batch into a single value.
+ /// The result is a `BatchDataset` that has the type of the single value as
+ /// its `BatchType`.
+ template <typename T, typename BatchType = std::vector<T>>
+ using Collation = BatchTransform<BatchType, T>;
+
+ /// A `Collate` allows passing a custom function to reduce/collate a batch
+ /// into a single value. It's effectively the lambda version of `Collation`,
+ /// which you could subclass and override `apply_batch()` on to achieve the
+ /// same effect.
+ ///
+ /// \rst
+ /// .. code-block:: cpp
+ ///   using namespace torch::data;
+ ///
+ ///   auto dataset = datasets::MNIST("path/to/mnist")
+ ///     .map(transforms::Collate<Example<>>([](std::vector<Example<>> e) {
+ ///       return std::move(e.front());
+ ///     }));
+ /// \endrst
+ template <typename T, typename BatchType = std::vector<T>>
+ using Collate = BatchLambda<BatchType, T>;
+ } // namespace transforms
+ } // namespace data
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/lambda.h ADDED
@@ -0,0 +1,56 @@
+ #pragma once
+
+ #include <torch/data/transforms/base.h>
+
+ #include <functional>
+ #include <utility>
+ #include <vector>
+
+ namespace torch {
+ namespace data {
+ namespace transforms {
+
+ /// A `BatchTransform` that applies a user-provided functor to a batch.
+ template <typename Input, typename Output = Input>
+ class BatchLambda : public BatchTransform<Input, Output> {
+  public:
+   using typename BatchTransform<Input, Output>::InputBatchType;
+   using typename BatchTransform<Input, Output>::OutputBatchType;
+   using FunctionType = std::function<OutputBatchType(InputBatchType)>;
+
+   /// Constructs the `BatchLambda` from the given `function` object.
+   explicit BatchLambda(FunctionType function)
+       : function_(std::move(function)) {}
+
+   /// Applies the user-provided function object to the `input_batch`.
+   OutputBatchType apply_batch(InputBatchType input_batch) override {
+     return function_(std::move(input_batch));
+   }
+
+  private:
+   FunctionType function_;
+ };
+
+ /// A `Transform` that applies a user-provided functor to individual examples.
+ template <typename Input, typename Output = Input>
+ class Lambda : public Transform<Input, Output> {
+  public:
+   using typename Transform<Input, Output>::InputType;
+   using typename Transform<Input, Output>::OutputType;
+   using FunctionType = std::function<Output(Input)>;
+
+   /// Constructs the `Lambda` from the given `function` object.
+   explicit Lambda(FunctionType function) : function_(std::move(function)) {}
+
+   /// Applies the user-provided function object to the `input`.
+   OutputType apply(InputType input) override {
+     return function_(std::move(input));
+   }
+
+  private:
+   FunctionType function_;
+ };
+
+ } // namespace transforms
+ } // namespace data
+ } // namespace torch
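A minimal sketch of wrapping a plain closure in `Lambda` so it can be mapped over a dataset (the closure body is illustrative):

    // Example-level transform from a closure; Transform::apply_batch applies
    // it element-wise when a batch is assembled.
    auto to_float = torch::data::transforms::Lambda<torch::data::Example<>>(
        [](torch::data::Example<> e) {
          e.data = e.data.to(torch::kFloat32);
          return e;
        });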
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/transforms/tensor.h ADDED
@@ -0,0 +1,77 @@
+ #pragma once
+
+ #include <torch/data/example.h>
+ #include <torch/data/transforms/base.h>
+ #include <torch/types.h>
+
+ #include <functional>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+ namespace transforms {
+
+ /// A `Transform` that is specialized for the typical `Example<Tensor, Tensor>`
+ /// combination. It exposes a single `operator()` interface hook (for
+ /// subclasses) and calls this function on input `Example` objects.
+ template <typename Target = Tensor>
+ class TensorTransform
+     : public Transform<Example<Tensor, Target>, Example<Tensor, Target>> {
+  public:
+   using E = Example<Tensor, Target>;
+   using typename Transform<E, E>::InputType;
+   using typename Transform<E, E>::OutputType;
+
+   /// Transforms a single input tensor to an output tensor.
+   virtual Tensor operator()(Tensor input) = 0;
+
+   /// Implementation of `Transform::apply` that calls `operator()`.
+   OutputType apply(InputType input) override {
+     input.data = (*this)(std::move(input.data));
+     return input;
+   }
+ };
+
+ /// A `Lambda` specialized for the typical `Example<Tensor, Tensor>` input
+ /// type.
+ template <typename Target = Tensor>
+ class TensorLambda : public TensorTransform<Target> {
+  public:
+   using FunctionType = std::function<Tensor(Tensor)>;
+
+   /// Creates a `TensorLambda` from the given `function`.
+   explicit TensorLambda(FunctionType function)
+       : function_(std::move(function)) {}
+
+   /// Applies the user-provided functor to the input tensor.
+   Tensor operator()(Tensor input) override {
+     return function_(std::move(input));
+   }
+
+  private:
+   FunctionType function_;
+ };
+
+ /// Normalizes input tensors by subtracting the supplied mean and dividing by
+ /// the given standard deviation.
+ template <typename Target = Tensor>
+ struct Normalize : public TensorTransform<Target> {
+   /// Constructs a `Normalize` transform. The mean and standard deviation can
+   /// be anything that is broadcastable over the input tensors (such as single
+   /// scalars).
+   Normalize(ArrayRef<double> mean, ArrayRef<double> stddev)
+       : mean(torch::tensor(mean, torch::kFloat32)
+                  .unsqueeze(/*dim=*/1)
+                  .unsqueeze(/*dim=*/2)),
+         stddev(torch::tensor(stddev, torch::kFloat32)
+                    .unsqueeze(/*dim=*/1)
+                    .unsqueeze(/*dim=*/2)) {}
+
+   torch::Tensor operator()(Tensor input) override {
+     return input.sub(mean).div(stddev);
+   }
+
+   torch::Tensor mean, stddev;
+ };
+ } // namespace transforms
+ } // namespace data
+ } // namespace torch
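A minimal usage sketch for `Normalize`, chained before `Stack` (the dataset path is illustrative; 0.1307/0.3081 are the commonly used MNIST statistics):

    auto dataset =
        torch::data::datasets::MNIST("./mnist")
            .map(torch::data::transforms::Normalize<>(/*mean=*/0.1307,
                                                      /*stddev=*/0.3081))
            .map(torch::data::transforms::Stack<>());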
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data/worker_exception.h ADDED
@@ -0,0 +1,38 @@
+ #pragma once
+
+ #include <exception>
+ #include <string>
+ #include <utility>
+
+ namespace torch {
+ namespace data {
+
+ /// Thrown when an exception occurs in a DataLoader's worker thread and is
+ /// caught there. A `WorkerException` stores an `exception_ptr` to the
+ /// original exception thrown in the worker thread.
+ struct WorkerException : public std::exception {
+   /// Constructs a `WorkerException` from an `exception_ptr`.
+   explicit WorkerException(std::exception_ptr original)
+       : original_exception(std::move(original)),
+         message("Caught exception in DataLoader worker thread.") {
+     try {
+       std::rethrow_exception(original_exception);
+     } catch (std::exception& e) {
+       message += " Original message: ";
+       message += e.what();
+     }
+   }
+
+   const char* what() const noexcept override {
+     return message.c_str();
+   }
+
+   /// The original exception thrown in the worker thread.
+   std::exception_ptr original_exception;
+
+   /// This exception's message (not the original exception's message).
+   std::string message;
+ };
+
+ } // namespace data
+ } // namespace torch
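A minimal sketch of handling a `WorkerException` at the call site (the loop body is illustrative); `what()` already embeds the original message, and `original_exception` lets callers dispatch on the concrete type:

    try {
      for (auto& batch : *loader) {
        // ... training step ...
      }
    } catch (const torch::data::WorkerException& e) {
      std::cerr << e.what() << '\n';
      std::rethrow_exception(e.original_exception); // recover original type
    }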
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/TensorDataContainer.h ADDED
@@ -0,0 +1,372 @@
+ #pragma once
+
+ #include <ATen/Dispatch.h>
+ #include <ATen/ScalarOps.h>
+ #include <ATen/core/Tensor.h>
+ #include <ATen/core/grad_mode.h>
+
+ #include <c10/util/irange.h>
+
+ #ifndef AT_PER_OPERATOR_HEADERS
+ #include <ATen/Functions.h>
+ #else
+ #include <ATen/ops/empty.h>
+ #include <ATen/ops/tensor.h>
+ #endif
+
+ #include <initializer_list>
+
+ namespace torch {
+
+ namespace detail {
+
+ enum class TensorDataContainerType { Scalar, InitList, Tensor };
+
+ struct TensorDataContainer;
+
+ inline std::ostream& operator<<(
+     std::ostream& stream,
+     const TensorDataContainer& tensor_data_container);
+
+ // FIXME: There is no `operator<<` overload for the `at::kBFloat16` type,
+ // so we need to convert it to `float` using the `operator float()` function
+ // defined in `c10/util/BFloat16.h`.
+ // Tracking issue: https://github.com/pytorch/pytorch/issues/28845
+ inline std::ostream& operator<<(std::ostream& stream, c10::BFloat16 value) {
+   stream << static_cast<float>(value);
+   return stream;
+ }
+
+ inline c10::ScalarType compute_desired_dtype(c10::ScalarType scalar_type) {
+   if (scalar_type == at::kInt || scalar_type == at::kLong) {
+     // C++ `torch::tensor` with an integer type or an `at::ArrayRef` /
+     // `std::vector` / (nested) braced-init-list of integer types always
+     // produces a tensor of dtype `at::kLong` (a.k.a. int64_t), matching
+     // Python `torch.tensor` behavior.
+     return at::kLong;
+   } else if (scalar_type == at::kFloat || scalar_type == at::kDouble) {
+     // C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` /
+     // `std::vector` / (nested) braced-init-list of floating-point types
+     // always produces a tensor of dtype `torch::get_default_dtype()`,
+     // matching Python `torch.tensor` behavior.
+     return at::typeMetaToScalarType(at::get_default_dtype());
+   } else {
+     return scalar_type;
+   }
+ }
+
+ // We use `TensorDataContainer` to support converting the following data
+ // container types into the equivalent Tensor:
+ //
+ // 1. Arbitrarily nested braced-init-list (e.g. `{{1, 2}, {3, 4}}`).
+ // 2. `at::ArrayRef` of supported tensor data types.
+ // 3. `std::vector` of supported tensor data types.
+ //
+ // At any time, a `TensorDataContainer` object represents one of the
+ // following:
+ //
+ // 1. A scalar with value `scalar()` and type `scalar_type()`.
+ // 2. A Tensor represented in `std::initializer_list<TensorDataContainer>`
+ //    form, with value `init_list()`, Tensor scalar type `scalar_type()`, and
+ //    Tensor sizes `sizes()`.
+ // 3. A Tensor represented in `at::Tensor` form, with value `tensor()`,
+ //    scalar type `scalar_type()`, and Tensor sizes `sizes()`.
+ //
+ // All the infrastructure here is mostly to support converting an arbitrarily
+ // nested braced-init-list to the equivalent Tensor successfully. Consider the
+ // following example:
+ //
+ // `torch::tensor({{1}, {2}})`
+ //
+ // This will call into the `torch::tensor` function:
+ //
+ // `at::Tensor tensor(detail::TensorDataContainer tensor_data_container,
+ // const at::TensorOptions& options = {})`
+ //
+ // and the compiler will first try to convert `{{1}, {2}}` to the
+ // `TensorDataContainer` type:
+ //
+ // `TensorDataContainer({{1}, {2}})`
+ //
+ // which matches the
+ // `TensorDataContainer(std::initializer_list<TensorDataContainer>)`
+ // constructor, and in an attempt to convert `{1}` and `{2}` to
+ // `TensorDataContainer`, it calls the following:
+ //
+ // `TensorDataContainer({1})` (the same call path happens for `{2}`; we'll
+ // just focus on `{1}` here)
+ //
+ // At this point, theoretically there are two plausible ways for `{1}` to be
+ // matched to one of the constructors of `TensorDataContainer`:
+ //
+ // 1. It can be a list-initialization of a scalar value, thus matching
+ //    `TensorDataContainer(int value)`.
+ // 2. It can be converted to `std::initializer_list<TensorDataContainer>`,
+ //    thus matching
+ //    `TensorDataContainer(std::initializer_list<TensorDataContainer>)`.
+ //
+ // How does the compiler decide which one to choose? According to
+ // `https://en.cppreference.com/w/cpp/language/list_initialization`, a
+ // braced-init-list always prefers the constructor that takes
+ // `std::initializer_list`. Hence we happily move forward with constructor #2,
+ // and it calls the following:
+ //
+ // `TensorDataContainer(1)`
+ //
+ // Now it matches `TensorDataContainer(int value)`, which stores `1` as a
+ // scalar value. All is good.
+ struct TensorDataContainer {
+   // NOTE: For tensors with zero-size dimensions (e.g. `torch::tensor({{},
+   // {}})`), the innermost empty braced-init-list `{}` matches the default
+   // constructor of the innermost `TensorDataContainer`.
+   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+   TensorDataContainer()
+       : sizes_({0}),
+         // NOTE: In Python, the dtype of tensors with zero-size dimensions
+         // (e.g. `torch.tensor([[], []])`) depends on the value of
+         // `torch.get_default_dtype()`, and we should do the same for the C++
+         // equivalent.
+         scalar_type_(at::typeMetaToScalarType(at::get_default_dtype())),
+         type_(TensorDataContainerType::InitList) {}
+ #define TENSOR(T, S)                            \
+   TensorDataContainer(T value)                  \
+       : sizes_(),                               \
+         scalar_type_(at::k##S),                 \
+         type_(TensorDataContainerType::Scalar), \
+         scalar_(value) {}
+   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+   AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
+   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+   AT_FORALL_COMPLEX_TYPES(TENSOR)
+ #undef TENSOR
+   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+   TensorDataContainer(std::initializer_list<TensorDataContainer> init_list)
+       : sizes_(),
+         scalar_type_(init_list.begin()->scalar_type()),
+         type_(TensorDataContainerType::InitList),
+         init_list_(init_list) {
+     const TensorDataContainer& first_elem = *(init_list.begin());
+     for (const auto& elem : init_list) {
+       TORCH_CHECK(
+           elem.sizes() == first_elem.sizes(),
+           "Expected all sub-lists to have sizes: ",
+           first_elem.sizes(),
+           " (e.g. ",
+           first_elem,
+           "), ",
+           "but got sub-list ",
+           elem,
+           " with sizes: ",
+           elem.sizes());
+       TORCH_CHECK(
+           elem.scalar_type() == first_elem.scalar_type(),
+           "Expected all elements of the tensor to have the same scalar type: ",
+           first_elem.scalar_type(),
+           ", but got element of scalar type: ",
+           elem.scalar_type());
+     }
+     sizes_.reserve(first_elem.sizes().size() + 1);
+     sizes_.push_back(init_list.size());
+     sizes_.insert(
+         sizes_.end(), first_elem.sizes().begin(), first_elem.sizes().end());
+   }
+
+ #define TENSOR(T, S)                                                          \
+   TensorDataContainer(at::ArrayRef<T> values)                                 \
+       : sizes_({(int64_t)values.size()}),                                     \
+         scalar_type_(at::k##S),                                               \
+         type_(TensorDataContainerType::Tensor) {                              \
+     at::AutoDispatchBelowAutograd mode;                                       \
+     if (scalar_type_ == at::kBool) {                                          \
+       tensor_ = at::tensor(values, at::TensorOptions().device(at::kCPU));     \
+     } else {                                                                  \
+       tensor_ = at::tensor(values, at::dtype(scalar_type_).device(at::kCPU)); \
+     }                                                                         \
+   }
+   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+   AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
+   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+   AT_FORALL_COMPLEX_TYPES(TENSOR)
+ #undef TENSOR
+
+   // NOTE: We need to handle `std::vector` explicitly instead of relying on
+   // an implicit conversion to `at::ArrayRef`, otherwise the following error
+   // can be thrown when calling `torch::tensor(std::vector<int>({1, 2}))`:
+   // ```
+   // error: no matching function for call to 'tensor(const std::vector<int>&)'
+   // no known conversion for argument 1 from 'const std::vector<int>' to
+   // 'torch::detail::TensorDataContainer'
+   // ```
+   //
+   // NOTE: `torch::tensor(std::vector<bool>)` is not supported for now,
+   // because an ArrayRef<bool> cannot be constructed from a std::vector<bool>
+   // bitfield.
+ #define TENSOR(T, S)                                  \
+   TensorDataContainer(const std::vector<T>& values)   \
+       : TensorDataContainer(at::ArrayRef<T>(values)) {}
+   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+   AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TENSOR)
+   // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+   AT_FORALL_COMPLEX_TYPES(TENSOR)
+ #undef TENSOR
+
+   bool is_scalar() const {
+     return type_ == TensorDataContainerType::Scalar;
+   }
+
+   const c10::Scalar& scalar() const {
+     TORCH_CHECK(
+         is_scalar(),
+         "Can only call `scalar()` on a TensorDataContainer that has `is_scalar() == true`");
+     return scalar_;
+   }
+
+   bool is_init_list() const {
+     return type_ == TensorDataContainerType::InitList;
+   }
+
+   const std::initializer_list<TensorDataContainer>& init_list() const {
+     TORCH_CHECK(
+         is_init_list(),
+         "Can only call `init_list()` on a TensorDataContainer that has `is_init_list() == true`");
+     return init_list_;
+   }
+
+   bool is_tensor() const {
+     return type_ == TensorDataContainerType::Tensor;
+   }
+
+   const at::Tensor& tensor() const {
+     TORCH_CHECK(
+         is_tensor(),
+         "Can only call `tensor()` on a TensorDataContainer that has `is_tensor() == true`");
+     return tensor_;
+   }
+
+   const std::vector<int64_t>& sizes() const {
+     return sizes_;
+   }
+
+   const c10::ScalarType& scalar_type() const {
+     return scalar_type_;
+   }
+
+   at::Tensor convert_to_tensor(at::TensorOptions options) const {
+     if (!options.has_dtype()) {
+       options = options.dtype(compute_desired_dtype(scalar_type_));
+     }
+
+     if (is_scalar()) {
+       at::AutoDispatchBelowAutograd mode;
+       return at::scalar_tensor(scalar_, options);
+     } else if (is_init_list()) {
+       // NOTE: Here we explicitly choose to initialize the tensor on CPU
+       // first, fill each element of the tensor, and then move the tensor to
+       // the desired device. For a CUDA device, this approach only involves
+       // one CUDA kernel launch and is much faster than initializing the
+       // tensor on CUDA first and then filling each element of it (which
+       // involves `N` CUDA kernel launches, where `N` is the number of
+       // elements in the tensor).
+       at::Tensor tensor = ([&]() {
+         at::AutoDispatchBelowAutograd mode;
+         return at::empty(sizes_, options.device(at::kCPU));
+       })();
+       fill_tensor(tensor);
+       return tensor.to(options.device());
+     } else if (is_tensor()) {
+       auto output = tensor_.to(options);
+       TORCH_CHECK(
+           !tensor_.is_complex() || output.is_complex(),
+           "cannot do torch::tensor(complex, dtype=non-complex) because a complex value cannot be cast to a real number without loss of information");
+       return output;
+     } else {
+       TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type");
+     }
+   }
+
+   void pretty_print_recursive(std::ostream& stream) const {
+     if (is_scalar()) {
+       AT_DISPATCH_ALL_TYPES_AND3(
+           at::kBool,
+           at::kHalf,
+           at::kBFloat16,
+           scalar_type_,
+           "TensorDataContainer_pretty_print_scalar",
+           [&] { stream << scalar_.to<scalar_t>(); });
+     } else if (is_init_list()) {
+       stream << "{";
+       for (const TensorDataContainer* it = init_list_.begin();
+            it != init_list_.end();
+            it++) {
+         stream << *it;
+         if (std::next(it) != init_list_.end())
+           stream << ", ";
+       }
+       stream << "}";
+     } else if (is_tensor()) {
+       stream << "{";
+       for (const auto i : c10::irange(tensor_.sizes()[0])) {
+         AT_DISPATCH_ALL_TYPES_AND3(
+             at::kBool,
+             at::kHalf,
+             at::kBFloat16,
+             scalar_type_,
+             "TensorDataContainer_pretty_print_tensor_item",
+             [&] { stream << tensor_[i].item<scalar_t>(); });
+         if (i != tensor_.sizes()[0] - 1)
+           stream << ", ";
+       }
+       stream << "}";
+     } else {
+       TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type");
+     }
+   }
+
+  private:
+   void fill_tensor(at::Tensor& tensor) const {
+     if (is_scalar()) {
+       TORCH_INTERNAL_ASSERT(
+           tensor.dim() == 0,
+           "Expected a 0-dim Tensor, but got Tensor with dimensions: ",
+           tensor.dim());
+       at::NoGradGuard guard;
+       tensor.fill_(scalar_);
+     } else if (is_init_list()) {
+       TORCH_INTERNAL_ASSERT(
+           tensor.sizes()[0] == (int64_t)init_list_.size(),
+           "Expected a Tensor with size ",
+           init_list_.size(),
+           " in its first dimension, but got Tensor with size ",
+           tensor.sizes()[0],
+           " in its first dimension");
+       size_t index = 0;
+       for (const auto& elem : init_list_) {
+         at::Tensor slice = tensor[index];
+         elem.fill_tensor(slice);
+         index++;
+       }
+     } else if (is_tensor()) {
+       TORCH_INTERNAL_ASSERT(
+           false,
+           "TensorDataContainer is already a Tensor type, `fill_tensor` should not be called");
+     } else {
+       TORCH_INTERNAL_ASSERT(false, "Invalid TensorDataContainer type");
+     }
+   }
+
+   std::vector<int64_t> sizes_;
+   c10::ScalarType scalar_type_;
+   TensorDataContainerType type_;
+   c10::Scalar scalar_;
+   std::initializer_list<TensorDataContainer> init_list_;
+   at::Tensor tensor_;
+ };
+
+ inline std::ostream& operator<<(
+     std::ostream& stream,
+     const TensorDataContainer& tensor_data_container) {
+   tensor_data_container.pretty_print_recursive(stream);
+   return stream;
+ }
+
+ } // namespace detail
+
+ } // namespace torch
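A minimal sketch of the dtype rules implemented by `compute_desired_dtype` and the braced-init-list conversion above:

    auto a = torch::tensor({{1, 2}, {3, 4}});      // integers -> kLong, sizes [2, 2]
    auto b = torch::tensor({1.5, 2.5});            // floats -> default dtype (kFloat)
    auto c = torch::tensor({1, 2}, torch::kInt32); // an explicit dtype wins
    // Ragged or mixed-type nested lists fail the TORCH_CHECKs in the
    // initializer_list constructor at runtime.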
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/detail/static.h ADDED
@@ -0,0 +1,65 @@
+ #pragma once
+
+ #include <torch/csrc/utils/variadic.h>
+ #include <torch/types.h>
+
+ #include <cstdint>
+ #include <type_traits>
+
+ namespace torch {
+ namespace nn {
+ class Module;
+ } // namespace nn
+ } // namespace torch
+
+ namespace torch {
+ namespace detail {
+ /// Detects if a type T has a forward() method.
+ template <typename T>
+ struct has_forward {
+   // Declare two types with differing size.
+   using yes = int8_t;
+   using no = int16_t;
+
+   // Here we declare two functions. The first is only enabled if `&U::forward`
+   // is well-formed and returns the `yes` type. In C++, the ellipsis parameter
+   // type (`...`) always puts the function at the bottom of overload
+   // resolution. This is specified in the standard as: 1) a standard
+   // conversion sequence is always better than a user-defined conversion
+   // sequence or an ellipsis conversion sequence, and 2) a user-defined
+   // conversion sequence is always better than an ellipsis conversion
+   // sequence. This means that if the first overload is viable, it will be
+   // preferred over the second as long as we pass any convertible type. The
+   // type of `&U::forward` is a pointer type, so we can pass e.g. 0.
+   template <typename U>
+   static yes test(decltype(&U::forward));
+   template <typename U>
+   static no test(...);
+
+   // Finally we test statically whether the size of the type returned by the
+   // selected overload is the size of the `yes` type.
+   static constexpr bool value = (sizeof(test<T>(nullptr)) == sizeof(yes));
+ };
+
+ template <typename Head = void, typename... Tail>
+ constexpr bool check_not_lvalue_references() {
+   return (!std::is_lvalue_reference<Head>::value ||
+           std::is_const<typename std::remove_reference<Head>::type>::value) &&
+       check_not_lvalue_references<Tail...>();
+ }
+
+ template <>
+ inline constexpr bool check_not_lvalue_references<void>() {
+   return true;
+ }
+
+ /// A type trait whose `value` member is true if `M` derives from `Module`.
+ template <typename M>
+ using is_module =
+     std::is_base_of<torch::nn::Module, typename std::decay<M>::type>;
+
+ template <typename M, typename T = void>
+ using enable_if_module_t =
+     typename std::enable_if<is_module<M>::value, T>::type;
+ } // namespace detail
+ } // namespace torch
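A minimal compile-time sketch of the `has_forward` trait (the `Net` type is hypothetical); note that an overloaded `forward` makes `&U::forward` ambiguous, in which case the trait reports false:

    struct Net {
      int forward(int x) { return x + 1; }
    };
    // test<Net>(nullptr) selects the pointer overload (returning `yes`);
    // for int, only the ellipsis overload (returning `no`) is viable.
    static_assert(torch::detail::has_forward<Net>::value, "Net has forward()");
    static_assert(!torch::detail::has_forward<int>::value, "int does not");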
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/enum.h ADDED
@@ -0,0 +1,212 @@
+ #pragma once
+
+ #include <string>
+ #include <variant>
+
+ #include <ATen/core/Reduction.h>
+ #include <c10/util/Exception.h>
+ #include <torch/csrc/Export.h>
+
+ #define TORCH_ENUM_DECLARE(name)                                      \
+   namespace torch {                                                   \
+   namespace enumtype {                                                \
+   /*                                                                  \
+     NOTE: We need to provide the default constructor for each struct, \
+     otherwise Clang 3.8 would complain:                               \
+     ```                                                               \
+     error: default initialization of an object of const type 'const   \
+     enumtype::Enum1' without a user-provided default constructor      \
+     ```                                                               \
+   */                                                                  \
+   struct k##name {                                                    \
+     k##name() {}                                                      \
+   };                                                                  \
+   }                                                                   \
+   TORCH_API extern const enumtype::k##name k##name;                   \
+   }
+
+ #define TORCH_ENUM_DEFINE(name)    \
+   namespace torch {                \
+   const enumtype::k##name k##name; \
+   }
+
+ #define TORCH_ENUM_PRETTY_PRINT(name)                        \
+   std::string operator()(const enumtype::k##name& v) const { \
+     std::string k("k");                                      \
+     return k + #name;                                        \
+   }
+
+ // NOTE: Backstory on why we need the following two macros:
+ //
+ // Consider the following options class:
+ //
+ // ```
+ // struct TORCH_API SomeOptions {
+ //   typedef std::variant<enumtype::kNone, enumtype::kMean, enumtype::kSum>
+ //       reduction_t;
+ //   SomeOptions(reduction_t reduction = torch::kMean)
+ //       : reduction_(reduction) {}
+ //
+ //   TORCH_ARG(reduction_t, reduction);
+ // };
+ // ```
+ //
+ // and the functional that uses it:
+ //
+ // ```
+ // Tensor some_functional(
+ //     const Tensor& input,
+ //     SomeOptions options = {}) {
+ //   ...
+ // }
+ // ```
+ //
+ // Normally, we would expect this to work:
+ //
+ // `F::some_functional(input, torch::kNone)`
+ //
+ // However, it fails to compile with the following error instead:
+ //
+ // ```
+ // error: could not convert `torch::kNone` from `const torch::enumtype::kNone`
+ // to `torch::nn::SomeOptions`
+ // ```
+ //
+ // To get around this problem, we explicitly provide the following
+ // constructors for `SomeOptions`:
+ //
+ // ```
+ // SomeOptions(torch::enumtype::kNone reduction) : reduction_(torch::kNone) {}
+ // SomeOptions(torch::enumtype::kMean reduction) : reduction_(torch::kMean) {}
+ // SomeOptions(torch::enumtype::kSum reduction) : reduction_(torch::kSum) {}
+ // ```
+ //
+ // so that the conversion from `torch::kNone` to `SomeOptions` works.
+ //
+ // Note that we also provide the default constructor `SomeOptions() {}`, so
+ // that `SomeOptions options = {}` can work.
+ #define TORCH_OPTIONS_CTOR_VARIANT_ARG3(                                       \
+     OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3)                               \
+   OPTIONS_NAME() = default;                                                    \
+   OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} \
+   OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} \
+   OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {}
+
+ #define TORCH_OPTIONS_CTOR_VARIANT_ARG4(                                       \
+     OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3, TYPE4)                        \
+   OPTIONS_NAME() = default;                                                    \
+   OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} \
+   OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} \
+   OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {} \
+   OPTIONS_NAME(torch::enumtype::TYPE4 ARG_NAME) : ARG_NAME##_(torch::TYPE4) {}
+
+ TORCH_ENUM_DECLARE(Linear)
+ TORCH_ENUM_DECLARE(Conv1D)
+ TORCH_ENUM_DECLARE(Conv2D)
+ TORCH_ENUM_DECLARE(Conv3D)
+ TORCH_ENUM_DECLARE(ConvTranspose1D)
+ TORCH_ENUM_DECLARE(ConvTranspose2D)
+ TORCH_ENUM_DECLARE(ConvTranspose3D)
+ TORCH_ENUM_DECLARE(Sigmoid)
+ TORCH_ENUM_DECLARE(Tanh)
+ TORCH_ENUM_DECLARE(ReLU)
+ TORCH_ENUM_DECLARE(GELU)
+ TORCH_ENUM_DECLARE(SiLU)
+ TORCH_ENUM_DECLARE(Mish)
+ TORCH_ENUM_DECLARE(LeakyReLU)
+ TORCH_ENUM_DECLARE(FanIn)
+ TORCH_ENUM_DECLARE(FanOut)
+ TORCH_ENUM_DECLARE(Constant)
+ TORCH_ENUM_DECLARE(Reflect)
+ TORCH_ENUM_DECLARE(Replicate)
+ TORCH_ENUM_DECLARE(Circular)
+ TORCH_ENUM_DECLARE(Nearest)
+ TORCH_ENUM_DECLARE(Bilinear)
+ TORCH_ENUM_DECLARE(Bicubic)
+ TORCH_ENUM_DECLARE(Trilinear)
+ TORCH_ENUM_DECLARE(Area)
+ TORCH_ENUM_DECLARE(NearestExact)
+ TORCH_ENUM_DECLARE(Sum)
+ TORCH_ENUM_DECLARE(Mean)
+ TORCH_ENUM_DECLARE(Max)
+ TORCH_ENUM_DECLARE(None)
+ TORCH_ENUM_DECLARE(BatchMean)
+ TORCH_ENUM_DECLARE(Zeros)
+ TORCH_ENUM_DECLARE(Border)
+ TORCH_ENUM_DECLARE(Reflection)
+ TORCH_ENUM_DECLARE(RNN_TANH)
+ TORCH_ENUM_DECLARE(RNN_RELU)
+ TORCH_ENUM_DECLARE(LSTM)
+ TORCH_ENUM_DECLARE(GRU)
+ TORCH_ENUM_DECLARE(Valid)
+ TORCH_ENUM_DECLARE(Same)
+
+ namespace torch {
+ namespace enumtype {
+
+ struct _compute_enum_name {
+   TORCH_ENUM_PRETTY_PRINT(Linear)
+   TORCH_ENUM_PRETTY_PRINT(Conv1D)
+   TORCH_ENUM_PRETTY_PRINT(Conv2D)
+   TORCH_ENUM_PRETTY_PRINT(Conv3D)
+   TORCH_ENUM_PRETTY_PRINT(ConvTranspose1D)
+   TORCH_ENUM_PRETTY_PRINT(ConvTranspose2D)
+   TORCH_ENUM_PRETTY_PRINT(ConvTranspose3D)
+   TORCH_ENUM_PRETTY_PRINT(Sigmoid)
+   TORCH_ENUM_PRETTY_PRINT(Tanh)
+   TORCH_ENUM_PRETTY_PRINT(ReLU)
+   TORCH_ENUM_PRETTY_PRINT(GELU)
+   TORCH_ENUM_PRETTY_PRINT(SiLU)
+   TORCH_ENUM_PRETTY_PRINT(Mish)
+   TORCH_ENUM_PRETTY_PRINT(LeakyReLU)
+   TORCH_ENUM_PRETTY_PRINT(FanIn)
+   TORCH_ENUM_PRETTY_PRINT(FanOut)
+   TORCH_ENUM_PRETTY_PRINT(Constant)
+   TORCH_ENUM_PRETTY_PRINT(Reflect)
+   TORCH_ENUM_PRETTY_PRINT(Replicate)
+   TORCH_ENUM_PRETTY_PRINT(Circular)
+   TORCH_ENUM_PRETTY_PRINT(Nearest)
+   TORCH_ENUM_PRETTY_PRINT(Bilinear)
+   TORCH_ENUM_PRETTY_PRINT(Bicubic)
+   TORCH_ENUM_PRETTY_PRINT(Trilinear)
+   TORCH_ENUM_PRETTY_PRINT(Area)
+   TORCH_ENUM_PRETTY_PRINT(NearestExact)
+   TORCH_ENUM_PRETTY_PRINT(Sum)
+   TORCH_ENUM_PRETTY_PRINT(Mean)
+   TORCH_ENUM_PRETTY_PRINT(Max)
+   TORCH_ENUM_PRETTY_PRINT(None)
+   TORCH_ENUM_PRETTY_PRINT(BatchMean)
+   TORCH_ENUM_PRETTY_PRINT(Zeros)
+   TORCH_ENUM_PRETTY_PRINT(Border)
+   TORCH_ENUM_PRETTY_PRINT(Reflection)
+   TORCH_ENUM_PRETTY_PRINT(RNN_TANH)
+   TORCH_ENUM_PRETTY_PRINT(RNN_RELU)
+   TORCH_ENUM_PRETTY_PRINT(LSTM)
+   TORCH_ENUM_PRETTY_PRINT(GRU)
+   TORCH_ENUM_PRETTY_PRINT(Valid)
+   TORCH_ENUM_PRETTY_PRINT(Same)
+ };
+
+ template <typename V>
+ std::string get_enum_name(V variant_enum) {
+   return std::visit(enumtype::_compute_enum_name{}, variant_enum);
+ }
+
+ template <typename V>
+ at::Reduction::Reduction reduction_get_enum(V variant_enum) {
+   if (std::holds_alternative<enumtype::kNone>(variant_enum)) {
+     return at::Reduction::None;
+   } else if (std::holds_alternative<enumtype::kMean>(variant_enum)) {
+     return at::Reduction::Mean;
+   } else if (std::holds_alternative<enumtype::kSum>(variant_enum)) {
+     return at::Reduction::Sum;
+   } else {
+     TORCH_CHECK(
+         false,
+         get_enum_name(variant_enum),
+         " is not a valid value for reduction");
+     return at::Reduction::END;
+   }
+ }
+
+ } // namespace enumtype
+ } // namespace torch
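A minimal sketch of how these tags flow through a variant-typed option (the `reduction_t` alias mirrors the `SomeOptions` example above):

    using reduction_t = std::variant<torch::enumtype::kNone,
                                     torch::enumtype::kMean,
                                     torch::enumtype::kSum>;
    reduction_t r = torch::kSum;
    // get_enum_name visits the variant and yields "kSum";
    // reduction_get_enum maps it onto at::Reduction::Sum.
    std::string name = torch::enumtype::get_enum_name(r);
    at::Reduction::Reduction red = torch::enumtype::reduction_get_enum(r);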
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/expanding_array.h ADDED
@@ -0,0 +1,182 @@
+ #pragma once
+
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/Optional.h>
+ #include <c10/util/irange.h>
+
+ #include <algorithm>
+ #include <array>
+ #include <cstdint>
+ #include <initializer_list>
+ #include <string>
+ #include <vector>
+
+ namespace torch {
+
+ /// A utility class that accepts either a container of `D`-many values, or a
+ /// single value, which is internally repeated `D` times. This is useful to
+ /// represent parameters that are multidimensional, but often equally sized in
+ /// all dimensions. For example, the kernel size of a 2D convolution has an
+ /// `x` and `y` length, but `x` and `y` are often equal. In such a case you
+ /// could just pass `3` to an `ExpandingArray<2>` and it would "expand" to
+ /// `{3, 3}`.
+ template <size_t D, typename T = int64_t>
+ class ExpandingArray {
+  public:
+   /// Constructs an `ExpandingArray` from an `initializer_list`. The length
+   /// of the list is checked against the `ExpandingArray`'s extent parameter
+   /// `D` at runtime.
+   /*implicit*/ ExpandingArray(std::initializer_list<T> list)
+       : ExpandingArray(at::ArrayRef<T>(list)) {}
+
+   /// Constructs an `ExpandingArray` from a `std::vector`. The length of the
+   /// vector is checked against the `ExpandingArray`'s extent parameter `D`
+   /// at runtime.
+   /*implicit*/ ExpandingArray(std::vector<T> vec)
+       : ExpandingArray(at::ArrayRef<T>(vec)) {}
+
+   /// Constructs an `ExpandingArray` from an `at::ArrayRef`. The length of
+   /// the array is checked against the `ExpandingArray`'s extent parameter
+   /// `D` at runtime.
+   /*implicit*/ ExpandingArray(at::ArrayRef<T> values) {
+     // clang-format off
+     TORCH_CHECK(
+         values.size() == D,
+         "Expected ", D, " values, but instead got ", values.size());
+     // clang-format on
+     std::copy(values.begin(), values.end(), values_.begin());
+   }
+
+   /// Constructs an `ExpandingArray` from a single value, which is repeated
+   /// `D` times (where `D` is the extent parameter of the `ExpandingArray`).
+   /*implicit*/ ExpandingArray(T single_size) {
+     values_.fill(single_size);
+   }
+
+   /// Constructs an `ExpandingArray` from a correctly sized `std::array`.
+   /*implicit*/ ExpandingArray(const std::array<T, D>& values)
+       : values_(values) {}
+
+   /// Accesses the underlying `std::array`.
+   std::array<T, D>& operator*() {
+     return values_;
+   }
+
+   /// Accesses the underlying `std::array`.
+   const std::array<T, D>& operator*() const {
+     return values_;
+   }
+
+   /// Accesses the underlying `std::array`.
+   std::array<T, D>* operator->() {
+     return &values_;
+   }
+
+   /// Accesses the underlying `std::array`.
+   const std::array<T, D>* operator->() const {
+     return &values_;
+   }
+
+   /// Returns an `ArrayRef` to the underlying `std::array`.
+   operator at::ArrayRef<T>() const {
+     return values_;
+   }
+
+   /// Returns the extent of the `ExpandingArray`.
+   size_t size() const noexcept {
+     return D;
+   }
+
+  protected:
+   /// The backing array.
+   // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
+   std::array<T, D> values_;
+ };
+
+ template <size_t D, typename T>
+ std::ostream& operator<<(
+     std::ostream& stream,
+     const ExpandingArray<D, T>& expanding_array) {
+   if (expanding_array.size() == 1) {
+     return stream << expanding_array->at(0);
+   }
+   return stream << static_cast<at::ArrayRef<T>>(expanding_array);
+ }
+
+ /// A utility class that accepts either a container of `D`-many
+ /// `c10::optional<T>` values, or a single `c10::optional<T>` value, which is
+ /// internally repeated `D` times. It has the additional ability to accept
+ /// containers of the underlying type `T` and convert them to a container of
+ /// `c10::optional<T>`.
+ template <size_t D, typename T = int64_t>
+ class ExpandingArrayWithOptionalElem
+     : public ExpandingArray<D, c10::optional<T>> {
+  public:
+   using ExpandingArray<D, c10::optional<T>>::ExpandingArray;
+
+   /// Constructs an `ExpandingArrayWithOptionalElem` from an
+   /// `initializer_list` of the underlying type `T`. The length of the list
+   /// is checked against the extent parameter `D` at runtime.
+   /*implicit*/ ExpandingArrayWithOptionalElem(std::initializer_list<T> list)
+       : ExpandingArrayWithOptionalElem(at::ArrayRef<T>(list)) {}
+
+   /// Constructs an `ExpandingArrayWithOptionalElem` from a `std::vector` of
+   /// the underlying type `T`. The length of the vector is checked against
+   /// the extent parameter `D` at runtime.
+   /*implicit*/ ExpandingArrayWithOptionalElem(std::vector<T> vec)
+       : ExpandingArrayWithOptionalElem(at::ArrayRef<T>(vec)) {}
+
+   /// Constructs an `ExpandingArrayWithOptionalElem` from an `at::ArrayRef`
+   /// of the underlying type `T`. The length of the array is checked against
+   /// the extent parameter `D` at runtime.
+   /*implicit*/ ExpandingArrayWithOptionalElem(at::ArrayRef<T> values)
+       : ExpandingArray<D, c10::optional<T>>(0) {
+     // clang-format off
+     TORCH_CHECK(
+         values.size() == D,
+         "Expected ", D, " values, but instead got ", values.size());
+     // clang-format on
+     for (const auto i : c10::irange(this->values_.size())) {
+       this->values_[i] = values[i];
+     }
+   }
+
+   /// Constructs an `ExpandingArrayWithOptionalElem` from a single value of
+   /// the underlying type `T`, which is repeated `D` times (where `D` is the
+   /// extent parameter of the `ExpandingArrayWithOptionalElem`).
+   /*implicit*/ ExpandingArrayWithOptionalElem(T single_size)
+       : ExpandingArray<D, c10::optional<T>>(0) {
+     for (const auto i : c10::irange(this->values_.size())) {
+       this->values_[i] = single_size;
+     }
+   }
+
+   /// Constructs an `ExpandingArrayWithOptionalElem` from a correctly sized
+   /// `std::array` of the underlying type `T`.
+   /*implicit*/ ExpandingArrayWithOptionalElem(const std::array<T, D>& values)
+       : ExpandingArray<D, c10::optional<T>>(0) {
+     for (const auto i : c10::irange(this->values_.size())) {
+       this->values_[i] = values[i];
+     }
+   }
+ };
+
+ template <size_t D, typename T>
+ std::ostream& operator<<(
+     std::ostream& stream,
+     const ExpandingArrayWithOptionalElem<D, T>& expanding_array_with_opt_elem) {
+   if (expanding_array_with_opt_elem.size() == 1) {
+     const auto& elem = expanding_array_with_opt_elem->at(0);
+     stream << (elem.has_value() ? c10::str(elem.value()) : "None");
+   } else {
+     std::vector<std::string> str_array;
+     for (const auto& elem : *expanding_array_with_opt_elem) {
+       str_array.emplace_back(
+           elem.has_value() ? c10::str(elem.value()) : "None");
+     }
+     stream << at::ArrayRef<std::string>(str_array);
+   }
+   return stream;
+ }
+
+ } // namespace torch
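A minimal sketch of the expansion behavior, as typically consumed by convolution-style options:

    torch::ExpandingArray<2> kernel = 3;      // expands to {3, 3}
    torch::ExpandingArray<2> stride({2, 1});  // explicit per-dimension values
    int64_t kh = (*kernel)[0];                // dereference for the std::array
    // torch::ExpandingArray<2>({1, 2, 3}) would trip the TORCH_CHECK at
    // runtime, since three values were given for extent D = 2.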
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/fft.h ADDED
@@ -0,0 +1,389 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+
+ namespace torch {
+ namespace fft {
+
+ /// Computes the 1-dimensional fast Fourier transform over a given dimension.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.fft.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn(128, torch::kComplexDouble);
+ /// torch::fft::fft(t);
+ /// ```
+ inline Tensor fft(
+     const Tensor& self,
+     c10::optional<SymInt> n = c10::nullopt,
+     int64_t dim = -1,
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_fft_symint(self, n, dim, norm);
+ }
+
+ /// Computes the 1-dimensional inverse Fourier transform over a given
+ /// dimension.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.ifft.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn(128, torch::kComplexDouble);
+ /// torch::fft::ifft(t);
+ /// ```
+ inline Tensor ifft(
+     const Tensor& self,
+     c10::optional<SymInt> n = c10::nullopt,
+     int64_t dim = -1,
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_ifft_symint(self, n, dim, norm);
+ }
+
+ /// Computes the 2-dimensional fast Fourier transform over the given
+ /// dimensions.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.fft2.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ /// torch::fft::fft2(t);
+ /// ```
+ inline Tensor fft2(
+     const Tensor& self,
+     OptionalIntArrayRef s = c10::nullopt,
+     IntArrayRef dim = {-2, -1},
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_fft2(self, s, dim, norm);
+ }
+
+ /// Computes the inverse of torch.fft.fft2.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.ifft2.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ /// torch::fft::ifft2(t);
+ /// ```
+ inline Tensor ifft2(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     IntArrayRef dim = {-2, -1},
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_ifft2(self, s, dim, norm);
+ }
+
+ /// Computes the N-dimensional fast Fourier transform over given dimensions.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.fftn.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ /// torch::fft::fftn(t);
+ /// ```
+ inline Tensor fftn(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     at::OptionalIntArrayRef dim = c10::nullopt,
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_fftn(self, s, dim, norm);
+ }
+
+ /// Computes the N-dimensional inverse fast Fourier transform over given
+ /// dimensions.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.ifftn.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ /// torch::fft::ifftn(t);
+ /// ```
+ inline Tensor ifftn(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     at::OptionalIntArrayRef dim = c10::nullopt,
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_ifftn(self, s, dim, norm);
+ }
+
+ /// Computes the 1-dimensional FFT of real input with onesided Hermitian
+ /// output.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.rfft.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn(128);
+ /// auto T = torch::fft::rfft(t);
+ /// assert(T.is_complex() && T.numel() == 128 / 2 + 1);
+ /// ```
+ inline Tensor rfft(
+     const Tensor& self,
+     c10::optional<SymInt> n = c10::nullopt,
+     int64_t dim = -1,
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_rfft_symint(self, n, dim, norm);
+ }
+
+ /// Computes the inverse of torch.fft.rfft.
+ ///
+ /// The input is a onesided Hermitian Fourier domain signal, with real-valued
+ /// output. See https://pytorch.org/docs/master/fft.html#torch.fft.irfft.
+ ///
+ /// Example:
+ /// ```
+ /// auto T = torch::randn(128 / 2 + 1, torch::kComplexDouble);
+ /// auto t = torch::fft::irfft(T, /*n=*/128);
+ /// assert(t.is_floating_point() && t.numel() == 128);
+ /// ```
+ inline Tensor irfft(
+     const Tensor& self,
+     c10::optional<SymInt> n = c10::nullopt,
+     int64_t dim = -1,
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_irfft_symint(self, n, dim, norm);
+ }
+
+ /// Computes the 2-dimensional FFT of real input. Returns a onesided Hermitian
+ /// output. See https://pytorch.org/docs/master/fft.html#torch.fft.rfft2.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn({128, 128}, torch::kDouble);
+ /// torch::fft::rfft2(t);
+ /// ```
+ inline Tensor rfft2(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     IntArrayRef dim = {-2, -1},
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_rfft2(self, s, dim, norm);
+ }
+
+ /// Computes the inverse of torch.fft.rfft2.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.irfft2.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ /// torch::fft::irfft2(t);
+ /// ```
+ inline Tensor irfft2(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     IntArrayRef dim = {-2, -1},
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_irfft2(self, s, dim, norm);
+ }
+
+ /// Computes the N-dimensional FFT of real input with onesided Hermitian
+ /// output.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.rfftn.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn({128, 128}, torch::kDouble);
+ /// torch::fft::rfftn(t);
+ /// ```
+ inline Tensor rfftn(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     at::OptionalIntArrayRef dim = c10::nullopt,
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_rfftn(self, s, dim, norm);
+ }
+
+ /// Computes the inverse of torch.fft.rfftn.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.irfftn.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn({128, 128}, torch::kComplexDouble);
+ /// torch::fft::irfftn(t);
+ /// ```
+ inline Tensor irfftn(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     at::OptionalIntArrayRef dim = c10::nullopt,
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_irfftn(self, s, dim, norm);
+ }
+
+ /// Computes the 1-dimensional FFT of a onesided Hermitian signal.
+ ///
+ /// The input represents a Hermitian symmetric time domain signal. The
+ /// returned Fourier domain representation of such a signal is real-valued.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.hfft.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn(128 / 2 + 1, torch::kComplexDouble);
+ /// auto T = torch::fft::hfft(t, /*n=*/128);
+ /// assert(T.is_floating_point() && T.numel() == 128);
+ /// ```
+ inline Tensor hfft(
+     const Tensor& self,
+     c10::optional<SymInt> n = c10::nullopt,
+     int64_t dim = -1,
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_hfft_symint(self, n, dim, norm);
+ }
+
+ /// Computes the inverse FFT of a real-valued Fourier domain signal.
+ ///
+ /// The output is a onesided representation of the Hermitian symmetric time
+ /// domain signal. See
+ /// https://pytorch.org/docs/master/fft.html#torch.fft.ihfft.
+ ///
+ /// Example:
+ /// ```
+ /// auto T = torch::randn(128, torch::kDouble);
+ /// auto t = torch::fft::ihfft(T);
+ /// assert(t.is_complex() && t.numel() == 128 / 2 + 1);
+ /// ```
+ inline Tensor ihfft(
+     const Tensor& self,
+     c10::optional<SymInt> n = c10::nullopt,
+     int64_t dim = -1,
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_ihfft_symint(self, n, dim, norm);
+ }
+
+ /// Computes the 2-dimensional FFT of a Hermitian symmetric input signal.
+ ///
+ /// The input is a onesided representation of the Hermitian symmetric time
+ /// domain signal. See
+ /// https://pytorch.org/docs/master/fft.html#torch.fft.hfft2.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn({128, 65}, torch::kComplexDouble);
+ /// auto T = torch::fft::hfft2(t, /*s=*/{128, 128});
+ /// assert(T.is_floating_point() && T.numel() == 128 * 128);
+ /// ```
+ inline Tensor hfft2(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     IntArrayRef dim = {-2, -1},
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_hfft2(self, s, dim, norm);
+ }
+
+ /// Computes the 2-dimensional IFFT of a real input signal.
+ ///
+ /// The output is a onesided representation of the Hermitian symmetric time
+ /// domain signal. See
+ /// https://pytorch.org/docs/master/fft.html#torch.fft.ihfft2.
+ ///
+ /// Example:
+ /// ```
+ /// auto T = torch::randn({128, 128}, torch::kDouble);
+ /// auto t = torch::fft::ihfft2(T);
+ /// assert(t.is_complex() && t.size(1) == 65);
+ /// ```
+ inline Tensor ihfft2(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     IntArrayRef dim = {-2, -1},
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_ihfft2(self, s, dim, norm);
+ }
+
+ /// Computes the N-dimensional FFT of a Hermitian symmetric input signal.
+ ///
+ /// The input is a onesided representation of the Hermitian symmetric time
+ /// domain signal. See
+ /// https://pytorch.org/docs/master/fft.html#torch.fft.hfftn.
+ ///
+ /// Example:
+ /// ```
+ /// auto t = torch::randn({128, 65}, torch::kComplexDouble);
+ /// auto T = torch::fft::hfftn(t, /*s=*/{128, 128});
+ /// assert(T.is_floating_point() && T.numel() == 128 * 128);
+ /// ```
+ inline Tensor hfftn(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     IntArrayRef dim = {-2, -1},
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_hfftn(self, s, dim, norm);
+ }
+
+ /// Computes the N-dimensional IFFT of a real input signal.
+ ///
+ /// The output is a onesided representation of the Hermitian symmetric time
+ /// domain signal. See
+ /// https://pytorch.org/docs/master/fft.html#torch.fft.ihfftn.
+ ///
+ /// Example:
+ /// ```
+ /// auto T = torch::randn({128, 128}, torch::kDouble);
+ /// auto t = torch::fft::ihfftn(T);
+ /// assert(t.is_complex() && t.size(1) == 65);
+ /// ```
+ inline Tensor ihfftn(
+     const Tensor& self,
+     at::OptionalIntArrayRef s = c10::nullopt,
+     IntArrayRef dim = {-2, -1},
+     c10::optional<c10::string_view> norm = c10::nullopt) {
+   return torch::fft_ihfftn(self, s, dim, norm);
+ }
+
+ /// Computes the discrete Fourier Transform sample frequencies for a signal of
+ /// size n.
+ ///
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.fftfreq
+ ///
+ /// Example:
+ /// ```
+ /// auto frequencies = torch::fft::fftfreq(128, torch::kDouble);
+ /// ```
+ inline Tensor fftfreq(int64_t n, double d, const TensorOptions& options = {}) {
+   return torch::fft_fftfreq(n, d, options);
+ }
+
+ inline Tensor fftfreq(int64_t n, const TensorOptions& options = {}) {
+   return torch::fft_fftfreq(n, /*d=*/1.0, options);
+ }
+
+ /// Computes the sample frequencies for torch.fft.rfft with a signal of size n.
+ ///
+ /// Like torch.fft.rfft, only the positive frequencies are included.
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.rfftfreq
+ ///
+ /// Example:
+ /// ```
+ /// auto frequencies = torch::fft::rfftfreq(128, torch::kDouble);
+ /// ```
+ inline Tensor rfftfreq(int64_t n, double d, const TensorOptions& options) {
+   return torch::fft_rfftfreq(n, d, options);
+ }
+
+ inline Tensor rfftfreq(int64_t n, const TensorOptions& options) {
+   return torch::fft_rfftfreq(n, /*d=*/1.0, options);
+ }
+
+ /// Reorders n-dimensional FFT output to have negative frequency terms first,
+ /// via a torch.roll operation.
+ ///
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.fftshift
+ ///
360
+ /// Example:
361
+ /// ```
362
+ /// auto x = torch::randn({127, 4});
363
+ /// auto centred_fft = torch::fft::fftshift(torch::fft::fftn(x));
364
+ /// ```
365
+ inline Tensor fftshift(
366
+ const Tensor& x,
367
+ at::OptionalIntArrayRef dim = c10::nullopt) {
368
+ return torch::fft_fftshift(x, dim);
369
+ }
370
+
371
+ /// Inverse of torch.fft.fftshift
372
+ ///
373
+ /// See https://pytorch.org/docs/master/fft.html#torch.fft.ifftshift
374
+ ///
375
+ /// Example:
376
+ /// ```
377
+ /// auto x = torch::randn({127, 4});
378
+ /// auto shift = torch::fft::fftshift(x)
379
+ /// auto unshift = torch::fft::ifftshift(shift);
380
+ /// assert(torch::allclose(x, unshift));
381
+ /// ```
382
+ inline Tensor ifftshift(
383
+ const Tensor& x,
384
+ at::OptionalIntArrayRef dim = c10::nullopt) {
385
+ return torch::fft_ifftshift(x, dim);
386
+ }
387
+
388
+ } // namespace fft
389
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/imethod.h ADDED
@@ -0,0 +1,53 @@
+ #pragma once
+ #include <ATen/core/ivalue.h>
+ #include <unordered_map>
+ #include <vector>
+
+ namespace torch {
+
+ class TORCH_API IMethod {
+   /*
+   IMethod provides a portable interface for torch methods, whether
+   they are backed by torchscript or python/deploy.
+
+   This is helpful since torchscript methods provide additional information
+   (e.g. FunctionSchema, Graph) which isn't available in pure python methods.
+
+   Higher level APIs should prefer depending on this interface rather
+   than a specific implementation of it, to promote portability and reuse, and
+   avoid unintentional dependencies on e.g. script methods.
+
+   Note: This API is experimental, and may evolve.
+   */
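+
+   // Illustrative usage (a hedged sketch, not part of the original header;
+   // `method` is assumed to be a reference to some concrete IMethod
+   // implementation, e.g. one backed by a script method):
+   //
+   //   c10::IValue out = method({torch::ones({2, 2})});
+   //   const auto& arg_names = method.getArgumentNames();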
+  public:
+   using IValueList = std::vector<c10::IValue>;
+   using IValueMap = std::unordered_map<std::string, at::IValue>;
+
+   IMethod() = default;
+   IMethod(const IMethod&) = default;
+   IMethod& operator=(const IMethod&) = default;
+   IMethod(IMethod&&) noexcept = default;
+   IMethod& operator=(IMethod&&) noexcept = default;
+   virtual ~IMethod() = default;
+
+   virtual c10::IValue operator()(
+       std::vector<c10::IValue> args,
+       const IValueMap& kwargs = IValueMap()) const = 0;
+
+   virtual const std::string& name() const = 0;
+
+   // Returns an ordered list of argument names, available for both
+   // script and python methods. This is a more portable dependency
+   // than a ScriptMethod FunctionSchema, which has more information
+   // than can be generally expected from a python method.
+   const std::vector<std::string>& getArgumentNames() const;
+
+  protected:
+   virtual void setArgumentNames(
+       std::vector<std::string>& argumentNames) const = 0;
+
+  private:
+   mutable bool isArgumentNamesInitialized_{false};
+   mutable std::vector<std::string> argumentNames_;
+ };
+
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/jit.h ADDED
@@ -0,0 +1,36 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+ #include <torch/csrc/jit/api/module.h>
+
+ #include <memory>
+ #include <string>
+
+ namespace torch {
+ namespace jit {
+
+ /// Compiles script code into an executable graph.
+ ///
+ /// Takes a string containing functions in script syntax and compiles them into
+ /// a module (graph). The returned module provides a `run_method` function
+ /// that may be used to invoke the compiled functions.
+ ///
+ /// For example:
+ /// \rst
+ /// .. code-block:: cpp
+ ///
+ ///   auto module = torch::jit::compile(R"JIT(
+ ///     def relu_script(a, b):
+ ///       return torch.relu(a + b)
+ ///     def test_while(a, i):
+ ///       while i < 10:
+ ///         a += a
+ ///         i += 1
+ ///       return a
+ ///   )JIT");
+ ///   IValue output = module->run_method("relu_script", a, b);
+ /// \endrst
+ TORCH_API std::shared_ptr<CompilationUnit> compile(const std::string& source);
+
+ } // namespace jit
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/linalg.h ADDED
@@ -0,0 +1,1065 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+
+ namespace torch {
+ namespace linalg {
+
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
+ namespace detail {
+
+ inline Tensor cholesky(const Tensor& self) {
+   return torch::linalg_cholesky(self);
+ }
+
+ inline Tensor cholesky_out(Tensor& result, const Tensor& self) {
+   return torch::linalg_cholesky_out(result, self);
+ }
+
+ inline Tensor det(const Tensor& self) {
+   return torch::linalg_det(self);
+ }
+
+ inline std::tuple<Tensor, Tensor> slogdet(const Tensor& input) {
+   return torch::linalg_slogdet(input);
+ }
+
+ inline std::tuple<Tensor&, Tensor&> slogdet_out(
+     Tensor& sign,
+     Tensor& logabsdet,
+     const Tensor& input) {
+   return torch::linalg_slogdet_out(sign, logabsdet, input);
+ }
+
+ inline std::tuple<Tensor, Tensor> eig(const Tensor& self) {
+   return torch::linalg_eig(self);
+ }
+
+ inline std::tuple<Tensor&, Tensor&> eig_out(
+     Tensor& eigvals,
+     Tensor& eigvecs,
+     const Tensor& self) {
+   return torch::linalg_eig_out(eigvals, eigvecs, self);
+ }
+
+ inline Tensor eigvals(const Tensor& self) {
+   return torch::linalg_eigvals(self);
+ }
+
+ inline Tensor& eigvals_out(Tensor& result, const Tensor& self) {
+   return torch::linalg_eigvals_out(result, self);
+ }
+
+ inline std::tuple<Tensor, Tensor> eigh(
+     const Tensor& self,
+     c10::string_view uplo) {
+   return torch::linalg_eigh(self, uplo);
+ }
+
+ inline std::tuple<Tensor&, Tensor&> eigh_out(
+     Tensor& eigvals,
+     Tensor& eigvecs,
+     const Tensor& self,
+     c10::string_view uplo) {
+   return torch::linalg_eigh_out(eigvals, eigvecs, self, uplo);
+ }
+
+ inline Tensor eigvalsh(const Tensor& self, c10::string_view uplo) {
+   return torch::linalg_eigvalsh(self, uplo);
+ }
+
+ inline Tensor& eigvalsh_out(
+     Tensor& result,
+     const Tensor& self,
+     c10::string_view uplo) {
+   return torch::linalg_eigvalsh_out(result, self, uplo);
+ }
+
+ inline Tensor householder_product(const Tensor& input, const Tensor& tau) {
+   return torch::linalg_householder_product(input, tau);
+ }
+
+ inline Tensor& householder_product_out(
+     Tensor& result,
+     const Tensor& input,
+     const Tensor& tau) {
+   return torch::linalg_householder_product_out(result, input, tau);
+ }
+
+ inline std::tuple<Tensor, Tensor> lu_factor(
+     const Tensor& self,
+     const bool pivot) {
+   return torch::linalg_lu_factor(self, pivot);
+ }
+
+ inline std::tuple<Tensor&, Tensor&> lu_factor_out(
+     Tensor& LU,
+     Tensor& pivots,
+     const Tensor& self,
+     const bool pivot) {
+   return torch::linalg_lu_factor_out(LU, pivots, self, pivot);
+ }
+
+ inline std::tuple<Tensor, Tensor, Tensor> lu(
+     const Tensor& self,
+     const bool pivot) {
+   return torch::linalg_lu(self, pivot);
+ }
+
+ inline std::tuple<Tensor&, Tensor&, Tensor&> lu_out(
+     Tensor& P,
+     Tensor& L,
+     Tensor& U,
+     const Tensor& self,
+     const bool pivot) {
+   return torch::linalg_lu_out(P, L, U, self, pivot);
+ }
+
+ inline std::tuple<Tensor, Tensor, Tensor, Tensor> lstsq(
+     const Tensor& self,
+     const Tensor& b,
+     c10::optional<double> cond,
+     c10::optional<c10::string_view> driver) {
+   return torch::linalg_lstsq(self, b, cond, driver);
+ }
+
+ inline Tensor matrix_exp(const Tensor& self) {
+   return torch::linalg_matrix_exp(self);
+ }
+
+ inline Tensor norm(
+     const Tensor& self,
+     const optional<Scalar>& opt_ord,
+     OptionalIntArrayRef opt_dim,
+     bool keepdim,
+     optional<ScalarType> opt_dtype) {
+   return torch::linalg_norm(self, opt_ord, opt_dim, keepdim, opt_dtype);
+ }
+
+ inline Tensor norm(
+     const Tensor& self,
+     c10::string_view ord,
+     OptionalIntArrayRef opt_dim,
+     bool keepdim,
+     optional<ScalarType> opt_dtype) {
+   return torch::linalg_norm(self, ord, opt_dim, keepdim, opt_dtype);
+ }
+
+ inline Tensor& norm_out(
+     Tensor& result,
+     const Tensor& self,
+     const optional<Scalar>& opt_ord,
+     OptionalIntArrayRef opt_dim,
+     bool keepdim,
+     optional<ScalarType> opt_dtype) {
+   return torch::linalg_norm_out(
+       result, self, opt_ord, opt_dim, keepdim, opt_dtype);
+ }
+
+ inline Tensor& norm_out(
+     Tensor& result,
+     const Tensor& self,
+     c10::string_view ord,
+     OptionalIntArrayRef opt_dim,
+     bool keepdim,
+     optional<ScalarType> opt_dtype) {
+   return torch::linalg_norm_out(result, self, ord, opt_dim, keepdim, opt_dtype);
+ }
+
+ inline Tensor vector_norm(
+     const Tensor& self,
+     Scalar ord,
+     OptionalIntArrayRef opt_dim,
+     bool keepdim,
+     optional<ScalarType> opt_dtype) {
+   return torch::linalg_vector_norm(self, ord, opt_dim, keepdim, opt_dtype);
+ }
+
+ inline Tensor& vector_norm_out(
+     Tensor& result,
+     const Tensor& self,
+     Scalar ord,
+     OptionalIntArrayRef opt_dim,
+     bool keepdim,
+     optional<ScalarType> opt_dtype) {
+   return torch::linalg_vector_norm_out(
+       result, self, ord, opt_dim, keepdim, opt_dtype);
+ }
+
+ inline Tensor matrix_norm(
+     const Tensor& self,
+     const Scalar& ord,
+     IntArrayRef dim,
+     bool keepdim,
+     optional<ScalarType> dtype) {
+   return torch::linalg_matrix_norm(self, ord, dim, keepdim, dtype);
+ }
+
+ inline Tensor& matrix_norm_out(
+     const Tensor& self,
+     const Scalar& ord,
+     IntArrayRef dim,
+     bool keepdim,
+     optional<ScalarType> dtype,
+     Tensor& result) {
+   return torch::linalg_matrix_norm_out(result, self, ord, dim, keepdim, dtype);
+ }
+
+ inline Tensor matrix_norm(
+     const Tensor& self,
+     std::string ord,
+     IntArrayRef dim,
+     bool keepdim,
+     optional<ScalarType> dtype) {
+   return torch::linalg_matrix_norm(self, ord, dim, keepdim, dtype);
+ }
+
+ inline Tensor& matrix_norm_out(
+     const Tensor& self,
+     std::string ord,
+     IntArrayRef dim,
+     bool keepdim,
+     optional<ScalarType> dtype,
+     Tensor& result) {
+   return torch::linalg_matrix_norm_out(result, self, ord, dim, keepdim, dtype);
+ }
+
+ inline Tensor matrix_power(const Tensor& self, int64_t n) {
+   return torch::linalg_matrix_power(self, n);
+ }
+
+ inline Tensor& matrix_power_out(const Tensor& self, int64_t n, Tensor& result) {
+   return torch::linalg_matrix_power_out(result, self, n);
+ }
+
+ inline Tensor matrix_rank(const Tensor& input, double tol, bool hermitian) {
+   return torch::linalg_matrix_rank(input, tol, hermitian);
+ }
+
+ inline Tensor matrix_rank(
+     const Tensor& input,
+     const Tensor& tol,
+     bool hermitian) {
+   return torch::linalg_matrix_rank(input, tol, hermitian);
+ }
+
+ inline Tensor matrix_rank(
+     const Tensor& input,
+     c10::optional<double> atol,
+     c10::optional<double> rtol,
+     bool hermitian) {
+   return torch::linalg_matrix_rank(input, atol, rtol, hermitian);
+ }
+
+ inline Tensor matrix_rank(
+     const Tensor& input,
+     const c10::optional<Tensor>& atol,
+     const c10::optional<Tensor>& rtol,
+     bool hermitian) {
+   return torch::linalg_matrix_rank(input, atol, rtol, hermitian);
+ }
+
+ inline Tensor& matrix_rank_out(
+     Tensor& result,
+     const Tensor& input,
+     double tol,
+     bool hermitian) {
+   return torch::linalg_matrix_rank_out(result, input, tol, hermitian);
+ }
+
+ inline Tensor& matrix_rank_out(
+     Tensor& result,
+     const Tensor& input,
+     const Tensor& tol,
+     bool hermitian) {
+   return torch::linalg_matrix_rank_out(result, input, tol, hermitian);
+ }
+
+ inline Tensor& matrix_rank_out(
+     Tensor& result,
+     const Tensor& input,
+     c10::optional<double> atol,
+     c10::optional<double> rtol,
+     bool hermitian) {
+   return torch::linalg_matrix_rank_out(result, input, atol, rtol, hermitian);
+ }
+
+ inline Tensor& matrix_rank_out(
+     Tensor& result,
+     const Tensor& input,
+     const c10::optional<Tensor>& atol,
+     const c10::optional<Tensor>& rtol,
+     bool hermitian) {
+   return torch::linalg_matrix_rank_out(result, input, atol, rtol, hermitian);
+ }
+
+ inline Tensor multi_dot(TensorList tensors) {
+   return torch::linalg_multi_dot(tensors);
+ }
+
+ inline Tensor& multi_dot_out(TensorList tensors, Tensor& result) {
+   return torch::linalg_multi_dot_out(result, tensors);
+ }
+
+ inline Tensor pinv(const Tensor& input, double rcond, bool hermitian) {
+   return torch::linalg_pinv(input, rcond, hermitian);
+ }
+
+ inline Tensor& pinv_out(
+     Tensor& result,
+     const Tensor& input,
+     double rcond,
+     bool hermitian) {
+   return torch::linalg_pinv_out(result, input, rcond, hermitian);
+ }
+
+ inline std::tuple<Tensor, Tensor> qr(
+     const Tensor& input,
+     c10::string_view mode) {
+   return torch::linalg_qr(input, mode);
+ }
+
+ inline std::tuple<Tensor&, Tensor&> qr_out(
+     Tensor& Q,
+     Tensor& R,
+     const Tensor& input,
+     c10::string_view mode) {
+   return torch::linalg_qr_out(Q, R, input, mode);
+ }
+
+ inline std::tuple<Tensor, Tensor> solve_ex(
+     const Tensor& input,
+     const Tensor& other,
+     bool left,
+     bool check_errors) {
+   return torch::linalg_solve_ex(input, other, left, check_errors);
+ }
+
+ inline std::tuple<Tensor&, Tensor&> solve_ex_out(
+     Tensor& result,
+     Tensor& info,
+     const Tensor& input,
+     const Tensor& other,
+     bool left,
+     bool check_errors) {
+   return torch::linalg_solve_ex_out(
+       result, info, input, other, left, check_errors);
+ }
+
+ inline Tensor solve(const Tensor& input, const Tensor& other, bool left) {
+   return torch::linalg_solve(input, other, left);
+ }
+
+ inline Tensor& solve_out(
+     Tensor& result,
+     const Tensor& input,
+     const Tensor& other,
+     bool left) {
+   return torch::linalg_solve_out(result, input, other, left);
+ }
+
+ inline Tensor solve_triangular(
+     const Tensor& input,
+     const Tensor& other,
+     bool upper,
+     bool left,
+     bool unitriangular) {
+   return torch::linalg_solve_triangular(
+       input, other, upper, left, unitriangular);
+ }
+
+ inline Tensor& solve_triangular_out(
+     Tensor& result,
+     const Tensor& input,
+     const Tensor& other,
+     bool upper,
+     bool left,
+     bool unitriangular) {
+   return torch::linalg_solve_triangular_out(
+       result, input, other, upper, left, unitriangular);
+ }
+
+ inline std::tuple<Tensor, Tensor, Tensor> svd(
+     const Tensor& input,
+     bool full_matrices,
+     c10::optional<c10::string_view> driver) {
+   return torch::linalg_svd(input, full_matrices, driver);
+ }
+
+ inline std::tuple<Tensor&, Tensor&, Tensor&> svd_out(
+     Tensor& U,
+     Tensor& S,
+     Tensor& Vh,
+     const Tensor& input,
+     bool full_matrices,
+     c10::optional<c10::string_view> driver) {
+   return torch::linalg_svd_out(U, S, Vh, input, full_matrices, driver);
+ }
+
+ inline Tensor svdvals(
+     const Tensor& input,
+     c10::optional<c10::string_view> driver) {
+   return torch::linalg_svdvals(input, driver);
+ }
+
+ inline Tensor& svdvals_out(
+     Tensor& result,
+     const Tensor& input,
+     c10::optional<c10::string_view> driver) {
+   return torch::linalg_svdvals_out(result, input, driver);
+ }
+
+ inline Tensor tensorinv(const Tensor& self, int64_t ind) {
+   return torch::linalg_tensorinv(self, ind);
+ }
+
+ inline Tensor& tensorinv_out(Tensor& result, const Tensor& self, int64_t ind) {
+   return torch::linalg_tensorinv_out(result, self, ind);
+ }
+
+ inline Tensor tensorsolve(
+     const Tensor& self,
+     const Tensor& other,
+     OptionalIntArrayRef dims) {
+   return torch::linalg_tensorsolve(self, other, dims);
+ }
+
+ inline Tensor& tensorsolve_out(
+     Tensor& result,
+     const Tensor& self,
+     const Tensor& other,
+     OptionalIntArrayRef dims) {
+   return torch::linalg_tensorsolve_out(result, self, other, dims);
+ }
+
+ inline Tensor inv(const Tensor& input) {
+   return torch::linalg_inv(input);
+ }
+
+ inline Tensor& inv_out(Tensor& result, const Tensor& input) {
+   return torch::linalg_inv_out(result, input);
+ }
+
+ } // namespace detail
+ #endif /* DOXYGEN_SHOULD_SKIP_THIS */
+
+ /// Cholesky decomposition
+ ///
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.cholesky
+ ///
+ /// Example:
+ /// ```
+ /// auto A = torch::randn({4, 4});
+ /// A = torch::matmul(A, A.t());
+ /// auto L = torch::linalg::cholesky(A);
+ /// assert(torch::allclose(torch::matmul(L, L.t()), A));
+ /// ```
+ inline Tensor cholesky(const Tensor& self) {
+   return detail::cholesky(self);
+ }
+
+ inline Tensor cholesky_out(Tensor& result, const Tensor& self) {
+   return detail::cholesky_out(result, self);
+ }
+
+ // C10_DEPRECATED_MESSAGE("linalg_det is deprecated, use det instead.")
+ inline Tensor linalg_det(const Tensor& self) {
+   return detail::det(self);
+ }
+
+ /// See the documentation of torch.linalg.det
+ inline Tensor det(const Tensor& self) {
+   return detail::det(self);
+ }
+
+ /// Computes the sign and (natural) logarithm of the determinant
+ ///
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.slogdet
+ inline std::tuple<Tensor, Tensor> slogdet(const Tensor& input) {
479
+ return detail::slogdet(input);
480
+ }
481
+
482
+ inline std::tuple<Tensor&, Tensor&> slogdet_out(
483
+ Tensor& sign,
484
+ Tensor& logabsdet,
485
+ const Tensor& input) {
486
+ return detail::slogdet_out(sign, logabsdet, input);
487
+ }
488
+
489
+ /// Computes eigenvalues and eigenvectors of non-symmetric/non-hermitian
490
+ /// matrices
491
+ ///
492
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eig
493
+ inline std::tuple<Tensor, Tensor> eig(const Tensor& self) {
494
+ return detail::eig(self);
495
+ }
496
+
497
+ inline std::tuple<Tensor&, Tensor&> eig_out(
498
+ Tensor& eigvals,
499
+ Tensor& eigvecs,
500
+ const Tensor& self) {
501
+ return detail::eig_out(eigvals, eigvecs, self);
502
+ }
503
+
504
+ /// Computes eigenvalues of non-symmetric/non-hermitian matrices
505
+ ///
506
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigvals
507
+ inline Tensor eigvals(const Tensor& self) {
508
+ return detail::eigvals(self);
509
+ }
510
+
511
+ inline Tensor& eigvals_out(Tensor& result, const Tensor& self) {
512
+ return detail::eigvals_out(result, self);
513
+ }
514
+
515
+ /// Computes eigenvalues and eigenvectors
516
+ ///
517
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigh
518
+ inline std::tuple<Tensor, Tensor> eigh(
519
+ const Tensor& self,
520
+ c10::string_view uplo) {
521
+ return detail::eigh(self, uplo);
522
+ }
523
+
524
+ inline std::tuple<Tensor&, Tensor&> eigh_out(
525
+ Tensor& eigvals,
526
+ Tensor& eigvecs,
527
+ const Tensor& self,
528
+ c10::string_view uplo) {
529
+ return detail::eigh_out(eigvals, eigvecs, self, uplo);
530
+ }
531
+
532
+ /// Computes eigenvalues
533
+ ///
534
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigvalsh
535
+ inline Tensor eigvalsh(const Tensor& self, c10::string_view uplo) {
536
+ return detail::eigvalsh(self, uplo);
537
+ }
538
+
539
+ inline Tensor& eigvalsh_out(
540
+ Tensor& result,
541
+ const Tensor& self,
542
+ c10::string_view uplo) {
543
+ return detail::eigvalsh_out(result, self, uplo);
544
+ }
545
+
546
+ /// Computes the product of Householder matrices
547
+ ///
548
+ /// See
549
+ /// https://pytorch.org/docs/master/linalg.html#torch.linalg.householder_product
550
+ inline Tensor householder_product(const Tensor& input, const Tensor& tau) {
551
+ return detail::householder_product(input, tau);
552
+ }
553
+
554
+ inline Tensor& householder_product_out(
555
+ Tensor& result,
556
+ const Tensor& input,
557
+ const Tensor& tau) {
558
+ return detail::householder_product_out(result, input, tau);
559
+ }
560
+
561
+ inline std::tuple<Tensor, Tensor, Tensor, Tensor> lstsq(
562
+ const Tensor& self,
563
+ const Tensor& b,
564
+ c10::optional<double> cond,
565
+ c10::optional<c10::string_view> driver) {
566
+ return detail::lstsq(self, b, cond, driver);
567
+ }
568
+
569
+ /// Computes the matrix exponential
570
+ ///
571
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_exp
572
+ inline Tensor matrix_exp(const Tensor& input) {
573
+ return detail::matrix_exp(input);
574
+ }
575
+
576
+ // C10_DEPRECATED_MESSAGE("linalg_norm is deprecated, use norm instead.")
577
+ inline Tensor linalg_norm(
578
+ const Tensor& self,
579
+ const optional<Scalar>& opt_ord,
580
+ OptionalIntArrayRef opt_dim,
581
+ bool keepdim,
582
+ optional<ScalarType> opt_dtype) {
583
+ return detail::norm(self, opt_ord, opt_dim, keepdim, opt_dtype);
584
+ }
585
+
586
+ // C10_DEPRECATED_MESSAGE("linalg_norm is deprecated, use norm instead.")
587
+ inline Tensor linalg_norm(
588
+ const Tensor& self,
589
+ c10::string_view ord,
590
+ OptionalIntArrayRef opt_dim,
591
+ bool keepdim,
592
+ optional<ScalarType> opt_dtype) {
593
+ return detail::norm(self, ord, opt_dim, keepdim, opt_dtype);
594
+ }
595
+
596
+ // C10_DEPRECATED_MESSAGE("linalg_norm_out is deprecated, use norm_out
597
+ // instead.")
598
+ inline Tensor& linalg_norm_out(
599
+ Tensor& result,
600
+ const Tensor& self,
601
+ const optional<Scalar>& opt_ord,
602
+ OptionalIntArrayRef opt_dim,
603
+ bool keepdim,
604
+ optional<ScalarType> opt_dtype) {
605
+ return detail::norm_out(result, self, opt_ord, opt_dim, keepdim, opt_dtype);
606
+ }
607
+
608
+ // C10_DEPRECATED_MESSAGE("linalg_norm_out is deprecated, use norm_out
609
+ // instead.")
610
+ inline Tensor& linalg_norm_out(
611
+ Tensor& result,
612
+ const Tensor& self,
613
+ c10::string_view ord,
614
+ OptionalIntArrayRef opt_dim,
615
+ bool keepdim,
616
+ optional<ScalarType> opt_dtype) {
617
+ return detail::norm_out(result, self, ord, opt_dim, keepdim, opt_dtype);
618
+ }
619
+
620
+ /// Computes the LU factorization with partial pivoting
621
+ ///
622
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.lu_factor
623
+ inline std::tuple<Tensor, Tensor> lu_factor(
624
+ const Tensor& input,
625
+ const bool pivot = true) {
626
+ return detail::lu_factor(input, pivot);
627
+ }
628
+
629
+ inline std::tuple<Tensor&, Tensor&> lu_factor_out(
630
+ Tensor& LU,
631
+ Tensor& pivots,
632
+ const Tensor& self,
633
+ const bool pivot = true) {
634
+ return detail::lu_factor_out(LU, pivots, self, pivot);
635
+ }
636
+
637
+ /// Computes the LU factorization with partial pivoting
638
+ ///
639
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.lu
640
+ inline std::tuple<Tensor, Tensor, Tensor> lu(
641
+ const Tensor& input,
642
+ const bool pivot = true) {
643
+ return detail::lu(input, pivot);
644
+ }
645
+
646
+ inline std::tuple<Tensor&, Tensor&, Tensor&> lu_out(
647
+ Tensor& P,
648
+ Tensor& L,
649
+ Tensor& U,
650
+ const Tensor& self,
651
+ const bool pivot = true) {
652
+ return detail::lu_out(P, L, U, self, pivot);
653
+ }
654
+
655
+ inline Tensor norm(
656
+ const Tensor& self,
657
+ const optional<Scalar>& opt_ord,
658
+ OptionalIntArrayRef opt_dim,
659
+ bool keepdim,
660
+ optional<ScalarType> opt_dtype) {
661
+ return detail::norm(self, opt_ord, opt_dim, keepdim, opt_dtype);
662
+ }
663
+
664
+ inline Tensor norm(
665
+ const Tensor& self,
666
+ std::string ord,
667
+ OptionalIntArrayRef opt_dim,
668
+ bool keepdim,
669
+ optional<ScalarType> opt_dtype) {
670
+ return detail::norm(self, ord, opt_dim, keepdim, opt_dtype);
671
+ }
672
+
673
+ inline Tensor& norm_out(
674
+ Tensor& result,
675
+ const Tensor& self,
676
+ const optional<Scalar>& opt_ord,
677
+ OptionalIntArrayRef opt_dim,
678
+ bool keepdim,
679
+ optional<ScalarType> opt_dtype) {
680
+ return detail::norm_out(result, self, opt_ord, opt_dim, keepdim, opt_dtype);
681
+ }
682
+
683
+ inline Tensor& norm_out(
684
+ Tensor& result,
685
+ const Tensor& self,
686
+ std::string ord,
687
+ OptionalIntArrayRef opt_dim,
688
+ bool keepdim,
689
+ optional<ScalarType> opt_dtype) {
690
+ return detail::norm_out(result, self, ord, opt_dim, keepdim, opt_dtype);
691
+ }
692
+
693
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.vector_norm
694
+ inline Tensor vector_norm(
695
+ const Tensor& self,
696
+ Scalar ord,
697
+ OptionalIntArrayRef opt_dim,
698
+ bool keepdim,
699
+ optional<ScalarType> opt_dtype) {
700
+ return detail::vector_norm(self, ord, opt_dim, keepdim, opt_dtype);
701
+ }
702
+
703
+ inline Tensor& vector_norm_out(
704
+ Tensor& result,
705
+ const Tensor& self,
706
+ Scalar ord,
707
+ OptionalIntArrayRef opt_dim,
708
+ bool keepdim,
709
+ optional<ScalarType> opt_dtype) {
710
+ return detail::vector_norm_out(
711
+ result, self, ord, opt_dim, keepdim, opt_dtype);
712
+ }
713
+
714
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_norm
715
+ inline Tensor matrix_norm(
716
+ const Tensor& self,
717
+ const Scalar& ord,
718
+ IntArrayRef dim,
719
+ bool keepdim,
720
+ optional<ScalarType> dtype) {
721
+ return detail::matrix_norm(self, ord, dim, keepdim, dtype);
722
+ }
723
+
724
+ inline Tensor& matrix_norm_out(
725
+ const Tensor& self,
726
+ const Scalar& ord,
727
+ IntArrayRef dim,
728
+ bool keepdim,
729
+ optional<ScalarType> dtype,
730
+ Tensor& result) {
731
+ return detail::matrix_norm_out(self, ord, dim, keepdim, dtype, result);
732
+ }
733
+
734
+ inline Tensor matrix_norm(
735
+ const Tensor& self,
736
+ std::string ord,
737
+ IntArrayRef dim,
738
+ bool keepdim,
739
+ optional<ScalarType> dtype) {
740
+ return detail::matrix_norm(self, ord, dim, keepdim, dtype);
741
+ }
742
+
743
+ inline Tensor& matrix_norm_out(
744
+ const Tensor& self,
745
+ std::string ord,
746
+ IntArrayRef dim,
747
+ bool keepdim,
748
+ optional<ScalarType> dtype,
749
+ Tensor& result) {
750
+ return detail::matrix_norm_out(self, ord, dim, keepdim, dtype, result);
751
+ }
752
+
753
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_power
754
+ inline Tensor matrix_power(const Tensor& self, int64_t n) {
755
+ return detail::matrix_power(self, n);
756
+ }
757
+
758
+ inline Tensor& matrix_power_out(const Tensor& self, int64_t n, Tensor& result) {
759
+ return detail::matrix_power_out(self, n, result);
760
+ }
761
+
762
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_rank
763
+ inline Tensor matrix_rank(const Tensor& input, double tol, bool hermitian) {
764
+ return detail::matrix_rank(input, tol, hermitian);
765
+ }
766
+
767
+ inline Tensor matrix_rank(
768
+ const Tensor& input,
769
+ const Tensor& tol,
770
+ bool hermitian) {
771
+ return detail::matrix_rank(input, tol, hermitian);
772
+ }
773
+
774
+ inline Tensor matrix_rank(
775
+ const Tensor& input,
776
+ c10::optional<double> atol,
777
+ c10::optional<double> rtol,
778
+ bool hermitian) {
779
+ return detail::matrix_rank(input, atol, rtol, hermitian);
780
+ }
781
+
782
+ inline Tensor matrix_rank(
783
+ const Tensor& input,
784
+ const c10::optional<Tensor>& atol,
785
+ const c10::optional<Tensor>& rtol,
786
+ bool hermitian) {
787
+ return detail::matrix_rank(input, atol, rtol, hermitian);
788
+ }
789
+
790
+ inline Tensor& matrix_rank_out(
791
+ Tensor& result,
792
+ const Tensor& input,
793
+ double tol,
794
+ bool hermitian) {
795
+ return detail::matrix_rank_out(result, input, tol, hermitian);
796
+ }
797
+
798
+ inline Tensor& matrix_rank_out(
799
+ Tensor& result,
800
+ const Tensor& input,
801
+ const Tensor& tol,
802
+ bool hermitian) {
803
+ return detail::matrix_rank_out(result, input, tol, hermitian);
804
+ }
805
+
806
+ inline Tensor& matrix_rank_out(
807
+ Tensor& result,
808
+ const Tensor& input,
809
+ c10::optional<double> atol,
810
+ c10::optional<double> rtol,
811
+ bool hermitian) {
812
+ return detail::matrix_rank_out(result, input, atol, rtol, hermitian);
813
+ }
814
+
815
+ inline Tensor& matrix_rank_out(
816
+ Tensor& result,
817
+ const Tensor& input,
818
+ const c10::optional<Tensor>& atol,
819
+ const c10::optional<Tensor>& rtol,
820
+ bool hermitian) {
821
+ return detail::matrix_rank_out(result, input, atol, rtol, hermitian);
822
+ }
823
+
824
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.multi_dot
825
+ inline Tensor multi_dot(TensorList tensors) {
826
+ return detail::multi_dot(tensors);
827
+ }
828
+
829
+ inline Tensor& multi_dot_out(TensorList tensors, Tensor& result) {
830
+ return detail::multi_dot_out(tensors, result);
831
+ }
832
+
833
+ /// Computes the pseudo-inverse
834
+ ///
835
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.pinv
836
+ inline Tensor pinv(
837
+ const Tensor& input,
838
+ double rcond = 1e-15,
839
+ bool hermitian = false) {
840
+ return detail::pinv(input, rcond, hermitian);
841
+ }
842
+
843
+ inline Tensor& pinv_out(
844
+ Tensor& result,
845
+ const Tensor& input,
846
+ double rcond = 1e-15,
847
+ bool hermitian = false) {
848
+ return detail::pinv_out(result, input, rcond, hermitian);
849
+ }
850
+
851
+ /// Computes the QR decomposition
852
+ ///
853
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.qr
854
+ inline std::tuple<Tensor, Tensor> qr(
855
+ const Tensor& input,
856
+ c10::string_view mode = "reduced") {
857
+ // C++17 Change the initialisation to "reduced"sv
858
+ // Same for qr_out
859
+ return detail::qr(input, mode);
860
+ }
861
+
862
+ inline std::tuple<Tensor&, Tensor&> qr_out(
863
+ Tensor& Q,
864
+ Tensor& R,
865
+ const Tensor& input,
866
+ c10::string_view mode = "reduced") {
867
+ return detail::qr_out(Q, R, input, mode);
868
+ }
869
+
870
+ /// Computes the LDL decomposition
871
+ ///
872
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.ldl_factor_ex
873
+ inline std::tuple<Tensor, Tensor, Tensor> ldl_factor_ex(
874
+ const Tensor& input,
875
+ bool hermitian,
876
+ bool check_errors) {
877
+ return torch::linalg_ldl_factor_ex(input, hermitian, check_errors);
878
+ }
879
+
880
+ inline std::tuple<Tensor&, Tensor&, Tensor&> ldl_factor_ex_out(
881
+ Tensor& LD,
882
+ Tensor& pivots,
883
+ Tensor& info,
884
+ const Tensor& input,
885
+ bool hermitian,
886
+ bool check_errors) {
887
+ return torch::linalg_ldl_factor_ex_out(
888
+ LD, pivots, info, input, hermitian, check_errors);
889
+ }
890
+
891
+ /// Solve a system of linear equations using the LDL decomposition
892
+ ///
893
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.ldl_solve
894
+ inline Tensor ldl_solve(
895
+ const Tensor& LD,
896
+ const Tensor& pivots,
897
+ const Tensor& B,
898
+ bool hermitian) {
899
+ return torch::linalg_ldl_solve(LD, pivots, B, hermitian);
900
+ }
901
+
902
+ inline Tensor& ldl_solve_out(
903
+ Tensor& result,
904
+ const Tensor& LD,
905
+ const Tensor& pivots,
906
+ const Tensor& B,
907
+ bool hermitian) {
908
+ return torch::linalg_ldl_solve_out(result, LD, pivots, B, hermitian);
909
+ }
910
+
911
+ /// Solves a system linear system AX = B
912
+ ///
913
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.solve_ex
914
+ inline std::tuple<Tensor, Tensor> solve_ex(
915
+ const Tensor& input,
916
+ const Tensor& other,
917
+ bool left,
918
+ bool check_errors) {
919
+ return detail::solve_ex(input, other, left, check_errors);
920
+ }
921
+
922
+ inline std::tuple<Tensor&, Tensor&> solve_ex_out(
923
+ Tensor& result,
924
+ Tensor& info,
925
+ const Tensor& input,
926
+ const Tensor& other,
927
+ bool left,
928
+ bool check_errors) {
929
+ return detail::solve_ex_out(result, info, input, other, left, check_errors);
930
+ }
931
+
932
+ /// Computes a tensor `x` such that `matmul(input, x) = other`.
933
+ ///
934
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.solve
935
+ inline Tensor solve(const Tensor& input, const Tensor& other, bool left) {
936
+ return detail::solve(input, other, left);
937
+ }
938
+
939
+ inline Tensor& solve_out(
940
+ Tensor& result,
941
+ const Tensor& input,
942
+ const Tensor& other,
943
+ bool left) {
944
+ return detail::solve_out(result, input, other, left);
945
+ }
946
+
947
+ /// Computes a solution of a linear system AX = B for input = A and other = B
948
+ /// whenever A is square upper or lower triangular and does not have zeros in
949
+ /// the diagonal
950
+ ///
951
+ /// See
952
+ /// https://pytorch.org/docs/master/linalg.html#torch.linalg.solve_triangular
953
+ inline Tensor solve_triangular(
954
+ const Tensor& input,
955
+ const Tensor& other,
956
+ bool upper,
957
+ bool left,
958
+ bool unitriangular) {
959
+ return detail::solve_triangular(input, other, upper, left, unitriangular);
960
+ }
961
+
962
+ inline Tensor& solve_triangular_out(
963
+ Tensor& result,
964
+ const Tensor& input,
965
+ const Tensor& other,
966
+ bool upper,
967
+ bool left,
968
+ bool unitriangular) {
969
+ return detail::solve_triangular_out(
970
+ result, input, other, upper, left, unitriangular);
971
+ }
972
+
973
+ /// Computes the singular values and singular vectors
974
+ ///
975
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.svd
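+ ///
+ /// Example (an illustrative sketch, not part of the original header):
+ /// ```
+ /// auto A = torch::randn({4, 3});
+ /// torch::Tensor U, S, Vh;
+ /// std::tie(U, S, Vh) =
+ ///     torch::linalg::svd(A, /*full_matrices=*/false, c10::nullopt);
+ /// ```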
976
+ inline std::tuple<Tensor, Tensor, Tensor> svd(
977
+ const Tensor& input,
978
+ bool full_matrices,
979
+ c10::optional<c10::string_view> driver) {
980
+ return detail::svd(input, full_matrices, driver);
981
+ }
982
+
983
+ inline std::tuple<Tensor&, Tensor&, Tensor&> svd_out(
984
+ Tensor& U,
985
+ Tensor& S,
986
+ Tensor& Vh,
987
+ const Tensor& input,
988
+ bool full_matrices,
989
+ c10::optional<c10::string_view> driver) {
990
+ return detail::svd_out(U, S, Vh, input, full_matrices, driver);
991
+ }
992
+
993
+ /// Computes the singular values
994
+ ///
995
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.svdvals
996
+ inline Tensor svdvals(
997
+ const Tensor& input,
998
+ c10::optional<c10::string_view> driver) {
999
+ return detail::svdvals(input, driver);
1000
+ }
1001
+
1002
+ inline Tensor& svdvals_out(
1003
+ Tensor& result,
1004
+ const Tensor& input,
1005
+ c10::optional<c10::string_view> driver) {
1006
+ return detail::svdvals_out(result, input, driver);
1007
+ }
1008
+
1009
+ /// Computes the inverse of a tensor
1010
+ ///
1011
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.tensorinv
1012
+ ///
1013
+ /// Example:
1014
+ /// ```
1015
+ /// auto a = torch::eye(4*6).reshape({4, 6, 8, 3});
1016
+ /// int64_t ind = 2;
1017
+ /// auto ainv = torch::linalg::tensorinv(a, ind);
1018
+ /// ```
1019
+ inline Tensor tensorinv(const Tensor& self, int64_t ind) {
1020
+ return detail::tensorinv(self, ind);
1021
+ }
1022
+
1023
+ inline Tensor& tensorinv_out(Tensor& result, const Tensor& self, int64_t ind) {
1024
+ return detail::tensorinv_out(result, self, ind);
1025
+ }
1026
+
1027
+ /// Computes a tensor `x` such that `tensordot(input, x, dims=x.dim()) = other`.
1028
+ ///
1029
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.tensorsolve
1030
+ ///
1031
+ /// Example:
1032
+ /// ```
1033
+ /// auto a = torch::eye(2*3*4).reshape({2*3, 4, 2, 3, 4});
1034
+ /// auto b = torch::randn(2*3, 4);
1035
+ /// auto x = torch::linalg::tensorsolve(a, b);
1036
+ /// ```
1037
+ inline Tensor tensorsolve(
1038
+ const Tensor& input,
1039
+ const Tensor& other,
1040
+ OptionalIntArrayRef dims) {
1041
+ return detail::tensorsolve(input, other, dims);
1042
+ }
1043
+
1044
+ inline Tensor& tensorsolve_out(
1045
+ Tensor& result,
1046
+ const Tensor& input,
1047
+ const Tensor& other,
1048
+ OptionalIntArrayRef dims) {
1049
+ return detail::tensorsolve_out(result, input, other, dims);
1050
+ }
1051
+
1052
+ /// Computes a tensor `inverse_input` such that `dot(input, inverse_input) =
1053
+ /// eye(input.size(0))`.
1054
+ ///
1055
+ /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.inv
1056
+ inline Tensor inv(const Tensor& input) {
1057
+ return detail::inv(input);
1058
+ }
1059
+
1060
+ inline Tensor& inv_out(Tensor& result, const Tensor& input) {
1061
+ return detail::inv_out(result, input);
1062
+ }
1063
+
1064
+ } // namespace linalg
1065
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/mps.h ADDED
@@ -0,0 +1,44 @@
+ #pragma once
+
+ #include <torch/csrc/Export.h>
+
+ #include <cstddef>
+ #include <cstdint>
+
+ #ifdef __OBJC__
+ #include <Foundation/Foundation.h>
+ #include <Metal/Metal.h>
+ using MTLCommandBuffer_t = id<MTLCommandBuffer>;
+ using DispatchQueue_t = dispatch_queue_t;
+ #else
+ using MTLCommandBuffer_t = void*;
+ using DispatchQueue_t = void*;
+ #endif
+
+ namespace torch {
+ namespace mps {
+
+ /// Returns true if the MPS device is available.
+ bool TORCH_API is_available();
+
+ /// Sets the RNG seed for the MPS device.
+ void TORCH_API manual_seed(uint64_t seed);
+
+ /// Waits for all streams on the MPS device to complete.
+ /// This blocks the calling CPU thread by using the 'waitUntilCompleted()'
+ /// method to wait for Metal command buffers to finish executing all of the
+ /// encoded GPU operations before returning.
+ void TORCH_API synchronize();
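+
+ // Illustrative usage (a hedged sketch, not part of the original header):
+ //
+ //   if (torch::mps::is_available()) {
+ //     auto t = torch::ones({2, 2}, torch::kMPS);
+ //     // ... enqueue more work on the MPS device ...
+ //     torch::mps::synchronize(); // block until the GPU work is done
+ //   }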
+
+ /// Submits the currently active command buffer to run on the MPS device.
+ void TORCH_API commit();
+
+ /// Get the current command buffer to encode the Metal commands.
+ MTLCommandBuffer_t TORCH_API get_command_buffer();
+
+ /// Get the dispatch_queue_t to synchronize encoding the custom kernels
+ /// with the PyTorch MPS backend.
+ DispatchQueue_t TORCH_API get_dispatch_queue();
+
+ } // namespace mps
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nested.h ADDED
@@ -0,0 +1,95 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include <ATen/core/ATen_fwd.h>
+ #include <torch/csrc/api/include/torch/detail/TensorDataContainer.h>
+ #include <algorithm>
+
+ namespace torch {
+ namespace nested {
+
+ /// Nested tensor
+ ///
+ /// See
+ /// https://pytorch.org/docs/master/nested.html#torch.nested.nested_tensor
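+ ///
+ /// Example (an illustrative sketch, not part of the original header):
+ /// ```
+ /// std::vector<at::Tensor> parts = {
+ ///     torch::randn({2, 3}), torch::randn({4, 3})};
+ /// auto nt = torch::nested::nested_tensor(at::TensorList(parts));
+ /// ```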
+ //
+ // Implemented on a python object to allow torch.nested.nested_tensor to be
+ // constructed with arbitrarily nested python objects - for now, only arbitrary
+ // python lists and lists of Tensors.
+ // See torch/csrc/autograd/python_nested_functions_manual.cpp for the Python
+ // implementation; the C++ implementation follows here.
+ inline at::Tensor nested_tensor(
+     at::TensorList nested_tensor_data,
+     const at::TensorOptions& options = {}) {
+   auto out = at::_nested_tensor_from_tensor_list(
+       nested_tensor_data,
+       c10::typeMetaToScalarType(options.dtype()),
+       c10::nullopt,
+       options.device(),
+       options.pinned_memory());
+   if (options.has_requires_grad() && options.requires_grad()) {
+     out.requires_grad_(true);
+   }
+   return out;
+ }
+
+ inline at::Tensor nested_tensor(
+     at::ArrayRef<detail::TensorDataContainer> nested_tensor_data,
+     const at::TensorOptions& options = {}) {
+   for (const auto& tdc : nested_tensor_data) {
+     TORCH_CHECK(
+         tdc.is_init_list(),
+         "nested_tensor() not implemented for these parameters");
+   }
+   // Construct a TensorList using nested_tensor_data
+   std::vector<at::Tensor> tensor_list(nested_tensor_data.size());
+   std::transform(
+       nested_tensor_data.begin(),
+       nested_tensor_data.end(),
+       tensor_list.begin(),
+       [&](const detail::TensorDataContainer& tdc) {
+         return tdc.convert_to_tensor(options);
+       });
+   auto out = at::_nested_tensor_from_tensor_list(
+       tensor_list,
+       c10::typeMetaToScalarType(options.dtype()),
+       c10::nullopt,
+       options.device(),
+       options.pinned_memory());
+   if (options.has_requires_grad() && options.requires_grad()) {
+     out.requires_grad_(true);
+   }
+   return out;
+ }
+
+ /// As Nested Tensor
+ ///
+ /// See
+ /// https://pytorch.org/docs/master/nested.html#torch.nested.as_nested_tensor
+ inline at::Tensor as_nested_tensor(
+     at::TensorList list,
+     c10::optional<at::ScalarType> dtype = c10::nullopt,
+     c10::optional<at::Device> device = c10::nullopt) {
+   return at::_nested_tensor_from_tensor_list(
+       list, dtype, c10::nullopt, device, c10::nullopt);
+ }
+
+ /// Nested to padded tensor
+ ///
+ /// See
+ /// https://pytorch.org/docs/master/nested.html#torch.nested.to_padded_tensor
+ inline at::Tensor to_padded_tensor(
+     const at::Tensor& self,
+     double padding,
+     at::OptionalIntArrayRef output_size = c10::nullopt) {
+   return at::nested_to_padded_tensor(self, padding, output_size);
+ }
+
+ } // namespace nested
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/nn.h ADDED
@@ -0,0 +1,10 @@
+ #pragma once
+
+ #include <torch/nn/cloneable.h>
+ #include <torch/nn/functional.h>
+ #include <torch/nn/init.h>
+ #include <torch/nn/module.h>
+ #include <torch/nn/modules.h>
+ #include <torch/nn/options.h>
+ #include <torch/nn/pimpl.h>
+ #include <torch/nn/utils.h>
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim.h ADDED
@@ -0,0 +1,12 @@
+ #pragma once
+
+ #include <torch/optim/adagrad.h>
+ #include <torch/optim/adam.h>
+ #include <torch/optim/adamw.h>
+ #include <torch/optim/lbfgs.h>
+ #include <torch/optim/optimizer.h>
+ #include <torch/optim/rmsprop.h>
+ #include <torch/optim/sgd.h>
+
+ #include <torch/optim/schedulers/lr_scheduler.h>
+ #include <torch/optim/schedulers/step_lr.h>
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adagrad.h ADDED
@@ -0,0 +1,109 @@
+ #pragma once
+
+ #include <torch/nn/pimpl.h>
+ #include <torch/optim/optimizer.h>
+ #include <torch/optim/serialize.h>
+ #include <torch/serialize/archive.h>
+ #include <torch/types.h>
+
+ #include <utility>
+ #include <vector>
+
+ namespace torch {
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace optim {
+
+ struct TORCH_API AdagradOptions
+     : public OptimizerCloneableOptions<AdagradOptions> {
+   AdagradOptions(double lr = 1e-2);
+   TORCH_ARG(double, lr) = 1e-2;
+   TORCH_ARG(double, lr_decay) = 0;
+   TORCH_ARG(double, weight_decay) = 0;
+   TORCH_ARG(double, initial_accumulator_value) = 0;
+   TORCH_ARG(double, eps) = 1e-10;
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const AdagradOptions& lhs,
+       const AdagradOptions& rhs);
+   double get_lr() const override;
+   void set_lr(const double lr) override;
+ };
+
+ struct TORCH_API AdagradParamState
+     : public OptimizerCloneableParamState<AdagradParamState> {
+   TORCH_ARG(torch::Tensor, sum);
+   TORCH_ARG(int64_t, step) = 0;
+
+  public:
+   AdagradParamState() = default;
+   AdagradParamState(const AdagradParamState&) = default;
+   AdagradParamState& operator=(const AdagradParamState&) = default;
+   AdagradParamState(AdagradParamState&&) noexcept = default;
+   AdagradParamState& operator=(AdagradParamState&&) noexcept = default;
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const AdagradParamState& lhs,
+       const AdagradParamState& rhs);
+ };
+
+ class TORCH_API Adagrad : public Optimizer {
+  public:
+   explicit Adagrad(
+       std::vector<OptimizerParamGroup> param_groups,
+       AdagradOptions defaults = {})
+       : Optimizer(
+             std::move(param_groups),
+             std::make_unique<AdagradOptions>(defaults)) {
+     TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
+     TORCH_CHECK(
+         defaults.lr_decay() >= 0,
+         "Invalid lr_decay value: ",
+         defaults.lr_decay());
+     TORCH_CHECK(
+         defaults.weight_decay() >= 0,
+         "Invalid weight_decay value: ",
+         defaults.weight_decay());
+     TORCH_CHECK(
+         defaults.initial_accumulator_value() >= 0,
+         "Invalid initial_accumulator_value value: ",
+         defaults.initial_accumulator_value());
+     TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
+
+     // Seed each parameter's state with the configured accumulator value.
+     for (const auto& group : param_groups_) {
+       for (const auto& p : group.params()) {
+         auto state = std::make_unique<AdagradParamState>();
+         state->step(0);
+         state->sum(torch::full_like(
+             p.data(),
+             defaults.initial_accumulator_value(),
+             at::MemoryFormat::Preserve));
+         state_[p.unsafeGetTensorImpl()] = std::move(state);
+       }
+     }
+   }
+
+   explicit Adagrad(std::vector<Tensor> params, AdagradOptions defaults = {})
+       : Adagrad({OptimizerParamGroup(std::move(params))}, defaults) {}
+
+   torch::Tensor step(LossClosure closure = nullptr) override;
+   void save(serialize::OutputArchive& archive) const override;
+   void load(serialize::InputArchive& archive) override;
+
+  private:
+   template <typename Self, typename Archive>
+   static void serialize(Self& self, Archive& archive) {
+     _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(Adagrad);
+   }
+ };
+ } // namespace optim
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adam.h ADDED
@@ -0,0 +1,92 @@
+ #pragma once
+
+ #include <torch/nn/module.h>
+ #include <torch/optim/optimizer.h>
+ #include <torch/optim/serialize.h>
+
+ #include <utility>
+ #include <vector>
+
+ namespace torch {
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace optim {
+
+ struct TORCH_API AdamOptions : public OptimizerCloneableOptions<AdamOptions> {
+   AdamOptions(double lr = 1e-3);
+   TORCH_ARG(double, lr) = 1e-3;
+   typedef std::tuple<double, double> betas_t;
+   TORCH_ARG(betas_t, betas) = std::make_tuple(0.9, 0.999);
+   TORCH_ARG(double, eps) = 1e-8;
+   TORCH_ARG(double, weight_decay) = 0;
+   TORCH_ARG(bool, amsgrad) = false;
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const AdamOptions& lhs,
+       const AdamOptions& rhs);
+   double get_lr() const override;
+   void set_lr(const double lr) override;
+ };
+
+ struct TORCH_API AdamParamState
+     : public OptimizerCloneableParamState<AdamParamState> {
+   TORCH_ARG(int64_t, step) = 0;
+   TORCH_ARG(torch::Tensor, exp_avg);
+   TORCH_ARG(torch::Tensor, exp_avg_sq);
+   TORCH_ARG(torch::Tensor, max_exp_avg_sq) = {};
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const AdamParamState& lhs,
+       const AdamParamState& rhs);
+ };
+
+ class TORCH_API Adam : public Optimizer {
55
+ public:
56
+ explicit Adam(
57
+ std::vector<OptimizerParamGroup> param_groups,
58
+ AdamOptions defaults = {})
59
+ : Optimizer(
60
+ std::move(param_groups),
61
+ std::make_unique<AdamOptions>(defaults)) {
62
+ TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
63
+ TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
64
+ auto betas = defaults.betas();
65
+ TORCH_CHECK(
66
+ 0 <= std::get<0>(betas) && std::get<0>(betas) < 1.0,
67
+ "Invalid beta parameter at index 0: ",
68
+ std::get<0>(betas));
69
+ TORCH_CHECK(
70
+ 0 <= std::get<1>(betas) && std::get<1>(betas) < 1.0,
71
+ "Invalid beta parameter at index 1: ",
72
+ std::get<1>(betas));
73
+ TORCH_CHECK(
74
+ defaults.weight_decay() >= 0,
75
+ "Invalid weight_decay value: ",
76
+ defaults.weight_decay());
77
+ }
78
+ explicit Adam(std::vector<Tensor> params, AdamOptions defaults = {})
79
+ : Adam({OptimizerParamGroup(std::move(params))}, defaults) {}
80
+
81
+ torch::Tensor step(LossClosure closure = nullptr) override;
82
+ void save(serialize::OutputArchive& archive) const override;
83
+ void load(serialize::InputArchive& archive) override;
84
+
85
+ private:
86
+ template <typename Self, typename Archive>
87
+ static void serialize(Self& self, Archive& archive) {
88
+ _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(Adam);
89
+ }
90
+ };
91
+ } // namespace optim
92
+ } // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/adamw.h ADDED
@@ -0,0 +1,92 @@
+ #pragma once
+
+ #include <torch/nn/module.h>
+ #include <torch/optim/optimizer.h>
+ #include <torch/optim/serialize.h>
+
+ #include <utility>
+ #include <vector>
+
+ namespace torch {
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace optim {
+
+ struct TORCH_API AdamWOptions : public OptimizerCloneableOptions<AdamWOptions> {
+   AdamWOptions(double lr = 1e-3);
+   TORCH_ARG(double, lr) = 1e-3;
+   typedef std::tuple<double, double> betas_t;
+   TORCH_ARG(betas_t, betas) = std::make_tuple(0.9, 0.999);
+   TORCH_ARG(double, eps) = 1e-8;
+   TORCH_ARG(double, weight_decay) = 1e-2;
+   TORCH_ARG(bool, amsgrad) = false;
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const AdamWOptions& lhs,
+       const AdamWOptions& rhs);
+   double get_lr() const override;
+   void set_lr(const double lr) override;
+ };
+
+ struct TORCH_API AdamWParamState
+     : public OptimizerCloneableParamState<AdamWParamState> {
+   TORCH_ARG(int64_t, step) = 0;
+   TORCH_ARG(torch::Tensor, exp_avg);
+   TORCH_ARG(torch::Tensor, exp_avg_sq);
+   TORCH_ARG(torch::Tensor, max_exp_avg_sq) = {};
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const AdamWParamState& lhs,
+       const AdamWParamState& rhs);
+ };
+
+ class TORCH_API AdamW : public Optimizer {
+  public:
+   explicit AdamW(
+       std::vector<OptimizerParamGroup> param_groups,
+       AdamWOptions defaults = {})
+       : Optimizer(
+             std::move(param_groups),
+             std::make_unique<AdamWOptions>(defaults)) {
+     TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
+     TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
+     auto betas = defaults.betas();
+     TORCH_CHECK(
+         0 <= std::get<0>(betas) && std::get<0>(betas) < 1.0,
+         "Invalid beta parameter at index 0: ",
+         std::get<0>(betas));
+     TORCH_CHECK(
+         0 <= std::get<1>(betas) && std::get<1>(betas) < 1.0,
+         "Invalid beta parameter at index 1: ",
+         std::get<1>(betas));
+     TORCH_CHECK(
+         defaults.weight_decay() >= 0,
+         "Invalid weight_decay value: ",
+         defaults.weight_decay());
+   }
+   explicit AdamW(std::vector<Tensor> params, AdamWOptions defaults = {})
+       : AdamW({OptimizerParamGroup(std::move(params))}, defaults) {}
+
+   torch::Tensor step(LossClosure closure = nullptr) override;
+   void save(serialize::OutputArchive& archive) const override;
+   void load(serialize::InputArchive& archive) override;
+
+  private:
+   template <typename Self, typename Archive>
+   static void serialize(Self& self, Archive& archive) {
+     _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(AdamW);
+   }
+ };
+ } // namespace optim
+ } // namespace torch
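AdamW differs from Adam mainly in its default weight_decay (1e-2) and in applying the decay decoupled from the gradient update. A sketch of per-parameter-group options; the two-group weight/bias split is purely illustrative:

#include <torch/torch.h>

int main() {
  torch::nn::Linear net(10, 1);

  // Two groups: decay the weight matrix, but not the bias (a common split).
  std::vector<torch::optim::OptimizerParamGroup> groups;
  groups.emplace_back(
      std::vector<torch::Tensor>{net->weight},
      std::make_unique<torch::optim::AdamWOptions>(
          torch::optim::AdamWOptions(1e-3).weight_decay(1e-2)));
  groups.emplace_back(
      std::vector<torch::Tensor>{net->bias},
      std::make_unique<torch::optim::AdamWOptions>(
          torch::optim::AdamWOptions(1e-3).weight_decay(0.0)));

  torch::optim::AdamW optimizer(std::move(groups));
  // forward / backward / optimizer.step() then proceed as with Adam.
}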
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/lbfgs.h ADDED
@@ -0,0 +1,103 @@
+ #pragma once
+
+ #include <torch/nn/module.h>
+ #include <torch/optim/optimizer.h>
+ #include <torch/optim/serialize.h>
+ #include <torch/serialize/archive.h>
+
+ #include <deque>
+ #include <functional>
+ #include <memory>
+ #include <vector>
+
+ namespace torch {
+ namespace optim {
+
+ struct TORCH_API LBFGSOptions : public OptimizerCloneableOptions<LBFGSOptions> {
+   LBFGSOptions(double lr = 1);
+   TORCH_ARG(double, lr) = 1;
+   TORCH_ARG(int64_t, max_iter) = 20;
+   TORCH_ARG(c10::optional<int64_t>, max_eval) = c10::nullopt;
+   TORCH_ARG(double, tolerance_grad) = 1e-7;
+   TORCH_ARG(double, tolerance_change) = 1e-9;
+   TORCH_ARG(int64_t, history_size) = 100;
+   TORCH_ARG(c10::optional<std::string>, line_search_fn) = c10::nullopt;
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const LBFGSOptions& lhs,
+       const LBFGSOptions& rhs);
+   double get_lr() const override;
+   void set_lr(const double lr) override;
+ };
+
+ struct TORCH_API LBFGSParamState
+     : public OptimizerCloneableParamState<LBFGSParamState> {
+   TORCH_ARG(int64_t, func_evals) = 0;
+   TORCH_ARG(int64_t, n_iter) = 0;
+   TORCH_ARG(double, t) = 0;
+   TORCH_ARG(double, prev_loss) = 0;
+   TORCH_ARG(Tensor, d) = {};
+   TORCH_ARG(Tensor, H_diag) = {};
+   TORCH_ARG(Tensor, prev_flat_grad) = {};
+   TORCH_ARG(std::deque<Tensor>, old_dirs);
+   TORCH_ARG(std::deque<Tensor>, old_stps);
+   TORCH_ARG(std::deque<Tensor>, ro);
+   TORCH_ARG(c10::optional<std::vector<Tensor>>, al) = c10::nullopt;
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const LBFGSParamState& lhs,
+       const LBFGSParamState& rhs);
+ };
+
+ class TORCH_API LBFGS : public Optimizer {
+  public:
+   explicit LBFGS(
+       std::vector<OptimizerParamGroup> param_groups,
+       LBFGSOptions defaults = {})
+       : Optimizer(
+             std::move(param_groups),
+             std::make_unique<LBFGSOptions>(defaults)) {
+     TORCH_CHECK(
+         param_groups_.size() == 1,
+         "LBFGS doesn't support per-parameter options (parameter groups)");
+     if (defaults.max_eval() == c10::nullopt) {
+       auto max_eval_val = (defaults.max_iter() * 5) / 4;
+       static_cast<LBFGSOptions&>(param_groups_[0].options())
+           .max_eval(max_eval_val);
+       static_cast<LBFGSOptions&>(*defaults_.get()).max_eval(max_eval_val);
+     }
+     _numel_cache = c10::nullopt;
+   }
+   explicit LBFGS(std::vector<Tensor> params, LBFGSOptions defaults = {})
+       : LBFGS({OptimizerParamGroup(std::move(params))}, defaults) {}
+
+   Tensor step(LossClosure closure) override;
+   void save(serialize::OutputArchive& archive) const override;
+   void load(serialize::InputArchive& archive) override;
+
+  private:
+   c10::optional<int64_t> _numel_cache;
+   int64_t _numel();
+   Tensor _gather_flat_grad();
+   void _add_grad(const double step_size, const Tensor& update);
+   std::tuple<double, Tensor> _directional_evaluate(
+       const LossClosure& closure,
+       const std::vector<Tensor>& x,
+       double t,
+       const Tensor& d);
+   void _set_param(const std::vector<Tensor>& params_data);
+   std::vector<Tensor> _clone_param();
+
+   template <typename Self, typename Archive>
+   static void serialize(Self& self, Archive& archive) {
+     _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(LBFGS);
+   }
+ };
+ } // namespace optim
+ } // namespace torch
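Unlike the other optimizers here, LBFGS::step takes a mandatory closure (the optimizer re-evaluates the loss internally) and the constructor rejects more than one parameter group. A minimal sketch with an illustrative model and data:

#include <torch/torch.h>

int main() {
  torch::nn::Linear net(10, 1);
  auto input = torch::randn({4, 10});
  auto target = torch::randn({4, 1});

  torch::optim::LBFGS optimizer(
      net->parameters(),
      torch::optim::LBFGSOptions(1.0).max_iter(20).history_size(100));

  // The closure must recompute gradients and return the loss tensor.
  auto closure = [&]() -> torch::Tensor {
    optimizer.zero_grad();
    auto loss = torch::mse_loss(net->forward(input), target);
    loss.backward();
    return loss;
  };
  optimizer.step(closure);
}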
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/optimizer.h ADDED
@@ -0,0 +1,217 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/flat_hash_map.h>
+
+ #include <torch/arg.h>
+ #include <torch/csrc/Export.h>
+
+ #include <algorithm>
+ #include <functional>
+ #include <iterator>
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ // Forward declarations confuse Doxygen
+ #ifndef DOXYGEN_SHOULD_SKIP_THIS
+ namespace at {
+ class Tensor;
+ } // namespace at
+
+ namespace torch {
+ using at::Tensor;
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+ #endif // DOXYGEN_SHOULD_SKIP_THIS
+
+ namespace torch {
+ namespace optim {
+
+ class TORCH_API OptimizerParamState {
+  public:
+   OptimizerParamState() = default;
+   OptimizerParamState(const OptimizerParamState&) = default;
+   OptimizerParamState& operator=(const OptimizerParamState&) = default;
+   OptimizerParamState(OptimizerParamState&&) noexcept = default;
+   OptimizerParamState& operator=(OptimizerParamState&&) noexcept = default;
+   virtual std::unique_ptr<OptimizerParamState> clone() const;
+   virtual void serialize(torch::serialize::InputArchive& archive);
+   virtual void serialize(torch::serialize::OutputArchive& archive) const;
+   virtual ~OptimizerParamState() = default;
+ };
+
+ template <typename Derived>
+ class OptimizerCloneableParamState : public OptimizerParamState {
+   std::unique_ptr<OptimizerParamState> clone() const override {
+     return std::make_unique<Derived>(static_cast<const Derived&>(*this));
+   }
+ };
+
+ class TORCH_API OptimizerOptions {
+  public:
+   OptimizerOptions() = default;
+   OptimizerOptions(const OptimizerOptions&) = default;
+   OptimizerOptions& operator=(const OptimizerOptions&) = default;
+   OptimizerOptions(OptimizerOptions&&) noexcept = default;
+   OptimizerOptions& operator=(OptimizerOptions&&) noexcept = default;
+   virtual std::unique_ptr<OptimizerOptions> clone() const;
+   virtual void serialize(torch::serialize::InputArchive& archive);
+   virtual void serialize(torch::serialize::OutputArchive& archive) const;
+   virtual ~OptimizerOptions() = default;
+   virtual double get_lr() const;
+   virtual void set_lr(const double lr);
+ };
+
+ template <typename Derived>
+ class OptimizerCloneableOptions : public OptimizerOptions {
+  private:
+   std::unique_ptr<OptimizerOptions> clone() const override {
+     return std::make_unique<Derived>(static_cast<const Derived&>(*this));
+   }
+ };
+
+ /// Stores parameters in the param_group and stores a pointer to the
+ /// OptimizerOptions
+ class TORCH_API OptimizerParamGroup {
+  public:
+   // NOTE: In order to store `OptimizerParamGroup` in a `std::vector`, it has
+   // to be copy-constructible.
+   OptimizerParamGroup(const OptimizerParamGroup& param_group)
+       : params_(param_group.params()),
+         options_(
+             param_group.has_options() ? param_group.options().clone()
+                                       : nullptr) {}
+   OptimizerParamGroup(std::vector<Tensor> params)
+       : params_(std::move(params)) {}
+   OptimizerParamGroup(
+       std::vector<Tensor> params,
+       std::unique_ptr<OptimizerOptions> options)
+       : params_(std::move(params)), options_(std::move(options)) {}
+
+   bool has_options() const;
+   OptimizerOptions& options();
+   const OptimizerOptions& options() const;
+   void set_options(std::unique_ptr<OptimizerOptions> options);
+   std::vector<Tensor>& params();
+   const std::vector<Tensor>& params() const;
+
+  protected:
+   std::vector<Tensor> params_;
+   std::unique_ptr<OptimizerOptions> options_;
+ };
+
+ class TORCH_API Optimizer {
+  public:
+   // The copy constructor is deleted, because the user should use the
+   // `state_dict` / `load_state_dict` API to copy an optimizer instead.
+   Optimizer(const Optimizer& optimizer) = delete;
+   Optimizer(Optimizer&& optimizer) = default;
+
+   explicit Optimizer(
+       std::vector<OptimizerParamGroup> param_groups,
+       std::unique_ptr<OptimizerOptions> defaults)
+       : defaults_(std::move(defaults)) {
+     for (const auto& param_group : param_groups) {
+       add_param_group(param_group);
+     }
+   }
+
+   /// Constructs the `Optimizer` from a vector of parameters.
+   explicit Optimizer(
+       std::vector<Tensor> parameters,
+       std::unique_ptr<OptimizerOptions> defaults)
+       : Optimizer(
+             {OptimizerParamGroup(std::move(parameters))},
+             std::move(defaults)){};
+
+   /// Adds the given param_group to the optimizer's param_group list.
+   void add_param_group(const OptimizerParamGroup& param_group);
+
+   virtual ~Optimizer() = default;
+
+   using LossClosure = std::function<Tensor()>;
+   /// A loss function closure, which is expected to return the loss value.
+   virtual Tensor step(LossClosure closure = nullptr) = 0;
+
+   /// Adds the given vector of parameters to the optimizer's parameter list.
+   void add_parameters(const std::vector<Tensor>& parameters);
+
+   /// Zeros out the gradients of all parameters.
+   void zero_grad(bool set_to_none = true);
+
+   /// Provides a const reference to the parameters in the first param_group
+   /// this optimizer holds.
+   const std::vector<Tensor>& parameters() const noexcept;
+
+   /// Provides a reference to the parameters in the first param_group this
+   /// optimizer holds.
+   std::vector<Tensor>& parameters() noexcept;
+
+   /// Returns the number of parameters referenced by the optimizer.
+   size_t size() const noexcept;
+
+   OptimizerOptions& defaults() noexcept;
+
+   const OptimizerOptions& defaults() const noexcept;
+
+   /// Provides a reference to the param_groups this optimizer holds.
+   std::vector<OptimizerParamGroup>& param_groups() noexcept;
+
+   /// Provides a const reference to the param_groups this optimizer holds.
+   const std::vector<OptimizerParamGroup>& param_groups() const noexcept;
+
+   /// Provides a reference to the state this optimizer holds
+   ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>&
+   state() noexcept;
+
+   /// Provides a const reference to the state this optimizer holds
+   const ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>&
+   state() const noexcept;
+
+   /// Serializes the optimizer state into the given `archive`.
+   virtual void save(serialize::OutputArchive& archive) const;
+
+   /// Deserializes the optimizer state from the given `archive`.
+   virtual void load(serialize::InputArchive& archive);
+
+  protected:
+   std::vector<OptimizerParamGroup> param_groups_;
+   ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>> state_;
+   std::unique_ptr<OptimizerOptions> defaults_;
+ };
+
+ /* How do we decide whether to serialize undefined tensors or c10::nullopt
+    values into the output archive?
+    Answer: we strictly follow the behavior of the Python API. To be more
+    specific:
+
+    For optimizer options:
+    a) Undefined tensors: currently no tensor is used as an options argument
+    in the Python API, so we don't need to worry about it now.
+    b) c10::nullopt values: we serialize c10::nullopt values into the output
+    archive, to follow the exact same behavior as the Python API.
+
+    For optimizer param state:
+    a) Undefined tensors: in param state, an undefined tensor in the C++ impl
+    is equivalent to a missing key in the Python impl. Since we don't
+    serialize missing keys in the Python API, we skip undefined tensors when
+    serializing the param state.
+    b) c10::nullopt values: in param state, a c10::nullopt value in the C++
+    impl is equivalent to a missing key in the Python impl. Since we don't
+    serialize missing keys in the Python API, we skip c10::nullopt values
+    when serializing the param state. */
+
+ /// Serializes an `Optimizer` into an `OutputArchive`.
+ TORCH_API serialize::OutputArchive& operator<<(
+     serialize::OutputArchive& archive,
+     const Optimizer& optimizer);
+
+ /// Deserializes an `Optimizer` from an `InputArchive`.
+ TORCH_API serialize::InputArchive& operator>>(
+     serialize::InputArchive& archive,
+     Optimizer& optimizer);
+
+ } // namespace optim
+ } // namespace torch
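Because this header declares `operator<<` / `operator>>` for archives, a concrete optimizer round-trips through the generic `torch::save` / `torch::load` helpers. A sketch; the file name is illustrative:

#include <torch/torch.h>

int main() {
  torch::nn::Linear net(10, 1);
  torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1);

  // Streams the optimizer into an OutputArchive via operator<< above.
  torch::save(optimizer, "optim.pt");

  // Reads it back via operator>>; per-parameter state is re-keyed to the
  // current TensorImpl pointers of the matching parameters.
  torch::optim::SGD restored(net->parameters(), /*lr=*/0.1);
  torch::load(restored, "optim.pt");
}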
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/rmsprop.h ADDED
@@ -0,0 +1,95 @@
+ #pragma once
+
+ #include <torch/nn/module.h>
+ #include <torch/optim/optimizer.h>
+ #include <torch/optim/serialize.h>
+ #include <torch/serialize/archive.h>
+ #include <torch/types.h>
+
+ #include <functional>
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ namespace torch {
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace optim {
+
+ struct TORCH_API RMSpropOptions
+     : public OptimizerCloneableOptions<RMSpropOptions> {
+   RMSpropOptions(double lr = 1e-2);
+   TORCH_ARG(double, lr) = 1e-2;
+   TORCH_ARG(double, alpha) = 0.99;
+   TORCH_ARG(double, eps) = 1e-8;
+   TORCH_ARG(double, weight_decay) = 0;
+   TORCH_ARG(double, momentum) = 0;
+   TORCH_ARG(bool, centered) = false;
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const RMSpropOptions& lhs,
+       const RMSpropOptions& rhs);
+   double get_lr() const override;
+   void set_lr(const double lr) override;
+ };
+
+ struct TORCH_API RMSpropParamState
+     : public OptimizerCloneableParamState<RMSpropParamState> {
+   TORCH_ARG(int64_t, step) = 0;
+   TORCH_ARG(torch::Tensor, square_avg);
+   TORCH_ARG(torch::Tensor, momentum_buffer) = {};
+   TORCH_ARG(torch::Tensor, grad_avg) = {};
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const RMSpropParamState& lhs,
+       const RMSpropParamState& rhs);
+ };
+
+ class TORCH_API RMSprop : public Optimizer {
+  public:
+   explicit RMSprop(
+       std::vector<OptimizerParamGroup> param_groups,
+       RMSpropOptions defaults = {})
+       : Optimizer(
+             std::move(param_groups),
+             std::make_unique<RMSpropOptions>(defaults)) {
+     TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
+     TORCH_CHECK(defaults.eps() >= 0, "Invalid epsilon value: ", defaults.eps());
+     TORCH_CHECK(
+         defaults.momentum() >= 0,
+         "Invalid momentum value: ",
+         defaults.momentum());
+     TORCH_CHECK(
+         defaults.weight_decay() >= 0,
+         "Invalid weight_decay value: ",
+         defaults.weight_decay());
+     TORCH_CHECK(
+         defaults.alpha() >= 0, "Invalid alpha value: ", defaults.alpha());
+   }
+
+   explicit RMSprop(std::vector<Tensor> params, RMSpropOptions defaults = {})
+       : RMSprop({OptimizerParamGroup(std::move(params))}, defaults) {}
+
+   torch::Tensor step(LossClosure closure = nullptr) override;
+   void save(serialize::OutputArchive& archive) const override;
+   void load(serialize::InputArchive& archive) override;
+
+  private:
+   template <typename Self, typename Archive>
+   static void serialize(Self& self, Archive& archive) {
+     _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(RMSprop);
+   }
+ };
+ } // namespace optim
+ } // namespace torch
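A short sketch of the centered/momentum variant (illustrative model and data), showing which extra state those options enable:

#include <torch/torch.h>

int main() {
  torch::nn::Linear net(10, 1);

  // With momentum > 0 and centered == true, RMSpropParamState also keeps
  // momentum_buffer and grad_avg alongside square_avg.
  torch::optim::RMSprop optimizer(
      net->parameters(),
      torch::optim::RMSpropOptions(1e-2)
          .alpha(0.99)
          .momentum(0.9)
          .centered(true));

  optimizer.zero_grad();
  auto loss = torch::mse_loss(net->forward(torch::randn({4, 10})),
                              torch::randn({4, 1}));
  loss.backward();
  optimizer.step();
}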
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/lr_scheduler.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ #include <torch/optim/optimizer.h>
+
+ #include <torch/csrc/Export.h>
+
+ namespace torch {
+ namespace optim {
+
+ class TORCH_API LRScheduler {
+  public:
+   // This class needs to take a reference of an optimizer from outside such
+   // that it can modify its learning rates; due to this the lifetime of said
+   // optimizer must be maintained
+   LRScheduler(torch::optim::Optimizer& optimizer);
+
+   virtual ~LRScheduler() = default;
+
+   void step();
+
+  protected:
+   // A vector of learning rates is calculated and returned from the specific
+   // subclass. A vector is returned with each element being a separate
+   // learning rate for each param group - although the normal use case would
+   // be to return a vector of identical elements.
+   virtual std::vector<double> get_lrs() = 0;
+
+   // Get current learning rates from the optimizer
+   std::vector<double> get_current_lrs() const;
+
+   unsigned step_count_{};
+
+  private:
+   void set_optimizer_lrs(const std::vector<double>& learning_rates);
+
+   torch::optim::Optimizer& optimizer_;
+ };
+ } // namespace optim
+ } // namespace torch
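A sketch of the subclassing contract described above: `get_lrs()` returns one learning rate per param group, and `step()` applies them. This exponential-decay subclass is illustrative, not part of the library:

#include <torch/torch.h>

// Multiplies every param group's learning rate by `gamma` on each step().
class ExponentialLR : public torch::optim::LRScheduler {
 public:
  ExponentialLR(torch::optim::Optimizer& optimizer, double gamma)
      : LRScheduler(optimizer), gamma_(gamma) {}

 private:
  std::vector<double> get_lrs() override {
    std::vector<double> lrs = get_current_lrs();  // one entry per param group
    for (double& lr : lrs) {
      lr *= gamma_;
    }
    return lrs;
  }

  double gamma_;
};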
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/reduce_on_plateau_scheduler.h ADDED
@@ -0,0 +1,63 @@
+ #pragma once
+
+ #include <torch/optim/optimizer.h>
+
+ #include <torch/csrc/Export.h>
+
+ #include <string>
+
+ #include <cmath>
+
+ #include <iostream>
+
+ namespace torch {
+ namespace optim {
+
+ class TORCH_API ReduceLROnPlateauScheduler {
+  public:
+   enum SchedulerMode { min, max };
+   enum ThresholdMode { rel, abs };
+   ReduceLROnPlateauScheduler(
+       Optimizer& optimizer,
+       SchedulerMode mode = min,
+       float factor = 0.1,
+       int patience = 10,
+       double threshold = 1e-4,
+       ThresholdMode threshold_mode = rel,
+       int cooldown = 0,
+       const std::vector<float>& min_lr = std::vector<float>(),
+       double eps = 1e-8,
+       bool verbose = false);
+
+   virtual ~ReduceLROnPlateauScheduler() = default;
+
+   void step(float metric);
+
+  private:
+   void reset();
+   void reduce_lr(int epoch);
+   bool in_cooldown();
+   bool is_better(float a);
+   void init_is_better(
+       SchedulerMode mode,
+       double threshold,
+       ThresholdMode threshold_mode);
+
+   Optimizer& optimizer;
+   SchedulerMode mode;
+   float mode_worse;
+   float factor;
+   int patience;
+   double threshold;
+   ThresholdMode threshold_mode;
+   int cooldown;
+   int cooldown_counter;
+   std::vector<float> min_lrs;
+   double eps;
+   float best;
+   bool verbose;
+   int last_epoch;
+   int num_bad_epochs;
+ };
+ } // namespace optim
+ } // namespace torch
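Usage sketch: feed the monitored metric into step(). The epoch loop and stand-in validation loss below are illustrative:

#include <torch/torch.h>

int main() {
  torch::nn::Linear net(10, 1);
  torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1);

  // Cut every learning rate by 10x after 5 epochs without improvement in a
  // minimized metric (e.g. validation loss).
  torch::optim::ReduceLROnPlateauScheduler scheduler(
      optimizer,
      torch::optim::ReduceLROnPlateauScheduler::min,
      /*factor=*/0.1,
      /*patience=*/5);

  for (int epoch = 0; epoch < 20; ++epoch) {
    float val_loss = 1.0f / (epoch + 1);  // stand-in for a real validation pass
    scheduler.step(val_loss);
  }
}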
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/schedulers/step_lr.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ #include <torch/optim/schedulers/lr_scheduler.h>
+
+ namespace torch {
+ namespace optim {
+
+ class TORCH_API StepLR : public LRScheduler {
+  public:
+   StepLR(
+       torch::optim::Optimizer& optimizer,
+       const unsigned step_size,
+       const double gamma = 0.1);
+
+  private:
+   std::vector<double> get_lrs() override;
+
+   const unsigned step_size_;
+   const double gamma_;
+ };
+ } // namespace optim
+ } // namespace torch
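Usage sketch (training loop elided; the schedule shown is the classic step decay):

#include <torch/torch.h>

int main() {
  torch::nn::Linear net(10, 1);
  torch::optim::SGD optimizer(net->parameters(), /*lr=*/0.1);

  // Every 30 calls to step(), multiply all learning rates by gamma = 0.1.
  torch::optim::StepLR scheduler(optimizer, /*step_size=*/30, /*gamma=*/0.1);

  for (int epoch = 0; epoch < 90; ++epoch) {
    // ... one epoch of training, then:
    scheduler.step();
  }
}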
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/serialize.h ADDED
@@ -0,0 +1,309 @@
+ #pragma once
+
+ #include <c10/util/irange.h>
+ #include <torch/optim/optimizer.h>
+ #include <torch/serialize/archive.h>
+ #include <torch/types.h>
+ #include <cstddef>
+ #include <cstdint>
+ #include <deque>
+ #include <string>
+ #include <vector>
+
+ namespace torch {
+ namespace optim {
+ namespace detail {
+ // Utility function to save state
+ template <typename DerivedOptimizerParamState>
+ void serialize(
+     serialize::OutputArchive& archive,
+     const ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>&
+         state) {
+   for (const auto& item : state) {
+     serialize::OutputArchive param_state_archive(archive.compilation_unit());
+     std::string tensorimpl_key =
+         std::to_string(reinterpret_cast<size_t>(item.first));
+     const DerivedOptimizerParamState& curr_state =
+         static_cast<const DerivedOptimizerParamState&>(*(item.second.get()));
+     curr_state.serialize(param_state_archive);
+     archive.write(tensorimpl_key, param_state_archive);
+   }
+ }
+
+ // Utility function to load state
+ template <typename DerivedOptimizerParamState>
+ void serialize(
+     serialize::InputArchive& archive,
+     ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>>& state) {
+   std::vector<std::string> tensorimpl_keys = archive.keys();
+   for (const std::string& tensorimpl_key : tensorimpl_keys) {
+     serialize::InputArchive param_state_archive;
+     archive.read(tensorimpl_key, param_state_archive);
+     DerivedOptimizerParamState param_state;
+     param_state.serialize(param_state_archive);
+     state[reinterpret_cast<void*>(std::stoull(tensorimpl_key))] =
+         std::make_unique<DerivedOptimizerParamState>(param_state);
+   }
+ }
+
+ // Utility function to save param_groups
+ template <typename DerivedOptimizerParamOptions>
+ void serialize(
+     serialize::OutputArchive& archive,
+     const std::vector<OptimizerParamGroup>& param_groups) {
+   archive.write(
+       "param_groups/size",
+       torch::tensor(static_cast<int64_t>(param_groups.size())));
+   for (const auto i : c10::irange(param_groups.size())) {
+     serialize::OutputArchive param_group_archive(archive.compilation_unit());
+     std::vector<Tensor> params = param_groups[i].params();
+     param_group_archive.write(
+         "params/size", torch::tensor(static_cast<int64_t>(params.size())));
+     for (const auto index : c10::irange(params.size())) {
+       param_group_archive.write(
+           "params/" + std::to_string(index),
+           IValue(std::to_string(
+               reinterpret_cast<size_t>(params[index].unsafeGetTensorImpl()))));
+     }
+     const DerivedOptimizerParamOptions& param_group_options =
+         static_cast<const DerivedOptimizerParamOptions&>(
+             param_groups[i].options());
+     serialize::OutputArchive param_group_options_archive(
+         param_group_archive.compilation_unit());
+     param_group_options.serialize(param_group_options_archive);
+     param_group_archive.write("options", param_group_options_archive);
+     archive.write("param_groups/" + std::to_string(i), param_group_archive);
+   }
+ }
+
+ // Utility function to load param_groups
+ // We take as input a vector of pairs of strings and unique_ptrs to optimizer
+ // options so that we can retain the state for each param by using the old
+ // tensor impl keys (saved during serialization) and map the new tensor impl
+ // keys to the correct state for each param
+ template <typename DerivedOptimizerParamOptions>
+ void serialize(
+     serialize::InputArchive& archive,
+     std::vector<
+         std::pair<std::vector<std::string>, std::unique_ptr<OptimizerOptions>>>&
+         param_groups) {
+   torch::Tensor param_groups_size_tensor;
+   archive.read("param_groups/size", param_groups_size_tensor);
+   const int64_t param_groups_size = param_groups_size_tensor.item<int64_t>();
+   for (const auto i : c10::irange(param_groups_size)) {
+     serialize::InputArchive param_group_archive;
+     archive.read("param_groups/" + std::to_string(i), param_group_archive);
+     torch::Tensor size_tensor;
+     param_group_archive.read("params/size", size_tensor);
+     const int64_t size = size_tensor.item<int64_t>();
+     std::vector<std::string> params;
+     for (const auto index : c10::irange(size)) {
+       IValue ivalue;
+       param_group_archive.read("params/" + std::to_string(index), ivalue);
+       std::string element = ivalue.toStringRef();
+       params.emplace_back(element);
+     }
+     serialize::InputArchive param_group_options_archive;
+     param_group_archive.read("options", param_group_options_archive);
+     DerivedOptimizerParamOptions param_group_options(0);
+     param_group_options.serialize(param_group_options_archive);
+     param_groups.emplace_back(std::make_pair(
+         params,
+         std::make_unique<DerivedOptimizerParamOptions>(param_group_options)));
+   }
+ }
+ } // namespace detail
+
+ // Note: These functions are all called `serialize()` so they can be called
+ // inside a template where the archive type is a template type and can thus
+ // be passed such that the appropriate overload is selected.
+
+ /// Utility function to save a value of `int64_t` type.
+ void serialize(
+     serialize::OutputArchive& archive,
+     const std::string& key,
+     const int64_t& value);
+
+ /// Utility function to load a value of `int64_t` type.
+ void serialize(
+     serialize::InputArchive& archive,
+     const std::string& key,
+     int64_t& value);
+
+ /// Utility function to save a vector of step buffers.
+ void serialize(
+     serialize::OutputArchive& archive,
+     const std::string& key,
+     const std::vector<int64_t>& steps);
+
+ /// Utility function to load a vector of step buffers.
+ void serialize(
+     serialize::InputArchive& archive,
+     const std::string& key,
+     std::vector<int64_t>& steps);
+
+ // Utility function to save state and param_groups
+ template <
+     typename DerivedOptimizerParamState,
+     typename DerivedOptimizerParamOptions>
+ void serialize(serialize::OutputArchive& archive, const Optimizer& optimizer) {
+   archive.write("pytorch_version", IValue("1.5.0"));
+   serialize::OutputArchive state_archive(archive.compilation_unit());
+   detail::serialize<DerivedOptimizerParamState>(
+       state_archive, optimizer.state());
+   archive.write("state", state_archive);
+
+   serialize::OutputArchive param_groups_archive(archive.compilation_unit());
+   detail::serialize<DerivedOptimizerParamOptions>(
+       param_groups_archive, optimizer.param_groups());
+   archive.write("param_groups", param_groups_archive);
+ }
+
+ // Utility function to load state and param_groups and update state
+ template <
+     typename DerivedOptimizerParamState,
+     typename DerivedOptimizerParamOptions>
+ void serialize(serialize::InputArchive& archive, Optimizer& optimizer) {
+   IValue pytorch_version;
+   archive.read("pytorch_version", pytorch_version);
+   TORCH_INTERNAL_ASSERT(pytorch_version.toStringRef() == "1.5.0");
+   serialize::InputArchive state_archive;
+   archive.read("state", state_archive);
+   ska::flat_hash_map<void*, std::unique_ptr<OptimizerParamState>> saved_state;
+   detail::serialize<DerivedOptimizerParamState>(state_archive, saved_state);
+
+   serialize::InputArchive param_groups_archive;
+   archive.read("param_groups", param_groups_archive);
+   std::vector<
+       std::pair<std::vector<std::string>, std::unique_ptr<OptimizerOptions>>>
+       saved_param_groups;
+   detail::serialize<DerivedOptimizerParamOptions>(
+       param_groups_archive, saved_param_groups);
+
+   // update state
+   TORCH_CHECK(
+       saved_param_groups.size() == optimizer.param_groups().size(),
+       "loaded state dict has a different number of parameter groups");
+   for (const auto i : c10::irange(saved_param_groups.size())) {
+     std::vector<std::string> param_group_old_keys = saved_param_groups[i].first;
+     std::vector<Tensor> params = optimizer.param_groups()[i].params();
+     TORCH_CHECK(
+         param_group_old_keys.size() == params.size(),
+         "loaded state dict contains a parameter group that has a different size than the optimizer's parameter group");
+
+     for (const auto idx : c10::irange(params.size())) {
+       auto param_group_old_key =
+           reinterpret_cast<void*>(std::stoull(param_group_old_keys[idx]));
+       if (saved_state.find(param_group_old_key) != saved_state.end()) {
+         optimizer.state()[params[idx].unsafeGetTensorImpl()] =
+             std::move(saved_state[param_group_old_key]);
+       }
+     }
+   }
+ }
+
+ /// Utility function to save a vector of buffers.
+ template <typename BufferContainer>
+ void serialize(
+     serialize::OutputArchive& archive,
+     const std::string& key,
+     const BufferContainer& buffers) {
+   archive.write(
+       key + "/size", torch::tensor(static_cast<int64_t>(buffers.size())));
+   for (const auto index : c10::irange(buffers.size())) {
+     archive.write(
+         key + "/" + std::to_string(index), buffers[index], /*is_buffer=*/true);
+   }
+ }
+
+ /// Utility function to load a vector of buffers.
+ template <typename BufferContainer>
+ void serialize(
+     serialize::InputArchive& archive,
+     const std::string& key,
+     BufferContainer& buffers) {
+   buffers.clear();
+   torch::Tensor size_tensor;
+   archive.read(key + "/size", size_tensor);
+   const size_t size = size_tensor.item<int64_t>();
+   for (const auto index : c10::irange(size)) {
+     buffers.emplace_back();
+     archive.read(
+         key + "/" + std::to_string(index), buffers.back(), /*is_buffer=*/true);
+   }
+ }
+
+ template <typename T>
+ c10::List<T> deque_to_list(const std::deque<T>& dq) {
+   c10::List<T> list;
+   list.reserve(dq.size());
+   for (const auto& e : dq) {
+     list.emplace_back(e);
+   }
+   return list;
+ }
+
+ template <typename T>
+ std::deque<T> list_to_deque(const c10::List<T>& list) {
+   std::deque<T> dq;
+   for (const auto& e : list) {
+     dq.emplace_back(e);
+   }
+   return dq;
+ }
+
+ #define _TORCH_OPTIM_SERIALIZE(name) \
+   torch::optim::serialize(archive, #name, self.name)
+
+ #define _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(OptimizerName) \
+   torch::optim::serialize<OptimizerName##ParamState, OptimizerName##Options>( \
+       archive, self)
+
+ #define _TORCH_OPTIM_SERIALIZE_TORCH_ARG(name) \
+   { \
+     auto ivalue = torch::IValue(name()); \
+     /* do not serialize if name is an undefined tensor */ \
+     if (!(ivalue.isTensor() && \
+           ivalue.unsafeToTensorImpl() == \
+               at::UndefinedTensorImpl::singleton())) { \
+       archive.write(#name, ivalue); \
+     } \
+   }
+
+ #define _TORCH_OPTIM_SERIALIZE_TORCH_ARG_DEQUE(name) \
+   { \
+     c10::IValue ivalue = torch::IValue(deque_to_list(name())); \
+     archive.write(#name, ivalue); \
+   }
+
+ #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(T, name) \
+   { \
+     c10::IValue ivalue; \
+     bool exists = archive.try_read(#name, ivalue); \
+     if (exists) { \
+       name(ivalue.to<T>()); \
+     } else { \
+       bool is_tensor_type = std::is_base_of<torch::Tensor, T>::value; \
+       TORCH_INTERNAL_ASSERT(is_tensor_type); \
+     } \
+   }
+
+ #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG_OPTIONAL(T, name) \
+   { \
+     c10::IValue ivalue; \
+     bool exists = archive.try_read(#name, ivalue); \
+     if (exists) { \
+       name(ivalue.toOptional<T>()); \
+     } \
+   }
+
+ #define _TORCH_OPTIM_DESERIALIZE_TORCH_ARG_DEQUE(T, name) \
+   { \
+     c10::IValue ivalue; \
+     archive.read(#name, ivalue); \
+     auto list = ivalue.to<c10::List<T::value_type>>(); \
+     name(list_to_deque(list)); \
+   }
+
+ } // namespace optim
+ } // namespace torch
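A sketch of how the TORCH_ARG macros above are typically composed in an optimizer's serialize implementation, modeled on the Adam param state (a simplified illustration, not the verbatim source):

// Save path: one _TORCH_OPTIM_SERIALIZE_TORCH_ARG per TORCH_ARG field;
// undefined tensors (e.g. max_exp_avg_sq without amsgrad) are skipped.
void AdamParamState::serialize(torch::serialize::OutputArchive& archive) const {
  _TORCH_OPTIM_SERIALIZE_TORCH_ARG(step);
  _TORCH_OPTIM_SERIALIZE_TORCH_ARG(exp_avg);
  _TORCH_OPTIM_SERIALIZE_TORCH_ARG(exp_avg_sq);
  _TORCH_OPTIM_SERIALIZE_TORCH_ARG(max_exp_avg_sq);
}

// Load path: the typed deserialize macros mirror the save path; for tensor
// fields a missing key is tolerated (it means the tensor was undefined).
void AdamParamState::serialize(torch::serialize::InputArchive& archive) {
  _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(int64_t, step);
  _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(torch::Tensor, exp_avg);
  _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(torch::Tensor, exp_avg_sq);
  _TORCH_OPTIM_DESERIALIZE_TORCH_ARG(torch::Tensor, max_exp_avg_sq);
}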
venv/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim/sgd.h ADDED
@@ -0,0 +1,91 @@
+ #pragma once
+
+ #include <torch/nn/module.h>
+ #include <torch/optim/optimizer.h>
+ #include <torch/optim/serialize.h>
+ #include <torch/serialize/archive.h>
+ #include <torch/types.h>
+
+ #include <cstddef>
+ #include <utility>
+ #include <vector>
+
+ namespace torch {
+ namespace serialize {
+ class OutputArchive;
+ class InputArchive;
+ } // namespace serialize
+ } // namespace torch
+
+ namespace torch {
+ namespace optim {
+
+ struct TORCH_API SGDOptions : public OptimizerCloneableOptions<SGDOptions> {
+   SGDOptions(double lr);
+   TORCH_ARG(double, lr);
+   TORCH_ARG(double, momentum) = 0;
+   TORCH_ARG(double, dampening) = 0;
+   TORCH_ARG(double, weight_decay) = 0;
+   TORCH_ARG(bool, nesterov) = false;
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const SGDOptions& lhs,
+       const SGDOptions& rhs);
+   double get_lr() const override;
+   void set_lr(const double lr) override;
+ };
+
+ struct TORCH_API SGDParamState
+     : public OptimizerCloneableParamState<SGDParamState> {
+   TORCH_ARG(torch::Tensor, momentum_buffer);
+
+  public:
+   void serialize(torch::serialize::InputArchive& archive) override;
+   void serialize(torch::serialize::OutputArchive& archive) const override;
+   TORCH_API friend bool operator==(
+       const SGDParamState& lhs,
+       const SGDParamState& rhs);
+ };
+
+ class TORCH_API SGD : public Optimizer {
+  public:
+   explicit SGD(
+       std::vector<OptimizerParamGroup> param_groups,
+       SGDOptions defaults)
+       : Optimizer(
+             std::move(param_groups),
+             std::make_unique<SGDOptions>(defaults)) {
+     TORCH_CHECK(defaults.lr() >= 0, "Invalid learning rate: ", defaults.lr());
+     TORCH_CHECK(
+         defaults.momentum() >= 0,
+         "Invalid momentum value: ",
+         defaults.momentum());
+     TORCH_CHECK(
+         defaults.weight_decay() >= 0,
+         "Invalid weight_decay value: ",
+         defaults.weight_decay());
+     TORCH_CHECK(
+         !defaults.nesterov() ||
+             (defaults.momentum() > 0 && defaults.dampening() == 0),
+         "Nesterov momentum requires a momentum and zero dampening");
+   }
+
+   explicit SGD(std::vector<Tensor> params, SGDOptions defaults)
+       : SGD({OptimizerParamGroup(std::move(params))}, defaults) {}
+
+   torch::Tensor step(LossClosure closure = nullptr) override;
+
+   void save(serialize::OutputArchive& archive) const override;
+   void load(serialize::InputArchive& archive) override;
+
+  private:
+   template <typename Self, typename Archive>
+   static void serialize(Self& self, Archive& archive) {
+     _TORCH_OPTIM_SERIALIZE_WITH_TEMPLATE_ARG(SGD);
+   }
+ };
+ } // namespace optim
+ } // namespace torch
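Finally, a minimal SGD sketch (illustrative model and data); note the constructor check above that ties nesterov to momentum > 0 and zero dampening:

#include <torch/torch.h>

int main() {
  torch::nn::Linear net(10, 1);
  auto input = torch::randn({4, 10});
  auto target = torch::randn({4, 1});

  // nesterov(true) is only legal with momentum > 0 and dampening == 0.
  torch::optim::SGD optimizer(
      net->parameters(),
      torch::optim::SGDOptions(0.1).momentum(0.9).nesterov(true));

  optimizer.zero_grad();
  torch::mse_loss(net->forward(input), target).backward();
  optimizer.step();  // SGDParamState stores a momentum_buffer per parameter
}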