Add files using upload-large-folder tool
- ckpts/universal/global_step20/zero/11.input_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/11.input_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step20/zero/11.input_layernorm.weight/fp32.pt +3 -0
- ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h +39 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h +57 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h +196 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h +136 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h +53 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h +86 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h +38 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h +30 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h +45 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h +110 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h +25 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h +32 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h +119 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h +63 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h +38 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h +55 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h +11 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h +19 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h +32 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h +14 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h +42 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h +11 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h +29 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h +15 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h +13 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h +54 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h +12 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h +17 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h +18 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h +38 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h +14 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h +12 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h +23 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h +14 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h +16 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h +21 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h +117 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h +58 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h +57 -0
- venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h +511 -0
ckpts/universal/global_step20/zero/11.input_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c10c697dcdc57d4b5c8c4e89861530f08ae3d32bf6f2353d0ba1503ad1c1f02
+size 9372
ckpts/universal/global_step20/zero/11.input_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6182c43529a98a93a7c2686bd01bf0187a0e89331f98b3e876f957b9cf8bac7
+size 9387
ckpts/universal/global_step20/zero/11.input_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:502c0eed83d1b276adba1696ecd37f073ec756e1cfbb2044add3acfc3e407f72
+size 9293
ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb0f36303a9db36196bf043fc9ed8b97c9d409fbcd614248b4b1d46a5d0429bf
+size 33555612
ckpts/universal/global_step20/zero/16.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a7867e0253a7aaff53be04d3caeabae2443153a978322a79e2aefa516476c7f
+size 33555627
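Note: the five checkpoint .pt entries above are Git LFS pointer files, not the tensors themselves; each records only the LFS spec version, the SHA-256 of the real payload, and its size in bytes. As a hedged illustration (not part of the diff), a minimal parser for that three-line pointer format could look like the sketch below; the local file name is hypothetical.

    // Illustrative only: extract oid and size from a Git LFS pointer file.
    #include <fstream>
    #include <iostream>
    #include <string>

    int main() {
      std::ifstream in("exp_avg.pt"); // hypothetical local copy of the pointer
      std::string line, oid, size;
      while (std::getline(in, line)) {
        if (line.rfind("oid sha256:", 0) == 0) {
          oid = line.substr(11);
        } else if (line.rfind("size ", 0) == 0) {
          size = line.substr(5);
        }
      }
      std::cout << "payload sha256=" << oid << " bytes=" << size << "\n";
    }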
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h
ADDED
@@ -0,0 +1,39 @@
+#pragma once
+
+#include <vector>
+
+#include <ATen/core/ivalue.h>
+#include <ATen/core/operator_name.h>
+#include <torch/csrc/jit/runtime/instruction.h>
+
+namespace torch {
+namespace jit {
+namespace mobile {
+
+using Stack = std::vector<c10::IValue>;
+using DebugHandle = int64_t;
+
+class Function;
+
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+struct Code {
+  std::vector<Instruction> instructions_;
+  std::vector<DebugHandle> debug_handles_;
+  std::vector<c10::OperatorName> op_names_;
+  std::vector<int> operator_input_sizes_;
+  std::vector<std::function<void(Stack&)>> operators_;
+  std::vector<c10::IValue> constants_;
+  std::vector<c10::TypePtr> types_;
+  // TODO After we actually export CALL instructions we can remove this.
+  // We may need a two-stage importing scheme, where we firstly construct all
+  // function objects, and then append referenced function pointers. This could
+  // be done in parseMethods().
+  std::vector<mobile::Function*> functions_;
+  size_t register_size_ = 0; // Aggregated output size.
+  // initialized means operators_ array is filled with operators
+  bool initialized = false;
+};
+
+} // namespace mobile
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h
ADDED
@@ -0,0 +1,57 @@
+#pragma once
+#include <c10/util/flat_hash_map.h>
+#include <caffe2/serialize/inline_container.h>
+#include <torch/csrc/jit/api/compilation_unit.h>
+#include <torch/csrc/jit/ir/scope.h>
+#include <torch/csrc/jit/serialization/source_range_serialization.h>
+
+namespace torch {
+namespace jit {
+/*
+ * MobileDebugTable:
+ * Deserializes debug_pkl and callstack_map records from a PT model's zip archive
+ * and stores them in a map of debug handles to DebugInfoPair. Debug handles are
+ * unique per model and runtime, whether in the lite interpreter or a delegate; an
+ * exception of BackendRuntimeException should be raised using debug handles.
+ * The getSourceDebugString method is responsible for translating debug
+ * handles to the corresponding debug information.
+ * This debug information includes the stack trace of model-level source code and
+ * the module hierarchy where the exception occurred.
+ */
+class MobileDebugTable {
+ public:
+  MobileDebugTable() = default;
+  MobileDebugTable(
+      std::unique_ptr<caffe2::serialize::PyTorchStreamReader>& reader,
+      const std::shared_ptr<CompilationUnit>& cu);
+
+  template <typename It>
+  MobileDebugTable(It begin, It end) : callstack_ptr_map_(begin, end) {}
+
+  std::string getSourceDebugString(
+      const int64_t debug_handle,
+      const std::string& top_module_type_name = "ModuleTypeUnknown") const;
+  std::string getSourceDebugString(
+      const std::vector<int64_t>& debug_handles,
+      const std::string& top_module_type_name = "ModuleTypeUnknown") const;
+  std::string getModuleHierarchyInfo(
+      const int64_t debug_handle,
+      const std::string& top_module_type_name = "ModuleTypeUnknown") const;
+  std::string getModuleHierarchyInfo(
+      const std::vector<int64_t>& debug_handles,
+      const std::string& top_module_type_name = "ModuleTypeUnknown") const;
+
+  const ska::flat_hash_map<int64_t, DebugInfoTuple>& getCallStackPtrMap()
+      const {
+    return callstack_ptr_map_;
+  }
+
+ private:
+  std::pair<std::string, std::string> getSourceDebugModuleHierarchyInfo(
+      const std::vector<int64_t>& debug_handles,
+      const std::string& top_module_type_name = "ModuleTypeUnknown") const;
+  ska::flat_hash_map<int64_t, DebugInfoTuple> callstack_ptr_map_;
+};
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h
ADDED
@@ -0,0 +1,196 @@
+#pragma once
+
+#include <array>
+#include <cerrno>
+#include <cstddef>
+#include <cstring>
+#include <fstream>
+#include <istream>
+#include <memory>
+
+#include <c10/core/CPUAllocator.h>
+#include <c10/core/impl/alloc_cpu.h>
+#include <caffe2/serialize/read_adapter_interface.h>
+
+#if defined(HAVE_MMAP)
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+/**
+ * @file
+ *
+ * Helpers for identifying file formats when reading serialized data.
+ *
+ * Note that these functions are declared inline because they will typically
+ * only be called from one or two locations per binary.
+ */
+
+namespace torch {
+namespace jit {
+
+/**
+ * The format of a file or data stream.
+ */
+enum class FileFormat {
+  UnknownFileFormat = 0,
+  FlatbufferFileFormat,
+  ZipFileFormat,
+};
+
+/// The size of the buffer to pass to #getFileFormat(), in bytes.
+constexpr size_t kFileFormatHeaderSize = 8;
+constexpr size_t kMaxAlignment = 16;
+
+/**
+ * Returns the likely file format based on the magic header bytes in @p header,
+ * which should contain the first bytes of a file or data stream.
+ */
+// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
+static inline FileFormat getFileFormat(const char* data) {
+  // The size of magic strings to look for in the buffer.
+  static constexpr size_t kMagicSize = 4;
+
+  // Bytes 4..7 of a Flatbuffer-encoded file produced by
+  // `flatbuffer_serializer.h`. (The first four bytes contain an offset to the
+  // actual Flatbuffer data.)
+  static constexpr std::array<char, kMagicSize> kFlatbufferMagicString = {
+      'P', 'T', 'M', 'F'};
+  static constexpr size_t kFlatbufferMagicOffset = 4;
+
+  // The first four bytes of a ZIP file.
+  static constexpr std::array<char, kMagicSize> kZipMagicString = {
+      'P', 'K', '\x03', '\x04'};
+
+  // Note that we check for Flatbuffer magic first. Since the first four bytes
+  // of flatbuffer data contain an offset to the root struct, it's theoretically
+  // possible to construct a file whose offset looks like the ZIP magic. On the
+  // other hand, bytes 4-7 of ZIP files are constrained to a small set of values
+  // that do not typically cross into the printable ASCII range, so a ZIP file
+  // should never have a header that looks like a Flatbuffer file.
+  if (std::memcmp(
+          data + kFlatbufferMagicOffset,
+          kFlatbufferMagicString.data(),
+          kMagicSize) == 0) {
+    // Magic header for a binary file containing a Flatbuffer-serialized mobile
+    // Module.
+    return FileFormat::FlatbufferFileFormat;
+  } else if (std::memcmp(data, kZipMagicString.data(), kMagicSize) == 0) {
+    // Magic header for a zip file, which we use to store pickled sub-files.
+    return FileFormat::ZipFileFormat;
+  }
+  return FileFormat::UnknownFileFormat;
+}
+
+/**
+ * Returns the likely file format based on the magic header bytes of @p data.
+ * If the stream position changes while inspecting the data, this function will
+ * restore the stream position to its original offset before returning.
+ */
+// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
+static inline FileFormat getFileFormat(std::istream& data) {
+  FileFormat format = FileFormat::UnknownFileFormat;
+  std::streampos orig_pos = data.tellg();
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+  std::array<char, kFileFormatHeaderSize> header;
+  data.read(header.data(), header.size());
+  if (data.good()) {
+    format = getFileFormat(header.data());
+  }
+  data.seekg(orig_pos, data.beg);
+  return format;
+}
+
+/**
+ * Returns the likely file format based on the magic header bytes of the file
+ * named @p filename.
+ */
+// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
+static inline FileFormat getFileFormat(const std::string& filename) {
+  std::ifstream data(filename, std::ifstream::binary);
+  return getFileFormat(data);
+}
+
+// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
+static void file_not_found_error() {
+  std::stringstream message;
+  message << "Error while opening file: ";
+  if (errno == ENOENT) {
+    message << "no such file or directory" << std::endl;
+  } else {
+    message << "error no is: " << errno << std::endl;
+  }
+  TORCH_CHECK(false, message.str());
+}
+
+// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
+static inline std::tuple<std::shared_ptr<char>, size_t> get_file_content(
+    const char* filename) {
+#if defined(HAVE_MMAP)
+  int fd = open(filename, O_RDONLY);
+  if (fd < 0) {
+    // failed to open file, chances are it's no such file or directory.
+    file_not_found_error();
+  }
+  struct stat statbuf {};
+  fstat(fd, &statbuf);
+  size_t size = statbuf.st_size;
+  void* ptr = mmap(nullptr, statbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+  close(fd);
+  auto deleter = [statbuf](char* ptr) { munmap(ptr, statbuf.st_size); };
+  std::shared_ptr<char> data(reinterpret_cast<char*>(ptr), deleter);
+#else
+  FILE* f = fopen(filename, "rb");
+  if (f == nullptr) {
+    file_not_found_error();
+  }
+  fseek(f, 0, SEEK_END);
+  size_t size = ftell(f);
+  fseek(f, 0, SEEK_SET);
+  // make sure buffer size is multiple of alignment
+  size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
+  std::shared_ptr<char> data(
+      static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
+  fread(data.get(), size, 1, f);
+  fclose(f);
+#endif
+  return std::make_tuple(data, size);
+}
+
+// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
+static inline std::tuple<std::shared_ptr<char>, size_t> get_stream_content(
+    std::istream& in) {
+  // get size of the stream and reset to orig
+  std::streampos orig_pos = in.tellg();
+  in.seekg(orig_pos, std::ios::end);
+  const long size = in.tellg();
+  in.seekg(orig_pos, in.beg);
+
+  // read stream
+  // NOLINT make sure buffer size is multiple of alignment
+  size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
+  std::shared_ptr<char> data(
+      static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
+  in.read(data.get(), size);
+
+  // reset stream to original position
+  in.seekg(orig_pos, in.beg);
+  return std::make_tuple(data, size);
+}
+
+// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
+static inline std::tuple<std::shared_ptr<char>, size_t> get_rai_content(
+    caffe2::serialize::ReadAdapterInterface* rai) {
+  size_t buffer_size = (rai->size() / kMaxAlignment + 1) * kMaxAlignment;
+  std::shared_ptr<char> data(
+      static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
+  rai->read(
+      0, data.get(), rai->size(), "Loading ReadAdapterInterface to bytes");
+  return std::make_tuple(data, buffer_size);
+}
+
+} // namespace jit
+} // namespace torch
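For orientation, a hedged caller-side sketch of the format-sniffing helpers declared in file_format.h above (the model path is hypothetical, and building against these internal headers is an assumption):

    #include <iostream>
    #include <torch/csrc/jit/mobile/file_format.h>

    int main() {
      // getFileFormat() peeks at the magic bytes and restores the stream position.
      const auto format = torch::jit::getFileFormat("model.ptl"); // hypothetical path
      switch (format) {
        case torch::jit::FileFormat::ZipFileFormat:
          std::cout << "zip-based pickle archive\n";
          break;
        case torch::jit::FileFormat::FlatbufferFileFormat:
          std::cout << "flatbuffer-serialized mobile module\n";
          break;
        default:
          std::cout << "unknown format\n";
      }
    }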
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h
ADDED
@@ -0,0 +1,136 @@
+#pragma once
+
+#include <istream>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <ATen/core/ivalue.h>
+#include <c10/core/Device.h>
+#include <c10/macros/Macros.h>
+#include <c10/util/Optional.h>
+#include <torch/csrc/jit/mobile/module.h>
+
+/**
+ * Defines the public API for loading flatbuffer-serialized mobile modules.
+ * Note that this header must not include or depend on flatbuffer-defined
+ * types, to avoid leaking those details to PyTorch clients.
+ */
+
+namespace torch {
+namespace jit {
+
+/// All non-copied data pointers provided to `parse_and_initialize_*` functions
+/// must be aligned to this boundary. Since the Module will point directly into
+/// the data, this alignment is necessary to ensure that certain types/structs
+/// are properly aligned.
+constexpr size_t kFlatbufferDataAlignmentBytes = 16;
+
+/// Maps file names to file contents.
+using ExtraFilesMap = std::unordered_map<std::string, std::string>;
+
+// At a high level, to produce a Module from a file on disk, we need to go
+// through the following steps:
+// 1. Read: Read the file from disk -> memory
+// 2. Deserialize: Parse the bytes to produce some in memory manipulable
+//    structure
+// 3. Module initialization: Produce mobile::Module out of the structure
+//    produced in 2.
+// Under this context, the structure described in 2. is the flatbuffer-defined
+// type mobile::serialization::Module. However, this step/type is not visible in
+// the public API.
+
+// Parse a mobile::Module from raw bytes.
+//
+// This function does steps 2+3 described above.
+//
+// Does not take ownership of `data`; if you want it to take ownership, see the
+// shared_ptr overload of this function.
+//
+// If should_copy_tensor_memory is true, then the returned module will NOT have
+// references to `data`, so `data` can be freed immediately.
+//
+// If should_copy_tensor_memory is false, then the returned module will have
+// tensors that point inside of `data`; the caller will need to make sure that
+// `data` outlives the returned Module. Also, `data` must be aligned to
+// kFlatbufferDataAlignmentBytes.
+TORCH_API mobile::Module parse_and_initialize_mobile_module(
+    void* data,
+    size_t size, // of `data`, in bytes.
+    c10::optional<at::Device> device = c10::nullopt,
+    ExtraFilesMap* extra_files = nullptr,
+    bool should_copy_tensor_memory = false);
+
+// Parse a mobile::Module from raw bytes.
+//
+// This function does steps 2+3 described above.
+//
+// The returned Module holds a reference to `data`, which must be aligned to
+// kFlatbufferDataAlignmentBytes.
+//
+// If you do not want the Module to hold a reference to `data`, see the raw
+// pointer overload of this function.
+TORCH_API mobile::Module parse_and_initialize_mobile_module(
+    std::shared_ptr<char> data,
+    size_t size, // of `data`, in bytes.
+    c10::optional<at::Device> device = c10::nullopt,
+    ExtraFilesMap* extra_files = nullptr);
+
+// Parse a mobile::Module from raw bytes, also returning JIT-related metadata.
+//
+// This is the same as parse_and_initialize_mobile_module() except that it also
+// extracts JIT source files and constants. Can be used to construct a
+// jit::Module.
+TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit(
+    void* data,
+    size_t size, // of `data`, in bytes.
+    ExtraFilesMap& jit_sources,
+    std::vector<IValue>& jit_constants,
+    c10::optional<at::Device> device = c10::nullopt,
+    ExtraFilesMap* extra_files = nullptr);
+
+// Load a mobile::Module from a filepath.
+//
+// This function does steps 1+2+3 described above.
+//
+// We need to have this as a convenience because the Python API will need to
+// wrap this. C++ clients should use one of the versions of
+// parse_and_initialize_mobile_module() so they can manage the raw data more
+// directly.
+TORCH_API mobile::Module load_mobile_module_from_file(
+    const std::string& filename,
+    c10::optional<at::Device> device = c10::nullopt,
+    ExtraFilesMap* extra_files = nullptr);
+
+TORCH_API uint64_t get_bytecode_version(std::istream& in);
+TORCH_API uint64_t get_bytecode_version(const std::string& filename);
+TORCH_API uint64_t get_bytecode_version_from_bytes(char* flatbuffer_content);
+
+TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer(
+    char* flatbuffer_content);
+
+// The methods below are less efficient because they need to read the stream in
+// its entirety into a buffer
+TORCH_API mobile::Module load_mobile_module_from_stream_with_copy(
+    std::istream& in,
+    c10::optional<at::Device> device = c10::nullopt,
+    ExtraFilesMap* extra_files = nullptr);
+
+TORCH_API mobile::Module parse_flatbuffer_no_object(
+    std::shared_ptr<char> data,
+    size_t size,
+    c10::optional<at::Device> device);
+
+TORCH_API mobile::Module parse_and_initialize_mobile_module(
+    void* data,
+    size_t,
+    c10::optional<at::Device>,
+    ExtraFilesMap* extra_files,
+    bool should_copy_tensor_memory);
+
+// no op, TODO(qihan) delete
+TORCH_API bool register_flatbuffer_loader();
+
+} // namespace jit
+} // namespace torch
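A hedged sketch of the step-1+2+3 convenience path declared above (load_mobile_module_from_file). The file name and input shape are hypothetical, and forward() is assumed to take a vector of IValues as in the usual mobile::Module interface:

    #include <ATen/ATen.h>
    #include <torch/csrc/jit/mobile/flatbuffer_loader.h>
    #include <torch/csrc/jit/mobile/module.h>

    int main() {
      // Reads the file, parses the serialized data, and initializes a mobile::Module.
      auto module = torch::jit::load_mobile_module_from_file("model.ptl"); // hypothetical
      // Run inference with one example input (assumes the model takes one tensor).
      std::vector<c10::IValue> inputs{at::ones({1, 3})};
      auto output = module.forward(std::move(inputs));
      (void)output;
    }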
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h
ADDED
@@ -0,0 +1,53 @@
+#pragma once
+
+#include <cstddef>
+
+#include <c10/util/Optional.h>
+#include <torch/csrc/jit/mobile/code.h>
+
+namespace torch {
+namespace jit {
+namespace mobile {
+
+class Frame {
+ public:
+  explicit Frame(const Code& code) : code_(code) {}
+  const Code& getCode() const {
+    return code_;
+  }
+
+  void step() {
+    pc_++;
+  }
+
+  void jump(size_t n) {
+    pc_ += n;
+  }
+
+  size_t getPC() const {
+    return pc_;
+  }
+
+  const Instruction& getInstruction() const {
+    return code_.instructions_.at(pc_);
+  }
+
+  c10::optional<int64_t> getDebugHandle() const {
+    return getDebugHandle(pc_);
+  }
+
+  c10::optional<int64_t> getDebugHandle(size_t pc) const {
+    if (pc >= code_.debug_handles_.size()) {
+      return {};
+    }
+    return code_.debug_handles_[pc];
+  }
+
+ private:
+  const Code& code_;
+  size_t pc_{0};
+};
+
+} // namespace mobile
+} // namespace jit
+} // namespace torch
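Frame is just a program counter over a Code object. As a hedged illustration of how a caller could walk it (assuming a Code already populated by the loader):

    #include <torch/csrc/jit/mobile/frame.h>

    // Illustrative only: count how many instructions of `code` carry a debug handle.
    size_t count_debug_handles(const torch::jit::mobile::Code& code) {
      torch::jit::mobile::Frame frame(code);
      size_t with_handles = 0;
      while (frame.getPC() < code.instructions_.size()) {
        if (frame.getDebugHandle().has_value()) {
          ++with_handles;
        }
        frame.step(); // advance the program counter by one instruction
      }
      return with_handles;
    }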
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h
ADDED
@@ -0,0 +1,86 @@
+#pragma once
+
+#include <vector>
+
+#include <ATen/core/function.h>
+#include <ATen/core/function_schema.h>
+#include <ATen/core/ivalue.h>
+#include <torch/csrc/jit/mobile/code.h>
+
+namespace torch {
+namespace jit {
+enum OpCode : uint8_t;
+struct Instruction;
+struct OperatorString;
+
+namespace mobile {
+
+class TORCH_API Function : public torch::jit::Function {
+ public:
+  explicit Function(c10::QualifiedName name);
+  Function(
+      c10::QualifiedName name,
+      Code code,
+      at::optional<c10::FunctionSchema> schema);
+  void run(Stack& stack) override;
+  at::IValue operator()(Stack& stack);
+  void ensure_defined() override {}
+  size_t num_inputs() const override;
+  const c10::QualifiedName& qualname() const override;
+  bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) override;
+
+  // NOTE: the APIs below are dangerous: if you call append_instruction with
+  // dbg_handle and then call it without, then the dbg_handle will become
+  // misaligned. Therefore only use ONE variant at a time.
+  void append_instruction(OpCode op, int X, int N, int64_t dbg_handle);
+  void append_instruction(OpCode op, int X, int N);
+  void append_operator(
+      const std::string& name,
+      const std::string& overload_name,
+      const c10::optional<int>& num_specified_args);
+  void append_constant(const c10::IValue& constant);
+  void append_type(const c10::TypePtr& type);
+  void append_function(mobile::Function& func);
+
+  void set_register_size(size_t size);
+
+  int64_t get_debug_handle(size_t pc) const;
+  const Code& get_code() const;
+  Code& get_code();
+
+  torch::jit::Function& setSchema(c10::FunctionSchema schema) override;
+  bool hasSchema() const;
+  const c10::FunctionSchema& getSchema() const override;
+
+  // Returns the debug handle corresponding to where the execution
+  // is halted due to exception.
+  // If no corresponding debug handle is found then -1 is returned.
+  const std::vector<int64_t>& getExceptionDebugHandles() const;
+  static Function& registerFunc(
+      const std::string& qualified_name,
+      const std::vector<Instruction>& instructions,
+      const std::vector<c10::IValue>& constants,
+      const std::vector<c10::TypePtr>& types,
+      const size_t register_size);
+
+  // If not initialized, initialize by loading operators.
+  // Returns true if all ops loaded; returns false if some op is not found
+  // in the current runtime. Then, the ops that were not found will be filled
+  // in unsupported_op_names
+  bool initialize_operators(bool should_check_operators);
+
+ private:
+  c10::QualifiedName name_;
+  Code code_;
+  at::optional<c10::FunctionSchema> schema_; // (byte-code version 4+)
+};
+
+c10::optional<std::function<void(Stack&)>> makeOperatorFunction(
+    c10::OperatorName opname,
+    c10::optional<int> num_specified_args);
+
+TORCH_API std::string operator_str(const c10::OperatorName& opname);
+
+} // namespace mobile
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h
ADDED
@@ -0,0 +1,38 @@
+#pragma once
+
+#include <ATen/core/TensorBase.h>
+#include <c10/core/Device.h>
+#include <c10/util/Optional.h>
+#include <torch/csrc/jit/mobile/module.h>
+
+#include <istream>
+#include <map>
+#include <string>
+
+namespace torch {
+namespace jit {
+
+/**
+ * Loads named parameters from the serialized data in @p in.
+ *
+ * Calls #TORCH_CHECK() if the data format is not recognized.
+ */
+TORCH_API std::map<std::string, at::Tensor> _load_parameters(
+    std::istream& in,
+    c10::optional<at::Device> device = c10::nullopt);
+
+/**
+ * Loads named parameters from the serialized data in @p filename.
+ *
+ * Calls #TORCH_CHECK() if the data format is not recognized.
+ */
+TORCH_API std::map<std::string, at::Tensor> _load_parameters(
+    const std::string& filename,
+    c10::optional<at::Device> device = c10::nullopt);
+
+// NOTE: Please prefer using _load_parameters over using the function below.
+TORCH_API std::map<std::string, at::Tensor> mobile_module_to_parameter_map(
+    const mobile::Module& module);
+
+} // namespace jit
+} // namespace torch
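A small hedged sketch of the _load_parameters entry point declared above; the parameter file name is hypothetical:

    #include <iostream>
    #include <torch/csrc/jit/mobile/import_data.h>

    int main() {
      // Loads a name -> tensor map from a serialized parameter file
      // (TORCH_CHECK fails if the format is not recognized).
      auto params = torch::jit::_load_parameters("mobile_params.ptl"); // hypothetical
      for (const auto& kv : params) {
        std::cout << kv.first << ": " << kv.second.sizes() << "\n";
      }
    }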
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+/**
+ * @file
+ * Declarations shared between import_data.cpp and export_data.cpp
+ */
+
+namespace torch {
+namespace jit {
+namespace mobile {
+
+namespace internal {
+/**
+ * The name of the mobile::Module attribute which contains saved parameters, as
+ * a Dict of names to Tensors. Only used for Flatbuffer serialization.
+ */
+// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+constexpr char kSavedParametersAttributeName[] = "data";
+} // namespace internal
+
+} // namespace mobile
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h
ADDED
@@ -0,0 +1,30 @@
+#pragma once
+
+#include <vector>
+
+#include <torch/csrc/jit/mobile/code.h>
+#include <torch/csrc/jit/mobile/frame.h>
+
+namespace torch {
+namespace jit {
+namespace mobile {
+
+struct InterpreterState {
+  TORCH_API explicit InterpreterState(const Code& code);
+  TORCH_API bool run(Stack& stack);
+
+ private:
+  void enterFrame(const Code&);
+  void leaveFrame();
+  void saveExceptionDebugHandles();
+  void callFunction(torch::jit::Function& f, Stack& stack);
+
+  c10::IValue& reg(size_t reg);
+  std::vector<c10::IValue> registers_;
+  std::vector<Frame> frames_;
+};
+
+const std::vector<DebugHandle>& getInterpretersExceptionDebugHandles();
+} // namespace mobile
+} // namespace jit
+} // namespace torch
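InterpreterState::run executes a Code against a Stack of IValues. A hedged illustrative sketch (what the stack must contain depends on the function being run):

    #include <torch/csrc/jit/mobile/interpreter.h>

    // Illustrative only: run an already-initialized Code against a stack of inputs.
    // On success the outputs are left on `stack`.
    bool run_code(const torch::jit::mobile::Code& code,
                  std::vector<c10::IValue>& stack) {
      torch::jit::mobile::InterpreterState state(code);
      return state.run(stack);
    }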
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h
ADDED
@@ -0,0 +1,45 @@
+#pragma once
+
+#include <ATen/core/ivalue.h>
+#include <torch/csrc/jit/mobile/function.h>
+
+namespace torch {
+namespace jit {
+namespace mobile {
+
+class Module;
+
+struct TORCH_API Method {
+  Method(const Module* owner, Function* function);
+
+  void run(Stack& stack) const;
+  void run(Stack&& stack) const {
+    run(stack);
+  }
+
+  c10::IValue operator()(std::vector<c10::IValue> stack) const;
+
+  const std::string& name() const {
+    return function_->name();
+  }
+
+  int64_t get_debug_handle(size_t pc) const {
+    return function_->get_debug_handle(pc);
+  }
+
+  Function& function() const {
+    return *function_;
+  }
+
+ private:
+  // Methods are uniquely owned by a single module.
+  // This raw pointer allows referencing the module
+  const Module* owner_;
+
+  // Underlying unbound function
+  Function* function_;
+};
+
+} // namespace mobile
+} // namespace jit
+} // namespace torch
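Method is a thin bound wrapper over a mobile::Function. A hedged sketch of invoking one; it assumes the mobile::Module::get_method accessor, which is declared elsewhere and not part of this header:

    #include <torch/csrc/jit/mobile/method.h>
    #include <torch/csrc/jit/mobile/module.h>

    // Illustrative only: call a named method with the given inputs.
    c10::IValue call_method(const torch::jit::mobile::Module& module,
                            const std::string& name,
                            std::vector<c10::IValue> inputs) {
      torch::jit::mobile::Method method = module.get_method(name); // assumed accessor
      return method(std::move(inputs)); // operator() runs the underlying Function
    }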
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h
ADDED
@@ -0,0 +1,110 @@
+#pragma once
+
+#include <c10/util/ThreadLocalDebugInfo.h>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace torch {
+
+class MobileDebugInfo : public c10::DebugInfoBase {
+ public:
+  const std::string& getModelName() {
+    return model_name_;
+  }
+
+  void setModelName(const std::string& model_name) {
+    model_name_ = model_name;
+  }
+
+  const std::string& getMethodName() {
+    return method_name_;
+  }
+
+  void setMethodName(const std::string& method_name) {
+    method_name_ = method_name;
+  }
+
+  size_t getOpIdx() {
+    return op_idx_;
+  }
+
+  void setOpIdx(size_t op_idx) {
+    op_idx_ = op_idx;
+  }
+
+ private:
+  std::string model_name_;
+  std::string method_name_;
+  // TODO: Kimish
+  // If we launch a thread such as for at::launch, interpreter continuation
+  // and if the caching allocator is enabled in the base thread
+  // then, in order to propagate this information, that is caching allocator
+  // is enabled, across thread boundaries we can use the mechanism provided
+  // by ThreadLocalDebugInfo
+  // Once the thread local MobileDebugInfo is accessible in the launched
+  // thread, it can be accessed in that thread and that thread can set
+  // its own thread local CachingAllocatorInfo.
+  // However, we cannot expect every launched thread to extract and set
+  // its own thread local copy of CachingAllocatorInfo.
+  // But this can be done in lite interpreter, where in the run method
+  // it can do info =
+  // c10::ThreadLocalDebugInfo::get(c10::DebugInfoKind::MOBILE_RUNTIME_INFO))
+  // .get_caching_allocator_info();
+  // GetThreadLocalCachingAllocatorInfo() = info;
+  // Other option is to have MobileDebugInfo itself be the place where thread
+  // local copy of CachingAllocatorInfo is stored. Then
+  // DefaultMobileCPUAllocator inspects this to decide if to use
+  // CachingAllocator. However, current lite interpreter does not support FORK,
+  // thus from the run method of lite interpreter we are not really gonna launch
+  // another instance of lite interpreter in a different thread. So for now not
+  // getting bothered about passing CachingAllocatorInfo across thread
+  // boundaries. c10::CachingAllocatorInfo caching_allocator_info;
+  size_t op_idx_ = 0;
+};
+
+class MobileModuleObserver {
+ public:
+  virtual ~MobileModuleObserver() = default;
+
+  virtual void onEnterRunMethod(const int32_t) {}
+  virtual void onExitRunMethod(
+      const std::unordered_map<std::string, std::string>&,
+      const std::string&,
+      const int32_t) {}
+  virtual void onFailRunMethod(
+      const std::unordered_map<std::string, std::string>&,
+      const std::string&,
+      const int32_t,
+      const char*) {}
+  virtual void onEnterLoadModel(const int32_t) {}
+  virtual void onExitLoadModel(
+      const int32_t,
+      const std::unordered_map<std::string, std::string>&) {
+  } // key: filename, value: file content
+  virtual void onFailLoadModel(const int32_t, const char*) {}
+  virtual void onFailLoadModel(
+      const int32_t,
+      const char*,
+      const std::unordered_map<std::string, std::string>&) {}
+  virtual std::vector<std::string> getDefaultExtraFiles() = 0;
+  virtual std::unordered_map<std::string, std::string> processMetadataFromExtra(
+      const std::unordered_map<std::string, std::string>&) = 0;
+};
+
+class MobileObserverConfig {
+ public:
+  void setModuleObserver(std::unique_ptr<MobileModuleObserver> reporter) {
+    module_observer_ = std::move(reporter);
+  }
+  MobileModuleObserver* getModuleObserver() {
+    return module_observer_.get();
+  }
+
+ private:
+  std::unique_ptr<MobileModuleObserver> module_observer_;
+};
+
+MobileObserverConfig& observerConfig();
+
+} // namespace torch
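A hedged sketch of plugging an observer into MobileObserverConfig as declared above. The logging behavior is invented for illustration; the two pure-virtual hooks are stubbed out and the remaining hooks keep their default no-op bodies:

    #include <iostream>
    #include <memory>
    #include <torch/csrc/jit/mobile/observer.h>

    // Illustrative observer that logs run-method entry.
    class LoggingObserver : public torch::MobileModuleObserver {
     public:
      void onEnterRunMethod(const int32_t instance_key) override {
        std::cout << "run_method entered, instance " << instance_key << "\n";
      }
      std::vector<std::string> getDefaultExtraFiles() override {
        return {};
      }
      std::unordered_map<std::string, std::string> processMetadataFromExtra(
          const std::unordered_map<std::string, std::string>& extra) override {
        return extra;
      }
    };

    void install_observer() {
      torch::observerConfig().setModuleObserver(
          std::make_unique<LoggingObserver>());
    }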
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h
ADDED
@@ -0,0 +1,25 @@
+#pragma once
+#include <torch/csrc/jit/mobile/function.h>
+
+namespace torch {
+namespace jit {
+namespace mobile {
+using c10::IValue;
+TORCH_API void parseInstructions(
+    const std::string& function_name,
+    c10::ivalue::TupleElements&& ins_list,
+    c10::ivalue::TupleElements& debug_handles_m_tuple,
+    mobile::Function* function);
+TORCH_API void parseConstants(
+    const c10::ivalue::TupleElements& consts_list,
+    mobile::Function* function);
+TORCH_API void parseTypes(
+    const c10::ivalue::TupleElements& types_list,
+    mobile::Function* function);
+TORCH_API void parseRegisterSize(size_t rsize, mobile::Function* function);
+TORCH_API void applyUpgrader(
+    mobile::Function* function,
+    uint64_t operator_version);
+} // namespace mobile
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h
ADDED
@@ -0,0 +1,32 @@
+#pragma once
+
+#include <ATen/core/ivalue.h>
+#include <functional>
+#include <vector>
+
+namespace torch {
+namespace jit {
+namespace mobile {
+
+using Stack = std::vector<c10::IValue>;
+
+void registerPrimOpsFunction(
+    const std::string& name,
+    const std::function<void(Stack&)>& fn);
+
+bool hasPrimOpsFn(const std::string& name);
+
+std::function<void(Stack&)>& getPrimOpsFn(const std::string& name);
+
+class prim_op_fn_register {
+ public:
+  prim_op_fn_register(
+      const std::string& name,
+      const std::function<void(Stack&)>& fn) {
+    registerPrimOpsFunction(name, fn);
+  }
+};
+
+} // namespace mobile
+} // namespace jit
+} // namespace torch
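Registration works by constructing a prim_op_fn_register at namespace scope, which calls registerPrimOpsFunction in its constructor. A hedged sketch with an invented op name (real registrations live inside PyTorch itself):

    #include <torch/csrc/jit/mobile/prim_ops_registery.h>

    namespace {

    using torch::jit::mobile::Stack;

    // Illustrative only: registers a stack function under a made-up name.
    // "example::noop" is hypothetical and does not exist in PyTorch.
    torch::jit::mobile::prim_op_fn_register example_noop_reg(
        "example::noop",
        [](Stack& stack) {
          // Leave the stack untouched.
          (void)stack;
        });

    } // namespace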
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h
ADDED
@@ -0,0 +1,119 @@
+#pragma once
+#include <torch/csrc/autograd/profiler_kineto.h>
+#include <torch/csrc/jit/mobile/module.h>
+
+namespace torch {
+namespace jit {
+namespace mobile {
+
+// If we don't have Kineto available then the edge profiler does not
+// work, since it relies on Kineto
+#ifdef USE_KINETO
+class TORCH_API KinetoEdgeCPUProfiler {
+ public:
+  // This profiler only profiles KINETO events
+  // No GPU_FALLBACK or NVTX
+  /*
+   * @param m is the instance of mobile Module which is being profiled.
+   *        Note that this implies that KinetoEdgeCPUProfiler can be used
+   *        to profile a specific Module (see usage below), unlike ProfilerKineto
+   *        which can profile the pytorch runtime in arbitrary scope.
+   * @param fname is the name of the file to which chrome trace is written.
+   * @param report_input_shapes: whether to record shapes of op's inputs.
+   * @param with_stack: whether to record model's python stacktrace for the op.
+   * @param with_flops: whether to report flops corresponding to the op.
+   * @param with_modules: whether to report original python module
+   *        hierarchy to which the op belongs.
+   * @param events
+   * @param adjust_vulkan_timestamps: whether to adjust vulkan timestamps from
+   *        query pool to align with cpu event times
+   *
+   * Usage pattern for this profiler must be as follows:
+   *
+   * {
+   *   KinetoEdgeCPUProfiler(m, filename, args);
+   *   m.forward(...);
+   * }
+   *
+   * The reason being that KinetoEdgeCPUProfiler has a dependency on Module
+   * and thus it must not outlive it.
+   *
+   * Thus, KinetoEdgeCPUProfiler is used as RAII to do profiling
+   * within a certain scope. In that scope, the captured reference to
+   * Module will outlive KinetoEdgeCPUProfiler. This is guaranteed because
+   * KinetoEdgeCPUProfiler must be constructed later than Module, on the stack.
+   *
+   * An example of the anti-pattern and wrong usage is:
+   *
+   * std::shared_ptr<KinetoMobileCPUProfiler> profiler(m, filename, args);
+   * m.forward(...);
+   *
+   * Since the KinetoEdgeCPUProfiler object would then be constructed on the heap
+   * with its lifetime managed manually or via smart pointers.
+   */
+  KinetoEdgeCPUProfiler(
+      const torch::jit::mobile::Module& m,
+      const std::string& fname,
+      const bool report_input_shapes = false,
+      const bool profile_memory = false,
+      const bool with_stack = false,
+      const bool with_flops = false,
+      const bool with_modules = false,
+      std::vector<std::string> events = {},
+      const bool adjust_vulkan_timestamps = false);
+
+  const std::unique_ptr<torch::autograd::profiler::ProfilerResult>&
+  disableProfiler();
+  const std::unique_ptr<torch::autograd::profiler::ProfilerResult>&
+  getProfilerResult();
+  void recordBackendEvent(
+      const int64_t start_time_us,
+      const int64_t end_time_us,
+      const int64_t debug_handle,
+      const std::string& event_name,
+      const std::string& backend_name);
+  void recordBackendMemoryEvent(
+      void* ptr,
+      int64_t alloc_size,
+      size_t total_allocated,
+      size_t total_reserved,
+      c10::Device device);
+
+  ~KinetoEdgeCPUProfiler();
+
+ private:
+  /*
+   * We store a reference to Module to make such dependency explicit, since
+   * a Module reference is already stored in a functor.
+   */
+  const mobile::Module& m_;
+  std::string trace_file_name_;
+  std::unique_ptr<torch::autograd::profiler::ProfilerResult> profiler_result_;
+};
+
+TORCH_API KinetoEdgeCPUProfiler* getCurrentEdgeProfiler();
+
+#define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER(                                 \
+    start_time_us, end_time_us, debug_handle, event_name, backend_name)        \
+  if (mobile::getCurrentEdgeProfiler()) {                                      \
+    mobile::getCurrentEdgeProfiler()->recordBackendEvent(                      \
+        start_time_us, end_time_us, debug_handle, event_name, backend_name);   \
+  }
+
+#define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER(                          \
+    ptr, alloc_size, total_allocated, total_reserved, device)                  \
+  if (mobile::getCurrentEdgeProfiler()) {                                      \
+    mobile::getCurrentEdgeProfiler()->recordBackendMemoryEvent(                \
+        ptr, alloc_size, total_allocated, total_reserved, device);             \
+  }
+#else
+
+#define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \
+    start_time_us, end_time_us, debug_handle, event_name, backend_name)
+
+#define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \
+    ptr, alloc_size, total_allocated, total_reserved, device)
+#endif
+} // namespace mobile
+} // namespace jit
+} // namespace torch
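Mirroring the RAII usage pattern in the header comment above, a hedged concrete sketch; the trace path and inputs are hypothetical:

    #include <torch/csrc/jit/mobile/module.h>
    #include <torch/csrc/jit/mobile/profiler_edge.h>

    // Illustrative only: profile one forward() call in an RAII scope, as the
    // header comment prescribes. Requires a Kineto-enabled build.
    void profile_forward(torch::jit::mobile::Module& m,
                         std::vector<c10::IValue> inputs) {
    #ifdef USE_KINETO
      torch::jit::mobile::KinetoEdgeCPUProfiler profiler(m, "/tmp/trace.json");
      m.forward(std::move(inputs));
      // The chrome trace is written when `profiler` is destroyed at scope exit.
    #endif
    }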
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h
ADDED
@@ -0,0 +1,63 @@
+#pragma once
+#include <torch/csrc/jit/mobile/prim_ops_registery.h>
+#include <torch/csrc/jit/mobile/register_ops_common_utils.h>
+
+namespace torch {
+namespace jit {
+
+void tupleIndex(Stack& stack);
+
+void raiseException(Stack& stack);
+
+void is(Stack& stack);
+
+void unInitialized(Stack& stack);
+
+void isNot(Stack& stack);
+
+void aten_format(Stack& stack);
+
+void size(Stack& stack);
+
+void sym_size(Stack& stack);
+
+void sym_size_int(Stack& stack);
+
+void sym_stride_int(Stack& stack);
+
+void sym_numel(Stack& stack);
+
+void sym_storage_offset(Stack& stack);
+
+void sym_stride(Stack& stack);
+
+void device(Stack& stack);
+
+void device_with_index(Stack& stack);
+
+void dtype(Stack& stack);
+
+void layout(Stack& stack);
+
+void toPrimDType(Stack& stack);
+
+void dim(Stack& stack);
+
+void _not(Stack& stack);
+
+void boolTensor(Stack& stack);
+
+void toList(Stack& stack);
+
+void numToTensorScalar(Stack& stack);
+
+void isCuda(Stack& stack);
+
+void numToTensorBool(Stack& stack);
+
+void dictIndex(Stack& stack);
+
+void raiseExceptionWithMessage(Stack& stack);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h
ADDED
@@ -0,0 +1,38 @@
+#pragma once
+
+#include <c10/macros/Export.h>
+#include <string>
+
+namespace torch {
+namespace jit {
+namespace mobile {
+class Module;
+namespace quantization {
+/*
+ * Device side PTQ API.
+ * Once the model has been prepared for quantization on the server side, such a
+ * model is sent to the device. On the device side the model is further trained.
+ * At the end of the training, before the model is readied for inference, we
+ * need to quantize the model.
+ * Usage of this API is as follows.
+ * PTQQuanizationHelper ptq_helper;
+ * ptq_helper.quantize_dynamic(m, "forward");
+ * Args:
+ * m: Captured by reference, an instance of mobile::Module. This module will be
+ * mutated in place to replace its <method_name> method with the quantized
+ * equivalent. method_name: Name of the method to be quantized. AOT preparation
+ * for quantization must also have been done for this method. Returns: In place
+ * mutated `m` whose size should be smaller due to weight quantization and whose
+ * <method_name> method should use quantized ops
+ */
+class TORCH_API PTQQuanizationHelper {
+ public:
+  PTQQuanizationHelper() = default;
+  void quantize_dynamic(
+      torch::jit::mobile::Module& m,
+      const std::string& method_name);
+};
+} // namespace quantization
+} // namespace mobile
+} // namespace jit
+} // namespace torch
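The header comment above spells out the intended call pattern; as a hedged concrete sketch of device-side PTQ (mutating the module in place):

    #include <torch/csrc/jit/mobile/module.h>
    #include <torch/csrc/jit/mobile/quantization.h>

    // Illustrative only: quantize the "forward" method of an on-device module,
    // following the usage described in the header comment.
    void quantize_forward(torch::jit::mobile::Module& m) {
      torch::jit::mobile::quantization::PTQQuanizationHelper ptq_helper;
      ptq_helper.quantize_dynamic(m, "forward");
    }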
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h
ADDED
@@ -0,0 +1,55 @@
+#pragma once
+
+#include <ATen/Context.h>
+#include <ATen/NativeFunctions.h>
+#include <ATen/core/ivalue.h>
+#include <ATen/core/stack.h>
+#include <torch/csrc/jit/runtime/jit_exception.h>
+#include <torch/csrc/jit/runtime/vararg_functions.h>
+
+namespace torch {
+namespace jit {
+
+inline void noop(Stack& n) {}
+
+int64_t normalizeIndex(int64_t idx, int64_t list_size);
+
+// reference function THPVariable_to in python_variable_methods.cpp
+static C10_UNUSED at::Tensor to_dispatch(
+    at::Tensor self,
+    c10::optional<at::Device> device,
+    c10::optional<at::ScalarType> scalarType,
+    bool non_blocking,
+    bool copy) {
+  if (device && device->is_cuda()) {
+    at::globalContext().lazyInitCUDA();
+  }
+  if (!device && !scalarType && !copy) {
+    return self;
+  } else if (!device) {
+    return self.to(*scalarType, non_blocking, copy);
+  } else if (!scalarType) {
+    return self.to(*device, non_blocking, copy);
+  } else {
+    return self.to(*device, *scalarType, non_blocking, copy);
+  }
+}
+
+// Convert the tensor pointed to by \p data to a nested list. \p dim is the
+// number of dimensions in the tensor and \p cur_dim is the dimension being
+// processed by the current invocation. \p ty is the expected output IR type of
+// the operation. \p is the scalar type of \p data. \p sizes and \p strides are
+// the sizes and strides of the tensor operand and \p element_size is the size
+// in bytes of one tensor element.
+IValue tensorToListRecursive(
+    char* data,
+    int64_t cur_dim,
+    int64_t num_tensor_dims,
+    at::TypePtr ty,
+    at::ScalarType scalar_ty,
+    at::IntArrayRef sizes,
+    at::IntArrayRef strides,
+    size_t element_size);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/add_if_then_else.h
ADDED
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+TORCH_API bool AddIfThenElseOp(std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/canonicalize.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+TORCH_API std::shared_ptr<Graph> Canonicalize(
+    const std::shared_ptr<Graph>& graph,
+    bool keep_unique_names = true);
+
+TORCH_API void CanonicalizeOutputs(std::shared_ptr<Graph>& graph);
+
+TORCH_API c10::optional<const Use> firstOrLastUse(Value* v, bool find_first);
+
+TORCH_API bool isBeforeOrAfter(
+    const Use& a,
+    const Use& b,
+    bool checking_before);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/concat_opt.h
ADDED
@@ -0,0 +1,19 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// Eliminates common inputs among `aten::cat` ops.
+TORCH_API bool EliminateConcatCommonInputs(const std::shared_ptr<Graph>& graph);
+
+// Expands `aten::cat` ops into `aten::copy` ops and eliminates redundancies
+// in the buffers used for concatenation if possible.
+TORCH_API void ExpandConcatAndEliminateRedundancy(
+    const std::shared_ptr<Graph>& graph);
+
+TORCH_API bool CombineConcats(const std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/constant_propagation.h
ADDED
@@ -0,0 +1,32 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// Runs constant propagation on all objects unless ignore_custom_classes is
+// specified as true, in which case user defined classes are skipped. This is
+// useful to prevent early fusion of packing operations, which end up lowering
+// away information about their constructors (e.g. packed::linear_clamp_prepack
+// and prepacked::conv2d_clamp_prepack).
+// Returns true if the pass made a change to the graph.
+TORCH_API bool ConstantPropagation(
+    std::shared_ptr<Graph>& graph,
+    bool ignore_custom_classes = false);
+
+// Runs constant propagation only on ops that have non-aliasing inputs & outputs.
+// Returns true if the pass made a change to the graph.
+TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr<Graph>& graph);
+
+// Runs the node if its inputs are constants. Callers of this function must
+// make their own determination if constant prop is appropriate - for example
+// non-deterministic ops or ops with side effects. If ignore_custom_classes is
+// specified, nodes that output user defined classes are not run.
+TORCH_API c10::optional<Stack> runNodeIfInputsAreConstant(
+    const Node* node,
+    bool ignore_custom_classes = false,
+    AliasDb* db = nullptr);
+
+} // namespace jit
+} // namespace torch
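A sketch of how the per-node helper above can be used by a hypothetical caller (not taken from the diff): runNodeIfInputsAreConstant returns the would-be outputs when every input is constant, leaving the legality decision (determinism, side effects) to the caller.

#include <torch/csrc/jit/passes/constant_propagation.h>

// Sketch: try to fold a single node; the caller must first check that the op
// is deterministic and has no side effects.
void tryFoldNodeSketch(torch::jit::Node* node) {
  if (auto outputs = torch::jit::runNodeIfInputsAreConstant(node)) {
    // `*outputs` holds one IValue per output of `node`; a real pass would now
    // materialize them as prim::Constant nodes and rewire the uses.
    AT_ASSERT(outputs->size() == node->outputs().size());
  }
}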
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/create_functional_graphs.h
ADDED
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <torch/csrc/Export.h>
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+TORCH_API void CreateFunctionalGraphs(const std::shared_ptr<Graph>& graph);
+
+TORCH_API void InlineFunctionalGraphs(const std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/dead_code_elimination.h
ADDED
@@ -0,0 +1,42 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// If given a top-level graph, DCE will construct alias analysis that allows
+// for "smarter" dead code elimination (we will eliminate mutable ops if we can
+// prove the mutated values are not used). Otherwise, we will not allow DCE to
+// eliminate mutable ops.
+//
+// So, prefer to use the graph version if you can.
+enum class DCESideEffectPolicy : uint8_t {
+  // default behavior: dead code elimination will check if a node has side
+  // effects and not delete it if it does.
+  DONT_DELETE_NODES_WITH_SIDE_EFFECTS,
+  // with this flag, dead code elimination will not check if a node has side
+  // effects and treat nodes with side effects like any other node,
+  // i.e. delete them if their outputs aren't used anywhere.
+  ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS
+};
+
+TORCH_API void EliminateDeadCode(
+    const std::shared_ptr<Graph>& graph,
+    DCESideEffectPolicy sideEffectPolicy =
+        DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
+TORCH_API void EliminateDeadCode(
+    Block* block,
+    bool recurse = true,
+    DCESideEffectPolicy sideEffectPolicy =
+        DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
+
+// Invoke the user-provided callback on all live values before deleting anything
+TORCH_API void EliminateDeadCode(
+    Block* block,
+    std::function<void(const std::unordered_set<const Value*>&)> cb,
+    DCESideEffectPolicy sideEffectPolicy =
+        DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
+} // namespace jit
+} // namespace torch
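A short sketch of calling the pass with both policies; the wrapper function is hypothetical and only illustrates the overloads declared above.

#include <torch/csrc/jit/passes/dead_code_elimination.h>

void runDceSketch(std::shared_ptr<torch::jit::Graph>& graph) {
  // Graph overload: builds alias analysis, so mutable ops whose effects are
  // provably unused can also be removed.
  torch::jit::EliminateDeadCode(graph);
  // Block overload with the permissive policy: nodes with side effects are
  // treated like any other node and removed if their outputs are unused.
  torch::jit::EliminateDeadCode(
      graph->block(),
      /*recurse=*/true,
      torch::jit::DCESideEffectPolicy::ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS);
}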
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/decompose_ops.h
ADDED
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+TORCH_API void DecomposeOps(std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fold_linear_bn.h
ADDED
@@ -0,0 +1,29 @@
+#pragma once
+
+#include <torch/csrc/jit/api/module.h>
+
+namespace torch {
+namespace jit {
+
+struct TORCH_API LinearBNParameters {
+  at::Tensor linear_w;
+  at::Tensor linear_b;
+  at::Tensor bn_rm;
+  at::Tensor bn_rv;
+  double bn_eps = 0.0;
+  at::Tensor bn_w;
+  at::Tensor bn_b;
+};
+
+/**
+ * Given the current weight and bias tensors of a Linear module and parameters
+ * of the BatchNorm module we're folding with, compute the updated values
+ * for the weight and bias.
+ *
+ * The function is basically copied from torch/nn/utils/fusion.py
+ */
+TORCH_API std::tuple<at::Tensor, at::Tensor> computeUpdatedLinearWeightAndBias(
+    const LinearBNParameters& p);
+
+} // namespace jit
+} // namespace torch
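The fold computed by computeUpdatedLinearWeightAndBias follows the usual Linear+BatchNorm fusion algebra. Below is a sketch in ATen using the field names of LinearBNParameters; the function name is hypothetical and the real implementation (in the .cpp) may differ in detail.

#include <ATen/ATen.h>
#include <tuple>

// Sketch of the math: y = BN(Wx + b) collapses to y = W'x + b' with
// scale = bn_w / sqrt(bn_rv + bn_eps), W' = W * scale (per output channel),
// b' = (b - bn_rm) * scale + bn_b.
std::tuple<at::Tensor, at::Tensor> foldLinearBNSketch(
    const at::Tensor& linear_w, // [out_features, in_features]
    const at::Tensor& linear_b, // [out_features]
    const at::Tensor& bn_rm,    // running mean
    const at::Tensor& bn_rv,    // running variance
    double bn_eps,
    const at::Tensor& bn_w,     // gamma
    const at::Tensor& bn_b) {   // beta
  at::Tensor scale = bn_w / at::sqrt(bn_rv + bn_eps);
  at::Tensor fused_w = linear_w * scale.unsqueeze(1);
  at::Tensor fused_b = (linear_b - bn_rm) * scale + bn_b;
  return std::make_tuple(fused_w, fused_b);
}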
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_add_relu_fusion.h
ADDED
@@ -0,0 +1,15 @@
+#pragma once
+
+#include <torch/csrc/jit/api/module.h>
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+TORCH_API extern std::function<void(std::shared_ptr<Graph>&)>&
+getFuseFrozenConvAddReluImpl();
+
+TORCH_API void FuseFrozenConvAddRelu(std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_conv_folding.h
ADDED
@@ -0,0 +1,24 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// Fuses Convolution -> Batchnorm into a single Convolution by
+// folding batchnorm weights into conv weights.
+// This pass only works on Frozen Graphs; otherwise it is a No-Op.
+TORCH_API bool FoldFrozenConvBatchnorm(std::shared_ptr<Graph>& graph);
+
+// Fuses Convolution -> Add/Sub into a single Convolution by
+// folding the add constant tensor into conv weights.
+// This pass only works on Frozen Graphs; otherwise it is a No-Op.
+TORCH_API bool FoldFrozenConvAddOrSub(std::shared_ptr<Graph>& graph);
+
+// Fuses Convolution -> Mul/Div into a single Convolution by
+// folding the mul constant tensor into conv weights.
+// This pass only works on Frozen Graphs; otherwise it is a No-Op.
+TORCH_API bool FoldFrozenConvMulOrDiv(std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
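Since these folds are documented to be no-ops on non-frozen graphs, a typical caller freezes the module first. A hypothetical pipeline follows; freeze_module is assumed to come from torch/csrc/jit/passes/freeze_module.h and requires an eval-mode module, and the wrapper function is illustrative only.

#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/passes/freeze_module.h>
#include <torch/csrc/jit/passes/frozen_conv_folding.h>

// Sketch only: freeze, then run the three conv folds on the forward graph.
torch::jit::Module foldFrozenConvSketch(const torch::jit::Module& m) {
  torch::jit::Module frozen = torch::jit::freeze_module(m.clone());
  auto graph = frozen.get_method("forward").graph();
  torch::jit::FoldFrozenConvBatchnorm(graph);
  torch::jit::FoldFrozenConvAddOrSub(graph);
  torch::jit::FoldFrozenConvMulOrDiv(graph);
  return frozen;
}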
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/frozen_linear_transpose.h
ADDED
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// Transposes the weight matrix for frozen linear modules
+// and converts the op into a matmul.
+TORCH_API bool FrozenLinearTranspose(std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/fuse_linear.h
ADDED
@@ -0,0 +1,24 @@
+/** \brief Fusing linear patterns as single at::linear for easier pattern
+ * matching in later passes
+ */
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+/** \brief Match the at::linear pattern and fuse it into a single at::linear.
+ * This pass fuses the addmm or matmul + add generated by the JIT back to linear.
+ * This pass can be deleted once the JIT can emit aten::linear directly.
+ */
+TORCH_API void FuseLinear(std::shared_ptr<Graph>& graph);
+
+/** Swap functional linear CallFunctions to aten::linear
+ */
+TORCH_API void SwapFunctionalLinear(std::shared_ptr<Graph>& graph);
+/** Swap all functional linear CallFunctions in module
+ */
+TORCH_API void SwapFunctionalLinear(Module& module);
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/graph_rewrite_helper.h
ADDED
@@ -0,0 +1,54 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+#include <torch/csrc/jit/ir/irparser.h>
+#include <torch/csrc/jit/ir/subgraph_matcher.h>
+#include <torch/csrc/jit/passes/subgraph_rewrite.h>
+
+namespace torch {
+namespace jit {
+namespace graph_rewrite_helper {
+
+std::string getFuncName(Value* func_value);
+Value* getValue(
+    const std::string& name,
+    const std::unordered_map<const Value*, Value*>& match_vmap,
+    const std::unordered_map<std::string, Value*>& vmap);
+c10::optional<IValue> getIValue(
+    const std::string& name,
+    const std::unordered_map<const Value*, Value*>& match_vmap,
+    const std::unordered_map<std::string, Value*>& vmap);
+TORCH_API void replaceConvolutionWithAtenConv(std::shared_ptr<Graph>& graph);
+
+bool isClampFusable(
+    const Match& match,
+    const std::unordered_map<std::string, Value*>& vmap);
+
+// This struct contains a compiled IR pattern slated for use in the
+// findPatternMatches function. The struct encapsulates the common
+// information from parseIR that is used in conjunction with the
+// pattern matching facility. A const instance of this struct can
+// also be stored away to cache the compiled IR pattern and reduce
+// runtime cost
+struct PatternInfo {
+  std::string pattern_string;
+  std::unique_ptr<Graph> pattern_graph;
+  std::unordered_map<std::string, Value*> vmap;
+  std::vector<MatchFilter> filters;
+
+  static PatternInfo parse_from_str(
+      std::string pattern_string,
+      const std::vector<MatchFilter>& filters = {}) {
+    PatternInfo rv{
+        std::move(pattern_string),
+        std::make_unique<Graph>(),
+        decltype(vmap){},
+        filters};
+    parseIR(rv.pattern_string, rv.pattern_graph.get(), rv.vmap);
+    return rv;
+  }
+};
+
+} // namespace graph_rewrite_helper
+} // namespace jit
+} // namespace torch
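A small sketch of how PatternInfo::parse_from_str is typically used; the specific pattern below is hypothetical, not from the diff. The IR string is parsed once into pattern_graph and vmap, and the const object can be cached for reuse by the matcher.

#include <torch/csrc/jit/passes/graph_rewrite_helper.h>

// Sketch: a cached, pre-parsed pattern matching a single aten::dequantize.
static const auto dequantize_pattern =
    torch::jit::graph_rewrite_helper::PatternInfo::parse_from_str(R"IR(
graph(%q):
  %dq = aten::dequantize(%q)
  return (%dq))IR");
// dequantize_pattern.pattern_graph holds the parsed Graph and
// dequantize_pattern.vmap maps names such as "q" and "dq" to Values.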
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lift_closures.h
ADDED
@@ -0,0 +1,12 @@
+#pragma once
+
+#include <torch/csrc/Export.h>
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+TORCH_API void liftClosures(const std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/lower_grad_of.h
ADDED
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// This pass removes 'grad_of' nodes, replacing them with conditionals of
+// the form:
+//   if any_defined(inputs):
+//     outputs = <original_computation>
+//   else:
+//     outputs = undefineds
+TORCH_API void LowerGradOf(Graph& g);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/normalize_ops.h
ADDED
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// This pass converts aten ops to a normalized form. It is
+// run immediately after IR generation in both the tracer and compiler,
+// so downstream consumers of the IR do not need to handle ops in their
+// pre-normalized form.
+// Currently only handles normalization of op aliases.
+TORCH_API void NormalizeOps(const std::shared_ptr<Graph>& graph);
+
+const std::unordered_map<Symbol, Symbol>& getOperatorAliasMap();
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/onnx.h
ADDED
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+#include <torch/csrc/onnx/onnx.h>
+#include <unordered_map>
+
+namespace torch {
+namespace jit {
+
+TORCH_API std::shared_ptr<Graph> ToONNX(
+    std::shared_ptr<Graph>& state,
+    ::torch::onnx::OperatorExportTypes operator_export_type);
+TORCH_API std::unordered_map<Value*, Value*> BlockToONNX(
+    Block* old_block,
+    Block* new_block,
+    ::torch::onnx::OperatorExportTypes operator_export_type,
+    std::unordered_map<Value*, Value*>& env,
+    bool is_sub_block = false);
+TORCH_API void NodeToONNX(
+    Node* old_node,
+    Block* new_block,
+    ::torch::onnx::OperatorExportTypes operator_export_type,
+    std::unordered_map<Value*, Value*>& env);
+TORCH_API void RemovePrintOps(std::shared_ptr<Graph>& graph);
+TORCH_API void PreprocessCaffe2Ops(std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_dict_idioms.h
ADDED
@@ -0,0 +1,38 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// Peephole Optimizes Dict Ops such as len() and __getitem__
+// 1. getitem optimizations
+// Given a function like this:
+//     def foo():
+//         d = {0 : 1}
+//         x = d[0]
+//         return x
+// This pass produces (after dead code elimination):
+//     def foo():
+//         return 1
+//
+// This optimization can only happen if the dict is not modified
+// and the dict has constant, non overlapping keys.
+//
+// 2. len optimizations
+// Given a function like this:
+//     def foo():
+//         d = {0 : 1}
+//         return len(d)
+// This pass produces (after dead code elimination):
+//     def foo():
+//         return 1
+//
+// This has the same requirements as the getitem optimizations.
+//
+// Currently this is invoked as part of PeepholeOptimize.
+// Returns true if the graph is modified.
+TORCH_API bool PeepholeOptimizeDictIdioms(const std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/peephole_non_tensor.h
ADDED
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// Optimizes general graph patterns that are not covered in
+// peephole.cpp and peephole_list_idioms.
+// Returns true if the graph is modified.
+TORCH_API bool PeepholeOptimizeNonTensor(const std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/refine_tuple_types.h
ADDED
@@ -0,0 +1,12 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// updates the types of tuples according to the type of their current inputs.
+TORCH_API void RefineTupleTypes(std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_exceptions.h
ADDED
@@ -0,0 +1,23 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// Considering prim::RaiseException nodes unreachable, simplify prim::If nodes
+// when one of the branches contains prim::RaiseException.
+//
+// This pass is illegal in the general case as the modified graph might not throw
+// an exception that the original graph would throw. The purpose of the pass is
+// to clean up the graph in a "risky" way by removing pathways leading to
+// RaiseException nodes. In some sense, this pass could be considered as a
+// "Release" mode, while the original graph was in a "Debug" mode.
+// The pass should only be used when such transformation is guaranteed to be
+// safe by some other mechanisms. For instance, when we know exact shapes of
+// tensors flowing through the graph and tensors with such shapes never cause
+// exceptions.
+TORCH_API void EliminateExceptions(std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/remove_inplace_ops.h
ADDED
@@ -0,0 +1,14 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+#include <memory>
+
+namespace torch {
+namespace jit {
+// see .cpp for docs
+TORCH_API void RemoveInplaceOps(const std::shared_ptr<Graph>& graph);
+
+TORCH_API void ImplicitCastForBinaryInplaceOps(Block* block);
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/requires_grad_analysis.h
ADDED
@@ -0,0 +1,16 @@
+#pragma once
+
+#include <torch/csrc/Export.h>
+
+#include <memory>
+
+namespace torch {
+namespace jit {
+
+struct Graph;
+struct ArgumentSpec;
+
+TORCH_API void PropagateRequiresGrad(std::shared_ptr<Graph>& graph);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/specialize_autogradzero.h
ADDED
@@ -0,0 +1,21 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+
+// propagate autograd zero information through a gradient graph and
+// remove grad_of blocks if present.
+// Note: this is a very limited pass. It only propagates autograd zeros for
+// operations generated by the symbolic autodiff code and cleans up
+// AutogradAdds when possible. Outputs of other nodes are conservatively
+// marked Unknown and not optimized.
+TORCH_API void specializeAutogradZero(std::shared_ptr<Graph> g);
+
+struct ProfilingRecord;
+
+TORCH_API void InsertProfileNodesForSpecializeAutogradZero(ProfilingRecord* pr);
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/subgraph_rewrite.h
ADDED
@@ -0,0 +1,117 @@
+/** This file defines API for pattern-based subgraph rewrites.
+ *
+ * The API can be used for finding concrete patterns in the model and replacing
+ * the corresponding subgraphs with another subgraph. A special case of such
+ * rewrites is fusion, where the new subgraph consists of just a single node.
+ *
+ * There is a default set of the most common patterns that everyone could use.
+ * Alternatively, an arbitrary pattern can be registered.
+ */
+#pragma once
+
+#include <torch/csrc/jit/api/module.h>
+#include <torch/csrc/jit/ir/ir.h>
+
+#include <functional>
+#include <unordered_set>
+#include <vector>
+
+namespace torch {
+namespace jit {
+
+// Forward declarations.
+struct RewritePatternDescr;
+struct Match;
+
+using MatchFilter = std::function<
+    bool(const Match&, const std::unordered_map<std::string, Value*>&)>;
+
+/** Run pattern-based subgraph rewrites on all methods in the module.
+ *
+ * This pass will go through all methods in the module and try to replace all
+ * recognized patterns (see SubgraphRewriter::RegisterDefaultPatterns for the
+ * list of these patterns).
+ */
+TORCH_API Module PatternBasedRewrite(const Module& module);
+
+/** A class implementing API for pattern-based subgraph rewrites.
+ *
+ * To perform pattern-based subgraph rewrites on a module using this API, one
+ * needs to create an object of such class, register rewrite patterns and run
+ * the transformation pass (`runOnModule`).
+ *
+ * To use standard patterns, one could use `RegisterDefaultPatterns`.
+ *
+ * To enable rewrites of custom patterns, the custom patterns must be registered
+ * with `RegisterRewritePattern`.
+ */
+class TORCH_API SubgraphRewriter {
+ public:
+  // Run pattern-based subgraph rewrite pass on the module.
+  Module runOnModule(const Module& module);
+
+  // Run pattern-based subgraph rewrite pass on the graph (used in testing).
+  // `filter` is a function that does extra filtering on the match. If it
+  // returns false for a given Match, we'll skip the Match. The filter
+  // function's arguments consist of a Match and a value map from parsing the
+  // pattern graph. Both the Match and the value map are necessary because we
+  // need to 1) do extra filtering on the matched result as well as 2) refer to
+  // the values in the matched result through the values in the pattern graph.
+  void runOnGraph(
+      std::shared_ptr<Graph>& graph,
+      const std::vector<MatchFilter>& filters);
+
+  void runOnGraph(
+      std::shared_ptr<Graph>& graph,
+      const MatchFilter& filter =
+          [](const Match&, const std::unordered_map<std::string, Value*>&) {
+            return true;
+          }) {
+    runOnGraph(graph, std::vector<MatchFilter>({filter}));
+  }
+
+  // Register standard rewrite patterns.
+  void RegisterDefaultPatterns();
+
+  /** Register a custom rewrite pattern.
+   *
+   * The method takes three parameters specifying the pattern:
+   * \p PATTERN - IR string representing the pattern subgraph.
+   * \p REPLACEMENT - IR string representing the replacement subgraph.
+   * \p value name map - vector of pairs mapping values in the replacement graph
+   * to the values in the pattern graph. Used for preserving source range info
+   * across graph rewrite.
+   *
+   * See examples of pattern registering in `RegisterDefaultPatterns`.
+   */
+  void RegisterRewritePattern(
+      const std::string& pattern,
+      const std::string& replacement,
+      const std::vector<std::pair<std::string, std::string>>& value_name_pair =
+          {});
+
+ private:
+  std::vector<RewritePatternDescr> patterns_;
+  std::unordered_set<Node*> nodes_to_delete_;
+
+  void rewriteSinglePatternOnGraph(
+      std::shared_ptr<Graph>& graph,
+      const RewritePatternDescr& pattern,
+      const std::vector<MatchFilter>& filters);
+
+  bool overlapsWithPreviousMatches(const Match* match);
+};
+
+/** Rewrite pattern descriptor.
+ *
+ * This structure is used in the implementation of `SubgraphRewriter` and
+ * is not supposed to be used externally.
+ */
+struct RewritePatternDescr {
+  std::string pattern;
+  std::string replacement;
+  std::unordered_map<std::string, std::string> value_name_map;
+};
+
+} // namespace jit
+} // namespace torch
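A hedged usage sketch of the SubgraphRewriter API declared above; the pattern and replacement IR strings are illustrative and not taken from RegisterDefaultPatterns. It rewrites x * sigmoid(x) into aten::silu, which computes the same value.

#include <torch/csrc/jit/passes/subgraph_rewrite.h>

// Sketch: register a custom pattern and run it on a graph.
void fuseSiluSketch(std::shared_ptr<torch::jit::Graph>& graph) {
  const std::string pattern = R"IR(
graph(%x):
  %s = aten::sigmoid(%x)
  %r = aten::mul(%x, %s)
  return (%r))IR";
  const std::string replacement = R"IR(
graph(%x):
  %r = aten::silu(%x)
  return (%r))IR";

  torch::jit::SubgraphRewriter rewriter;
  rewriter.RegisterRewritePattern(pattern, replacement);
  rewriter.runOnGraph(graph); // default filter accepts every match
}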
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_analysis.h
ADDED
@@ -0,0 +1,58 @@
+#pragma once
+
+#include <torch/csrc/Export.h>
+#include <torch/csrc/jit/ir/ir.h>
+#include <unordered_map>
+#include <utility>
+#include <variant>
+
+namespace torch {
+namespace jit {
+
+// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE
+
+TORCH_API void PropagateShapesOnGraph(std::shared_ptr<Graph>& graph);
+
+// CAUTION NOT TO BE USED, STILL A WIP, NOT STABLE
+// From [beg, end) attempt to propagate shapes and
+// build up a graph that will compute all remaining symbolic
+// shapes in [beg, end) that can be executed before beg
+
+struct ShapeComputeGraphMapping {
+  ShapeComputeGraphMapping(
+      std::shared_ptr<Graph> partial_eval_shape_graph,
+      std::unordered_map<Value*, Value*>
+          enclosing_graph_value_to_shape_graph_input,
+      std::unordered_map<Value*, int64_t> graph_output_to_symbolic_shape_dim)
+      : partial_eval_shape_graph(std::move(partial_eval_shape_graph)),
+        enclosing_graph_value_to_shape_graph_input_(
+            std::move(enclosing_graph_value_to_shape_graph_input)),
+        graph_output_to_symbolic_shape_dim_(
+            std::move(graph_output_to_symbolic_shape_dim)){};
+
+  std::shared_ptr<Graph> partial_eval_shape_graph;
+  std::unordered_map<Value*, Value*>
+      enclosing_graph_value_to_shape_graph_input_;
+  std::unordered_map<Value*, int64_t> graph_output_to_symbolic_shape_dim_;
+};
+
+TORCH_API c10::optional<ShapeComputeGraphMapping>
+PropagateShapesAndBuildLargeShapeComputeGraph(
+    std::shared_ptr<Graph>& graph,
+    Node* beg,
+    Node* end);
+
+// don't insert complete tensor shapes in shape compute graphs and instead
+// rely on our partial evaluation pipeline to propagate information.
+// this is a good proxy for our ability to propagate non-complete shape
+// information.
+TORCH_API bool setSymbolicShapeAnalysisTestMode(bool value);
+TORCH_API bool symbolicShapeAnalysisTestModeEnabled();
+
+using SSAInput = std::variant<IValue, c10::SymbolicShape>;
+TORCH_API c10::optional<std::vector<c10::SymbolicShape>>
+calculateSymbolicShapesOnOp(
+    const FunctionSchema* schema,
+    const std::vector<SSAInput>& inputs);
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/symbolic_shape_cache.h
ADDED
@@ -0,0 +1,57 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+#include <torch/csrc/jit/passes/symbolic_shape_analysis.h>
+
+namespace torch {
+namespace jit {
+
+struct TORCH_API CanonicalizedSymbolicShape {
+  // TODO: Consider in the future if it is reasonable to
+  // merge code with SymbolicShape or VaryingShape while keeping
+  // the two not implicitly convertible (which could cause bugs).
+  CanonicalizedSymbolicShape(
+      const c10::SymbolicShape& orig_shape,
+      std::unordered_map<int64_t, int64_t>& ss_map) {
+    init(orig_shape, ss_map);
+  }
+
+  CanonicalizedSymbolicShape(c10::SymbolicShape& orig_shape) {
+    std::unordered_map<int64_t, int64_t> new_ssmap;
+    init(orig_shape, new_ssmap);
+  }
+
+  size_t hash() const;
+
+  c10::SymbolicShape toSymbolicShape(
+      std::unordered_map<int64_t, int64_t>& inverse_ss_map) const;
+
+  TORCH_API friend bool operator==(
+      const CanonicalizedSymbolicShape& a,
+      const CanonicalizedSymbolicShape& b);
+
+ private:
+  c10::optional<std::vector<int64_t>> values_;
+
+  void init(
+      const c10::SymbolicShape& orig_shape,
+      std::unordered_map<int64_t, int64_t>& ss_map);
+};
+
+// SHAPE CACHE API
+TORCH_API c10::optional<std::vector<at::SymbolicShape>>
+get_cached_shape_function(
+    const FunctionSchema* schema,
+    const std::vector<SSAInput>& arg_vec);
+
+TORCH_API void cache_shape_function(
+    const FunctionSchema* schema,
+    const std::vector<SSAInput>& arg_vec,
+    const std::vector<at::SymbolicShape>& ret_vec);
+
+// For use in test code
+TORCH_API void clear_shape_cache();
+TORCH_API size_t get_shape_cache_size();
+
+} // namespace jit
+} // namespace torch
venv/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/argument_spec.h
ADDED
@@ -0,0 +1,511 @@
+#pragma once
+
+#include <ATen/core/jit_type.h>
+#include <ATen/core/stack.h>
+#include <c10/util/hash.h>
+#include <c10/util/irange.h>
+#include <torch/csrc/Export.h>
+#include <torch/csrc/autograd/variable.h>
+#include <torch/csrc/jit/ir/ir.h>
+#include <ostream>
+#include <vector>
+
+C10_CLANG_DIAGNOSTIC_PUSH()
+#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
+#endif
+
+namespace torch::jit {
+
+// GraphExecutor creates specializations of Graphs for different
+// dimensionalities and types of inputs.
+
+struct ArgumentInfo {
+  friend struct ArgumentSpec;
+  using plain_data_type = uint64_t;
+
+  bool defined() const {
+    return defined_;
+  }
+  at::Device device() const {
+    return at::Device(DeviceType(dev_type_), device_);
+  }
+  // XXX: It is guaranteed that this will return false when called on non-tensor
+  // arguments
+  bool requires_grad() const {
+    return requires_grad_;
+  }
+  int dim() const {
+    // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
+    return dim_;
+  }
+  at::ScalarType type() const {
+    return at::ScalarType(type_);
+  }
+  TypePtr toType() const {
+    if (!defined())
+      return TensorType::get();
+
+    return TensorType::create(
+        type(), device(), c10::optional<size_t>(dim()), requires_grad());
+  }
+  operator TypePtr() const {
+    return toType();
+  }
+
+ private:
+  unsigned defined_ : 1;
+  unsigned requires_grad_ : 1;
+  unsigned : 5;
+  unsigned dim_ : 8;
+  unsigned device_ : 8;
+  unsigned type_ : 8;
+  unsigned dev_type_ : 16;
+  unsigned : 16;
+};
+
+static_assert(
+    std::is_standard_layout<ArgumentInfo>::value,
+    "ArgumentInfo is to be a POD struct");
+static_assert(
+    sizeof(ArgumentInfo) == sizeof(ArgumentInfo::plain_data_type),
+    "ArgumentInfo is expected to be a 64-bit struct");
+
+struct ArgumentSpec {
+  ArgumentSpec(size_t num_flat_tensor_inputs, size_t num_flat_optional_inputs)
+      : hash_code(c10::hash_combine(
+            num_flat_tensor_inputs,
+            num_flat_optional_inputs)) {
+    tensor_args.reserve(num_flat_tensor_inputs);
+    optional_presence.reserve(num_flat_optional_inputs);
+  }
+
+  void addOptional(const IValue& input) {
+    bool is_present = !input.isNone();
+    optional_presence.push_back(is_present);
+    hash_code = c10::hash_combine(hash_code, is_present);
+  }
+
+  void addTensor(const IValue& input, bool with_grad) {
+    AT_ASSERT(input.isTensor(), "Expected Tensor but found ", input.tagKind());
+    tensor_args.emplace_back();
+    auto& arg = tensor_args.back();
+    // Initialize all fields to 0. This is convenient, because e.g.
+    // requires_grad() can be checked even on tensors AND will make
+    // padding bits all 0s.
+    std::memset(&arg, 0, sizeof(ArgumentInfo));
+
+    // [argspec refcounting] reinterpret the IValue to avoid having to refcount
+    // the Tensor; microbenchmarks
+    // https://github.com/zdevito/pytorch/commit/21e7200a0a0fc456bea2f10e95b1781f83933d10
+    // show overhead in extra refcounting along this path
+    const at::Tensor* t = reinterpret_cast<const at::Tensor*>(&input);
+    arg.defined_ = t->defined();
+    if (arg.defined_) {
+      arg.requires_grad_ = with_grad && autograd::Variable(*t).requires_grad();
+      arg.dim_ = t->dim();
+      // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
+      at::Device device = t->device();
+      arg.dev_type_ =
+          // NOLINTNEXTLINE(bugprone-signed-char-misuse)
+          static_cast<std::underlying_type<DeviceType>::type>(device.type());
+      // NOLINTNEXTLINE(bugprone-signed-char-misuse)
+      arg.device_ = device.index();
+      arg.type_ = static_cast<unsigned>(t->scalar_type());
+    }
+    combineHash(arg);
+  }
+
+  void combineHash(const ArgumentInfo& arg) {
+    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+    ArgumentInfo::plain_data_type arg_data;
+    std::memcpy(&arg_data, &arg, sizeof(ArgumentInfo));
+    hash_code = c10::hash_combine(hash_code, arg_data);
+  }
+
+  // equality is fast: check ninputs, and then check the raw array data,
+  // there are no size/stride indirections
+  // hopefully std::vector<bool> has fast equality
+  bool operator==(const ArgumentSpec& spec) const {
+    if (optional_presence != spec.optional_presence) {
+      return false;
+    }
+    if (tensor_args.size() != spec.tensor_args.size())
+      return false;
+    // NB: we need to break out early when there are no elements, because
+    // passing a nullptr to memcmp is UB.
+    if (tensor_args.empty())
+      return true;
+    return std::memcmp(
+               tensor_args.data(),
+               spec.tensor_args.data(),
+               tensor_args.size() * sizeof(ArgumentInfo)) == 0;
+  }
+  bool operator!=(const ArgumentSpec& spec) const {
+    return !(*this == spec);
+  }
+  size_t numTensors() const {
+    return tensor_args.size();
+  }
+  const ArgumentInfo& tensorAt(size_t i) const {
+    return tensor_args[i];
+  }
+  size_t numOptionals() const {
+    return optional_presence.size();
+  }
+  bool isPresent(size_t i) const {
+    return optional_presence[i];
+  }
+  size_t hashCode() const {
+    return hash_code;
+  }
+
+ private:
+  size_t hash_code; // precomputed on construction
+  std::vector<ArgumentInfo> tensor_args;
+  std::vector<bool> optional_presence;
+};
+
+namespace {
+static constexpr size_t ARG_SPEC_DEPTH_LIMIT = 128;
+}
+
+// ArgumentSpecCreator takes an initial graph and comes up with a set
+// of simple instructions to compute the ArgumentSpec given a set of
+// input tensors.
+struct TORCH_API ArgumentSpecCreator {
+  // instructions act on a stack of a list of input IValues;
+  // at the beginning the stack contains a single list of the inputs to the
+  // function. The ENTER_ instructions descend into subobjects and push new
+  // lists onto the stack
+  enum Inst : char {
+    ENTER_TUPLE, // consume a tuple ivalue from the top-most list, and push the
+                 // list of its elements onto the stack as a new list
+    ENTER_OBJECT, // same as ENTER_TUPLE, but the input is a class
+    LEAVE, // pop the top-most list from the stack
+    SKIP, // consume an element from the top-most list, and discard
+    SPECIALIZE_OPTIONAL_TENSOR, // consume an optional tensor for the top-most
+                                // list, and add it to the ArgSpec key being
+                                // created
+    SPECIALIZE_TENSOR, // consume a tensor for the top-most
+                       // list, and add it to the ArgSpec key being created
+    SPECIALIZE_OPTIONAL,
+    // consume a nontensor optional from the top-most list,
+    // and add it to the ArgSpec key being created
+  };
+  ArgumentSpecCreator(Graph& graph);
+  ArgumentSpec create(bool with_grad, const Stack& stack) const;
+  void specializeTypes(Graph& g, const ArgumentSpec& spec) const;
+  void dump() const;
+  using WrittenSlots = std::unordered_set<std::string>;
+
+ private:
+  void scan(
+      const TypePtr& typ,
+      size_t depth,
+      const WrittenSlots& written_slots);
+  size_t num_inputs_;
+  size_t num_tensors_ = 0;
+  size_t num_optionals_ = 0;
+  std::vector<Inst> instructions_;
+};
+
+// CompleteArgumentSpec represents one particular specialization.
+// It is designed so that it can be created, hashed, and compared quickly
+// since it is used along the hot-path of the JIT to check if the code
+// we have created is valid for the given inputs.
+
+// CompleteArgumentInfoPOD is only used internally in CompleteArgumentSpec;
+// API users should use ArgumentInfo
+struct CompleteArgumentInfoPOD {
+  // total size is 64-bit
+  unsigned is_tensor : 8; // all other fields are invalid if this is false
+  unsigned type : 8; // scalar type
+  unsigned defined : 1;
+  unsigned requires_grad : 1;
+  signed device : 14;
+  unsigned dev_type : 16;
+  unsigned
+      total_dims : 16; // all TensorInfoPODs are in CompleteArgumentSpec's
+                       // tensor_info() array. total_dims is the total number
+                       // of dimensions seen so far in all previous members of
+                       // tensor_info(), including this tensor; 2*total_dims
+                       // becomes the offset into the sizes_strides list for
+                       // the _next_ tensor in the tensor_info array. For
+                       // tensor 0, the offset is always 0.
+};
+
+static_assert(
+    sizeof(CompleteArgumentInfoPOD) == sizeof(int64_t),
+    "CompleteArgumentInfoPOD must be 64-bit struct for CompleteArgumentSpec encoding to work");
+
+struct CompleteArgumentInfo;
+
+struct CompleteArgumentSpec {
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+  CompleteArgumentSpec(bool with_grad, at::ArrayRef<IValue> inputs)
+      : hash_code(0), ninputs(inputs.size()) {
+    int32_t all_dims = 0;
+    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+    const int32_t num_inputs = inputs.size();
+    for (const auto i : c10::irange(num_inputs)) {
+      if (!inputs[i].isTensor())
+        continue;
+      auto& tensor = inputs[i].toTensor();
+      all_dims += tensor.defined() ? tensor.ndimension() : 0;
+    }
+    // allocate enough room for all TensorPODs and dimensions
+    data.resize(ninputs + all_dims * 2);
+
+    // and reinterpret our data array as these structs
+    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+    auto* pods = reinterpret_cast<CompleteArgumentInfoPOD*>(data.data());
+    int64_t* next_dim = sizes_strides();
+    int32_t total_dims = 0;
+    for (const auto i : c10::irange(num_inputs)) {
+      auto& pod = pods[i];
+      pod.is_tensor = static_cast<uint32_t>(inputs[i].isTensor());
+      if (pod.is_tensor) {
+        at::Tensor t = inputs[i].toTensor();
+        pod.defined = t.defined();
+        if (pod.defined) {
+          pod.type = static_cast<int>(t.scalar_type());
+          // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
+          at::Device device = t.device();
+          // NOLINTNEXTLINE(bugprone-signed-char-misuse)
+          pod.dev_type = static_cast<std::underlying_type<DeviceType>::type>(
+              device.type());
+          // NOLINTNEXTLINE(bugprone-signed-char-misuse)
+          pod.device = device.index();
+          pod.requires_grad = with_grad && t.requires_grad();
+          total_dims += t.ndimension();
+          auto sizes = t.sizes();
+          std::copy(sizes.begin(), sizes.end(), next_dim);
+          next_dim += sizes.size();
+          auto strides = t.strides();
+          std::copy(strides.begin(), strides.end(), next_dim);
+          next_dim += strides.size();
+        }
+      }
+      // each POD has a running tally of all dimensions including its own
+      TORCH_CHECK(
+          total_dims < std::numeric_limits<uint16_t>::max(),
+          "The number of dims cannot be packed into CompleteArgumentSpec:",
+          total_dims);
+      pod.total_dims = total_dims;
+    }
+    // we precompute the hash_code to minimize the time inside of hash
+    // table operations where we may need to hold a compiler cache lock.
+    hash_code = c10::hash_combine(0, ninputs);
+    for (auto d : data) {
+      hash_code = c10::hash_combine(hash_code, d);
+    }
+  }
+
+  // equality is fast: check ninputs, and then check the raw array data,
+  // there are no size/stride indirections
+  bool operator==(const CompleteArgumentSpec& spec) const {
+    return ninputs == spec.ninputs && data == spec.data;
+  }
+  bool operator!=(const CompleteArgumentSpec& spec) const {
+    return !(*this == spec);
+  }
+  friend struct CompleteArgumentInfo;
+  CompleteArgumentInfo at(size_t i) const;
+  size_t size() const {
+    return ninputs;
+  }
+  size_t hashCode() const {
+    return hash_code;
+  }
+
+ private:
+  ArrayRef<CompleteArgumentInfoPOD> tensor_info() const {
+    return ArrayRef<CompleteArgumentInfoPOD>(
+        reinterpret_cast<const CompleteArgumentInfoPOD*>(data.data()), ninputs);
+  }
+  // the start of the sizes_strides information, which comes after the
+  // CompleteArgumentInfoPOD list.
+  const int64_t* sizes_strides() const {
+    return data.data() + ninputs;
+  }
+  int64_t* sizes_strides() {
+    return data.data() + ninputs;
+  }
+  size_t hash_code; // precomputed on construction
+  size_t ninputs;
+  // layout is ninputs of TensorPOD (each 64-bit) followed by their size and
+  // stride info for 3 tensors:
+  // [t0POD][t1POD][t2POD]...
+  // [t0 sizes][t0 strides][t1 sizes][t1 strides][t2 sizes][t2 strides]
+  std::vector<int64_t> data;
+};
+
+// public view of compressed CompleteArgumentInfo
+struct CompleteArgumentInfo {
+  CompleteArgumentInfo(const CompleteArgumentSpec& spec, const int i)
+      : spec(spec), i(i) {}
+  bool isTensor() const {
+    return pod(i).is_tensor;
+  }
+  at::ScalarType type() const {
+    return at::ScalarType(pod(i).type);
+  }
+  bool defined() const {
+    return pod(i).defined;
+  }
+  bool requires_grad() const {
+    return pod(i).requires_grad;
+  }
+  at::Device device() const {
+    return at::Device(
+        DeviceType(pod(i).dev_type),
+        static_cast<c10::DeviceIndex>(pod(i).device));
+  }
+  int ndimension() const {
+    // See [valid range], it is always valid to ask for offset for (i + 1)
+    return (sizes_strides_offset(i + 1) - sizes_strides_offset(i)) / 2;
+  }
+  at::IntArrayRef sizes() const {
+    return at::IntArrayRef(
+        spec.sizes_strides() + sizes_strides_offset(i), ndimension());
+  }
+  at::IntArrayRef strides() const {
+    int ndim = ndimension();
+    return at::IntArrayRef(
+        spec.sizes_strides() + sizes_strides_offset(i) + ndim, ndim);
+  }
+  operator TypePtr() const {
+    if (!defined())
+      return TensorType::get();
+    return TensorType::create(
+        type(),
+        device(),
+        c10::VaryingShape<int64_t>{sizes()},
+        c10::VaryingShape<int64_t>{strides()},
+        requires_grad());
+  }
+
+ private:
+  // offset into sizes_strides() array where the sizes start for tensor j
+  // [valid range] valid range is [0, ninputs]
+  // (i.e. you can ask for the offset at ninputs, which would be the offset of
+  // the next tensor if it existed)
+  int sizes_strides_offset(int j) const {
+    if (j == 0)
+      return 0;
+    // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
+    return 2 * pod(j - 1).total_dims;
+  }
+  const CompleteArgumentInfoPOD& pod(int j) const {
+    return spec.tensor_info().at(j);
+  }
+  const CompleteArgumentSpec& spec;
+  const int i;
+};
+
+inline std::ostream& operator<<(std::ostream& out, const ArgumentInfo& info) {
+  if (!info.defined()) {
+    return out << "<undefined>";
+  }
+  out << "Tensor(device=" << info.device() << ", type=" << toString(info.type())
+      << ", requires_grad=" << info.requires_grad() << ", dims=" << info.dim()
+      << ")";
+  return out;
+}
+
+inline std::ostream& operator<<(std::ostream& out, const ArgumentSpec& spec) {
+  out << "{";
+  for (const auto i : c10::irange(spec.numTensors())) {
+    if (i > 0)
+      out << ", ";
+    out << spec.tensorAt(i);
+  }
+  out << "; ";
+  for (const auto i : c10::irange(spec.numOptionals())) {
+    if (i > 0)
+      out << ", ";
+    out << spec.isPresent(i);
+  }
+  out << "}";
+  return out;
+}
+
+inline std::ostream& operator<<(
+    std::ostream& out,
+    const CompleteArgumentInfo& info) {
+  if (!info.defined()) {
+    return out << "<undefined>";
+  }
+  out << "Tensor(device=" << info.device() << ", type=" << toString(info.type())
+      << ", requires_grad=" << info.requires_grad()
+      << ", sizes=" << info.sizes() << ", strides=" << info.strides() << ")";
+  return out;
+}
+
+inline std::ostream& operator<<(
+    std::ostream& out,
+    const CompleteArgumentSpec& spec) {
+  out << "{";
+  for (const auto i : c10::irange(spec.size())) {
+    if (i > 0)
+      out << ", ";
+    out << spec.at(i);
+  }
+  out << "}";
+  return out;
+}
+
+inline CompleteArgumentInfo CompleteArgumentSpec::at(size_t i) const {
+  return CompleteArgumentInfo(*this, i);
+}
+
+inline c10::optional<int8_t> convertOptional(
+    c10::optional<c10::ScalarType> const& from) {
+  return (from) ? c10::optional<int8_t>(static_cast<int8_t>(*from))
+                : c10::optional<int8_t>{};
+}
+
+} // namespace torch::jit
+
+namespace std {
+
+template <typename T>
+struct hash<c10::VaryingShape<T>> {
+  size_t operator()(const c10::VaryingShape<T>& vs) const {
+    return c10::get_hash(
+        vs.size(),
+        vs.size() ? vs.sizes().value() : std::vector<c10::optional<T>>());
+  }
+};
+
+template <>
+struct hash<c10::TensorType> {
+  size_t operator()(const c10::TensorType& ptt) const {
+    return c10::get_hash<
+        c10::optional<int8_t>,
+        c10::VaryingShape<int64_t>,
+        c10::VaryingShape<int64_t>,
+        c10::optional<bool>>(
+        torch::jit::convertOptional(ptt.scalarType()),
+        ptt.sizes(),
+        ptt.strides(),
+        ptt.requiresGrad());
+  }
+};
+
+template <>
+struct hash<torch::jit::ArgumentSpec> {
+  size_t operator()(const torch::jit::ArgumentSpec& spec) const {
+    return spec.hashCode();
+  }
+};
+template <>
+struct hash<torch::jit::CompleteArgumentSpec> {
+  size_t operator()(const torch::jit::CompleteArgumentSpec& spec) const {
+    return spec.hashCode();
+  }
+};
+} // namespace std
+
+C10_CLANG_DIAGNOSTIC_POP()
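The std::hash specializations above are what let ArgumentSpec (and CompleteArgumentSpec) key hash containers. A minimal sketch of a specialization cache built on top of them follows; the type alias and lookup function are hypothetical, not part of the header.

#include <memory>
#include <unordered_map>
#include <torch/csrc/jit/runtime/argument_spec.h>

// Sketch: map each ArgumentSpec to the graph specialized for it.
using SpecializationCache = std::unordered_map<
    torch::jit::ArgumentSpec,
    std::shared_ptr<torch::jit::Graph>>;

std::shared_ptr<torch::jit::Graph> lookupSpecializationSketch(
    const SpecializationCache& cache,
    const torch::jit::ArgumentSpec& spec) {
  auto it = cache.find(spec); // uses ArgumentSpec::hashCode() and operator==
  if (it == cache.end()) {
    return nullptr;
  }
  return it->second;
}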