Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- ckpts/universal/global_step120/zero/14.attention.query_key_value.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/15.input_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/15.input_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
- ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/25.input_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/4.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/4.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
- venv/lib/python3.10/site-packages/torchgen/api/types/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/api/types/__pycache__/signatures.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/ATenOpList.cpp +36 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/CompositeViewCopyKernels.cpp +73 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunction.h +23 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.cpp +13 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h +19 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/FunctionalInverses.h +33 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.cpp +103 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyIr.h +19 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/MethodOperators.h +24 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunctions.h +33 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunctions.h +19 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operator.h +18 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.h +74 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.cpp +15 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterBackendSelect.cpp +54 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchDefinitions.ini +24 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterFunctionalization.cpp +110 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterSchema.cpp +13 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegistrationDeclarations.h +4 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCPU.cpp +19 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCPUKernel.cpp +14 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel +4 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/README.md +3 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__init__.py +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/context.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd_functions.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_inplace_or_view_type.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_python_functions.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_trace_type.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_type.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_view_funcs.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/load_derivatives.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/build.bzl +14 -0
- venv/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py +31 -0
ckpts/universal/global_step120/zero/14.attention.query_key_value.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:2fd726774bdff6db7eef0f640903584ff8c9312963084b9a32944e2630700aaf
|
3 |
+
size 50332828
|
ckpts/universal/global_step120/zero/15.input_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:6973b1995760f4d91a78f5b1f22de65a877ae7cf6299d6188a38d8a96ad6ca83
|
3 |
+
size 9372
|
ckpts/universal/global_step120/zero/15.input_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:450ee7dfb054cd04a42164204cef27f660de018ec6d10541242b132e0e5130d6
|
3 |
+
size 9387
|
ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ea96eecb060c02ab8753e44d8a59608260f127b4276c0433b10f39ac60a80acf
|
3 |
+
size 33555612
|
ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4ca178b6d6b805fa5f22540d1e55966a9273e04f2b8d217e5f11b569ffba7f54
|
3 |
+
size 33555627
|
ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ad58a4e21c6dd99b95aa13925fa2e35f89aa0d5eeca05838f1805349d6180a7f
|
3 |
+
size 33555533
|
ckpts/universal/global_step120/zero/21.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:652b6d286ceb42dedfbefc1e193a7dd1e12aa8d9b63d58e4f32e4b1b4cb2b8a6
|
3 |
+
size 33555627
|
ckpts/universal/global_step120/zero/25.input_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d2f1707a70cc503f6a5d9c85000eb1f5b5d9f3ff65cad44146d2e554ddab058c
|
3 |
+
size 9387
|
ckpts/universal/global_step120/zero/4.mlp.dense_h_to_4h.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d6731ea219caa92c648642ed6f01b6e819c3317d7cd429e041e01e91e9d248c2
|
3 |
+
size 33555612
|
ckpts/universal/global_step120/zero/4.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:595d59527deaeb870a1c430d93526f4a80832a36d9391347a813981b3c568bd1
|
3 |
+
size 33555627
|
venv/lib/python3.10/site-packages/torchgen/api/types/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (252 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torchgen/api/types/__pycache__/signatures.cpython-310.pyc
ADDED
Binary file (14.7 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/ATenOpList.cpp
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <ATen/core/ATenOpList.h>
|
2 |
+
|
3 |
+
#include <string>
|
4 |
+
#include <cstring>
|
5 |
+
#include <utility>
|
6 |
+
#include <unordered_set>
|
7 |
+
#include <ATen/core/operator_name.h>
|
8 |
+
|
9 |
+
// ${generated_comment}
|
10 |
+
|
11 |
+
namespace at {
|
12 |
+
|
13 |
+
namespace {
|
14 |
+
struct OpNameEquals final {
|
15 |
+
bool operator()(const std::pair<const char*, const char*>& lhs, const std::pair<const char*, const char*>& rhs) const {
|
16 |
+
return 0 == strcmp(lhs.first, rhs.first) && 0 == strcmp(lhs.second, rhs.second);
|
17 |
+
}
|
18 |
+
};
|
19 |
+
|
20 |
+
struct OpNameHash final {
|
21 |
+
size_t operator()(const std::pair<const char*, const char*>& p) const {
|
22 |
+
// use std::hash<std::string> because std::hash<const char*> would hash pointers and not pointed-to strings
|
23 |
+
return std::hash<std::string>()(p.first) ^ (~ std::hash<std::string>()(p.second));
|
24 |
+
}
|
25 |
+
};
|
26 |
+
}
|
27 |
+
|
28 |
+
bool is_custom_op(const c10::OperatorName& opName) {
|
29 |
+
static std::unordered_set<std::pair<const char*, const char*>, OpNameHash, OpNameEquals> ops {
|
30 |
+
${aten_ops}
|
31 |
+
{"", ""}
|
32 |
+
};
|
33 |
+
return ops.count(std::make_pair(
|
34 |
+
opName.name.c_str(), opName.overload_name.c_str())) == 0;
|
35 |
+
}
|
36 |
+
}
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/CompositeViewCopyKernels.cpp
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
|
2 |
+
// ${generated_comment}
|
3 |
+
|
4 |
+
#include <ATen/InferSize.h>
|
5 |
+
#include <ATen/Tensor.h>
|
6 |
+
#include <ATen/native/Resize.h>
|
7 |
+
|
8 |
+
#ifndef AT_PER_OPERATOR_HEADERS
|
9 |
+
#include <ATen/Operators.h>
|
10 |
+
#else
|
11 |
+
#include <ATen/ops/clone.h>
|
12 |
+
$ops_headers
|
13 |
+
#endif
|
14 |
+
|
15 |
+
namespace at {
|
16 |
+
namespace native {
|
17 |
+
|
18 |
+
// This file contains a number of kernels for aten functions that are fully code-generated.
|
19 |
+
// TODO: rename this file to something more generic.
|
20 |
+
|
21 |
+
namespace {
|
22 |
+
at::Tensor clone_arg(const at::Tensor& t) {
|
23 |
+
return t.clone();
|
24 |
+
}
|
25 |
+
|
26 |
+
std::vector<at::Tensor> clone_arg(const at::TensorList& t_list) {
|
27 |
+
std::vector<at::Tensor> out(t_list.size());
|
28 |
+
for (const auto& i : c10::irange(t_list.size())) {
|
29 |
+
out[i] = t_list[i].clone();
|
30 |
+
}
|
31 |
+
return out;
|
32 |
+
}
|
33 |
+
|
34 |
+
// duped with gen_resize_out_helper from structured kernels
|
35 |
+
void copy_arg(const at::Tensor& dst, const at::Tensor& src) {
|
36 |
+
TORCH_CHECK(src.dtype() == dst.dtype(),
|
37 |
+
"Expected out tensor to have dtype ", src.dtype(), ", but got ", dst.dtype(), " instead");
|
38 |
+
TORCH_CHECK(src.device() == dst.device(),
|
39 |
+
"Expected out tensor to have device ", src.device(), ", but got ", dst.device(), " instead");
|
40 |
+
dst.copy_(src);
|
41 |
+
}
|
42 |
+
|
43 |
+
void copy_arg(const at::TensorList& dst, const at::TensorList& src) {
|
44 |
+
TORCH_INTERNAL_ASSERT(dst.size() == src.size());
|
45 |
+
for (const auto& i : c10::irange(dst.size())) {
|
46 |
+
copy_arg(dst[i], src[i]);
|
47 |
+
}
|
48 |
+
}
|
49 |
+
|
50 |
+
// TODO: this doesn't handle restriding empty tensors correctly; see
|
51 |
+
// gen_resize_out_helper for the correct algorithm
|
52 |
+
|
53 |
+
void resize_out_helper(const at::Tensor& dst, const at::Tensor& src) {
|
54 |
+
at::native::resize_output(dst, src.sizes());
|
55 |
+
}
|
56 |
+
|
57 |
+
void resize_out_helper(const at::TensorList& dst, const at::TensorList& src) {
|
58 |
+
TORCH_INTERNAL_ASSERT(dst.size() == src.size());
|
59 |
+
for (const auto& i : c10::irange(dst.size())) {
|
60 |
+
at::native::resize_output(dst[i], src[i].sizes());
|
61 |
+
}
|
62 |
+
}
|
63 |
+
}
|
64 |
+
|
65 |
+
|
66 |
+
${CompositeViewCopyKernel_Definitions}
|
67 |
+
|
68 |
+
${GeneratedCompositeFunctional_Definitions}
|
69 |
+
|
70 |
+
${GeneratedCompositeOut_Definitions}
|
71 |
+
|
72 |
+
} // namespace native
|
73 |
+
} // namespace at
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyFunction.h
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
// ${generated_comment}
|
3 |
+
|
4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
5 |
+
|
6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
7 |
+
#include <c10/core/MemoryFormat.h>
|
8 |
+
#include <c10/core/Scalar.h>
|
9 |
+
#include <ATen/core/Reduction.h>
|
10 |
+
|
11 |
+
// Forward declarations of any types needed in the operator signatures.
|
12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
14 |
+
#include <ATen/core/ATen_fwd.h>
|
15 |
+
|
16 |
+
namespace at {
|
17 |
+
|
18 |
+
namespace ${dispatch_namespace} {
|
19 |
+
|
20 |
+
${dispatch_namespaced_declarations}
|
21 |
+
|
22 |
+
} // namespace ${dispatch_namespace}
|
23 |
+
} // namespace at
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.cpp
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// ${generated_comment}
|
2 |
+
${includes}
|
3 |
+
${native_functions_include}
|
4 |
+
|
5 |
+
namespace {
|
6 |
+
${helper_fns}
|
7 |
+
} // namespace
|
8 |
+
|
9 |
+
${namespace_prologue}
|
10 |
+
|
11 |
+
${native_function_definitions}
|
12 |
+
|
13 |
+
${namespace_epilogue}
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/DispatchKeyNativeFunctions.h
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// an external backend might generate file within its code tree
|
4 |
+
// and check all the source files within the tree with clang-format.
|
5 |
+
// so, disable it since the backend might have a different config.
|
6 |
+
// clang-format off
|
7 |
+
|
8 |
+
// ${generated_comment}
|
9 |
+
|
10 |
+
#include <ATen/Tensor.h>
|
11 |
+
|
12 |
+
${namespace_prologue}
|
13 |
+
|
14 |
+
struct ${class_name} {
|
15 |
+
|
16 |
+
${dispatch_declarations}
|
17 |
+
|
18 |
+
};
|
19 |
+
${namespace_epilogue}
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/FunctionalInverses.h
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// ${generated_comment}
|
4 |
+
|
5 |
+
#include <ATen/Tensor.h>
|
6 |
+
|
7 |
+
namespace at {
|
8 |
+
namespace functionalization {
|
9 |
+
|
10 |
+
enum class InverseReturnMode {
|
11 |
+
/// Specifies that functional inverses should always return a view.
|
12 |
+
AlwaysView,
|
13 |
+
/// Specifies that functional inverses should always return a non-view / copy.
|
14 |
+
NeverView,
|
15 |
+
/// Specifies that functional inverses should return a view unless a (copying) scatter
|
16 |
+
/// inverse exists, in which case that will be used instead.
|
17 |
+
/// This avoids as_strided() calls that can be difficult for subclasses to handle.
|
18 |
+
ViewOrScatterInverse,
|
19 |
+
};
|
20 |
+
|
21 |
+
struct FunctionalInverses {
|
22 |
+
|
23 |
+
${view_inverse_declarations}
|
24 |
+
|
25 |
+
// NB: These are not generated! They're manually implemented in the template.
|
26 |
+
// TODO: Change codegen to generate these. See the following link:
|
27 |
+
// https://github.com/pytorch/pytorch/blob/main/torchgen/model.py#L2583-L2585
|
28 |
+
static at::Tensor chunk_inverse(const at::Tensor & base, const at::Tensor & mutated_view, InverseReturnMode inverse_return_mode, int64_t mutated_view_idx, int chunks, int dim);
|
29 |
+
static at::Tensor narrow_inverse(const at::Tensor & base, const at::Tensor & mutated_view, InverseReturnMode inverse_return_mode, int dim, c10::SymInt start, c10::SymInt length);
|
30 |
+
|
31 |
+
};
|
32 |
+
}
|
33 |
+
}
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Functions.cpp
ADDED
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#include <array>
|
2 |
+
|
3 |
+
#include <ATen/Functions.h>
|
4 |
+
#include <ATen/Utils.h>
|
5 |
+
#include <c10/core/Allocator.h>
|
6 |
+
|
7 |
+
namespace at {
|
8 |
+
|
9 |
+
Tensor TensorMaker::make_tensor() {
|
10 |
+
AutoDispatchBelowADInplaceOrView guard{}; // TODO: Remove.
|
11 |
+
tracer::impl::NoTracerDispatchMode tracer_guard{};
|
12 |
+
|
13 |
+
check_size_nonnegative(sizes_);
|
14 |
+
|
15 |
+
TORCH_CHECK_VALUE(
|
16 |
+
!deleter_ || !ctx_,
|
17 |
+
"The deleter and context arguments are mutually exclusive.");
|
18 |
+
|
19 |
+
if (device_ == nullopt) {
|
20 |
+
device_ = globalContext().getDeviceFromPtr(data_, opts_.device().type());
|
21 |
+
}
|
22 |
+
|
23 |
+
if (opts_.device().has_index()) {
|
24 |
+
// clang-format off
|
25 |
+
TORCH_CHECK_VALUE(
|
26 |
+
opts_.device() == *device_,
|
27 |
+
"Specified device ", opts_.device(), " does not match device of data ", *device_);
|
28 |
+
// clang-format on
|
29 |
+
}
|
30 |
+
|
31 |
+
std::size_t size_bytes = computeStorageSize();
|
32 |
+
|
33 |
+
DataPtr data_ptr{};
|
34 |
+
if (deleter_) {
|
35 |
+
data_ptr = makeDataPtrFromDeleter();
|
36 |
+
} else {
|
37 |
+
data_ptr = makeDataPtrFromContext();
|
38 |
+
}
|
39 |
+
|
40 |
+
TORCH_CHECK(!resizeable_ || allocator_ != nullptr, "Must specify an allocator with allocator() if you want to use resizeable_storage()");
|
41 |
+
Storage storage{Storage::use_byte_size_t{}, size_bytes, std::move(data_ptr), /*allocator=*/allocator_, /*resizeable=*/resizeable_};
|
42 |
+
|
43 |
+
Tensor tensor = detail::make_tensor<TensorImpl>(
|
44 |
+
std::move(storage), opts_.computeDispatchKey(), opts_.dtype());
|
45 |
+
|
46 |
+
TensorImpl* tensor_impl = tensor.unsafeGetTensorImpl();
|
47 |
+
if (strides_) {
|
48 |
+
tensor_impl->set_sizes_and_strides(sizes_, *strides_);
|
49 |
+
} else {
|
50 |
+
tensor_impl->set_sizes_contiguous(sizes_);
|
51 |
+
}
|
52 |
+
if (storage_offset_) {
|
53 |
+
tensor_impl->set_storage_offset(*storage_offset_);
|
54 |
+
}
|
55 |
+
|
56 |
+
return tensor;
|
57 |
+
}
|
58 |
+
|
59 |
+
std::size_t TensorMaker::computeStorageSize() const noexcept {
|
60 |
+
std::size_t itemsize = opts_.dtype().itemsize();
|
61 |
+
|
62 |
+
if (strides_) {
|
63 |
+
auto storage_size = detail::computeStorageNbytes(sizes_, *strides_, itemsize);
|
64 |
+
if (storage_offset_) {
|
65 |
+
storage_size += storage_offset_.value();
|
66 |
+
}
|
67 |
+
return storage_size;
|
68 |
+
}
|
69 |
+
|
70 |
+
std::size_t size = 1;
|
71 |
+
for (std::int64_t s : sizes_) {
|
72 |
+
size *= static_cast<std::size_t>(s);
|
73 |
+
}
|
74 |
+
auto storage_size = size * itemsize;
|
75 |
+
if (storage_offset_) {
|
76 |
+
storage_size += storage_offset_.value();
|
77 |
+
}
|
78 |
+
return storage_size;
|
79 |
+
}
|
80 |
+
|
81 |
+
inline DataPtr TensorMaker::makeDataPtrFromDeleter() noexcept {
|
82 |
+
return InefficientStdFunctionContext::makeDataPtr(data_, std::move(deleter_), *device_);
|
83 |
+
}
|
84 |
+
|
85 |
+
inline DataPtr TensorMaker::makeDataPtrFromContext() noexcept {
|
86 |
+
return DataPtr{data_, ctx_.release(), ctx_.get_deleter(), *device_};
|
87 |
+
}
|
88 |
+
|
89 |
+
IntArrayRef TensorMaker::makeTempSizes() const noexcept {
|
90 |
+
static std::int64_t zeros[5] = {0, 0, 0, 0, 0};
|
91 |
+
if (opts_.has_memory_format()) {
|
92 |
+
MemoryFormat format = *opts_.memory_format_opt();
|
93 |
+
if (format == MemoryFormat::ChannelsLast) {
|
94 |
+
return IntArrayRef(zeros, 4);
|
95 |
+
}
|
96 |
+
if (format == MemoryFormat::ChannelsLast3d) {
|
97 |
+
return IntArrayRef(zeros, 5);
|
98 |
+
}
|
99 |
+
}
|
100 |
+
return IntArrayRef(zeros, 1);
|
101 |
+
}
|
102 |
+
|
103 |
+
} // namespace at
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/LazyIr.h
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// This file contains autogenerated LazyTensor IR nodes
|
4 |
+
${lazy_ir_sysinc}
|
5 |
+
${lazy_ir_inc}
|
6 |
+
|
7 |
+
${namespace_prologue}
|
8 |
+
using at::operator<<;
|
9 |
+
|
10 |
+
// kNullValue is used to contribute a static hash value any time
|
11 |
+
// a node has an Optional<Value> input that is nullopt. It is important
|
12 |
+
// to differentiate between HASH(nullopt, something) and HASH(something, nullopt),
|
13 |
+
// and using kNullValue in the hash function in the order of arguments
|
14 |
+
// serves this purpose.
|
15 |
+
static const torch::lazy::Value kNullValue = torch::lazy::Value();
|
16 |
+
|
17 |
+
${ir_declarations}
|
18 |
+
|
19 |
+
${namespace_epilogue}
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/MethodOperators.h
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// ${generated_comment}
|
4 |
+
|
5 |
+
#ifdef TORCH_ASSERT_NO_OPERATORS
|
6 |
+
#error This change adds a dependency on native_functions.yaml, \
|
7 |
+
meaning the file will need to be re-compiled every time an operator \
|
8 |
+
is changed or added. Consider if your change would be better placed in \
|
9 |
+
another file, or if a more specific header might achieve the same goal. \
|
10 |
+
See NOTE: [Tensor vs. TensorBase]
|
11 |
+
#endif
|
12 |
+
|
13 |
+
// Forward declarations of any types needed in the operator signatures.
|
14 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
15 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
16 |
+
#include <ATen/core/ATen_fwd.h>
|
17 |
+
|
18 |
+
${MethodOperators_includes}
|
19 |
+
|
20 |
+
namespace at {
|
21 |
+
namespace _ops {
|
22 |
+
${MethodOperators_declarations}
|
23 |
+
} // namespace _ops
|
24 |
+
} // namespace at
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeFunctions.h
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// ${generated_comment}
|
4 |
+
|
5 |
+
#ifdef TORCH_ASSERT_NO_OPERATORS
|
6 |
+
#error This change adds a dependency on native_functions.yaml, \
|
7 |
+
meaning the file will need to be re-compiled every time an operator \
|
8 |
+
is changed or added. Consider if your change would be better placed in \
|
9 |
+
another file, or if a more specific header might achieve the same goal. \
|
10 |
+
See NOTE: [Tensor vs. TensorBase]
|
11 |
+
#endif
|
12 |
+
|
13 |
+
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
14 |
+
#error This change adds a dependency on all pytorch operators, meaning the \
|
15 |
+
file will need to be re-compiled every time an operator is changed or added. \
|
16 |
+
Consider including a specific operator from <ATen/ops/{my_operator}_native.h> \
|
17 |
+
and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
|
18 |
+
#endif
|
19 |
+
|
20 |
+
#include <c10/core/Scalar.h>
|
21 |
+
#include <c10/core/Storage.h>
|
22 |
+
#include <c10/core/TensorOptions.h>
|
23 |
+
#include <c10/util/Deprecated.h>
|
24 |
+
#include <c10/util/Optional.h>
|
25 |
+
#include <c10/core/QScheme.h>
|
26 |
+
#include <ATen/core/Reduction.h>
|
27 |
+
#include <ATen/core/Tensor.h>
|
28 |
+
#include <tuple>
|
29 |
+
#include <vector>
|
30 |
+
|
31 |
+
${NativeFunctions_includes}
|
32 |
+
|
33 |
+
${NativeFunctions_declarations}
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/NativeMetaFunctions.h
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// ${generated_comment}
|
4 |
+
|
5 |
+
#include <ATen/core/Tensor.h>
|
6 |
+
#include <ATen/core/IListRef.h>
|
7 |
+
#include <ATen/TensorMeta.h>
|
8 |
+
#include <ATen/TensorIterator.h>
|
9 |
+
|
10 |
+
${NativeMetaFunctions_includes}
|
11 |
+
|
12 |
+
namespace at {
|
13 |
+
|
14 |
+
namespace meta {
|
15 |
+
|
16 |
+
${NativeMetaFunctions_declarations}
|
17 |
+
|
18 |
+
} // namespace meta
|
19 |
+
} // namespace at
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operator.h
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// ${generated_comment}
|
4 |
+
|
5 |
+
#include <tuple>
|
6 |
+
#include <vector>
|
7 |
+
|
8 |
+
// Forward declarations of any types needed in the operator signatures.
|
9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
11 |
+
#include <ATen/core/ATen_fwd.h>
|
12 |
+
|
13 |
+
namespace at {
|
14 |
+
namespace _ops {
|
15 |
+
|
16 |
+
${declarations}
|
17 |
+
|
18 |
+
}} // namespace at::_ops
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/Operators.h
ADDED
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once
|
2 |
+
|
3 |
+
// ${generated_comment}
|
4 |
+
|
5 |
+
#ifdef TORCH_ASSERT_NO_OPERATORS
|
6 |
+
#error This change adds a dependency on native_functions.yaml, \
|
7 |
+
meaning the file will need to be re-compiled every time an operator \
|
8 |
+
is changed or added. Consider if your change would be better placed in \
|
9 |
+
another file, or if a more specific header might achieve the same goal. \
|
10 |
+
See NOTE: [Tensor vs. TensorBase]
|
11 |
+
#endif
|
12 |
+
|
13 |
+
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
|
14 |
+
#error This change adds a dependency on all pytorch operators, meaning the \
|
15 |
+
file will need to be re-compiled every time an operator is changed or added. \
|
16 |
+
Consider including a specific operator from <ATen/ops/{my_operator}_ops.h> \
|
17 |
+
and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
|
18 |
+
#endif
|
19 |
+
|
20 |
+
#include <c10/core/SymInt.h>
|
21 |
+
#include <c10/core/SymIntArrayRef.h>
|
22 |
+
#include <c10/core/Scalar.h>
|
23 |
+
#include <c10/core/TensorOptions.h>
|
24 |
+
#include <c10/core/QScheme.h>
|
25 |
+
#include <c10/util/OptionalArrayRef.h>
|
26 |
+
#include <tuple>
|
27 |
+
#include <vector>
|
28 |
+
|
29 |
+
${Operators_includes}
|
30 |
+
|
31 |
+
// Extension writers: do you write wrapper functions? Are you frustrated with
|
32 |
+
// resolving overloads of operators? Are you frustrated with dealing with
|
33 |
+
// pointer-to-methods and resolving overloads of pointer-to-methods?? Look no
|
34 |
+
// further, this is the utility for you.
|
35 |
+
//
|
36 |
+
// Given an operator schema: aten::op.overload(...
|
37 |
+
//
|
38 |
+
// Use ATEN_FN2(op, overload) to get a *function* version of the operator
|
39 |
+
// that is guaranteed to not be overloaded. This means that you can safely
|
40 |
+
// decltype(&ATEN_FN2(op, overload)) it. NB: the 2 means this macro takes 2 args.
|
41 |
+
//
|
42 |
+
// Given an operator schema without an overload name: aten::op(...
|
43 |
+
//
|
44 |
+
// Use ATEN_FN(op) to get an unambiguous *function* version of the operator.
|
45 |
+
//
|
46 |
+
// There is some interesting behavior for out= operations.
|
47 |
+
// ATEN_FN2(sin, out) gives a function that is *faithful* to the schema;
|
48 |
+
// that is, the order of arguments is exactly what it looks like in the schema.
|
49 |
+
|
50 |
+
#define ATEN_FN2(op_name, overload) at::_ops::op_name##_##overload::call
|
51 |
+
#define ATEN_FN(op_name) at::_ops::op_name::call
|
52 |
+
|
53 |
+
// Separately, ATEN_OP(op) and ATEN_OP2(op, overload) define a class containing compile-time
|
54 |
+
// metadata about a given aten operator.
|
55 |
+
// Notable data on the class includes:
|
56 |
+
// - ATEN_OP2(add, Tensor)::name // returns the string name: "add"
|
57 |
+
// - ATEN_OP2(add, Tensor)::overload_name // returns the string overload name: "Tensor"
|
58 |
+
// - ATEN_OP2(add, Tensor)::schema // returns the C++ schema type: at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &)
|
59 |
+
// - ATEN_OP2(add, Tensor)::schema_str // returns the string jit type: "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
|
60 |
+
|
61 |
+
#define ATEN_OP2(op_name, overload) at::_ops::op_name##_##overload
|
62 |
+
#define ATEN_OP(op_name) at::_ops::op_name
|
63 |
+
|
64 |
+
// WARNING: Please do not call any of the ops in the _ops namespace directly.
|
65 |
+
// Use the ATEN_FN macros. We do not guarantee stability of the naming
|
66 |
+
// scheme for the functions in at::_ops
|
67 |
+
|
68 |
+
// See Note [The ATen Operators API] for details of the at::_ops namespace
|
69 |
+
|
70 |
+
namespace at {
|
71 |
+
namespace _ops {
|
72 |
+
${Operators_declarations}
|
73 |
+
} // namespace _ops
|
74 |
+
} // namespace at
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RedispatchFunctions.cpp
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
// ${generated_comment}
|
2 |
+
|
3 |
+
#include <ATen/RedispatchFunctions.h>
|
4 |
+
#include <ATen/Functions.h>
|
5 |
+
|
6 |
+
#include <ATen/core/dispatch/Dispatcher.h>
|
7 |
+
#include <ATen/core/op_registration/adaption.h>
|
8 |
+
|
9 |
+
namespace at {
|
10 |
+
|
11 |
+
namespace redispatch {
|
12 |
+
${function_redispatch_definitions}
|
13 |
+
} // namespace redispatch
|
14 |
+
|
15 |
+
} // namespace at
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterBackendSelect.cpp
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// We register ops with a higher priority dispatch key (BackendSelect) than the usual backend-specific keys (e.g. CPU)
// which makes calls to the factory functions dispatch to here.
// We then 'manually' compute a lower-priority to re-dispatch to (e.g. CPU) to get to the eventually correct backend.
// ${generated_comment}
// NOTE: torchgen template — ${...} placeholders are filled in by the code generator.

#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/core/dispatch/DispatchKeyExtractor.h>
#include <torch/library.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
#include <ATen/ops/is_pinned_ops.h>
#include <ATen/ops/_pin_memory_ops.h>

${ops_headers}
#endif

namespace at {

namespace {

${backend_select_method_definitions}

// BackendSelect kernel for aten::is_pinned: computes the accelerator dispatch
// key from `device` and re-dispatches there.
bool is_pinned(const Tensor& self, c10::optional<at::Device> device) {
  // Only CPU tensors can be pinned
  if (!self.is_cpu()) {
    return false;
  }
  // TODO: fetch scalar type from Tensor? But it doesn't really matter...
  // When no device is given, fall back to CUDA (see value_or below) —
  // presumably the most common pinned-memory accelerator; confirm with callers.
  DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA)));
  return at::_ops::is_pinned::redispatch(_dk, self, device);
}

// BackendSelect kernel for aten::_pin_memory: validates the input is a CPU
// tensor, then re-dispatches to the accelerator backend selected by `device`.
at::Tensor _pin_memory(const Tensor& self, c10::optional<at::Device> device) {
  TORCH_CHECK(self.device().is_cpu(), "cannot pin '", self.toString(), "' only dense CPU tensors can be pinned");
  DispatchKeySet _dk = c10::DispatchKeySet(c10::computeDispatchKey(c10::nullopt, self.layout(), device.value_or(at::kCUDA)));
  if (self.is_nested()) {
    // Preserve the nested-tensor keys from the input so the nested kernels
    // still participate in the re-dispatch.
    constexpr auto nested_key_set = c10::DispatchKeySet(
        {c10::DispatchKey::NestedTensor, c10::DispatchKey::AutogradNestedTensor});
    _dk = _dk.add(self.key_set() & nested_key_set);
  }
  return at::_ops::_pin_memory::redispatch(_dk, self, device);
}

TORCH_LIBRARY_IMPL(aten, BackendSelect, m) {
  ${backend_select_function_registrations};
  m.impl(TORCH_SELECTIVE_NAME("aten::is_pinned"), TORCH_FN(is_pinned));
  m.impl(TORCH_SELECTIVE_NAME("aten::_pin_memory"), TORCH_FN(_pin_memory));
}

} // namespace
} // namespace at
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterDispatchDefinitions.ini
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// torchgen template fragment: expanded once per dispatch key to produce the
// kernel definitions and their TORCH_LIBRARY_IMPL registrations.
${ns_prologue}

// NB: TORCH_LIBRARY_IMPL must be in an anonymous namespace to avoid
// ambiguity with conflicting identifiers that may have been defined in
// at namespace already.
namespace {

${dispatch_helpers}

${dispatch_anonymous_definitions}

${static_init_dispatch_registrations}

} // anonymous namespace

${deferred_dispatch_registrations}

namespace ${dispatch_namespace} {

${dispatch_namespaced_definitions}

} // namespace ${dispatch_namespace}

${ns_epilogue}
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterFunctionalization.cpp
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}
// torchgen template: $ops_headers / ${...} placeholders are substituted by the
// code generator before compilation.

#include <ATen/core/LegacyTypeDispatch.h>
#include <ATen/EmptyTensor.h>
#include <ATen/FunctionalTensorWrapper.h>
#include <ATen/FunctionalInverses.h>
#include <ATen/MemoryOverlap.h>
#include <torch/library.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#include <ATen/NativeFunctions.h>
#else
// needed for the meta tensor calls to get stride info in functionalization
#include <ATen/ops/empty_strided_native.h>
// needed for special handling of copy_().
// See Note [functionalizating copy_() and not preserving strides]
#include <ATen/ops/to_ops.h>
#include <ATen/ops/expand_copy_ops.h>

$ops_headers
#endif

namespace at {
namespace functionalization {

// This keyset is used by functionalization when it calls into meta kernels
// to accurately propagate stride metadata.
// Exclude any modes: the purpose of calling into meta kernels is only as an implementation
// detail to perform shape inference, and we don't want any modal keys to run.
// Specifically, we want to prevent functionalization and Python modes from running.
constexpr auto exclude_keys_for_meta_dispatch =
    c10::functorch_transforms_ks |
    c10::DispatchKeySet({
        c10::DispatchKey::FuncTorchDynamicLayerBackMode,
        c10::DispatchKey::FuncTorchDynamicLayerFrontMode,
        c10::DispatchKey::Python,
        c10::DispatchKey::PreDispatch,

    });

// Helper around at::has_internal_overlap.
// The ATen util is used in hot-path eager mode: it's always fast,
// but might return TOO_HARD sometimes.
// During functionalization, we're ok taking a bit longer
// to detect memory overlap.
inline bool has_internal_overlap_helper(const at::Tensor t) {
  auto has_overlap = at::has_internal_overlap(t);
  if (has_overlap == at::MemOverlap::Yes) return true;
  if (has_overlap == at::MemOverlap::No) return false;
  // Remaining case (TOO_HARD) is conservatively treated as "no overlap".
  return false;
}


// Build a meta-device tensor mirroring t's symbolic sizes/strides/dtype/layout.
// Used purely for shape inference; undefined tensors pass through unchanged.
inline Tensor to_meta(const Tensor& t) {
  if (!t.defined()) return t;
  return at::native::empty_strided_meta_symint(t.sym_sizes(), t.sym_strides(),
/*dtype=*/c10::make_optional(t.scalar_type()), /*layout=*/c10::make_optional(t.layout()),
/*device=*/c10::make_optional(c10::Device(kMeta)), /*pin_memory=*/c10::nullopt);
}

// Optional overload: forwards to the Tensor overload when a value is present.
inline c10::optional<Tensor> to_meta(const c10::optional<Tensor>& t) {
  if (t.has_value()) {
    return c10::make_optional<Tensor>(to_meta(*t));
  }
  return c10::nullopt;
}

// List overload: element-wise conversion of a tensor list.
inline std::vector<Tensor> to_meta(at::ITensorListRef t_list) {
  std::vector<Tensor> outputs;
  outputs.reserve(t_list.size());
  for (const auto& tensor : t_list) {
    outputs.push_back(to_meta(tensor));
  }
  return outputs;
}

// c10::List overload: element-wise conversion.
inline c10::List<Tensor> to_meta(const c10::List<Tensor>& t_list) {
  c10::List<Tensor> outputs;
  outputs.reserve(t_list.size());
  for (const auto i : c10::irange(t_list.size())) {
    outputs.push_back(to_meta(t_list[i]));
  }
  return outputs;
}

// c10::List-of-optional overload: element-wise conversion.
inline c10::List<c10::optional<Tensor>> to_meta(const c10::List<c10::optional<Tensor>>& t_list) {
  c10::List<c10::optional<Tensor>> outputs;
  outputs.reserve(t_list.size());
  for (const auto i : c10::irange(t_list.size())) {
    outputs.push_back(to_meta(t_list[i]));
  }
  return outputs;
}


${func_definitions}

} // namespace functionalization

namespace {

TORCH_LIBRARY_IMPL(aten, Functionalize, m) {
  ${func_registrations};
}

} // namespace

} // namespace at
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegisterSchema.cpp
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// ${generated_comment}
// torchgen template: registers every aten operator schema with the dispatcher.
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <torch/library.h>

namespace at {
TORCH_LIBRARY(aten, m) {
  ${aten_schema_registrations};
  // Distributed Ops
  // Implementations located in torch/csrc/jit/runtime/register_distributed_ops.cpp
  m.def("get_gradients(int context_id) -> Dict(Tensor, Tensor)");
}
${schema_registrations}
} // namespace at
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/RegistrationDeclarations.h
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
// This file contains all native_functions that can be registered to
// and the schema string that they should be registered with
// (torchgen template: the placeholder below is filled in by the generator).

${registration_declarations}
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCPU.cpp
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// torchgen template: per-ufunc CPU entry point; declarations/definitions are
// substituted by the code generator.
#define TORCH_ASSERT_NO_OPERATORS

#include <ATen/native/DispatchStub.h>
#include <ATen/TensorIterator.h>
#include <ATen/TensorMeta.h>

namespace at {

// NB: this is explicitly copied here (via codegen) rather than
// included via NativeFunctions.h to avoid recompiling this file when
// NativeFunctions.h changes
namespace meta {
${meta_declaration}
}

namespace native {
${native_declaration}
${native_definitions}
}} // namespace at::native
|
venv/lib/python3.10/site-packages/torchgen/packaged/ATen/templates/UfuncCPUKernel.cpp
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// torchgen template: per-ufunc vectorized CPU kernel body, instantiated once
// per ufunc ${name}; definitions are substituted by the code generator.
#define TORCH_ASSERT_NO_OPERATORS

#include <ATen/native/ufunc/${name}.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cpu/Loops.h>
#include <ATen/cpu/vec/vec.h>
#include <ATen/Dispatch.h>
#include <c10/core/Scalar.h>

namespace at {
namespace native {
${native_definitions}
}} // namespace at::native
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/BUILD.bazel
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
# Bazel package for torchgen/packaged/autograd: all target definitions live in
# build.bzl so they can be shared across rule sets.
load("//:tools/bazel.bzl", "rules")
load(":build.bzl", "define_targets")

define_targets(rules = rules)
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/README.md
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
If you add a file to this directory, you **MUST** update
|
2 |
+
`torch/CMakeLists.txt` and add the file as a dependency to
|
3 |
+
the `add_custom_command` call.
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__init__.py
ADDED
File without changes
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (194 Bytes). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/context.cpython-310.pyc
ADDED
Binary file (1.42 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_annotated_fn_args.cpython-310.pyc
ADDED
Binary file (4.25 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd.cpython-310.pyc
ADDED
Binary file (3.3 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_autograd_functions.cpython-310.pyc
ADDED
Binary file (20.8 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_inplace_or_view_type.cpython-310.pyc
ADDED
Binary file (15.3 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_python_functions.cpython-310.pyc
ADDED
Binary file (28.2 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_trace_type.cpython-310.pyc
ADDED
Binary file (11.9 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_factories.cpython-310.pyc
ADDED
Binary file (3.91 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_variable_type.cpython-310.pyc
ADDED
Binary file (46.3 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/gen_view_funcs.cpython-310.pyc
ADDED
Binary file (9.76 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/__pycache__/load_derivatives.cpython-310.pyc
ADDED
Binary file (24.3 kB). View file
|
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/build.bzl
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def define_targets(rules):
    """Declare the `autograd` py_library for this package.

    `rules` supplies the rule set (py_library, glob, requirement) so the same
    definition works across build environments.
    """
    # Python sources plus the non-code files the generators read at runtime.
    sources = rules.glob(["*.py"])
    runtime_data = rules.glob([
        "*.yaml",
        "templates/*",
    ])
    rules.py_library(
        name = "autograd",
        srcs = sources,
        data = runtime_data,
        visibility = ["//:__subpackages__"],
        deps = [
            rules.requirement("PyYAML"),
            "//torchgen",
        ],
    )
|
venv/lib/python3.10/site-packages/torchgen/packaged/autograd/context.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import functools
|
2 |
+
from typing import Callable
|
3 |
+
|
4 |
+
from torchgen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI
|
5 |
+
from torchgen.context import native_function_manager
|
6 |
+
from torchgen.utils import T
|
7 |
+
|
8 |
+
|
9 |
+
# Like tools.api.context.with_native_function, but for
# NativeFunctionWithDifferentiabilityInfo.
def with_native_function_with_differentiability_info(
    func: Callable[[NFWDI], T]
) -> Callable[[NFWDI], T]:
    """Run *func* inside the native-function context of its argument.

    The wrapper enters ``native_function_manager(f.func)`` before delegating,
    so error reporting inside *func* is attributed to the right operator.
    """

    @functools.wraps(func)
    def inner(fn_info: NFWDI) -> T:
        with native_function_manager(fn_info.func):
            return func(fn_info)

    return inner
21 |
+
|
22 |
+
# Like the above but with an additional dispatch key string argument
def with_native_function_with_differentiability_info_and_key(
    func: Callable[[NFWDI, str], T]
) -> Callable[[NFWDI, str], T]:
    """Run a two-argument *func* inside the native-function context.

    Identical to ``with_native_function_with_differentiability_info`` except
    the wrapped callable also receives a dispatch-key string, forwarded
    unchanged.
    """

    @functools.wraps(func)
    def inner(fn_info: NFWDI, dispatch_key: str) -> T:
        with native_function_manager(fn_info.func):
            return func(fn_info, dispatch_key)

    return inner