==========================================================================================================================================
SOURCE CODE FILE: analysis.h
LINES: 1
SIZE: 9.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\analysis.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/stmt.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
#include <utility>
namespace torch::jit::tensorexpr {
class HasRand : public IRVisitor {
public:
HasRand(StmtPtr stmt) : stmt_(std::move(stmt)) {
stmt_->accept(this);
}
bool has_rand() const {
return has_rand_;
}
private:
void visit(const IntrinsicsPtr& v) override {
if (v->op_type() == IntrinsicsOp::kRand) {
has_rand_ = true;
} else {
IRVisitor::visit(v);
}
}
StmtPtr stmt_;
bool has_rand_ = false;
};
template <typename Op>
class NodeFinder : public IRVisitor {
public:
void visit(const NodePtr<Op>& v) override {
nodes.push_back((NodePtr<Op>)v);
IRVisitor::visit(v);
}
static std::vector<NodePtr<Op>> find(const StmtPtr& s) {
NodeFinder<Op> nf;
s->accept(&nf);
return nf.nodes;
}
static std::vector<NodePtr<Op>> find(const ExprPtr& e) {
NodeFinder<Op> nf;
e->accept(&nf);
return nf.nodes;
}
std::vector<NodePtr<Op>> nodes;
};
class VarFinder : public IRVisitor {
public:
void visit(const VarPtr& v) override {
vars_.insert(v);
IRVisitor::visit(v);
}
static std::unordered_set<VarPtr> find(const StmtPtr& s) {
VarFinder nf;
s->accept(&nf);
return nf.vars();
}
static std::unordered_set<VarPtr> find(const ExprPtr& e) {
VarFinder nf;
e->accept(&nf);
return nf.vars();
}
const std::unordered_set<VarPtr>& vars() {
return vars_;
}
private:
std::unordered_set<VarPtr> vars_;
};
class BufFinder : public IRVisitor {
public:
void visit(const BufPtr& v) override {
bufs_.insert(v);
IRVisitor::visit(v);
}
static std::unordered_set<BufPtr> find(const StmtPtr& s) {
BufFinder nf;
s->accept(&nf);
return nf.bufs();
}
static std::unordered_set<BufPtr> find(const ExprPtr& e) {
BufFinder nf;
e->accept(&nf);
return nf.bufs();
}
const std::unordered_set<BufPtr>& bufs() {
return bufs_;
}
private:
std::unordered_set<BufPtr> bufs_;
};
// Finds all kinds of write operations to the provided Buf.
class WritesToBuf : public IRVisitor {
public:
WritesToBuf(BufPtr target) : target_(std::move(target)) {}
std::vector<StmtPtr> writes() {
return writes_;
}
static std::vector<StmtPtr> find(const StmtPtr& s, BufPtr b) {
WritesToBuf finder(std::move(b));
s->accept(&finder);
return finder.writes();
}
private:
void visit(const StorePtr& v) override {
if (v->buf() == target_) {
writes_.push_back(v);
}
}
void visit(const AtomicAddPtr& v) override {
if (v->buf() == target_) {
writes_.push_back(v);
}
}
BufPtr target_;
std::vector<StmtPtr> writes_;
};
class StmtsReadingBuf : public IRVisitor {
public:
StmtsReadingBuf(BufPtr target) : target_(std::move(target)) {}
std::vector<StmtPtr> reads() {
return reads_;
}
static std::vector<StmtPtr> find(const StmtPtr& s, BufPtr b) {
StmtsReadingBuf finder(std::move(b));
s->accept(&finder);
return finder.reads();
}
private:
bool readsBuffer(const StmtPtr& s) {
auto loads = NodeFinder<Load>::find(s);
for (const auto& l : loads) {
if (l->buf() == target_) {
return true;
}
}
return false;
}
void visit(const StorePtr& v) override {
if (readsBuffer(v)) {
reads_.push_back(v);
}
}
void visit(const LetPtr& v) override {
if (readsBuffer(v)) {
reads_.push_back(v);
}
}
void visit(const CondPtr& v) override {
if (readsBuffer(v)) {
reads_.push_back(v);
}
}
void visit(const AtomicAddPtr& v) override {
if (readsBuffer(v)) {
reads_.push_back(v);
}
}
BufPtr target_;
std::vector<StmtPtr> reads_;
};
class ExternalAllocBufFinder : public IRVisitor {
public:
void visit(const ExternalCallWithAllocPtr& v) override {
const auto& bufs_out = v->buf_out_args();
bufs_.insert(bufs_out.begin(), bufs_out.end());
IRVisitor::visit(v);
}
static std::unordered_set<BufPtr> find(const StmtPtr& s) {
ExternalAllocBufFinder f;
s->accept(&f);
return f.bufs();
}
static std::unordered_set<BufPtr> find(const ExprPtr& e) {
ExternalAllocBufFinder f;
e->accept(&f);
return f.bufs();
}
const std::unordered_set<BufPtr>& bufs() {
return bufs_;
}
private:
std::unordered_set<BufPtr> bufs_;
};
// Traverses the IR to determine if a particular Var is modified within it.
class ModifiesVarChecker : public IRVisitor {
public:
ModifiesVarChecker(VarPtr v) : var_(std::move(v)) {}
static bool check(const StmtPtr& s, VarPtr v) {
ModifiesVarChecker checker(std::move(v));
s->accept(&checker);
return checker.found();
}
bool found() {
return found_;
}
private:
void visit(const StorePtr& v) override {
if (v->buf()->base_handle() == var_) {
found_ = true;
return;
}
IRVisitor::visit(v);
}
void visit(const AtomicAddPtr& v) override {
if (v->buf()->base_handle() == var_) {
found_ = true;
return;
}
IRVisitor::visit(v);
}
void visit(const LetPtr& v) override {
if (v->var() == var_) {
found_ = true;
return;
}
IRVisitor::visit(v);
}
void visit(const ForPtr& v) override {
if (v->var() == var_) {
found_ = true;
return;
}
IRVisitor::visit(v);
}
VarPtr var_;
bool found_{false};
};
// Traverses the Block stmt to identify the live range of the specified buf. The
// live range, indicated by a pair of integers, specifies the indices of the first
// and last stmts in the block that access the buf.
class BufLiveRange : public IRVisitor {
public:
BufLiveRange(BufPtr b) : buf_(std::move(b)) {}
static std::tuple<int32_t, int32_t> liveRange(const StmtPtr& s, BufPtr b) {
BlockPtr block = to<Block>(s);
// We only analyze buffer live ranges for block stmts.
if (!block) {
return std::make_tuple(0, 0);
}
BufLiveRange analyzer(std::move(b));
block->accept(&analyzer);
return analyzer.getLiveRange();
}
private:
std::tuple<int32_t, int32_t> getLiveRange() {
return std::make_tuple(begin_, end_);
}
bool hasBufReads(const StmtPtr& s) {
auto loads1 = NodeFinder<Load>::find(s);
for (const auto& l : loads1) {
if (l->buf() == buf_) {
return true;
}
}
auto loads2 = NodeFinder<ExternalCall>::find(s);
for (const auto& l : loads2) {
for (const auto& lb : l->buf_args()) {
if (lb == buf_) {
return true;
}
}
}
auto loads3 = NodeFinder<ExternalCallWithAlloc>::find(s);
for (const auto& l : loads3) {
for (const auto& lb : l->buf_args()) {
if (lb == buf_) {
return true;
}
}
}
return false;
}
bool hasBufWrites(const StmtPtr& s) {
auto writes1 = NodeFinder<Store>::find(s);
for (const auto& w : writes1) {
if (w->buf() == buf_) {
return true;
}
}
auto writes2 = NodeFinder<ExternalCall>::find(s);
for (const auto& w : writes2) {
if (w->buf() == buf_) {
return true;
}
}
auto writes3 = NodeFinder<ExternalCallWithAlloc>::find(s);
for (const auto& w : writes3) {
for (const auto& wb : w->buf_out_args()) {
if (wb == buf_) {
return true;
}
}
}
return false;
}
void findAccAndUpdateLiveRange(const StmtPtr& s) {
bool has_reads = hasBufReads(s), has_writes = hasBufWrites(s);
if (has_reads || has_writes) {
if (begin_ == -1) {
begin_ = curr_index_;
};
end_ = curr_index_;
}
}
void visit(const BlockPtr& v) override {
for (const StmtPtr& s : *v) {
curr_index_ += 1;
findAccAndUpdateLiveRange(s);
}
}
BufPtr buf_;
int32_t begin_ = -1;
int32_t end_ = -1;
int32_t curr_index_ = -1;
};
// A class that analyzes the given program relevant for the Block backend.
// It creates a map of multi-dim buffers and their flat versions.
class CreateBufferMap : public IRVisitor {
public:
const std::unordered_map<std::string, BufPtr>& getBufferMap() const {
return map_input_to_tensor_bufs_;
}
private:
void visit(const StorePtr& v) override {
auto load_node = to<Load>(v->value());
if (load_node) {
auto t_buf = load_node->buf();
map_input_to_tensor_bufs_.emplace(t_buf->name_hint(), v->buf());
} else {
auto add_node = to<Add>(v->value());
auto mul_node = to<Mul>(v->value());
// For now, v->value() can only be Add or Mul.
TORCH_INTERNAL_ASSERT(add_node || mul_node, buildErrorMessage());
map_input_to_tensor_bufs_.emplace(v->buf()->name_hint(), v->buf());
}
v->value()->accept(this);
}
std::unordered_map<std::string, BufPtr> map_input_to_tensor_bufs_;
};
} // namespace torch::jit::tensorexpr
```
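The finders above all follow the same visitor pattern: run the analysis over a statement (or expression) via the static `find`/`liveRange` helpers and read back the collected nodes. A minimal usage sketch, assuming `stmt` (a `Block`) and `buf` have already been built with the usual tensorexpr IR constructors:
```cpp
// Illustrative sketch only; `stmt` (a Block) and `buf` are assumed to have
// been constructed elsewhere with the tensorexpr IR builders.
#include <torch/csrc/jit/tensorexpr/analysis.h>
using namespace torch::jit::tensorexpr;

void inspect(const StmtPtr& stmt, const BufPtr& buf) {
  // Every Load node and every distinct Var referenced in `stmt`.
  auto loads = NodeFinder<Load>::find(stmt);
  auto vars = VarFinder::find(stmt);

  // All statements that write to / read from `buf`.
  auto writes = WritesToBuf::find(stmt, buf);
  auto reads = StmtsReadingBuf::find(stmt, buf);

  // Indices of the first and last statements in the Block that touch `buf`;
  // returns (0, 0) if `stmt` is not a Block.
  auto [begin, end] = BufLiveRange::liveRange(stmt, buf);
  (void)loads; (void)vars; (void)writes; (void)reads; (void)begin; (void)end;
}
```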
===============================================================================================================================================
SOURCE CODE FILE: block_codegen.h
LINES: 1
SIZE: 4.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\block_codegen.h
ENCODING: utf-8
```h
#pragma once
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <ATen/ATen.h>
#include <torch/csrc/jit/resource_guard.h>
#include <torch/csrc/jit/tensorexpr/analysis.h>
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
namespace torch::jit::tensorexpr {
// A class that analyzes the given program relevant for Block backend.
class BlockAnalysis : public IRVisitor {
public:
bool is_buf_store_target(const BufPtr& buf) const {
return store_targets_.count(buf) > 0;
}
const std::unordered_set<BufPtr>& loads() const {
return loads_;
}
const std::unordered_set<BufPtr>& stores() const {
return store_targets_;
}
int64_t block_size() const {
return block_size_;
}
bool areBufsInMap(const std::unordered_set<BufPtr>& bufs) const;
BufPtr getMultiDimBuf(const BufPtr& buf) const;
std::string getInputName(const BufPtr& buf) const;
std::string getFlatInputName(const BufPtr& buf) const {
return getInputName(buf) + "_flat";
}
std::unordered_map<std::string, BufPtr> getBufferMap() const {
return map_input_to_tensor_bufs_;
}
private:
void visit(const StorePtr& v) override;
void visit(const LoadPtr& v) override;
void visit(const ForPtr& v) override;
std::unordered_map<std::string, BufPtr> map_input_to_tensor_bufs_;
std::unordered_set<BufPtr> store_targets_;
std::unordered_set<BufPtr> loads_;
int64_t block_size_ = 32;
};
// A class that overrides the underlying IRPrinter to produce Block.
class BlockPrinter : public IRPrinter {
public:
BlockPrinter(std::ostream* os, BlockAnalysis* block_analysis)
: IRPrinter(*os), block_analysis_(block_analysis) {}
using IRPrinter::name_manager;
using IRPrinter::visit;
private:
BlockAnalysis* block_analysis_;
std::unordered_map<std::string, int> dim_values_map;
std::vector<std::string> dim_names = {"N", "H", "W", "C"};
std::vector<std::string> flat_dim_names = {"N", "NH", "NHW", "NHWC"};
void PrintTensorInfo(const std::unordered_set<BufPtr>& bufs);
void PrintArguments(const std::unordered_set<BufPtr>& bufs);
void PrintBufferInfo(const std::unordered_set<BufPtr>& bufs);
void PrintDistribution(const std::unordered_set<BufPtr>& bufs);
void PrintLoop(const std::unordered_set<BufPtr>& bufs, bool block_idx = true);
void PrintReshapeInfo(
const std::unordered_set<BufPtr>& bufs,
bool reverse = false);
void PrintDMAs(const std::unordered_set<BufPtr>& bufs);
void PrintAdjustBuffers(const std::unordered_set<BufPtr>& bufs);
void visit(const ForPtr& v) override;
void visit(const LoadPtr& v) override;
void visit(const StorePtr& v) override;
void visit(const BlockPtr& v) override;
void visit(const AddPtr& v) override;
void visit(const MulPtr& v) override;
};
class TORCH_API BlockCodeGen : public CodeGen {
public:
template <typename... Ts>
/* implicit */
BlockCodeGen(StmtPtr stmt, Ts... ts)
: CodeGen(
stmt,
std::vector<BufferArg>({BufferArg(ts)...}),
at::Device(at::kCPU)) {
Initialize();
}
BlockCodeGen(
StmtPtr stmt,
const std::vector<BufferArg>& buffer_args,
at::Device device = at::Device(at::kCPU),
const std::string& kernel_func_name = "func")
: CodeGen(std::move(stmt), buffer_args, device, kernel_func_name) {
Initialize();
}
~BlockCodeGen() override;
void call(const std::vector<CallArg>& args) override;
void call_raw(const std::vector<void*>& args) override;
void Initialize();
std::string getCodeText(const std::string& attr = "") override {
return oss_.str();
}
private:
UniqueNameManager* name_manager() {
if (!printer_) {
throw std::runtime_error("Null IRPrinter is not expected");
}
return printer_->name_manager();
}
std::ostream& os() {
return printer_->os();
}
std::ostringstream oss_;
std::unique_ptr<BlockPrinter> printer_;
std::unique_ptr<BlockAnalysis> block_analysis_;
std::string GetUniqueFuncName(const std::string& func_prefix);
};
} // namespace torch::jit::tensorexpr
```
==================================================================================================================================================
SOURCE CODE FILE: bounds_inference.h
LINES: 1
SIZE: 2.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\bounds_inference.h
ENCODING: utf-8
```h
#pragma once
#include <unordered_map>
#include <vector>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/mem_dependency_checker.h>
namespace torch::jit::tensorexpr {
class Expr;
class Buf;
class Stmt;
enum C10_API_ENUM TensorAccessKind { kLoad, kStore, kMutate };
struct TORCH_API TensorAccessBoundsInfo {
TensorAccessKind kind;
std::vector<ExprPtr> start;
std::vector<ExprPtr> stop;
};
using BoundsInfo =
std::unordered_map<BufPtr, std::vector<TensorAccessBoundsInfo>>;
TORCH_API BoundsInfo
inferBounds(const StmtPtr& s, bool distinctAccessKinds = true);
// Bounds inference that reuses a cached analysis. The MemDependencyChecker must
// already have been run.
TORCH_API BoundsInfo getInferredBounds(
analysis::MemDependencyChecker& analyzer,
const StmtPtr& s,
bool distinctAccessKinds = true);
TORCH_API BoundsInfo getInferredBounds(
analysis::MemDependencyChecker& analyzer,
const ExprPtr& e,
bool distinctAccessKinds = true);
TORCH_API void printBoundsInfo(const BoundsInfo& v);
TORCH_API std::vector<ExprPtr> getBoundExtents(
const std::vector<TensorAccessBoundsInfo>& infos);
// The kind of dependency found, in increasing order of exclusivity.
enum class HazardKind {
ReadAfterWrite,
WriteAfterRead,
WriteAfterWrite,
NoDependency,
};
TORCH_API HazardKind getPotentialHazards(
analysis::MemDependencyChecker& analyzer,
const StmtPtr& A,
const StmtPtr& B);
// Returns true if there is a conflicting overlap between accesses in
// statements A and B. A conflicting overlap is an overlap in buffer accesses
// where at least one of the accesses is a Store.
TORCH_API bool hasConflictingOverlap(
analysis::MemDependencyChecker& analyzer,
const StmtPtr& A,
const StmtPtr& B);
// Same as above, between accesses in stores S1 and S2.
TORCH_API bool isOverlapping(
analysis::MemDependencyChecker& analyzer,
const StorePtr& S1,
const StorePtr& S2);
// Same as above, between accesses in store S and load L.
TORCH_API bool isOverlapping(
analysis::MemDependencyChecker& analyzer,
const StorePtr& S,
const LoadPtr& L);
} // namespace torch::jit::tensorexpr
```
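A hedged sketch of how `BoundsInfo` is typically consumed: one entry per buffer, each holding per-access index bounds. `stmt` is assumed to be an already-lowered statement.
```cpp
// Illustrative sketch only; `stmt` is assumed to come from the usual lowering.
#include <torch/csrc/jit/tensorexpr/bounds_inference.h>
using namespace torch::jit::tensorexpr;

void dumpAccessBounds(const StmtPtr& stmt) {
  BoundsInfo info = inferBounds(stmt, /*distinctAccessKinds=*/true);
  for (const auto& [buf, accesses] : info) {
    for (const TensorAccessBoundsInfo& a : accesses) {
      // a.kind is kLoad, kStore, or kMutate; a.start / a.stop hold one
      // ExprPtr per dimension of the access.
      (void)buf;
      (void)a;
    }
  }
  printBoundsInfo(info); // human-readable dump of the same information
}
```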
================================================================================================================================================
SOURCE CODE FILE: bounds_overlap.h
LINES: 1
SIZE: 4.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\bounds_overlap.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/expr.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <utility>
#include <vector>
namespace torch::jit::tensorexpr::analysis {
// A simple class containing the start and end of a range in a single dimension.
struct TORCH_API Bound {
ExprPtr start{nullptr};
ExprPtr end{nullptr};
// This stores whether or not the start and end of this Bound have previously
// been swapped. This occurs when the bound is in a loop with a negative
// stride.
bool swapped{false};
Bound() = default;
Bound(ExprPtr s, ExprPtr e) : start(std::move(s)), end(std::move(e)) {}
void print() const;
bool equals(const Bound& other) const;
// The comparison operators are conservative. If a comparison operator returns
// true, it means that all the elements satisfy the logical expression. But a
// false result does not mean the opposite comparison is satisfied; it may or
// may not be.
bool operator==(const Bound& other) const;
bool operator!=(const Bound& other) const;
bool operator<(const Bound& other) const;
bool operator<=(const Bound& other) const;
bool operator>(const Bound& other) const;
bool operator>=(const Bound& other) const;
void swap() noexcept {
std::swap(start, end);
swapped = !swapped;
}
};
struct BoundHash {
size_t operator()(const Bound& b) const {
return std::hash<ExprPtr>()(b.start) ^ std::hash<ExprPtr>()(b.end);
}
};
// The type of overlap found. Each condition is true only if none of the
// previous conditions hold.
// ContainedOrEqual: All elements in the Bound A are in the Bound B (this
// includes the case where the bounds are equal).
// Contains: All elements in the Bound B are in the Bound A.
// PartialOverlap: Some elements in the Bound B are in the Bound A.
// NoOverlap: No elements in the Bound A are in the Bound B.
enum class OverlapKind {
ContainedOrEqual,
Contains,
PartialOverlap,
NoOverlap
};
// The Bound comparison result.
// True: Every Bound element always satisfies the given comparison operator
// False: Every Bound element always does NOT satisfy the given comparison
// operator
// NotDetermined: Some elements satisfy the given comparison operator and
// some do not.
enum class CmpEvalResult { True, False, NotDetermined };
// Returns the kind of overlap between Bound A and Bound B in a single
// dimension.
OverlapKind TORCH_API boundOverlap(const Bound& A, const Bound& B);
// The comparison is conservative and the compare result is deterministic.
// It means that every element of the Bound to be compared needs to satisfy
// the given comparison operator.
CmpEvalResult TORCH_API compareBound(
const Bound& a,
const Bound& b,
const CompareSelectOperation& cmp_op);
// A multi dimensional bound representing the bound of a set of indices.
using IndexBounds = std::vector<Bound>;
// Returns true if two IndexBounds are equivalent.
bool TORCH_API indexBoundsEquals(const IndexBounds& A, const IndexBounds& B);
// Flattens a multi dimensional bound to a single dimension. The IndexBounds "a"
// *must* encapsulate the entire range of the buffer.
Bound TORCH_API flattenBounds(const IndexBounds& a);
// Determines the kind of overlap in X dimensions.
OverlapKind TORCH_API overlaps(const IndexBounds& a, const IndexBounds& b);
// Returns the Bound slices created by subtracting bound B from bound A.
// Multiple Bounds can be returned in the case where B slices A into two
// distinct regions with no overlap.
//
// For example:
// subtractBound((0, 10), (2, 4)) => [(0, 1), (5, 10)]
// bound A: (0, 10)
// bound B: (2, 4)
// If we remove slice (2, 4) from the slice (0, 10), we will be left
// with 2 slices, one at the start (0, 1), and one at the end (5, 10).
// So, the result of this subtraction is [(0, 1), (5, 10)].
//
// Note: this doesn't use IndexBounds because the Bounds returned do not
// represent multiple different dimensions.
std::vector<Bound> TORCH_API subtractBound(const Bound& a, const Bound& b);
// Returns the bound slices created by subtracting the IndexBounds B from A.
std::vector<IndexBounds> TORCH_API subtractIndicesBounds(
const IndexBounds& A,
const IndexBounds& B,
OverlapKind overlap);
std::vector<IndexBounds> TORCH_API
subtractIndicesBounds(const IndexBounds& A, const IndexBounds& B);
} // namespace torch::jit::tensorexpr::analysis
```
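A minimal sketch mirroring the (0, 10) vs. (2, 4) example in the `subtractBound` comment, using `IntImm` bounds:
```cpp
// Illustrative sketch only.
#include <torch/csrc/jit/tensorexpr/bounds_overlap.h>
using namespace torch::jit::tensorexpr;
using namespace torch::jit::tensorexpr::analysis;

void boundsExample() {
  Bound a(alloc<IntImm>(0), alloc<IntImm>(10));
  Bound b(alloc<IntImm>(2), alloc<IntImm>(4));

  // b lies strictly inside a, so a contains b.
  OverlapKind kind = boundOverlap(a, b); // expected: OverlapKind::Contains

  // Removing (2, 4) from (0, 10) leaves the two slices (0, 1) and (5, 10).
  std::vector<Bound> rest = subtractBound(a, b);
  (void)kind;
  (void)rest;
}
```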
=========================================================================================================================================
SOURCE CODE FILE: codegen.h
LINES: 1
SIZE: 7.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\codegen.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
#include <utility>
namespace torch::jit::tensorexpr {
template <typename T>
class PaddedBuffer;
class TORCH_API CodeGen {
public:
class BufferArg;
class CallArg;
template <typename... Ts>
CodeGen(StmtPtr stmt, Ts... ts)
: stmt_(std::move(stmt)), buffer_args_({BufferArg(ts)...}) {}
CodeGen(
StmtPtr stmt,
std::vector<BufferArg> buffer_args,
at::Device device = at::kCPU,
std::string kernel_func_name = "func");
virtual ~CodeGen() = default;
StmtPtr stmt() const {
return stmt_;
}
void set_stmt(StmtPtr s) {
stmt_ = std::move(s);
}
void apply_mutator(IRMutator* mutator) {
stmt_ = stmt_->accept_mutator(mutator);
}
void apply_visitor(IRVisitor* visitor) {
stmt_->accept(visitor);
}
std::vector<BufferArg>& buffer_args() {
return buffer_args_;
}
const std::vector<BufferArg>& buffer_args() const {
return buffer_args_;
}
at::Device device() {
return device_;
}
// This function returns the generated code as
// a string.
virtual std::string getCodeText(
const std::string& attr [[maybe_unused]] = "") {
return "";
}
// TODO: Figure out how to unify these call interfaces.
/// Call a function with a vector of CallArgs, which are tagged
/// unions that properly type the arguments.
virtual void call(const std::vector<CallArg>& args) = 0;
/// Call a function faster than a regular `call` by assuming that
/// the generated kernel already knows the type of the arguments, so
/// they can be type-punned with `void*`s.
virtual void call_raw(const std::vector<void*>& args) = 0;
/// Call a function even faster than a regular call, by assuming
/// that the number of thread blocks can be derived from `numel` via
/// a simple division, rather than evaluating an expression.
virtual void call_with_numel(void** args, int64_t numel);
virtual at::Tensor empty_strided(
c10::IntArrayRef size,
c10::IntArrayRef stride,
std::optional<c10::ScalarType> dtype_opt,
std::optional<c10::Layout> layout_opt,
std::optional<c10::Device> device_opt,
std::optional<bool> pin_memory_opt) {
return at::empty_strided(
size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt);
}
const std::string& kernel_func_name() const {
return kernel_func_name_;
}
void allocIntermediateBufs();
protected:
static void* argToPtr(const BufferArg& bufferArg, const CallArg& callArg);
private:
StmtPtr stmt_;
std::vector<BufferArg> buffer_args_;
at::Device device_ = at::kCPU;
std::string kernel_func_name_ = "func";
};
class TORCH_API ExtCallMemoryReuse : public IRMutator {
static std::unordered_map<std::string, std::string> makeExtCallFuncNameMap();
static const std::unordered_map<std::string, std::string> extCallFuncNameMap_;
public:
explicit ExtCallMemoryReuse(
const std::vector<CodeGen::BufferArg>& bufferArgs);
~ExtCallMemoryReuse() override = default;
StmtPtr mutate(const ExternalCallPtr& v) override;
private:
std::unordered_set<BufPtr> bufferArgs_;
};
class CodeGen::BufferArg {
public:
BufferArg(const Tensor& tensor) : buf_(tensor.buf()) {}
BufferArg(const VarHandle& var) : var_(var.node()), isVar_(true) {}
BufferArg(const BufHandle& buf) : buf_(buf.node()) {}
BufferArg(BufPtr buf) : buf_(std::move(buf)) {}
VarPtr var() const {
return isVar_ ? var_ : buf_->base_handle();
}
BufPtr buf() const {
return buf_;
}
bool isVar() const {
return isVar_;
}
Dtype dtype() const {
return isVar_ ? var_->dtype() : buf_->dtype();
}
private:
VarPtr var_ = nullptr;
BufPtr buf_ = nullptr;
bool isVar_ = false;
};
class CodeGen::CallArg {
public:
template <typename T>
CallArg(const PaddedBuffer<T>& buffer);
template <typename T>
CallArg(const std::vector<T>& buffer)
: data_(const_cast<T*>(buffer.data())) {}
CallArg(void* ptr) : data_(ptr) {}
#define ARG_TYPE_CTOR(Type, Name) \
CallArg(Type v) { \
memcpy(buffer_, &v, sizeof(Type)); \
data_ = (void*)buffer_; \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_TYPE_CTOR)
#undef ARG_TYPE_CTOR
void* data() const {
return data_;
}
CallArg(const CallArg& rhs) {
if (rhs.data_ == rhs.buffer_) {
memcpy(this->buffer_, rhs.buffer_, sizeof(rhs.buffer_));
this->data_ = (void*)(this->buffer_);
} else {
this->data_ = rhs.data_;
}
}
CallArg& operator=(const CallArg& rhs) {
if (this == &rhs) {
return *this;
}
if (rhs.data_ == rhs.buffer_) {
memcpy(this->buffer_, rhs.buffer_, sizeof(rhs.buffer_));
this->data_ = (void*)(this->buffer_);
} else {
this->data_ = rhs.data_;
}
return *this;
}
#define ARG_PTR_DEFINE(Type, Name) \
Type* Name##Ptr() const { \
TORCH_INTERNAL_ASSERT(data_ == (void*)buffer_); \
return (Type*)data_; \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_PTR_DEFINE)
#undef ARG_PTR_DEFINE
private:
void* data_;
// Regarding a scalar value, CallArg uses &data_ (a void**) to store it. But
// the bit width of a pointer is 32 bits on a 32-bit platform, so it cannot
// store the scalar if the scalar's bit width is larger than 32 bits, such as
// double and long. Hence, we add an 8-byte buffer dedicated to storing the
// scalar value, regardless of whether its bit width is smaller or larger
// than 32 bits.
char buffer_[8] = {0}; // 64bits
};
class RegisterCodeGenList {
public:
TORCH_API static RegisterCodeGenList& GetInstance();
using StmtFactoryMethod = std::function<std::unique_ptr<CodeGen>(
StmtPtr stmt,
const std::vector<CodeGen::BufferArg>&,
at::Device device,
const std::string& kernel_func_name)>;
TORCH_API StmtFactoryMethod FindStmtFactoryMethod(const std::string& name);
RegisterCodeGenList(const RegisterCodeGenList&) = delete;
RegisterCodeGenList& operator=(const RegisterCodeGenList&) = delete;
private:
template <class CodeGenType>
friend class RegisterCodeGen;
RegisterCodeGenList() = default;
TORCH_API void AddStmtFactoryMethod(
const std::string& name,
const StmtFactoryMethod& stmt_factory_method);
std::unordered_map<std::string, StmtFactoryMethod> stmt_factory_methods_;
};
template <class CodeGenType>
class RegisterCodeGen {
public:
explicit RegisterCodeGen(const std::string& name) {
RegisterCodeGenList& codegen_list = RegisterCodeGenList::GetInstance();
codegen_list.AddStmtFactoryMethod(
name,
[](const StmtPtr& stmt,
const std::vector<CodeGen::BufferArg>& params,
at::Device device,
const std::string& kernel_func_name) {
return std::make_unique<CodeGenType>(
stmt, params, device, kernel_func_name);
});
}
};
TORCH_API std::unique_ptr<CodeGen> CreateCodeGen(
const std::string& name,
StmtPtr stmt,
const std::vector<CodeGen::BufferArg>& params,
at::Device device = at::kCPU,
const std::string& kernel_func_name = "func");
class TORCH_API GenericIntrinsicsExpander : public IRMutator {
protected:
ExprPtr mutate(const IntrinsicsPtr& v) override;
};
} // namespace torch::jit::tensorexpr
```
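`RegisterCodeGen` is meant to be instantiated as a static object so that a backend's factory is registered at load time and can later be looked up by name via `CreateCodeGen`. A hedged sketch of that pattern; `MyCodeGen` and the name "my_backend" are placeholders, not real backends:
```cpp
// Hypothetical backend registration sketch; MyCodeGen and "my_backend" are
// placeholders for illustration only.
#include <memory>
#include <torch/csrc/jit/tensorexpr/codegen.h>
using namespace torch::jit::tensorexpr;

class MyCodeGen : public CodeGen {
 public:
  using CodeGen::CodeGen; // reuse the (stmt, buffer_args, device, name) ctor
  void call(const std::vector<CallArg>& args) override { (void)args; }
  void call_raw(const std::vector<void*>& args) override { (void)args; }
};

// Static registration makes the factory discoverable by name.
static RegisterCodeGen<MyCodeGen> my_codegen_reg("my_backend");

std::unique_ptr<CodeGen> makeMyCodeGen(
    StmtPtr stmt,
    const std::vector<CodeGen::BufferArg>& args) {
  return CreateCodeGen("my_backend", std::move(stmt), args);
}
```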
=============================================================================================================================================
SOURCE CODE FILE: cpp_codegen.h
LINES: 1
SIZE: 2.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\cpp_codegen.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
namespace torch::jit::tensorexpr {
class CppVarNameRewriter;
// Generates C++ code from the IR.
//
// Vector operations are unrolled.
// For example:
// C[Ramp(0, 1, 3)] = A[Ramp(0, 2, 3)] + B[Ramp(0, 3, 3)];
// is unrolled into:
// C[0] = A[0] + B[0];
// C[1] = A[2] + B[3];
// C[2] = A[4] + B[6];
class TORCH_API CppPrinter : public IRPrinter {
public:
explicit CppPrinter(std::ostream* os);
~CppPrinter() override;
void printPrologue();
using IRPrinter::visit;
// Binary expressions.
void visit(const ModPtr&) override;
void visit(const MaxPtr&) override;
void visit(const MinPtr&) override;
// Conditional expressions.
void visit(const CompareSelectPtr&) override;
void visit(const IfThenElsePtr&) override;
// Tensor operations.
void visit(const AllocatePtr&) override;
void visit(const FreePtr&) override;
void visit(const LoadPtr&) override;
void visit(const StorePtr&) override;
// Casts.
void visit(const CastPtr&) override;
void visit(const BitCastPtr&) override;
// Calls.
void visit(const IntrinsicsPtr&) override;
void visit(const ExternalCallPtr&) override;
// Vars.
void visit(const LetPtr&) override;
void visit(const VarPtr&) override;
// Vector data types.
void visit(const RampPtr&) override;
void visit(const BroadcastPtr&) override;
private:
int lane_;
std::unordered_map<VarPtr, ExprPtr> vector_vars_;
};
class TORCH_API CppCodeGen : public CodeGen {
public:
CppCodeGen(
StmtPtr stmt,
const std::vector<BufferArg>& buffer_args,
at::Device device = at::kCPU,
const std::string& kernel_func_name = "func");
~CppCodeGen() override;
void call(const std::vector<CallArg>& args) override;
void call_raw(const std::vector<void*>& args) override;
template <typename... Ts>
void operator()(const Ts&... ts) {
call(std::vector<CallArg>({CallArg(ts)...}));
}
std::string getCodeText(const std::string& attr = "") override {
return oss_.str();
}
private:
void init();
std::ostream& os() {
return printer_->os();
}
std::ostringstream oss_;
std::unique_ptr<CppPrinter> printer_;
std::unique_ptr<CppVarNameRewriter> var_name_rewriter_;
};
} // namespace torch::jit::tensorexpr
```
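A small sketch of driving `CppCodeGen` directly: build a one-element store and print the generated C++. The buffer name and constants below are arbitrary.
```cpp
// Illustrative sketch only; buffer name and constants are arbitrary.
#include <iostream>
#include <torch/csrc/jit/tensorexpr/cpp_codegen.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
using namespace torch::jit::tensorexpr;

void printCppForTrivialStore() {
  BufHandle out("out", {1}, kFloat);
  // out[0] = 1.f + 2.f;
  StmtPtr store =
      Store::make(out, {IntImm::make(0)}, ExprHandle(1.f) + ExprHandle(2.f));
  CppCodeGen cg(store, {CodeGen::BufferArg(out)});
  std::cout << cg.getCodeText() << "\n";
}
```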
================================================================================================================================================
SOURCE CODE FILE: cpp_intrinsics.h
LINES: 1
SIZE: 0.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\cpp_intrinsics.h
ENCODING: utf-8
```h
#pragma once
namespace torch::jit::tensorexpr {
constexpr auto cpp_intrinsics_definition = R"(
namespace std {
template <typename T,
std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
T rsqrt(T v) {
return 1.0f / std::sqrt(v);
}
template <typename T,
std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
T frac(T v) {
T intpart;
return std::modf(v, &intpart);
}
template <typename From, typename To>
To bitcast(const From& v) {
assert(sizeof(To) == sizeof(From));
To res;
std::memcpy(&res, &v, sizeof(From));
return res;
}
} // namespace std
)";
} // namespace torch::jit::tensorexpr
```
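For reference, the snippet below is a standalone, host-side illustration of what the injected `frac` and `bitcast` helpers compute; it is not the generated code itself.
```cpp
// Host-side illustration of the helper semantics (not the generated code).
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  // frac(v): fractional part, via std::modf.
  float intpart = 0.f;
  float fractional = std::modf(2.75f, &intpart); // 0.75f, intpart == 2.f

  // bitcast<From, To>: reinterpret the bytes; sizes must match.
  static_assert(sizeof(std::uint32_t) == sizeof(float), "size mismatch");
  float f = 1.0f;
  std::uint32_t bits = 0;
  std::memcpy(&bits, &f, sizeof(bits)); // bits == 0x3F800000 for 1.0f

  return (fractional == 0.75f && bits == 0x3F800000u) ? 0 : 1;
}
```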
==============================================================================================================================================
SOURCE CODE FILE: cuda_codegen.h
LINES: 1
SIZE: 8.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\cuda_codegen.h
ENCODING: utf-8
```h
#pragma once
#include <unordered_set>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/nvrtc_stub/ATenNVRTC.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <c10/cuda/CUDAGuard.h>
#include <torch/csrc/jit/resource_guard.h>
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/eval.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
#include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
namespace torch::jit::tensorexpr {
// A class that analyzes the given program relevant for Cuda backends.
class CudaAnalysis : public IRVisitor {
public:
CudaAnalysis() {
gpu_block_extents_ = {alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
gpu_thread_extents_ = {
alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
}
bool is_buf_store_target(const BufPtr& buf) const {
return store_targets_.count(buf) > 0;
}
const std::unordered_set<VarPtr>& thread_local_bufs() const {
return thread_local_bufs_;
}
const std::unordered_set<VarPtr>& cross_block_bufs() const {
return cross_block_bufs_;
}
const std::vector<ExprPtr>& gpu_block_extents() const {
return gpu_block_extents_;
}
const std::vector<ExprPtr>& gpu_thread_extents() const {
return gpu_thread_extents_;
}
private:
void visit(const StorePtr& v) override {
store_targets_.insert(v->buf());
}
void visit(const AllocatePtr& v) override;
void visit(const FreePtr& v) override;
void visit(const PlacementAllocatePtr& v) override;
void visit(const ForPtr& v) override;
std::unordered_set<BufPtr> store_targets_;
std::unordered_set<VarPtr> thread_local_bufs_;
std::unordered_set<VarPtr> cross_block_bufs_;
std::vector<ExprPtr> gpu_block_extents_;
std::vector<ExprPtr> gpu_thread_extents_;
};
// An IRMutator that replaces binding loop options with Cuda metavars, and masks
// statement blocks which should execute with less reach than the launch
// parameter extent.
//
// We do this by segmenting each block into chunks which should have the same
// execution parameters; then, if those params differ from the max, we mask each dim.
class GPUMetaVarRewriter : public IRMutator {
public:
explicit GPUMetaVarRewriter(const CudaAnalysis* cuda_analysis)
: cuda_analysis_(cuda_analysis) {
gpu_block_vars_ = {
alloc<Var>("blockIdx.x", kInt),
alloc<Var>("blockIdx.y", kInt),
alloc<Var>("blockIdx.z", kInt)};
gpu_thread_vars_ = {
alloc<Var>("threadIdx.x", kInt),
alloc<Var>("threadIdx.y", kInt),
alloc<Var>("threadIdx.z", kInt)};
current_block_reach_ = {
alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
current_thread_reach_ = {
alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
}
StmtPtr mutate(const ForPtr& v) override;
StmtPtr mutate(const BlockPtr& v) override;
const std::vector<VarPtr>& gpu_block_vars() const {
return gpu_block_vars_;
}
const std::vector<VarPtr>& gpu_thread_vars() const {
return gpu_thread_vars_;
}
const std::vector<ExprPtr>& gpu_block_extents() const {
return cuda_analysis_->gpu_block_extents();
}
const std::vector<ExprPtr>& gpu_thread_extents() const {
return cuda_analysis_->gpu_thread_extents();
}
private:
// When processing a block, stores the contents of each sub-segment.
class Segment {
public:
void reset(bool mask) {
stmts_.clear();
mask_ = mask;
}
bool empty() const {
return stmts_.empty();
}
std::vector<StmtPtr>& stmts() {
return stmts_;
}
bool mask() {
return mask_;
}
private:
std::vector<StmtPtr> stmts_;
bool mask_{true};
};
// Returns true if the current execution scope is equivalent to the launch
// parameters.
bool isFullExtent();
std::vector<VarPtr> gpu_block_vars_;
std::vector<VarPtr> gpu_thread_vars_;
std::vector<ExprPtr> current_block_reach_;
std::vector<ExprPtr> current_thread_reach_;
const CudaAnalysis* cuda_analysis_;
};
// A class that overrides the underlying IRPrinter to produce Cuda C.
class CudaPrinter : public IRPrinter {
public:
explicit CudaPrinter(
std::ostream* os,
const CudaAnalysis* cuda_analysis,
bool has_random)
: IRPrinter(*os), cuda_analysis_(cuda_analysis) {
if (has_random) {
rand_func_ = alloc<Var>("rand", kHandle);
}
}
void visit(const CastPtr& v) override;
void visit(const IntrinsicsPtr& v) override;
void visit(const ForPtr& v) override;
void visit(const LoadPtr& v) override;
void visit(const StorePtr& v) override;
void visit(const AtomicAddPtr& v) override;
void visit(const MaxPtr& v) override;
void visit(const MinPtr& v) override;
void visit(const IfThenElsePtr& v) override;
void visit(const BlockPtr& v) override;
void visit(const AllocatePtr& v) override;
void visit(const FreePtr& v) override;
void visit(const LetPtr& v) override;
void visit(const ExternalCallPtr& v) override;
VarPtr rand_func() const {
return rand_func_;
}
std::string dtypeToCppString(const Dtype& dtype) override;
using IRPrinter::name_manager;
using IRPrinter::visit;
private:
VarPtr rand_func_;
const CudaAnalysis* cuda_analysis_;
void print_flat_alloc(const AllocatePtr& alloc);
};
// Construct Cuda C from the buffer and tensor input, and invoke the
// kernel when real arguments are provided.
class TORCH_CUDA_CU_API CudaCodeGen : public CodeGen {
public:
template <typename... Ts>
CudaCodeGen(StmtPtr stmt, Ts... ts)
: CodeGen(
stmt,
std::vector<BufferArg>({BufferArg(ts)...}),
at::Device(at::kCUDA, at::cuda::current_device())) {
Initialize();
}
CudaCodeGen(
StmtPtr stmt,
const std::vector<BufferArg>& buffer_args,
at::Device device = at::Device(at::kCUDA, at::cuda::current_device()),
const std::string& kernel_func_name = "func")
: CodeGen(std::move(stmt), buffer_args, device, kernel_func_name) {
Initialize();
}
~CudaCodeGen() override;
void call(const std::vector<CallArg>& args) override;
void call_raw(const std::vector<void*>& args) override;
void call_with_numel(void** args, int64_t numel) override;
template <typename... Ts>
void operator()(const Ts&... ts) {
call(std::vector<CallArg>({CallArg(ts)...}));
}
at::Tensor empty_strided(
c10::IntArrayRef size,
c10::IntArrayRef stride,
std::optional<c10::ScalarType> dtype_opt,
std::optional<c10::Layout> layout_opt,
std::optional<c10::Device> device_opt,
std::optional<bool> pin_memory_opt) override;
const std::vector<ExprPtr>& gpu_block_extents() const {
return cuda_analysis_->gpu_block_extents();
}
const std::vector<ExprPtr>& gpu_thread_extents() const {
return cuda_analysis_->gpu_thread_extents();
}
std::string getCodeText(const std::string& attr = "") override {
return oss_.str();
}
private:
void Initialize();
void CompileToNVRTC(const std::string& code, const std::string& func_name);
UniqueNameManager* name_manager() {
if (!printer_) {
throw std::runtime_error("Null IRPrinter is not expected");
}
return printer_->name_manager();
}
std::ostream& os() {
return printer_->os();
}
std::ostringstream oss_;
std::unique_ptr<CudaPrinter> printer_;
std::unique_ptr<CudaAnalysis> cuda_analysis_;
std::unique_ptr<GPUMetaVarRewriter> metavar_rewriter_;
std::unordered_set<std::string> taken_func_names;
std::mutex eval_lock_;
CUfunction function_{nullptr};
bool has_random_ = false;
int thread_block_size_ = -1;
std::vector<bool> arg_pos_in_extents_;
#ifdef TORCH_ENABLE_LLVM
std::vector<ExprEval<LLVMCodeGen>> block_extents_eval_;
std::vector<ExprEval<LLVMCodeGen>> thread_extents_eval_;
#else
std::vector<ExprEval<SimpleIREvaluator>> block_extents_eval_;
std::vector<ExprEval<SimpleIREvaluator>> thread_extents_eval_;
#endif
std::string GetUniqueFuncName(const std::string& func_prefix);
};
} // namespace torch::jit::tensorexpr
```
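A hedged sketch of how the pieces fit together: constructing a `CudaCodeGen` runs `CudaAnalysis`, `GPUMetaVarRewriter`, and NVRTC compilation inside `Initialize()`, after which the kernel text and the chosen launch extents can be inspected. Assumes a CUDA build and that `stmt`, `a`, and `b` come from the usual lowering:
```cpp
// Illustrative sketch only; requires a CUDA build and device. `stmt`, `a`,
// and `b` are assumed to come from the usual tensorexpr lowering.
#include <iostream>
#include <torch/csrc/jit/tensorexpr/cuda_codegen.h>
using namespace torch::jit::tensorexpr;

void inspectCudaKernel(StmtPtr stmt, const BufHandle& a, const BufHandle& b) {
  CudaCodeGen cg(
      std::move(stmt), {CodeGen::BufferArg(a), CodeGen::BufferArg(b)});
  // NVRTC-compiled source produced during Initialize().
  std::cout << cg.getCodeText() << "\n";
  // One ExprPtr per grid / block dimension, as computed by CudaAnalysis.
  const std::vector<ExprPtr>& grid = cg.gpu_block_extents();
  const std::vector<ExprPtr>& block = cg.gpu_thread_extents();
  (void)grid;
  (void)block;
}
```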
=============================================================================================================================================
SOURCE CODE FILE: cuda_random.h
LINES: 1
SIZE: 2.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\cuda_random.h
ENCODING: utf-8
```h
#pragma once
namespace torch::jit::tensorexpr {
constexpr auto philox_random_string = R"(
class Philox {
public:
__device__ inline Philox(unsigned long long seed,
unsigned long long subsequence,
unsigned long long offset) {
key.x = (unsigned int)seed;
key.y = (unsigned int)(seed >> 32);
counter = make_uint4(0, 0, 0, 0);
counter.z = (unsigned int)(subsequence);
counter.w = (unsigned int)(subsequence >> 32);
STATE = 0;
incr_n(offset / 4);
}
__device__ inline unsigned long operator()() {
if(STATE == 0) {
uint4 counter_ = counter;
uint2 key_ = key;
for(int i = 0; i < 9; i++) {
counter_ = single_round(counter_, key_);
key_.x += (kPhilox10A); key_.y += (kPhilox10B);
}
output = single_round(counter_, key_);
incr();
}
unsigned long ret;
switch(STATE) {
case 0: ret = output.x; break;
case 1: ret = output.y; break;
case 2: ret = output.z; break;
case 3: ret = output.w; break;
}
STATE = (STATE + 1) % 4;
return ret;
}
private:
uint4 counter;
uint4 output;
uint2 key;
unsigned int STATE;
__device__ inline void incr_n(unsigned long long n) {
unsigned int nlo = (unsigned int)(n);
unsigned int nhi = (unsigned int)(n >> 32);
counter.x += nlo;
if (counter.x < nlo)
nhi++;
counter.y += nhi;
if (nhi <= counter.y)
return;
if (++counter.z)
return;
++counter.w;
}
__device__ inline void incr() {
if (++counter.x)
return;
if (++counter.y)
return;
if (++counter.z)
return;
++counter.w;
}
__device__ unsigned int mulhilo32(unsigned int a, unsigned int b,
unsigned int *result_high) {
*result_high = __umulhi(a, b);
return a*b;
}
__device__ inline uint4 single_round(uint4 ctr, uint2 key) {
unsigned int hi0;
unsigned int hi1;
unsigned int lo0 = mulhilo32(kPhiloxSA, ctr.x, &hi0);
unsigned int lo1 = mulhilo32(kPhiloxSB, ctr.z, &hi1);
uint4 ret = {hi1 ^ ctr.y ^ key.x, lo1, hi0 ^ ctr.w ^ key.y, lo0};
return ret;
}
static const unsigned long kPhilox10A = 0x9E3779B9;
static const unsigned long kPhilox10B = 0xBB67AE85;
static const unsigned long kPhiloxSA = 0xD2511F53;
static const unsigned long kPhiloxSB = 0xCD9E8D57;
};
// Inverse of 2^32.
#define M_RAN_INVM32 2.3283064e-10f
__device__ __inline__ float Uint32ToFloat(unsigned int x) {
return x * M_RAN_INVM32;
}
)";
} // namespace torch::jit::tensorexpr
```
======================================================================================================================================
SOURCE CODE FILE: eval.h
LINES: 1
SIZE: 9.95 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\eval.h
ENCODING: utf-8
```h
#pragma once
#include <cmath>
#include <cstring>
#include <utility>
#include <vector>
#include <c10/macros/Macros.h>
#include <c10/util/Logging.h>
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/exceptions.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
#include <torch/csrc/jit/tensorexpr/types.h>
#include <torch/csrc/jit/tensorexpr/var_substitutor.h>
namespace torch::jit::tensorexpr {
class InterpValue {
public:
InterpValue() : dtype_(kInt) {
Intvalues.push_back(0);
}
template <typename T>
InterpValue(Dtype dtype, T v) : dtype_(dtype) {
#define TYPE_CASE(Type, Name) \
if (dtype == k##Name) { \
Name##values.push_back(v); \
return; \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE)
#undef TYPE_CASE
throw unsupported_dtype();
}
#define VALUE_CTOR(Type, Name) \
InterpValue(Type v) : dtype_(k##Name) { \
Name##values.push_back(v); \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_CTOR)
#undef VALUE_CTOR
explicit InterpValue(c10::quint8 v) : dtype_(kQUInt8) {
QUInt8values.emplace_back(v.val_);
}
explicit InterpValue(c10::qint8 v) : dtype_(kQInt8) {
QInt8values.emplace_back(v.val_);
}
#define VALUE_VEC_CTOR(Type, Name) \
InterpValue(const std::vector<Type>& v) \
: dtype_(Dtype(k##Name, v.size())), Name##values(v) {}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_VEC_CTOR)
VALUE_VEC_CTOR(c10::quint8, QUInt8)
VALUE_VEC_CTOR(c10::qint8, QInt8)
#undef VALUE_VEC_CTOR
template <typename T>
T as() const;
template <typename T>
const std::vector<T>& as_vec() const;
int64_t intValue() const;
Dtype dtype() const {
return dtype_;
}
private:
Dtype dtype_;
#define VALUE_STORAGE(Type, Name) std::vector<Type> Name##values;
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_STORAGE)
VALUE_STORAGE(c10::qint8, QInt8)
VALUE_STORAGE(c10::quint8, QUInt8)
#undef VALUE_STORAGE
void* ptr{nullptr};
};
#define VALUE_AS_DISPATCH(Type, Name) \
template <> \
inline Type InterpValue::as<Type>() const { \
if (dtype_ != k##Name) { \
throw unsupported_dtype(); \
} \
return Name##values[0]; \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_DISPATCH)
VALUE_AS_DISPATCH(c10::quint8, QUInt8)
VALUE_AS_DISPATCH(c10::qint8, QInt8)
#undef VALUE_AS_DISPATCH
#define VALUE_AS_VEC_DISPATCH(Type, Name) \
template <> \
inline const std::vector<Type>& InterpValue::as_vec<Type>() const { \
if (dtype_.scalar_type() != ScalarType::Name) { \
throw unsupported_dtype(); \
} \
return Name##values; \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_VEC_DISPATCH)
VALUE_AS_VEC_DISPATCH(c10::quint8, QUInt8)
VALUE_AS_VEC_DISPATCH(c10::qint8, QInt8)
#undef VALUE_AS_VEC_DISPATCH
template <typename Type>
auto underlyingValue(Type x) {
return x;
}
template <>
inline auto underlyingValue<c10::quint8>(c10::quint8 x) {
return x.val_;
}
template <>
inline auto underlyingValue<c10::qint8>(c10::qint8 x) {
return x.val_;
}
template <typename To, typename From>
To raw_bitcast(const From& src) {
TORCH_CHECK(sizeof(To) == sizeof(From), "Invalid bitcast invocation");
To storage;
std::memcpy(&storage, &src, sizeof(To));
return reinterpret_cast<To&>(storage);
}
class SimpleIREvaluatorImpl;
class TORCH_API SimpleIREvaluator : public CodeGen {
public:
SimpleIREvaluator(
StmtPtr stmt,
const std::vector<BufferArg>& buffer_args,
at::Device device = at::kCPU,
const std::string& kernel_func_name = "func");
~SimpleIREvaluator() override;
void call(const std::vector<CallArg>& args) override;
void call_raw(const std::vector<void*>& args) override;
template <typename... Ts>
void operator()(const Ts&... ts) {
std::vector<CallArg> args({CallArg(ts)...});
call(args);
}
void bindVar(const VarPtr& v, const ExprPtr& e);
InterpValue value() const;
private:
void bindArg(const BufferArg& buf, void* data);
void expand_intrinsics() {
GenericIntrinsicsExpander intrinsics_expander;
apply_mutator(&intrinsics_expander);
}
std::unique_ptr<SimpleIREvaluatorImpl> impl_;
};
template <class CodeGenType>
class ExprEval {
public:
using BufferArg = CodeGen::BufferArg;
using CallArg = CodeGen::CallArg;
template <typename... Ts>
ExprEval(const ExprHandle& expr, Ts... ts)
: ExprEval(expr, {BufferArg(ts)...}) {}
ExprEval(const ExprHandle& expr, const std::vector<BufferArg>& buffer_args)
: dtype_(expr.dtype()) {
std::vector<BufferArg> buffer_args_extended = buffer_args;
BufHandle ret_buf("ret_val", {1}, dtype_);
std::vector<ExprHandle> indices;
ExprHandle zero = IntImm::make(0);
indices.reserve(ret_buf.ndim());
for (size_t i = 0; i < ret_buf.ndim(); i++) {
indices.push_back(zero);
}
StmtPtr store_stmt = Store::make(ret_buf, indices, expr);
buffer_args_extended.emplace_back(ret_buf);
codegen_.reset(new CodeGenType(store_stmt, buffer_args_extended));
}
template <typename... Ts>
void operator()(Ts... ts) {
call(ts...);
}
void operator()(const std::vector<CallArg>& call_args) {
call(call_args);
}
void bindVar(VarPtr v, ExprPtr e) {
codegen_->bindVar(v, e);
}
void bindVar(const VarHandle& v, const ExprHandle& e) {
codegen_->bindVar(v.node(), e.node());
}
template <typename... Ts>
void call(Ts... ts) {
call({CallArg(ts)...});
}
void call(const std::vector<CallArg>& call_args) {
std::vector<CallArg> call_args_extended = call_args;
switch (dtype_.scalar_type()) {
#define TYPE_CASE(Type, Name) \
case ScalarType::Name: { \
std::vector<Type> ret_val_arg(1); \
call_args_extended.emplace_back(ret_val_arg); \
codegen_->call(call_args_extended); \
ret_value_ = InterpValue(ret_val_arg[0]); \
} break;
AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE);
TYPE_CASE(c10::quint8, QUInt8);
TYPE_CASE(c10::qint8, QInt8);
#undef TYPE_CASE
case ScalarType::Bool: {
std::vector<unsigned char> ret_val_arg(1);
call_args_extended.emplace_back(ret_val_arg.data());
codegen_->call(call_args_extended);
ret_value_ = InterpValue((bool)ret_val_arg[0]);
} break;
default:
throw unsupported_dtype();
}
}
void call_raw(const std::vector<void*>& args) {
std::vector<void*> args_extended = args;
switch (dtype_.scalar_type()) {
#define TYPE_CASE(Type, Name) \
case ScalarType::Name: { \
std::vector<Type> ret_val_arg(1); \
args_extended.push_back(ret_val_arg.data()); \
codegen_->call_raw(args_extended); \
ret_value_ = InterpValue(ret_val_arg[0]); \
} break;
AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE);
TYPE_CASE(c10::quint8, QUInt8);
TYPE_CASE(c10::qint8, QInt8);
#undef TYPE_CASE
case ScalarType::Bool: {
std::vector<unsigned char> ret_val_arg(1);
args_extended.push_back(ret_val_arg.data());
codegen_->call_raw(args_extended);
ret_value_ = InterpValue((bool)ret_val_arg[0]);
} break;
default:
throw unsupported_dtype();
}
}
template <typename T>
T value(const std::vector<void*>& args) {
call_raw(args);
return ret_value_.as<T>();
}
template <typename T, typename... Ts>
T value(Ts... ts) {
call(std::forward<Ts>(ts)...);
return ret_value_.as<T>();
}
Dtype dtype() {
return dtype_;
}
private:
Dtype dtype_;
std::unique_ptr<CodeGenType> codegen_;
InterpValue ret_value_;
};
// Evaluates the given expression and returns an int64_t value if the result of
// the given expression is int64_t.
std::optional<int64_t> evalInt(ExprPtr e);
// Substitutes the given vars with their corresponding expressions in the input
// expression.
inline ExprPtr Substitute(const ExprPtr& expr, const VarMapping& var_mapping) {
VarSubMutator var_sub(var_mapping);
return expr->accept_mutator(&var_sub);
}
// Substitutes the given vars with their corresponding expressions in the input
// statement.
inline StmtPtr Substitute(const StmtPtr& stmt, const VarMapping& var_mapping) {
VarSubMutator var_sub(var_mapping);
return stmt->accept_mutator(&var_sub);
}
// Creates a clone of the input expression and substitutes the given vars with
// their corresponding expressions in the clone.
// NOTE: This works because cloning reuses variables and does not create new
// ones, and `VarMapping` input has variables as the key.
inline ExprPtr SubstituteInClone(
const ExprPtr& expr,
const VarMapping& var_mapping) {
VarSubMutator var_sub(var_mapping);
return Expr::clone(expr)->accept_mutator(&var_sub);
}
// Creates a clone of the input statement and substitutes the given vars with
// their corresponding expressions in the clone.
// NOTE: This works because cloning reuses variables and does not create new
// ones, and `VarMapping` input has variables as the key.
inline StmtPtr SubstituteInClone(
const StmtPtr& stmt,
const VarMapping& var_mapping) {
VarSubMutator var_sub(var_mapping);
return Stmt::clone(stmt)->accept_mutator(&var_sub);
}
} // namespace torch::jit::tensorexpr
```
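`ExprEval` wraps the expression in a one-element store and appends a hidden return buffer, so scalar expressions can be evaluated directly. A minimal sketch with the interpreter backend:
```cpp
// Illustrative sketch only: evaluate (2 + 3) * 4 with the interpreter backend.
#include <torch/csrc/jit/tensorexpr/eval.h>
using namespace torch::jit::tensorexpr;

int evalSmallExpr() {
  ExprHandle e = (IntImm::make(2) + IntImm::make(3)) * IntImm::make(4);
  ExprEval<SimpleIREvaluator> eval(e);
  return eval.value<int>(); // expected: 20
}
```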
============================================================================================================================================
SOURCE CODE FILE: exceptions.h
LINES: 1
SIZE: 3.20 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\exceptions.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <stdexcept>
// Forward declarations of types
namespace torch::jit::tensorexpr {
class Expr;
class Stmt;
} // namespace torch::jit::tensorexpr
// Forward declarations of functions
namespace std {
TORCH_API std::string to_string(const torch::jit::tensorexpr::ExprPtr&);
TORCH_API std::string to_string(const torch::jit::tensorexpr::StmtPtr&);
} // namespace std
namespace torch::jit::tensorexpr {
class unsupported_dtype : public std::runtime_error {
public:
explicit unsupported_dtype() : std::runtime_error("UNSUPPORTED DTYPE") {}
explicit unsupported_dtype(const std::string& err)
: std::runtime_error("UNSUPPORTED DTYPE: " + err) {}
};
class out_of_range_index : public std::runtime_error {
public:
explicit out_of_range_index() : std::runtime_error("OUT OF RANGE INDEX") {}
explicit out_of_range_index(const std::string& err)
: std::runtime_error("OUT OF RANGE INDEX: " + err) {}
};
class unimplemented_lowering : public std::runtime_error {
public:
explicit unimplemented_lowering()
: std::runtime_error("UNIMPLEMENTED LOWERING") {}
explicit unimplemented_lowering(const ExprPtr& expr)
: std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(expr)) {}
explicit unimplemented_lowering(const StmtPtr& stmt)
: std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(stmt)) {}
};
class malformed_input : public std::runtime_error {
public:
explicit malformed_input() : std::runtime_error("MALFORMED INPUT") {}
explicit malformed_input(const std::string& err)
: std::runtime_error("MALFORMED INPUT: " + err) {}
explicit malformed_input(const ExprPtr& expr)
: std::runtime_error("MALFORMED INPUT: " + std::to_string(expr)) {}
explicit malformed_input(const std::string& err, const ExprPtr& expr)
: std::runtime_error(
"MALFORMED INPUT: " + err + " - " + std::to_string(expr)) {}
explicit malformed_input(const StmtPtr& stmt)
: std::runtime_error("MALFORMED INPUT: " + std::to_string(stmt)) {}
explicit malformed_input(const std::string& err, const StmtPtr& stmt)
: std::runtime_error(
"MALFORMED INPUT: " + err + " - " + std::to_string(stmt)) {}
};
class malformed_ir : public std::runtime_error {
public:
explicit malformed_ir() : std::runtime_error("MALFORMED IR") {}
explicit malformed_ir(const std::string& err)
: std::runtime_error("MALFORMED IR: " + err) {}
explicit malformed_ir(const ExprPtr& expr)
: std::runtime_error("MALFORMED IR: " + std::to_string(expr)) {}
explicit malformed_ir(const std::string& err, const ExprPtr& expr)
: std::runtime_error(
"MALFORMED IR: " + err + " - " + std::to_string(expr)) {}
explicit malformed_ir(const StmtPtr& stmt)
: std::runtime_error("MALFORMED IR: " + std::to_string(stmt)) {}
explicit malformed_ir(const std::string& err, const StmtPtr& stmt)
: std::runtime_error(
"MALFORMED IR: " + err + " - " + std::to_string(stmt)) {}
};
TORCH_API std::string buildErrorMessage(const std::string& s = "");
} // namespace torch::jit::tensorexpr
```
======================================================================================================================================
SOURCE CODE FILE: expr.h
LINES: 1
SIZE: 14.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\expr.h
ENCODING: utf-8
```h
/**
* This file implements the core classes for Tensor Expressions.
*
* The structure of the expressions is inspired by Halide/TVM IR.
*/
#pragma once
#include <c10/core/MemoryFormat.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/types.h>
#include <optional>
#include <utility>
namespace torch::jit::tensorexpr {
enum IRNodeType {
kPrimitive,
kAdd,
kSub,
kMul,
kDiv,
kMod,
kMax,
kMin,
kAnd,
kOr,
kLshift,
kRshift,
kXor,
kCompareSelect,
kCast,
kBitCast,
kOther,
};
// The common base of all expression nodes.
class TORCH_API Expr : public std::enable_shared_from_this<Expr> {
public:
explicit Expr(Dtype dtype, IRNodeType expr_type = kOther)
: dtype_(dtype), expr_type_(expr_type) {}
virtual ~Expr() = default;
Dtype dtype() const {
return dtype_;
}
virtual void accept(IRVisitor* visitor) = 0;
virtual ExprPtr accept_mutator(IRMutator* mutator) = 0;
IRNodeType expr_type() const {
return expr_type_;
}
// Is this a fixed (constant) immediate value.
virtual bool isConstant() const {
return false;
}
void set_dtype(Dtype dtype) {
dtype_ = dtype;
}
/*
* Make a deep copy of the given expression.
*
 * All sub-expressions inside the given expression are also cloned. Note
* that the variables are not deep-copied since they are immutable.
*/
static ExprPtr clone(const ExprPtr& s);
protected:
std::shared_ptr<Expr> getptr() {
return shared_from_this();
}
private:
Dtype dtype_;
IRNodeType expr_type_;
};
// A CRTP pattern that accepts visitors on behalf of child classes
// and dispatches back to the child type.
template <class Op, class Base = Expr>
class ExprNode : public Base {
public:
using ExprNodeBase = ExprNode<Op>;
void accept(IRVisitor* visitor) override {
visitor->visit(static_to<Op>(Base::getptr()));
}
ExprPtr accept_mutator(IRMutator* mutator) override;
// pass the constructor to the base class
using Base::Base;
};
// A wrapper object around the underlying ExprNode.
// Also serves as the primary way to build and operate on other expressions.
class TORCH_API ExprHandle {
public:
ExprHandle() = default;
explicit ExprHandle(ExprPtr node) : base_expr_node_(std::move(node)) {}
ExprPtr node() {
return base_expr_node_;
}
ExprPtr node() const {
return base_expr_node_;
}
bool empty() const {
return base_expr_node_ == nullptr;
}
#define IMM_EXPR_DECLARE(Type, Name) ExprHandle(Type v);
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_EXPR_DECLARE)
#undef IMM_EXPR_DECLARE
template <class Op>
NodePtr<Op> AsNode() {
return to<Op>(this->node());
}
template <class Op>
NodePtr<Op> AsNode() const {
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
return const_cast<ExprHandle*>(this)->AsNode<Op>();
}
Dtype dtype() const {
return node()->dtype();
}
// Handling the math operators.
ExprHandle operator+(const ExprHandle& other) const;
ExprHandle operator-(const ExprHandle& other) const;
ExprHandle operator*(const ExprHandle& other) const;
ExprHandle operator/(const ExprHandle& other) const;
ExprHandle operator%(const ExprHandle& other) const;
ExprHandle operator==(const ExprHandle& other) const;
ExprHandle operator!=(const ExprHandle& other) const;
ExprHandle operator>(const ExprHandle& other) const;
ExprHandle operator>=(const ExprHandle& other) const;
ExprHandle operator<(const ExprHandle& other) const;
ExprHandle operator<=(const ExprHandle& other) const;
ExprHandle operator&(const ExprHandle& other) const;
ExprHandle operator|(const ExprHandle& other) const;
ExprHandle operator&&(const ExprHandle& other) const;
ExprHandle operator||(const ExprHandle& other) const;
ExprHandle operator^(const ExprHandle& other) const;
ExprHandle operator<<(const ExprHandle& other) const;
ExprHandle operator>>(const ExprHandle& other) const;
private:
ExprPtr base_expr_node_ = nullptr;
};
// The underlying representation node of a Var.
// Currently, each Var object represents a unique variable, even though the
// names might be the same. We should consider adding a unique_name as well.
class TORCH_API Var : public ExprNode<Var> {
public:
static ExprHandle make(const std::string& name_hint, Dtype dtype) {
return ExprHandle(alloc<Var>(name_hint, dtype));
}
static ExprHandle make(Dtype dtype) {
return ExprHandle(alloc<Var>("", dtype));
}
// TODO: unique_name
const std::string& name_hint() const {
return name_hint_;
}
void set_name_hint(const std::string& name) {
name_hint_ = name;
}
void set_name_hint(std::string&& name) {
name_hint_ = std::move(name);
}
Var(std::string name_hint, Dtype dtype)
: ExprNodeBase(dtype, kPrimitive), name_hint_(std::move(name_hint)) {}
private:
std::string name_hint_;
};
TORCH_API std::vector<ExprPtr> make_contiguous_strides(
const std::vector<ExprHandle>& dims);
TORCH_API std::vector<ExprPtr> make_channels_last_strides(
const std::vector<ExprHandle>& dims);
class TORCH_API Buf : public ExprNode<Buf> {
public:
static BufHandle make(const std::vector<ExprHandle>& dims, Dtype dtype);
static BufHandle make(
const std::string& name_hint,
const std::vector<ExprHandle>& dims,
const std::vector<ExprHandle>& strides,
Dtype dtype);
static BufHandle make(
const std::string& name_hint,
const std::vector<ExprHandle>& dims,
Dtype dtype,
std::optional<ExprHandle> initializer = std::nullopt,
const std::optional<std::vector<ExprHandle>>& strides = std::nullopt,
std::optional<ExprHandle> qscale = std::nullopt,
std::optional<ExprHandle> qzero = std::nullopt);
// TODO: unique_name
VarPtr base_handle() const {
return base_handle_;
}
void set_base_handle(VarPtr base_handle) {
base_handle_ = std::move(base_handle);
}
const std::string& name_hint() const {
return base_handle_->name_hint();
}
void set_name_hint(const std::string& name_hint) {
base_handle_->set_name_hint(name_hint);
}
Buf(const std::string& name_hint,
const std::vector<ExprPtr>& dims,
Dtype dtype,
ExprPtr initializer = nullptr,
std::optional<std::vector<ExprPtr>> strides = std::nullopt,
ExprPtr qscale = nullptr,
ExprPtr qzero = nullptr)
: Buf(alloc<Var>(name_hint, kHandle),
dims,
dtype,
std::move(initializer),
std::move(strides),
std::move(qscale),
std::move(qzero)) {}
Buf(const VarPtr& var,
std::vector<ExprPtr> dims,
Dtype dtype,
ExprPtr initializer = nullptr,
std::optional<std::vector<ExprPtr>> strides = std::nullopt,
ExprPtr qscale = nullptr,
ExprPtr qzero = nullptr);
size_t ndim() const {
return dims_.size();
}
ExprPtr dim(size_t index) const {
if (index >= ndim()) {
throw out_of_range_index();
}
return dims_[index];
}
std::vector<ExprPtr> dims() const {
return dims_;
}
void set_dims(std::vector<ExprPtr> dims) {
dims_ = std::move(dims);
}
std::vector<ExprPtr> strides() const {
return strides_;
}
void set_strides(std::vector<ExprPtr> strides) {
strides_ = std::move(strides);
}
ExprPtr initializer() const {
return initializer_;
}
ExprPtr qzero() const {
return qzero_;
}
ExprPtr qscale() const {
return qscale_;
}
void set_qzero(ExprPtr qzero) {
qzero_ = std::move(qzero);
}
void set_qscale(ExprPtr qscale) {
qscale_ = std::move(qscale);
}
bool hasConstantDims() const {
for (const auto& d : dims_) {
if (!d->isConstant()) {
return false;
}
}
return true;
}
bool is_contiguous(
at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const;
// The channels-last 1d can benefit the performance of some operators like
// conv1d. But the MemoryFormat enum has not covered this layout yet. Hence,
// we abstract a dedicated function to check channels-last 1d contiguous.
//
// Channels-last 1d:
// dims: n c l
// strides(nlc): c*l 1 c
bool is_channels_last_1d_contiguous() const {
if (dims_.size() != 3) {
return false;
}
return is_stride_one(1) && is_cont_with(2, 1) && is_cont_with(0, 2);
}
private:
bool is_cont_with(int cur_dim, int adjacent_dim) const;
bool is_stride_one(int cur_dim) const;
VarPtr base_handle_;
std::vector<ExprPtr> dims_;
std::vector<ExprPtr> strides_;
ExprPtr initializer_;
// qscale_ and qzero_ are used only for quantized dtypes Bufs: kQUInt8, kQInt8
ExprPtr qscale_;
ExprPtr qzero_;
};
class TORCH_API BufHandle : public ExprHandle {
public:
BufHandle(
const std::string& name_hint,
const std::vector<ExprHandle>& dims,
Dtype dtype)
: ExprHandle(Buf::make(name_hint, dims, dtype)) {}
BufHandle(
const std::string& name_hint,
const std::vector<ExprHandle>& dims,
const std::vector<ExprHandle>& strides,
Dtype dtype)
: ExprHandle(Buf::make(name_hint, dims, strides, dtype)) {}
BufHandle(const std::vector<ExprHandle>& dims, Dtype dtype)
: ExprHandle(Buf::make("_", dims, dtype)) {}
explicit BufHandle(Dtype dtype) : ExprHandle(Buf::make("_", {}, dtype)) {}
explicit BufHandle(BufPtr node) : ExprHandle(std::move(node)) {}
BufPtr node() const {
return static_to<Buf>(ExprHandle::node());
}
BufPtr node() {
return static_to<Buf>(ExprHandle::node());
}
template <typename... Ts>
inline ExprHandle load(const Ts&... ts) const;
template <typename T>
inline ExprHandle load(const std::vector<T>& args) const;
inline ExprHandle load(const std::vector<ExprHandle>& args) const;
StorePtr store(const std::vector<ExprHandle>& args, const ExprHandle& val)
const;
bool operator==(const BufHandle& other) const {
return this->node() == other.node();
}
bool operator!=(const BufHandle& other) const {
return !(*this == other);
}
const std::string& name_hint() const {
return this->node()->name_hint();
}
bool empty() const {
return (this->node() == nullptr);
}
size_t ndim() const {
return node()->ndim();
}
std::vector<ExprHandle> dims() const;
ExprHandle dim(size_t index) const {
return ExprHandle(node()->dim(index));
}
bool is_contiguous(
at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const {
return node()->is_contiguous(memory_format);
}
bool is_channels_last_1d_contiguous() const {
return node()->is_channels_last_1d_contiguous();
}
};
// An expression to construct the underlying variable node.
// Note: do not store any info here, since it is often possible to slice this
// object. For example: VarHandle x('x'); ExprHandle x2 = x;
class TORCH_API VarHandle : public ExprHandle {
public:
// Creates an empty VarHandle whose base Var is set to nullptr.
VarHandle() = default;
explicit VarHandle(Dtype dtype) : ExprHandle(Var::make(dtype)) {}
VarHandle(const std::string& name_hint, Dtype dtype)
: ExprHandle(Var::make(name_hint, dtype)) {}
explicit VarHandle(VarPtr node) : ExprHandle(std::move(node)) {}
VarPtr node() const {
return static_to<Var>(ExprHandle::node());
}
bool operator==(const VarHandle& other) const {
return this->node() == other.node();
}
bool operator!=(const VarHandle& other) const {
return !(*this == other);
}
const std::string& name_hint() const {
return this->node()->name_hint();
}
bool empty() const {
return (this->node() == nullptr);
}
};
template <class Op, class Base>
ExprPtr ExprNode<Op, Base>::accept_mutator(IRMutator* mutator) {
return mutator->mutate(static_to<Op>(Base::getptr()));
}
inline bool same_node(const ExprHandle& expr1, const ExprHandle& expr2) {
return expr1.AsNode<Expr>() == expr2.AsNode<Expr>();
}
TORCH_API ExprHandle sin(const ExprHandle& v);
TORCH_API ExprHandle cos(const ExprHandle& v);
TORCH_API ExprHandle tan(const ExprHandle& v);
TORCH_API ExprHandle asin(const ExprHandle& v);
TORCH_API ExprHandle acos(const ExprHandle& v);
TORCH_API ExprHandle atan(const ExprHandle& v);
TORCH_API ExprHandle sinh(const ExprHandle& v);
TORCH_API ExprHandle cosh(const ExprHandle& v);
TORCH_API ExprHandle tanh(const ExprHandle& v);
TORCH_API ExprHandle sigmoid(const ExprHandle& v);
TORCH_API ExprHandle exp(const ExprHandle& v);
TORCH_API ExprHandle expm1(const ExprHandle& v);
TORCH_API ExprHandle abs(const ExprHandle& v);
TORCH_API ExprHandle log(const ExprHandle& v);
TORCH_API ExprHandle fast_tanh(const ExprHandle& v);
TORCH_API ExprHandle fast_sigmoid(const ExprHandle& v);
TORCH_API ExprHandle fast_log(const ExprHandle& v);
TORCH_API ExprHandle log_vml(const ExprHandle& v);
TORCH_API ExprHandle log2(const ExprHandle& v);
TORCH_API ExprHandle log10(const ExprHandle& v);
TORCH_API ExprHandle log1p(const ExprHandle& v);
TORCH_API ExprHandle erf(const ExprHandle& v);
TORCH_API ExprHandle erfc(const ExprHandle& v);
TORCH_API ExprHandle sqrt(const ExprHandle& v);
TORCH_API ExprHandle rsqrt(const ExprHandle& v);
TORCH_API ExprHandle ceil(const ExprHandle& v);
TORCH_API ExprHandle floor(const ExprHandle& v);
TORCH_API ExprHandle round(const ExprHandle& v);
TORCH_API ExprHandle trunc(const ExprHandle& v);
TORCH_API ExprHandle frac(const ExprHandle& v);
TORCH_API ExprHandle lgamma(const ExprHandle& v);
TORCH_API ExprHandle atan2(const ExprHandle& v1, const ExprHandle& v2);
TORCH_API ExprHandle pow(const ExprHandle& v1, const ExprHandle& v2);
TORCH_API ExprHandle fmod(const ExprHandle& v1, const ExprHandle& v2);
TORCH_API ExprHandle remainder(const ExprHandle& v1, const ExprHandle& v2);
TORCH_API ExprHandle isnan(const ExprHandle& v1);
TORCH_API ExprHandle Relu(const ExprHandle& v1);
TORCH_API ExprHandle
ifThenElse(const ExprHandle& c, const ExprHandle& t, const ExprHandle& f);
TORCH_API ExprHandle expr_to_vec(const ExprHandle& v, int lanes);
} // namespace torch::jit::tensorexpr
```
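A short usage sketch for the handle classes above (not part of the header itself). It shows how `VarHandle` and the `ExprHandle` operator overloads build IR nodes; the include path and the `kInt` dtype constant are assumed to be the usual ones from this tensorexpr tree.

```cpp
#include <torch/csrc/jit/tensorexpr/expr.h>

using namespace torch::jit::tensorexpr;

void expr_handle_example() {
  VarHandle i("i", kInt);
  VarHandle j("j", kInt);
  // Each operator overload allocates the corresponding IR node (Mul, Add,
  // CompareSelect, ...) and wraps it back into an ExprHandle.
  ExprHandle flat = i * ExprHandle(32) + j;
  ExprHandle in_bounds = flat < ExprHandle(1024);
  // The raw node is reachable through the handle when a pass needs it.
  ExprPtr node = flat.node();
  (void)in_bounds;
  (void)node;
}
```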
|
====================================================================================================================================================
SOURCE CODE FILE: external_functions.h
LINES: 1
SIZE: 3.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\external_functions.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Config.h>
#include <ATen/Functions.h>
#include <c10/macros/Macros.h>
#include <torch/csrc/Export.h>
#include <cstdint>
#include <vector>
#define FOR_ALL_EXTERNAL_FUNCTIONS(_) \
_(nnc_aten_adaptive_avg_pool2d) \
_(nnc_aten_addmm) \
_(nnc_aten_conv2d) \
_(nnc_aten_conv1d) \
_(nnc_aten_conv1d_out) \
_(nnc_aten_dequantize) \
_(nnc_aten_dequantize_out) \
_(nnc_aten_embedding) \
_(nnc_aten_matmul) \
_(nnc_aten_mv) \
_(nnc_aten_mm) \
_(nnc_aten_mean) \
_(nnc_aten_max_red) \
_(nnc_aten_max_red_out) \
_(nnc_aten_quantized_conv1d) \
_(nnc_aten_quantized_conv1d_out) \
_(nnc_aten_quantized_conv2d) \
_(nnc_aten_quantized_conv2d_out) \
_(nnc_aten_quantized_conv2d_relu) \
_(nnc_aten_quantized_conv2d_relu_out) \
_(nnc_aten_quantized_linear) \
_(nnc_aten_quantized_linear_out) \
_(nnc_aten_quantized_linear_relu) \
_(nnc_aten_quantized_add) \
_(nnc_aten_quantized_cat) \
_(nnc_aten_quantized_mul) \
_(nnc_aten_quantized_mul_out) \
_(nnc_aten_quantized_mul_scalar) \
_(nnc_aten_quantized_mul_scalar_out) \
_(nnc_aten_quantized_relu) \
_(nnc_aten_quantized_sigmoid) \
_(nnc_aten_quantized_sigmoid_out) \
_(nnc_aten_quantize_per_tensor) \
_(nnc_aten_quantize_per_tensor_out) \
_(nnc_aten_triangular_solve) \
_(nnc_aten_upsample_nearest2d) \
_(nnc_aten_upsample_nearest2d_out) \
_(nnc_prepacked_conv2d_clamp_run) \
_(nnc_prepacked_linear_clamp_run)
#define DECLARE_EXTERNAL_FUNCTION(NAME) \
TORCH_API void NAME( \
int64_t bufs_num, \
void** buf_data, \
int64_t* buf_ranks, \
int64_t* buf_dims, \
int64_t* buf_strides, \
int8_t* buf_dtypes, \
int64_t args_num, \
int64_t* extra_args);
namespace torch::jit::tensorexpr {
struct QIData final {
double scale;
int64_t zero;
c10::ScalarType scalarType;
};
std::vector<at::Tensor> constructTensors(
int64_t bufs_num,
void** buf_data,
int64_t* buf_ranks,
int64_t* buf_dims,
int64_t* buf_strides,
int8_t* buf_dtypes,
std::optional<std::vector<std::pair<size_t, QIData>>> qdataArg =
std::nullopt);
std::vector<at::Tensor> constructTensors2(
int64_t bufs_in_num,
void** buf_data,
int64_t* buf_ranks,
int64_t* buf_dims,
int64_t* buf_strides,
int8_t* buf_dtypes,
std::optional<std::vector<std::pair<size_t, QIData>>> qdataArg =
std::nullopt,
size_t bufs_out_num = 0);
#ifdef C10_MOBILE
extern "C" {
#endif
void DispatchParallel(
int8_t* func,
int64_t start,
int64_t stop,
int8_t* packed_data) noexcept;
FOR_ALL_EXTERNAL_FUNCTIONS(DECLARE_EXTERNAL_FUNCTION)
#if AT_MKLDNN_ENABLED()
DECLARE_EXTERNAL_FUNCTION(nnc_mkldnn_prepacked_conv_run)
#endif
TORCH_API void nnc_aten_free(size_t bufs_num, void** ptrs) noexcept;
#ifdef C10_MOBILE
} // extern "C"
#endif
} // namespace torch::jit::tensorexpr
#undef DECLARE_EXTERNAL_FUNCTION
```
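For reference, this is what a single entry of the X-macro pair above expands to inside the tensorexpr namespace; it is derived mechanically from `FOR_ALL_EXTERNAL_FUNCTIONS` and `DECLARE_EXTERNAL_FUNCTION` and adds nothing beyond the header.

```cpp
// Expansion of _(nnc_aten_matmul) through DECLARE_EXTERNAL_FUNCTION:
TORCH_API void nnc_aten_matmul(
    int64_t bufs_num,
    void** buf_data,
    int64_t* buf_ranks,
    int64_t* buf_dims,
    int64_t* buf_strides,
    int8_t* buf_dtypes,
    int64_t args_num,
    int64_t* extra_args);
```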
|
=========================================================================================================================================================
SOURCE CODE FILE: external_functions_core.h
LINES: 1
SIZE: 0.47 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\external_functions_core.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <ATen/Parallel.h>
#include <torch/csrc/Export.h>
#include <cstdint>
namespace torch::jit::tensorexpr {
#ifdef C10_MOBILE
extern "C" {
#endif
void DispatchParallel(
int8_t* func,
int64_t start,
int64_t stop,
int8_t* packed_data) noexcept;
TORCH_API void nnc_aten_free(size_t bufs_num, void** ptrs) noexcept;
#ifdef C10_MOBILE
} // extern "C"
#endif
} // namespace torch::jit::tensorexpr
```
|
=============================================================================================================================================================
SOURCE CODE FILE: external_functions_registry.h
LINES: 1
SIZE: 2.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\external_functions_registry.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <cstdint>
#include <string>
#include <unordered_map>
namespace torch::jit::tensorexpr {
// The external functions that can be called from NNC must have the
// signature defined by `NNCExternalFunction`.
//
// Why this signature?
// It was picked for two reasons: 1) it should be generic enough to represent
// most of the ops we might want to call, and 2) it should be possible to
// generate code for this call in LLVM codegen.
// The first 5 parameters allow passing any number of contiguous CPU tensors in
// case we need to run aten ops (TODO: support different devices). The first
// buffer in the array is assumed to be the output buffer. We couldn't use
// `at::Tensor` (or `c10::IValue`) type there directly as it would mean that
// we'd need to declare it in LLVM codegen in LLVM IR form, which would be very
// cumbersome and hard to maintain. Note that the dimensions of all tensors are
// concatenated into a single array buf_dims. We do not need to pass its length,
// since it can be deduced from the total number of buffers and their ranks.
//
// The last 2 arguments allow passing any non-tensor arguments encoded as an
// array of int64_t values. The way they are encoded is not specified and can
// be arbitrary - whatever is most convenient for the specific bridge function.
//
// The bridge functions must not throw exceptions - properly propagating them
// from the generated code is too cumbersome, and thus all calls to functions
// that could throw must be wrapped with try-catch blocks.
using NNCExternalFunction = void (*)(
int64_t bufs_num,
void** buf_data,
int64_t* buf_ranks,
int64_t* buf_dims,
int64_t* buf_strides,
int8_t* buf_dtypes,
int64_t args_num,
int64_t* extra_args);
// Return a global map "function-name" -> "function-pointer" for all external
// functions registered in NNC.
TORCH_API std::unordered_map<std::string, NNCExternalFunction>&
getNNCFunctionRegistry();
// To register a new external function in NNC, create an instance of this
// struct.
struct RegisterNNCExternalFunction {
RegisterNNCExternalFunction(const std::string& name, NNCExternalFunction fn) {
getNNCFunctionRegistry()[name] = fn;
}
};
} // namespace torch::jit::tensorexpr
```
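A minimal sketch of how a new bridge function might be registered through this header. The function and registry key names (`my_nnc_identity`, `"my_identity"`) are made up for illustration; only `NNCExternalFunction` and `RegisterNNCExternalFunction` come from the header above.

```cpp
#include <torch/csrc/jit/tensorexpr/external_functions_registry.h>

namespace {

// Matches NNCExternalFunction; bridge functions must not throw.
void my_nnc_identity(
    int64_t bufs_num,
    void** buf_data,
    int64_t* buf_ranks,
    int64_t* buf_dims,
    int64_t* buf_strides,
    int8_t* buf_dtypes,
    int64_t args_num,
    int64_t* extra_args) {
  // By convention the first buffer is the output. A real bridge would
  // reconstruct tensors from the metadata arrays and run an aten op here;
  // this stub intentionally does nothing.
}

// Static registration: the constructor inserts the pointer into the map
// returned by getNNCFunctionRegistry().
torch::jit::tensorexpr::RegisterNNCExternalFunction reg_my_identity(
    "my_identity",
    my_nnc_identity);

} // namespace
```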
|
===========================================================================================================================================
SOURCE CODE FILE: fwd_decls.h
LINES: 1
SIZE: 3.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\fwd_decls.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/ScalarType.h>
#include <memory>
namespace torch::jit::tensorexpr {
template <typename Node>
using NodePtr = std::shared_ptr<Node>;
template <typename To, typename From>
NodePtr<To> to(const NodePtr<From>& x) {
return std::dynamic_pointer_cast<To>(x);
}
template <typename To, typename From>
NodePtr<To> static_to(NodePtr<From> x) {
return std::static_pointer_cast<To>(x);
}
template <typename Node, typename... Args>
NodePtr<Node> alloc(Args&&... args) {
return std::make_shared<Node>(std::forward<Args>(args)...);
}
class Buf;
class Expr;
class Stmt;
class Var;
using BufPtr = NodePtr<Buf>;
using ExprPtr = NodePtr<Expr>;
using StmtPtr = NodePtr<Stmt>;
using VarPtr = NodePtr<Var>;
class ExprHandle;
class VarHandle;
class BufHandle;
class Add;
class And;
class BitCast;
class Broadcast;
class Cast;
class CompareSelect;
class Div;
class IfThenElse;
class Intrinsics;
class Let;
class Load;
class Lshift;
class Max;
class MaxTerm;
class Min;
class MinTerm;
class Mod;
class Mul;
class Or;
class Polynomial;
class Ramp;
class ReduceOp;
class RoundOff;
class Rshift;
class Store;
class Sub;
class Term;
class Xor;
using AddPtr = NodePtr<Add>;
using AndPtr = NodePtr<And>;
using BitCastPtr = NodePtr<BitCast>;
using BroadcastPtr = NodePtr<Broadcast>;
using CastPtr = NodePtr<Cast>;
using CompareSelectPtr = NodePtr<CompareSelect>;
using DivPtr = NodePtr<Div>;
using IfThenElsePtr = NodePtr<IfThenElse>;
using IntrinsicsPtr = NodePtr<Intrinsics>;
using LetPtr = NodePtr<Let>;
using LoadPtr = NodePtr<Load>;
using LshiftPtr = NodePtr<Lshift>;
using MaxPtr = NodePtr<Max>;
using MaxTermPtr = NodePtr<MaxTerm>;
using MinPtr = NodePtr<Min>;
using MinTermPtr = NodePtr<MinTerm>;
using ModPtr = NodePtr<Mod>;
using MulPtr = NodePtr<Mul>;
using OrPtr = NodePtr<Or>;
using PolynomialPtr = NodePtr<Polynomial>;
using RampPtr = NodePtr<Ramp>;
using ReduceOpPtr = NodePtr<ReduceOp>;
using RoundOffPtr = NodePtr<RoundOff>;
using RshiftPtr = NodePtr<Rshift>;
using StorePtr = NodePtr<Store>;
using SubPtr = NodePtr<Sub>;
using TermPtr = NodePtr<Term>;
using XorPtr = NodePtr<Xor>;
class Allocate;
class AtomicAdd;
class Block;
class Cond;
class ExternalCall;
class ExternalCallWithAlloc;
class For;
class Free;
class FreeExt;
class PlacementAllocate;
class SyncThreads;
using AllocatePtr = NodePtr<Allocate>;
using AtomicAddPtr = NodePtr<AtomicAdd>;
using BlockPtr = NodePtr<Block>;
using CondPtr = NodePtr<Cond>;
using ExternalCallPtr = NodePtr<ExternalCall>;
using ExternalCallWithAllocPtr = NodePtr<ExternalCallWithAlloc>;
using ForPtr = NodePtr<For>;
using FreePtr = NodePtr<Free>;
using FreeExtPtr = NodePtr<FreeExt>;
using PlacementAllocatePtr = NodePtr<PlacementAllocate>;
using SyncThreadsPtr = NodePtr<SyncThreads>;
#define IMM_DECLARE(Type, Name) \
class Name##Imm; \
using Name##ImmPtr = NodePtr<Name##Imm>;
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE)
#undef IMM_DECLARE
} // namespace torch::jit::tensorexpr
```
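A small sketch of the pointer helpers declared above. `Var` and `Add` are only forward-declared here, so the sketch assumes the full definitions from the expr/ir headers in this same directory.

```cpp
#include <torch/csrc/jit/tensorexpr/ir.h>

using namespace torch::jit::tensorexpr;

void node_ptr_helpers_example() {
  VarPtr x = alloc<Var>("x", kInt); // alloc == std::make_shared
  VarPtr y = alloc<Var>("y", kInt);
  ExprPtr sum = alloc<Add>(x, y);   // NodePtr<Add> upcasts to ExprPtr
  // `to` is a checked downcast (dynamic_pointer_cast); nullptr on mismatch.
  if (AddPtr add = to<Add>(sum)) {
    // `static_to` is the unchecked variant; here the lhs is the Var we
    // passed in, since both operands share a dtype and no cast was inserted.
    VarPtr lhs = static_to<Var>(add->lhs());
    (void)lhs;
  }
}
```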
|
===========================================================================================================================================
SOURCE CODE FILE: graph_opt.h
LINES: 1
SIZE: 4.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\graph_opt.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit::tensorexpr {
// Optimize aten::cat ops in the given subgraph.
//
// Move users of cat to its inputs.
// Cat ops get lowered into multiple loops, one per input. When the result
// of cat is used by some other op, cat does not get inlined, which in turn
// results in an intermediate buffer being created for the result of cat.
//
// For example, consider the following graph:
// graph(%x : Float(10, strides=[1], device=cpu),
// %y : Float(20, strides=[1], device=cpu)):
// %dim : int = prim::Constant[value=0]()
// %xy_list : Tensor[] = prim::ListConstruct(%x, %y)
//   %cat : Float(30, strides=[1], device=cpu) = aten::cat(%xy_list, %dim)
//   %5 : Float(30, strides=[1], device=cpu) = aten::log(%cat)
//   return (%5)
//
// This will get lowered into:
// Allocate(aten_cat);
// for (...)
// aten_cat[...] = x[...]
// for (...)
// aten_cat[...] = y[...]
// for (...)
// aten_log[...] = log(aten_cat[...])
// Free(aten_cat);
// Note that aten_cat is not inlined into aten_log and it results in
// an intermediate buffer allocation as well.
//
// Optimization:
// We move the ops that use the result of `cat` into its inputs whenever
// possible.
//
// The graph above will be transformed to:
// graph(%x : Float(10, strides=[1], device=cpu),
// %y : Float(20, strides=[1], device=cpu)):
// %3 : int = prim::Constant[value=0]()
// %7 : Float(10, strides=[1], device=cpu) = aten::log(%x)
// %8 : Float(20, strides=[1], device=cpu) = aten::log(%y)
// %9 : Tensor[] = prim::ListConstruct(%7, %8)
//   %10 : Float(30, strides=[1], device=cpu) = aten::cat(%9, %3)
// return (%10)
//
// This will get lowered into:
// for (...)
// aten_cat[...] = log(x[...])
// for (...)
// aten_cat[...] = log(y[...])
// aten_cat is the output buffer here.
bool OptimizeCat(const std::shared_ptr<Graph>& graph);
TORCH_API void annotateInputShapes(
const std::shared_ptr<Graph>& graph,
const std::vector<std::optional<at::Tensor>>& example_inputs);
TORCH_API std::shared_ptr<Graph> removeUnusedSelfArgument(
const std::shared_ptr<Graph>& graph);
TORCH_API std::shared_ptr<Graph> removeGraphOutput(
const std::shared_ptr<Graph>& graph,
size_t idx);
TORCH_API std::shared_ptr<Graph> replaceListOutputWithTuple(
const std::shared_ptr<Graph>& graph);
// Perform \p ITERS rounds of "trimming" for the given \p GRAPH.
//
// Trimming means that we try to remove a small portion of the graph while
// keeping it valid. This is useful for debugging when we try to find a minimal
// example reproducing the issue at hand. When ITERS is 0, the graph remains
// unchanged; when ITERS is large, the graph usually becomes empty.
TORCH_API std::shared_ptr<Graph> trimGraph(
const std::shared_ptr<Graph>& graph,
int64_t iters);
// Scan all values in the given graph and replace each dimension whose size Xi
// is present in \p SIZES with a symbolic shape Yi. Return a vector of symbol
// values [Y0, Y1, ..., Yn].
//
// For example:
// Input:
// graph(%x : Float(10, 20, 30, 40)):
// %y : Float(10, 20, 30, 40) = aten::relu(%x)
// return %y
//
// If we run makeShapesSymbolic(graph, {20, 40}), then we'll get:
//
// graph(%x : Float(10, SS(-3), 30, SS(-5))):
// %y : Float(10, SS(-3), 30, SS(-5)) = aten::relu(%x)
// return %y
//
// and get {-3, -5} as the return value.
TORCH_API std::vector<int64_t> makeShapesSymbolic(
std::shared_ptr<Graph>& graph,
const std::vector<int64_t>& sizes);
// Inspect the graph and report whether it can be converted to TE IR.
// TODO: add error reporting for graphs that can't be converted.
TORCH_API bool isGraphCompilable(const std::shared_ptr<Graph>& graph);
// Examine the graph and (hackily) fill in missing tensor type info, such as
// scalar type, device, and strides. Ideally, this should be done by proper
// dtype/device/shape propagation passes, but until they are ready we can use
// this not-always-correct workaround pass.
TORCH_API void fixupMissingShapeInfo(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit::tensorexpr
```
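A hedged sketch of how these passes could be combined; the call order is illustrative rather than mandated by the header, and the graph is assumed to come from an existing scripted or traced module.

```cpp
#include <torch/csrc/jit/tensorexpr/graph_opt.h>

#include <optional>
#include <vector>

using namespace torch::jit;

void prepare_graph_for_te(
    const std::shared_ptr<Graph>& graph,
    const std::vector<std::optional<at::Tensor>>& example_inputs) {
  // Attach concrete input shapes so later passes can reason about them.
  tensorexpr::annotateInputShapes(graph, example_inputs);
  // Best-effort fill-in of missing scalar type / device / stride info.
  tensorexpr::fixupMissingShapeInfo(graph);
  // Move users of aten::cat into its inputs where possible (see the
  // before/after IR in the comment above).
  tensorexpr::OptimizeCat(graph);
}
```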
|
==============================================================================================================================================
SOURCE CODE FILE: half_support.h
LINES: 1
SIZE: 5.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\half_support.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
namespace torch::jit::tensorexpr {
// Walk the statement looking for Half-sized loads/stores.
class HalfChecker : public IRVisitor {
public:
HalfChecker(const std::vector<CodeGen::BufferArg>& args) {
for (const auto& BA : args) {
hasHalf_ |= BA.dtype().scalar_type() == ScalarType::Half;
}
}
bool hasHalf() const {
return hasHalf_;
}
bool hasBFloat16() const {
return hasBFloat16_;
}
void visit(const LoadPtr& v) override {
hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half;
hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16;
IRVisitor::visit(v);
}
void visit(const StorePtr& v) override {
hasHalf_ |= v->buf()->dtype().scalar_type() == ScalarType::Half;
hasBFloat16_ |= v->buf()->dtype().scalar_type() == ScalarType::BFloat16;
IRVisitor::visit(v);
}
void visit(const HalfImmPtr& v) override {
hasHalf_ = true;
}
void visit(const BFloat16ImmPtr& v) override {
hasBFloat16_ = true;
}
void visit(const CastPtr& v) override {
hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half;
hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16;
IRVisitor::visit(v);
}
private:
bool hasHalf_{false};
bool hasBFloat16_{false};
};
class HalfRewriter : public IRMutator {
ExprPtr mutate(const LoadPtr& v) override {
ExprPtr child = IRMutator::mutate(v);
if (!isHalf(child)) {
return child;
}
ExprPtr ret = alloc<Cast>(
child->dtype().cloneWithScalarType(ScalarType::Float), child);
inserted_half_casts_.insert(ret);
return ret;
}
StmtPtr mutate(const StorePtr& v) override {
// Since mutation changes the `value()` expression in-place, we need to
// get the dtype of the `value()` before that is mutated.
auto newType = v->value()->dtype();
ExprPtr new_val = v->value()->accept_mutator(this);
auto bufType = v->buf()->dtype();
if (isHalf(newType.scalar_type())) {
new_val = alloc<Cast>(newType, new_val);
inserted_half_casts_.insert(new_val);
}
// The scalar_type of value is not Half while the buf is Half
if (!isHalf(newType.scalar_type()) && isHalf(bufType.scalar_type())) {
new_val = alloc<Cast>(
newType.cloneWithScalarType(bufType.scalar_type()), new_val);
inserted_half_casts_.insert(new_val);
}
v->set_value(new_val);
return v;
}
ExprPtr mutate(const HalfImmPtr& v) override {
return alloc<Cast>(kFloat, v);
}
ExprPtr mutate(const BFloat16ImmPtr& v) override {
return alloc<Cast>(kFloat, v);
}
ExprPtr mutate(const CastPtr& v) override {
ExprPtr child = v->src_value()->accept_mutator(this);
// just don't allow half casts we didn't insert.
if (isHalf(v)) {
if (inserted_half_casts_.count(v) < 1) {
v->set_src_value(child);
v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat));
return v;
}
}
// Remove Half(Float()) and friends.
CastPtr cast_child = to<Cast>(child);
if (cast_child) {
auto cast_to_double = v->dtype().scalar_type() == ScalarType::Double;
auto from_half = isHalf(cast_child->src_value());
      // Cannot simplify double(float(half)) to double(half) as NNC does not
      // support casting BF16 to double directly.
      auto not_cast_half_to_double = !(cast_to_double && from_half);
      if (v->dtype().is_floating_point() &&
          cast_child->dtype().is_floating_point() && not_cast_half_to_double) {
return alloc<Cast>(v->dtype(), cast_child->src_value());
}
}
if (child == v->src_value()) {
return v;
}
return alloc<Cast>(v->dtype(), child);
}
StmtPtr mutate(const LetPtr& v) override {
if (isHalf(v->var()->dtype().scalar_type())) {
VarPtr load_new_var = alloc<Var>(v->var()->name_hint(), kFloat);
ExprPtr new_value = alloc<Cast>(
v->var()->dtype().cloneWithScalarType(ScalarType::Float),
v->value()->accept_mutator(this));
var_map[v->var()] = load_new_var;
return alloc<Let>(load_new_var, new_value);
}
return IRMutator::mutate(v);
}
ExprPtr mutate(const VarPtr& v) override {
auto it = var_map.find(v);
if (it != var_map.end()) {
return it->second;
}
return v;
}
template <typename T>
ExprPtr mutateArithmetic(T v) {
IRMutator::mutate(v);
if (isHalf(v)) {
v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat));
}
return v;
}
ExprPtr mutate(const AddPtr& v) override {
return mutateArithmetic(v);
}
ExprPtr mutate(const SubPtr& v) override {
return mutateArithmetic(v);
}
ExprPtr mutate(const MulPtr& v) override {
return mutateArithmetic(v);
}
ExprPtr mutate(const DivPtr& v) override {
return mutateArithmetic(v);
}
ExprPtr mutate(const MaxPtr& v) override {
return mutateArithmetic(v);
}
ExprPtr mutate(const MinPtr& v) override {
return mutateArithmetic(v);
}
ExprPtr mutate(const CompareSelectPtr& v) override {
return mutateArithmetic(v);
}
ExprPtr mutate(const BroadcastPtr& v) override {
return mutateArithmetic(v);
}
ExprPtr mutate(const IfThenElsePtr& v) override {
return mutateArithmetic(v);
}
ExprPtr mutate(const IntrinsicsPtr& v) override {
return mutateArithmetic(v);
}
private:
static bool isHalf(ScalarType st) {
return st == ScalarType::Half || st == ScalarType::BFloat16;
}
static bool isHalf(const ExprPtr& v) {
return isHalf(v->dtype().scalar_type());
}
std::unordered_set<ExprPtr> inserted_half_casts_;
std::unordered_map<VarPtr, VarPtr> var_map;
};
} // namespace torch::jit::tensorexpr
```
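A minimal sketch of applying the two classes above to a lowered statement; `stmt` and `buffer_args` are assumed to come from an existing codegen setup, and the early return is only an optimization.

```cpp
#include <torch/csrc/jit/tensorexpr/half_support.h>

using namespace torch::jit::tensorexpr;

StmtPtr promote_half_compute(
    StmtPtr stmt,
    const std::vector<CodeGen::BufferArg>& buffer_args) {
  HalfChecker checker(buffer_args);
  stmt->accept(&checker);
  if (!checker.hasHalf() && !checker.hasBFloat16()) {
    return stmt; // nothing to rewrite
  }
  // Rewrites loads, stores, immediates, and arithmetic so the computation
  // happens in float, with casts at the Half/BFloat16 boundaries.
  HalfRewriter rewriter;
  return stmt->accept_mutator(&rewriter);
}
```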
|
===============================================================================================================================================
SOURCE CODE FILE: hash_provider.h
LINES: 1
SIZE: 7.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\hash_provider.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
#include <utility>
namespace torch::jit::tensorexpr {
struct TORCH_API SimplifierHashType {
SimplifierHashType() = default;
explicit SimplifierHashType(size_t s) : _h(s) {}
bool operator==(const SimplifierHashType& other) const;
bool operator!=(const SimplifierHashType& other) const;
bool operator<(const SimplifierHashType& other) const;
bool operator==(const size_t other) const;
bool operator!=(const size_t other) const;
size_t _h{0};
};
} // namespace torch::jit::tensorexpr
namespace std {
template <>
struct hash<torch::jit::tensorexpr::SimplifierHashType> {
size_t operator()(const torch::jit::tensorexpr::SimplifierHashType& k) const {
return k._h;
}
};
} // namespace std
namespace torch::jit::tensorexpr {
#define CACHE_GUARD() \
if (cachedHash(v)) { \
return; \
}
class Term;
class Polynomial;
/* Expression hasher providing comparable values representing sub-exprs.
* Uses memoization to avoid excessive recursion. */
class TORCH_API HashProvider : public IRVisitor {
public:
template <class T>
SimplifierHashType hash(T e) {
e->accept(this);
return hashOf(e);
}
bool cachedHash(const ExprPtr& e) {
return exprToHash_.find(e) != exprToHash_.end();
}
bool cachedHash(const StmtPtr& s) {
return stmtToHash_.find(s) != stmtToHash_.end();
}
void clearCache() {
exprToHash_.clear();
stmtToHash_.clear();
}
void visit(const AddPtr& v) override;
void visit(const SubPtr& v) override;
void visit(const MulPtr& v) override;
void visit(const DivPtr& v) override;
void visit(const ModPtr& v) override;
void visit(const RoundOffPtr& v) override;
void visit(const MaxPtr& v) override;
void visit(const MinPtr& v) override;
void visit(const AndPtr& v) override;
void visit(const OrPtr& v) override;
void visit(const XorPtr& v) override;
void visit(const LshiftPtr& v) override;
void visit(const RshiftPtr& v) override;
void visit(const CompareSelectPtr& v) override;
#define IMM_VISIT(Type, Name) \
void visit(const Name##ImmPtr& v) override { \
CACHE_GUARD(); \
putHash(v, hash_combine(#Name, v->value())); \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_VISIT)
#undef IMM_VISIT
void visit(const CastPtr& v) override;
void visit(const VarPtr& v) override;
void visit(const RampPtr& v) override;
void visit(const LoadPtr& v) override;
void visit(const StorePtr& v) override;
void visit(const BlockPtr& v) override;
void visit(const ForPtr& v) override;
void visit(const BroadcastPtr& v) override;
void visit(const IfThenElsePtr& v) override;
void visit(const IntrinsicsPtr& v) override;
void visit(const AllocatePtr& v) override;
void visit(const FreePtr& v) override;
void visit(const CondPtr& v) override;
void visit(const TermPtr& v) override;
void visit(const PolynomialPtr& v) override;
void visit(const MaxTermPtr& v) override;
void visit(const MinTermPtr& v) override;
template <typename... Types>
SimplifierHashType hash_combine(const Types&... args) {
SimplifierHashType seed;
_hash_combine(seed, args...);
return seed;
}
private:
SimplifierHashType hashOf(const ExprPtr& e) {
auto it = exprToHash_.find(e);
if (it != exprToHash_.end()) {
return it->second;
}
    // As a failsafe, fall back to the IRPrinter.
std::stringstream ss;
IRPrinter printer(ss);
e->accept(&printer);
SimplifierHashType hash = SimplifierHashType(te_hash(ss.str()));
putHash(e, hash);
return hash;
}
SimplifierHashType hashOf(const StmtPtr& s) {
auto it = stmtToHash_.find(s);
if (it != stmtToHash_.end()) {
return it->second;
}
    // As a failsafe, fall back to the IRPrinter.
std::stringstream ss;
IRPrinter printer(ss);
s->accept(&printer);
SimplifierHashType hash = SimplifierHashType(te_hash(ss.str()));
putHash(s, hash);
return hash;
}
  // Hash functions for various types; the magic numbers are arbitrary.
template <typename T>
void _hash_combine(SimplifierHashType& seed, const T& val) {
seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
}
void _hash_combine(SimplifierHashType& seed, const char* val) {
seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
}
  // at::Half doesn't have a prime_number_hash, so cast it to a 16-bit integer.
void _hash_combine(SimplifierHashType& seed, const at::Half& val) {
seed._h ^=
te_hash((uint16_t)val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
}
void _hash_combine(SimplifierHashType& seed, const Dtype& val) {
seed._h ^= te_hash(val.ToCppString()) + 0x1f752c19 + (seed._h << 7) +
(seed._h >> 4);
}
void _hash_combine(SimplifierHashType& seed, ExprPtr e) {
_hash_combine(seed, hash(std::move(e)));
}
template <typename T, typename... Types>
void _hash_combine(
SimplifierHashType& seed,
const T& val,
const Types&... args) {
_hash_combine(seed, val);
_hash_combine(seed, args...);
}
void putHash(const ExprPtr& e, SimplifierHashType h) {
auto res = exprToHash_.emplace(e, h);
if (res.second == false) {
// This is always a logic bug since we should check the cache first.
throw std::runtime_error("hash collision");
}
}
void putHash(const StmtPtr& s, SimplifierHashType h) {
auto res = stmtToHash_.emplace(s, h);
if (res.second == false) {
// This is always a logic bug since we should check the cache first.
throw std::runtime_error("hash collision");
}
}
std::unordered_map<ExprPtr, SimplifierHashType> exprToHash_;
std::unordered_map<StmtPtr, SimplifierHashType> stmtToHash_;
UniqueNameManager name_manager_;
size_t te_hash(SimplifierHashType val) {
return val._h;
}
size_t te_hash(int64_t val) {
    // XOR with an arbitrary constant to perturb the bits.
    size_t h = val ^ 0x647AA4D20C0B;
    // Flip all the bits.
    size_t h2 = ~h;
    // And reverse the byte order.
size_t h3 = 0;
for (unsigned int i = 0; i < 64; i += 8) {
h3 |= ((h2 >> i) & 0xFF) << (64 - i - 8);
}
return h3;
}
size_t te_hash(int32_t val) {
int64_t v2 = val;
return te_hash(v2);
}
size_t te_hash(uint32_t val) {
int64_t v2 = val;
return te_hash(v2);
}
size_t te_hash(uint64_t val) {
int64_t v2 = val;
return te_hash(v2);
}
size_t te_hash(int16_t val) {
int64_t v2 = val;
return te_hash(v2);
}
size_t te_hash(std::string val) {
size_t hash{0};
int64_t intval{0};
int64_t s = val.size() - 1;
while (s >= 0) {
for (unsigned int i = 0; i < 8; ++i) {
if (s < 0)
break;
int64_t c = val[s];
intval |= (c << (i * 8));
s--;
}
hash ^= te_hash(intval);
intval = 0;
}
return hash;
}
size_t te_hash(double d) {
int64_t* n = reinterpret_cast<int64_t*>(&d);
return te_hash(*n);
}
size_t te_hash(float d) {
int32_t* n = reinterpret_cast<int32_t*>(&d);
return te_hash(*n);
}
size_t te_hash(at::Half d) {
int16_t* n = reinterpret_cast<int16_t*>(&d);
return te_hash(*n);
}
size_t te_hash(at::BFloat16 d) {
int16_t* n = reinterpret_cast<int16_t*>(&d);
return te_hash(*n);
}
};
} // namespace torch::jit::tensorexpr
```
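A short sketch of the memoizing hasher in use: structurally identical expressions are expected to produce the same `SimplifierHashType`, which is what the simplifier relies on when comparing sub-expressions.

```cpp
#include <torch/csrc/jit/tensorexpr/hash_provider.h>
#include <torch/csrc/jit/tensorexpr/ir.h>

using namespace torch::jit::tensorexpr;

bool hashes_match_example() {
  VarHandle x("x", kInt);
  ExprHandle a = x + ExprHandle(1);
  ExprHandle b = x + ExprHandle(1); // same structure, same leaf nodes
  HashProvider hasher;
  SimplifierHashType ha = hasher.hash(a.node());
  SimplifierHashType hb = hasher.hash(b.node());
  return ha == hb; // expected to be true
}
```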
|
===================================================================================================================================================
SOURCE CODE FILE: intrinsic_symbols.h
LINES: 1
SIZE: 0.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\intrinsic_symbols.h
ENCODING: utf-8
```h
#pragma once
#ifdef TORCH_ENABLE_LLVM
#include <c10/util/ArrayRef.h>
namespace torch {
namespace jit {
namespace tensorexpr {
struct SymbolAddress {
const char* symbol;
void* address;
SymbolAddress(const char* sym, void* addr) : symbol(sym), address(addr) {}
};
c10::ArrayRef<SymbolAddress> getIntrinsicSymbols();
} // namespace tensorexpr
} // namespace jit
} // namespace torch
#endif // TORCH_ENABLE_LLVM
```
|
====================================================================================================================================
SOURCE CODE FILE: ir.h
LINES: 1
SIZE: 23.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\ir.h
ENCODING: utf-8
```h
#pragma once
#include <string>
#include <utility>
#include <vector>
#include <torch/csrc/jit/tensorexpr/exceptions.h>
#include <torch/csrc/jit/tensorexpr/expr.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/stmt.h>
#include <ATen/core/ivalue.h>
namespace torch::jit::tensorexpr {
enum CompareSelectOperation {
kEQ = 0,
kGT,
kGE,
kLT,
kLE,
kNE,
};
enum CompareSelectBias {
kUnbiased,
kLikely,
kUnlikely,
};
inline int getPrecedence(IRNodeType ty) {
  // Match C++ operator precedence rules, since some backends pretty-print
  // expressions to C++.
  // SEE: https://en.cppreference.com/w/cpp/language/operator_precedence
switch (ty) {
case kPrimitive:
return 0;
case kCast:
case kBitCast:
return 2;
case kAdd:
case kSub:
return 6;
case kMul:
case kDiv:
case kMod:
return 5;
case kMax:
case kMin:
return 99;
case kAnd:
return 11;
case kOr:
return 13;
case kLshift:
case kRshift:
return 7;
case kXor:
return 12;
case kCompareSelect:
return 16;
default:
return 99;
}
}
class TORCH_API Cast : public ExprNode<Cast> {
public:
ExprPtr src_value() const {
return src_value_;
}
void set_src_value(ExprPtr src_value) {
src_value_ = std::move(src_value);
}
static ExprHandle make(Dtype dtype, const ExprHandle& src_value) {
return ExprHandle(alloc<Cast>(dtype, src_value.node()));
}
Cast(Dtype dtype, ExprPtr src_value)
: ExprNodeBase(dtype, kCast), src_value_(std::move(src_value)) {}
bool isConstant() const override {
return src_value_->isConstant();
}
private:
ExprPtr src_value_;
};
template <typename T>
ExprHandle cast(const ExprHandle& src_value) {
return Cast::make(Dtype(ToDtype<T>(), src_value.dtype().lanes()), src_value);
}
// This is a bitwise cast, akin to bitcast in LLVM
class TORCH_API BitCast : public ExprNode<BitCast> {
public:
ExprPtr src_value() const {
return src_value_;
}
void set_src_value(ExprPtr src_value) {
src_value_ = std::move(src_value);
}
static ExprHandle make(Dtype dtype, const ExprHandle& src_value) {
return ExprHandle(alloc<BitCast>(dtype, src_value.node()));
}
BitCast(Dtype dtype, ExprPtr src_value)
: ExprNodeBase(dtype, kBitCast), src_value_(std::move(src_value)) {
TORCH_CHECK(src_value_->dtype().byte_size() == dtype.byte_size());
}
bool isConstant() const override {
return src_value_->isConstant();
}
private:
ExprPtr src_value_;
};
template <typename T>
ExprHandle bitcast(const ExprHandle& src_value) {
return BitCast::make(
Dtype(ToDtype<T>(), src_value.dtype().lanes()), src_value);
}
// Represents the expression node for binary operators.
// Uses a CRTP pattern to share common code among the operators.
template <typename Op>
class BinaryOpNode : public ExprNode<Op> {
public:
ExprPtr lhs() const {
return this->lhs_;
}
ExprPtr rhs() const {
return this->rhs_;
}
void set_lhs(ExprPtr lhs) {
lhs_ = std::move(lhs);
}
void set_rhs(ExprPtr rhs) {
rhs_ = std::move(rhs);
}
static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) {
return ExprHandle(alloc<Op>(lhs.node(), rhs.node()));
}
BinaryOpNode(
ExprPtr lhs_v,
ExprPtr rhs_v,
IRNodeType expr_type,
ScalarType ret_type = ScalarType::Undefined)
: ExprNode<Op>(
BinaryOpDtype(lhs_v->dtype(), rhs_v->dtype(), ret_type),
expr_type),
lhs_(CastIfNeeded(std::move(lhs_v), ExprNode<Op>::dtype())),
rhs_(CastIfNeeded(std::move(rhs_v), ExprNode<Op>::dtype())) {}
private:
static ExprPtr CastIfNeeded(ExprPtr expr, Dtype dst_dtype) {
if (expr->dtype() == dst_dtype) {
return expr;
}
return Cast::make(dst_dtype, ExprHandle(std::move(expr))).node();
}
ExprPtr lhs_;
ExprPtr rhs_;
};
namespace detail {
template <typename T>
void bin_op_deducer(BinaryOpNode<T>);
bool bin_op_deducer(...);
} // namespace detail
class TORCH_API Add : public BinaryOpNode<Add> {
public:
Add(ExprPtr lhs, ExprPtr rhs)
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAdd) {}
};
class TORCH_API Sub : public BinaryOpNode<Sub> {
public:
Sub(ExprPtr lhs, ExprPtr rhs)
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kSub) {}
};
class TORCH_API Mul : public BinaryOpNode<Mul> {
public:
Mul(ExprPtr lhs, ExprPtr rhs)
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMul) {}
};
class TORCH_API Div : public BinaryOpNode<Div> {
public:
Div(ExprPtr lhs, ExprPtr rhs)
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kDiv) {}
};
class TORCH_API Mod : public BinaryOpNode<Mod> {
public:
Mod(ExprPtr lhs, ExprPtr rhs)
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMod) {}
};
template <typename Op>
class BitwiseOpNode : public BinaryOpNode<Op> {
public:
BitwiseOpNode(ExprPtr lhs, ExprPtr rhs, IRNodeType type)
: BinaryOpNode<Op>(std::move(lhs), std::move(rhs), type) {}
static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) {
if (!lhs.dtype().is_integral()) {
throw unsupported_dtype();
}
if (lhs.dtype() != rhs.dtype()) {
throw malformed_input("lhs/rhs dtype mismatch");
}
return BinaryOpNode<Op>::make(lhs, rhs);
}
};
class TORCH_API And : public BitwiseOpNode<And> {
public:
And(ExprPtr lhs, ExprPtr rhs)
: BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAnd) {}
};
class TORCH_API Or : public BitwiseOpNode<Or> {
public:
Or(ExprPtr lhs, ExprPtr rhs)
: BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kOr) {}
};
class TORCH_API Xor : public BitwiseOpNode<Xor> {
public:
Xor(ExprPtr lhs, ExprPtr rhs)
: BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kXor) {}
};
class TORCH_API Lshift : public BitwiseOpNode<Lshift> {
public:
Lshift(ExprPtr lhs, ExprPtr rhs)
: BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kLshift) {}
};
class TORCH_API Rshift : public BitwiseOpNode<Rshift> {
public:
Rshift(ExprPtr lhs, ExprPtr rhs)
: BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kRshift) {}
};
// TODO: add TORCH_API
// Currently adding it results in a compilation error on Windows
class Max : public BinaryOpNode<Max> {
private:
bool propagate_nans_;
public:
Max(ExprPtr lhs, ExprPtr rhs, bool propagate_nans)
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMax),
propagate_nans_(propagate_nans) {}
bool propagate_nans() const {
return propagate_nans_;
}
static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) = delete;
static ExprHandle make(
const ExprHandle& lhs,
const ExprHandle& rhs,
bool propagate_nans) {
return ExprHandle(alloc<Max>(lhs.node(), rhs.node(), propagate_nans));
}
};
// TODO: add TORCH_API
// Currently adding it results in a compilation error on Windows
class Min : public BinaryOpNode<Min> {
private:
bool propagate_nans_;
public:
Min(ExprPtr lhs, ExprPtr rhs, bool propagate_nans)
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMin),
propagate_nans_(propagate_nans) {}
bool propagate_nans() const {
return propagate_nans_;
}
static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) = delete;
static ExprHandle make(
const ExprHandle& lhs,
const ExprHandle& rhs,
bool propagate_nans) {
return ExprHandle(alloc<Min>(lhs.node(), rhs.node(), propagate_nans));
}
};
// Encodes typed immediate values, e.g. IntImm, FloatImm.
#define IMM_DECLARE(Type, Name) \
class TORCH_API Name##Imm : public ExprNode<Name##Imm> { \
public: \
Name##Imm(Type value) \
: ExprNodeBase(k##Name, kPrimitive), value_(value) {} \
bool isConstant() const override { \
return true; \
} \
Type value() const { \
return value_; \
} \
static ExprHandle make(Type value) { \
return ExprHandle(alloc<Name##Imm>(value)); \
} \
\
private: \
Type value_; \
};
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE)
#undef IMM_DECLARE
// Get immediate by ScalarType.
template <typename T>
ExprPtr getImmediateByType(ScalarType immType, T initialVal) {
switch (immType) {
#define TYPE_CASE(Type, Name) \
case ScalarType::Name: \
return alloc<Name##Imm>(Type(initialVal));
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE)
#undef TYPE_CASE
default:
throw unsupported_dtype();
}
return nullptr;
}
template <typename T>
ExprPtr getImmediateByType(Dtype dtype, T initialVal) {
return getImmediateByType<T>(dtype.scalar_type(), initialVal);
}
template <typename T>
ExprPtr immLike(const ExprPtr& e, T v) {
return getImmediateByType<T>(e->dtype(), v);
}
template <typename T>
ExprPtr immLike(const ExprHandle& e, T v) {
return immLike(e.node(), v);
}
inline std::optional<int64_t> intValue(const ExprPtr& e) {
#define TYPE_CASE(Type, Name) \
if (auto v = to<Name##Imm>(e)) { \
return v->value(); \
}
AT_FORALL_INT_TYPES(TYPE_CASE);
#undef TYPE_CASE
return std::nullopt;
}
inline std::optional<int64_t> intValue(const ExprHandle& e) {
return intValue(e.node());
}
template <typename T>
T immediateAs(const ExprPtr& e) {
#define TYPE_CASE(Type, Name) \
if (Name##ImmPtr imm = to<Name##Imm>(e)) { \
return imm->value(); \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE)
#undef TYPE_CASE
throw unsupported_dtype();
return 0;
}
template <typename T>
T immediateAs(const ExprHandle& e) {
return immediateAs<T>(e.node());
}
template <typename T>
bool immediateEquals(const ExprPtr& e, T val) {
#define TYPE_CASE(Type, Name) \
if (Name##ImmPtr imm = to<Name##Imm>(e)) { \
return imm->value() == val; \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE)
#undef TYPE_CASE
throw unsupported_dtype();
return false;
}
TORCH_API bool immediateIsNegative(const ExprPtr& e);
TORCH_API bool immediateIsPositive(const ExprPtr& e);
TORCH_API bool immediateIsZero(const ExprPtr& e);
// Represents a ramp vector node:
// [base, base + 1 * stride, ... , base + (lanes - 1) * stride]
class TORCH_API Ramp : public ExprNode<Ramp> {
public:
ExprPtr base() const {
return base_;
}
ExprPtr stride() const {
return stride_;
}
void set_base(ExprPtr base) {
base_ = std::move(base);
}
void set_stride(ExprPtr stride) {
stride_ = std::move(stride);
}
static ExprHandle make(
const ExprHandle& base,
const ExprHandle& stride,
int64_t lanes) {
if (stride.dtype() != base.dtype()) {
throw malformed_input("Bad stride in Ramp");
}
return ExprHandle(alloc<Ramp>(base.node(), stride.node(), lanes));
}
int64_t lanes() const {
return lanes_;
}
Ramp(ExprPtr base, ExprPtr stride, int64_t lanes)
: ExprNodeBase(Dtype(base->dtype(), lanes)),
base_(std::move(base)),
stride_(std::move(stride)),
lanes_(lanes) {}
private:
ExprPtr base_;
ExprPtr stride_;
int64_t lanes_;
};
class TORCH_API Load : public ExprNode<Load> {
public:
VarPtr base_handle() const {
return buf_->base_handle();
}
std::vector<ExprPtr> indices() const {
return indices_;
}
ExprPtr flat_index() const {
TORCH_CHECK(indices_.size() == 1, "Indices haven't been flattened.");
return indices_[0];
}
BufPtr buf() const {
return buf_;
}
void set_buf(BufPtr buf) {
buf_ = std::move(buf);
}
void set_indices(std::vector<ExprPtr> indices) {
indices_ = std::move(indices);
}
static ExprHandle make(
Dtype dtype,
const BufHandle& buf,
const std::vector<ExprHandle>& indices);
static ExprHandle make(
const BufHandle& buf,
const std::vector<ExprHandle>& indices);
Load(Dtype dtype, BufPtr base_handle, std::vector<ExprPtr> indices);
Load(const BufPtr& base_handle, const std::vector<ExprPtr>& indices);
private:
BufPtr buf_;
std::vector<ExprPtr> indices_;
};
class TORCH_API Broadcast : public ExprNode<Broadcast> {
public:
ExprPtr value() const {
return value_;
}
void set_value(ExprPtr value) {
value_ = std::move(value);
}
int64_t lanes() const {
return lanes_;
}
static ExprHandle make(const ExprHandle& value, int64_t lanes) {
return ExprHandle(alloc<Broadcast>(value.node(), lanes));
}
Broadcast(ExprPtr value, int64_t lanes)
: ExprNodeBase(Dtype(value->dtype(), lanes)),
value_(std::move(value)),
lanes_(lanes) {}
private:
ExprPtr value_;
int64_t lanes_;
};
class TORCH_API IfThenElse : public ExprNode<IfThenElse> {
public:
ExprPtr condition() const {
return condition_;
}
// Lazily evaluated only if condition is true
ExprPtr true_value() const {
return true_;
}
// Lazily evaluated only if condition is false
ExprPtr false_value() const {
return false_;
}
void set_condition(ExprPtr condition) {
condition_ = std::move(condition);
}
void set_true_value(ExprPtr true_value) {
true_ = std::move(true_value);
}
void set_false_value(ExprPtr false_value) {
false_ = std::move(false_value);
}
static ExprHandle make(
const ExprHandle& c,
const ExprHandle& t,
const ExprHandle& f) {
if (!c.dtype().is_integral()) {
throw unsupported_dtype();
}
if (c.dtype().lanes() != 1) {
throw unsupported_dtype();
}
if (t.dtype() != f.dtype()) {
throw malformed_input("Bad dtype in IfThenElse");
}
return ExprHandle(alloc<IfThenElse>(c.node(), t.node(), f.node()));
}
IfThenElse(ExprPtr c, ExprPtr t, ExprPtr f)
: ExprNodeBase(t->dtype()),
condition_(std::move(c)),
true_(std::move(t)),
false_(std::move(f)) {}
private:
ExprPtr condition_;
ExprPtr true_;
ExprPtr false_;
};
class TORCH_API CompareSelect : public ExprNode<CompareSelect> {
public:
CompareSelectOperation compare_select_op() const {
return compare_op_;
}
ExprPtr lhs() const {
return this->lhs_;
}
ExprPtr rhs() const {
return this->rhs_;
}
ExprPtr ret_val1() const {
return this->ret_val1_;
}
ExprPtr ret_val2() const {
return this->ret_val2_;
}
void set_lhs(ExprPtr lhs) {
lhs_ = std::move(lhs);
}
void set_rhs(ExprPtr rhs) {
rhs_ = std::move(rhs);
}
void set_ret_val1(ExprPtr ret_val1) {
ret_val1_ = std::move(ret_val1);
}
void set_ret_val2(ExprPtr ret_val2) {
ret_val2_ = std::move(ret_val2);
}
CompareSelectBias bias() const {
return bias_;
}
static ExprHandle make(
const ExprHandle& lhs,
const ExprHandle& rhs,
CompareSelectOperation cmp_op,
CompareSelectBias bias = kUnbiased) {
if (lhs.dtype() != rhs.dtype()) {
throw malformed_input("bad dtype in CompareSelect");
}
return ExprHandle(alloc<CompareSelect>(
lhs.node(),
rhs.node(),
IntImm::make(1).node(),
IntImm::make(0).node(),
cmp_op,
bias));
}
static ExprHandle make(
const ExprHandle& lhs,
const ExprHandle& rhs,
const ExprHandle& ret_val1,
const ExprHandle& ret_val2,
CompareSelectOperation cmp_op,
CompareSelectBias bias = kUnbiased) {
if (lhs.dtype() != rhs.dtype() || ret_val1.dtype() != ret_val2.dtype()) {
throw malformed_input("bad dtype in CompareSelect");
}
return ExprHandle(alloc<CompareSelect>(
lhs.node(),
rhs.node(),
ret_val1.node(),
ret_val2.node(),
cmp_op,
bias));
}
CompareSelect(
ExprPtr lhs,
ExprPtr rhs,
ExprPtr ret_val1,
ExprPtr ret_val2,
CompareSelectOperation cmp_op,
CompareSelectBias bias = kUnbiased)
: ExprNodeBase(ret_val1->dtype()),
lhs_(std::move(lhs)),
rhs_(std::move(rhs)),
ret_val1_(std::move(ret_val1)),
ret_val2_(std::move(ret_val2)),
compare_op_(cmp_op),
bias_(bias) {}
CompareSelect(
ExprPtr lhs,
ExprPtr rhs,
CompareSelectOperation cmp_op,
CompareSelectBias bias = kUnbiased)
: ExprNodeBase(kInt),
lhs_(std::move(lhs)),
rhs_(std::move(rhs)),
ret_val1_(alloc<IntImm>(1)),
ret_val2_(alloc<IntImm>(0)),
compare_op_(cmp_op),
bias_(bias) {}
private:
ExprPtr lhs_;
ExprPtr rhs_;
ExprPtr ret_val1_;
ExprPtr ret_val2_;
CompareSelectOperation compare_op_;
CompareSelectBias bias_;
};
enum IntrinsicsOp {
kSin,
kCos,
kTan,
kAsin,
kAcos,
kAtan,
kAtan2,
kSinh,
kCosh,
kTanh,
kSigmoid,
kExp,
kExpm1,
kAbs,
kLog,
kLog2,
kLog10,
kLog1p,
kErf,
kErfc,
kSqrt,
kRsqrt,
kPow,
kCeil,
kFloor,
kRound,
kTrunc,
kFmod,
kRemainder,
kLgamma,
kFrac,
kIsNan,
  kRand, // We need more discussion on this. Should we consider making it stateful?
kMaxIntrinsicsOp,
};
class TORCH_API Intrinsics : public ExprNode<Intrinsics> {
public:
static ExprHandle make(IntrinsicsOp op_type, const ExprHandle& v1) {
return ExprHandle(alloc<Intrinsics>(op_type, v1.node()));
}
static ExprHandle make(
IntrinsicsOp op_type,
const ExprHandle& v1,
const ExprHandle& v2) {
return ExprHandle(alloc<Intrinsics>(op_type, v1.node(), v2.node()));
}
static ExprHandle make(
IntrinsicsOp op_type,
const std::vector<ExprHandle>& params) {
std::vector<ExprPtr> params_nodes(params.size());
for (size_t i = 0; i < params.size(); i++) {
params_nodes[i] = params[i].node();
}
return ExprHandle(alloc<Intrinsics>(op_type, params_nodes));
}
static ExprHandle make(IntrinsicsOp op_type, Dtype dtype) {
return ExprHandle(alloc<Intrinsics>(op_type, dtype));
}
IntrinsicsOp op_type() const {
return op_type_;
}
std::string func_name() const {
switch (op_type()) {
case kSin:
return "sin";
case kCos:
return "cos";
case kTan:
return "tan";
case kAsin:
return "asin";
case kAcos:
return "acos";
case kAtan:
return "atan";
case kAtan2:
return "atan2";
case kSinh:
return "sinh";
case kCosh:
return "cosh";
case kTanh:
return "tanh";
case kSigmoid:
return "sigmoid";
case kExp:
return "exp";
case kAbs:
return "abs";
case kLog:
return "log";
case kLog2:
return "log2";
case kLog10:
return "log10";
case kLog1p:
return "log1p";
case kErf:
return "erf";
case kSqrt:
return "sqrt";
case kRsqrt:
return "rsqrt";
case kPow:
return "pow";
case kCeil:
return "ceil";
case kFloor:
return "floor";
case kRound:
return "round";
case kTrunc:
return "trunc";
case kRand:
return "rand";
case kFmod:
return "fmod";
case kRemainder:
return "remainder";
case kLgamma:
return "lgamma";
case kExpm1:
return "expm1";
case kErfc:
return "erfc";
case kFrac:
return "frac";
case kIsNan:
return "isnan";
default:
throw std::runtime_error(
"invalid op_type: " + std::to_string(op_type()));
}
}
Intrinsics(IntrinsicsOp op_type, Dtype dtype)
: ExprNodeBase(IntrinsicsDtype(op_type, dtype)),
params_({}),
op_type_(op_type) {
if (OpArgCount(op_type) != 0) {
throw malformed_input("bad arg count in Intrinsics");
}
}
Intrinsics(IntrinsicsOp op_type, ExprPtr v1)
: ExprNodeBase(IntrinsicsDtype(op_type, v1->dtype())),
params_({std::move(v1)}),
op_type_(op_type) {
if (OpArgCount(op_type) != 1) {
throw malformed_input("bad arg count in Intrinsics");
}
}
Intrinsics(IntrinsicsOp op_type, ExprPtr v1, ExprPtr v2)
: ExprNodeBase(IntrinsicsDtype(op_type, v1->dtype(), v2->dtype())),
params_({std::move(v1), std::move(v2)}),
op_type_(op_type) {
if (OpArgCount(op_type) != 2) {
throw malformed_input("bad arg count in Intrinsics");
}
}
Intrinsics(IntrinsicsOp op_type, const std::vector<ExprPtr>& params)
: ExprNodeBase(IntrinsicsDtype(op_type, params)),
params_(params),
op_type_(op_type) {
if (OpArgCount(op_type) != nparams()) {
throw malformed_input("bad arg count in Intrinsics");
}
}
Intrinsics(IntrinsicsOp op_type, Dtype dtype, std::vector<ExprPtr> params)
: ExprNodeBase(IntrinsicsDtype(op_type, dtype)),
params_(std::move(params)),
op_type_(op_type) {
if (OpArgCount(op_type) != nparams()) {
throw malformed_input("bad arg count in Intrinsics");
}
}
bool isPure() const {
return op_type_ != kRand;
}
size_t nparams() const {
return params_.size();
}
ExprPtr param(size_t index) const {
return params_[index];
}
const std::vector<ExprPtr>& params() const {
return params_;
}
void set_params(std::vector<ExprPtr> params) {
params_ = std::move(params);
}
static size_t OpArgCount(IntrinsicsOp op_type);
private:
static Dtype IntrinsicsDtype(IntrinsicsOp op_type, Dtype dt1);
static Dtype IntrinsicsDtype(IntrinsicsOp op_type, Dtype dt1, Dtype dt2);
static Dtype IntrinsicsDtype(
IntrinsicsOp op_type,
const std::vector<ExprPtr>& params);
std::vector<ExprPtr> params_;
IntrinsicsOp op_type_;
};
TORCH_API std::vector<ExprPtr> ExprHandleVectorToExprVector(
const std::vector<ExprHandle>&);
TORCH_API std::vector<ExprHandle> ExprVectorToExprHandleVector(
const std::vector<ExprPtr>&);
TORCH_API std::vector<VarPtr> VarHandleVectorToVarVector(
const std::vector<VarHandle>&);
TORCH_API std::vector<VarHandle> VarVectorToVarHandleVector(
const std::vector<VarPtr>&);
TORCH_API ExprPtr flatten_index(
const std::vector<ExprPtr>& dims,
const std::vector<ExprPtr>& indices,
const std::vector<ExprPtr>& strides);
} // namespace torch::jit::tensorexpr
```
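A brief usage sketch for the node classes above. It only uses constructors and `make` helpers declared in this header; the buffer name, shape, and the clamp-to-[0, 1] computation are made up for illustration.

```cpp
#include <torch/csrc/jit/tensorexpr/ir.h>

using namespace torch::jit::tensorexpr;

ExprHandle clamped_load_example() {
  BufHandle a("A", {ExprHandle(64)}, kFloat);
  VarHandle i("i", kInt);
  // Load A[i] and clamp it to [0, 1] with NaN-propagating Min/Max.
  ExprHandle val = Load::make(a, {i});
  ExprHandle zero = FloatImm::make(0.f);
  ExprHandle one = FloatImm::make(1.f);
  ExprHandle clamped = Min::make(Max::make(val, zero, true), one, true);
  // CompareSelect acts as a select: (i < 64) ? clamped : 0.f
  return CompareSelect::make(i, ExprHandle(64), clamped, zero, kLT);
}
```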
|
===========================================================================================================================================
SOURCE CODE FILE: ir_cloner.h
LINES: 1
SIZE: 2.35 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\ir_cloner.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <vector>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
namespace torch::jit::tensorexpr {
class TORCH_API IRCloner : public IRMutator {
public:
~IRCloner() override = default;
ExprPtr mutate(const AddPtr& v) override;
ExprPtr mutate(const SubPtr& v) override;
ExprPtr mutate(const MulPtr& v) override;
ExprPtr mutate(const DivPtr& v) override;
ExprPtr mutate(const ModPtr& v) override;
ExprPtr mutate(const MaxPtr& v) override;
ExprPtr mutate(const MinPtr& v) override;
ExprPtr mutate(const AndPtr& v) override;
ExprPtr mutate(const OrPtr& v) override;
ExprPtr mutate(const XorPtr& v) override;
ExprPtr mutate(const LshiftPtr& v) override;
ExprPtr mutate(const RshiftPtr& v) override;
ExprPtr mutate(const CompareSelectPtr& v) override;
#define IMM_MUTATE_DECLARE(Type, Name) \
ExprPtr mutate(const Name##ImmPtr& v) override;
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_MUTATE_DECLARE)
#undef IMM_MUTATE_DECLARE
ExprPtr mutate(const CastPtr& v) override;
ExprPtr mutate(const BitCastPtr& v) override;
ExprPtr mutate(const VarPtr& v) override;
ExprPtr mutate(const BufPtr& v) override;
ExprPtr mutate(const RampPtr& v) override;
ExprPtr mutate(const LoadPtr& v) override;
ExprPtr mutate(const BroadcastPtr& v) override;
ExprPtr mutate(const IfThenElsePtr& v) override;
ExprPtr mutate(const IntrinsicsPtr& v) override;
ExprPtr mutate(const TermPtr& v) override;
ExprPtr mutate(const PolynomialPtr& v) override;
ExprPtr mutate(const RoundOffPtr& v) override;
ExprPtr mutate(const MaxTermPtr& v) override;
ExprPtr mutate(const MinTermPtr& v) override;
ExprPtr mutate(const ReduceOpPtr& v) override;
StmtPtr mutate(const ForPtr& v) override;
StmtPtr mutate(const BlockPtr& v) override;
StmtPtr mutate(const StorePtr& v) override;
StmtPtr mutate(const AtomicAddPtr& v) override;
StmtPtr mutate(const SyncThreadsPtr& v) override;
StmtPtr mutate(const ExternalCallPtr& v) override;
StmtPtr mutate(const ExternalCallWithAllocPtr& v) override;
StmtPtr mutate(const AllocatePtr& v) override;
StmtPtr mutate(const FreePtr& v) override;
StmtPtr mutate(const LetPtr& v) override;
StmtPtr mutate(const CondPtr& v) override;
};
} // namespace torch::jit::tensorexpr
```
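A minimal usage sketch, assuming the usual `accept_mutator` entry point on `Stmt` (declared in `stmt.h`); `deep_copy` is a hypothetical helper:
```cpp
#include <torch/csrc/jit/tensorexpr/ir_cloner.h>
#include <torch/csrc/jit/tensorexpr/stmt.h>

using namespace torch::jit::tensorexpr;

// Deep-copy a statement tree so later in-place mutations do not alias it.
StmtPtr deep_copy(const StmtPtr& s) {
  IRCloner cloner;
  return s->accept_mutator(&cloner);
}
```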
|
============================================================================================================================================
SOURCE CODE FILE: ir_mutator.h
LINES: 1
SIZE: 2.37 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\ir_mutator.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
namespace torch::jit::tensorexpr {
class TORCH_API IRMutator {
public:
virtual ~IRMutator() = default;
virtual ExprPtr mutate(const AddPtr& v);
virtual ExprPtr mutate(const SubPtr& v);
virtual ExprPtr mutate(const MulPtr& v);
virtual ExprPtr mutate(const DivPtr& v);
virtual ExprPtr mutate(const ModPtr& v);
virtual ExprPtr mutate(const MaxPtr& v);
virtual ExprPtr mutate(const MinPtr& v);
virtual ExprPtr mutate(const AndPtr& v);
virtual ExprPtr mutate(const OrPtr& v);
virtual ExprPtr mutate(const XorPtr& v);
virtual ExprPtr mutate(const LshiftPtr& v);
virtual ExprPtr mutate(const RshiftPtr& v);
virtual ExprPtr mutate(const CompareSelectPtr& v);
#define IMM_MUTATE_DECLARE(Type, Name) \
virtual ExprPtr mutate(const Name##ImmPtr& v);
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_MUTATE_DECLARE)
#undef IMM_MUTATE_DECLARE
virtual ExprPtr mutate(const CastPtr& v);
virtual ExprPtr mutate(const BitCastPtr& v);
virtual ExprPtr mutate(const VarPtr& v);
virtual ExprPtr mutate(const BufPtr& v);
virtual ExprPtr mutate(const RampPtr& v);
virtual ExprPtr mutate(const LoadPtr& v);
virtual ExprPtr mutate(const BroadcastPtr& v);
virtual ExprPtr mutate(const IfThenElsePtr& v);
virtual ExprPtr mutate(const IntrinsicsPtr& v);
virtual ExprPtr mutate(const TermPtr& v);
virtual ExprPtr mutate(const PolynomialPtr& v);
virtual ExprPtr mutate(const RoundOffPtr& v);
virtual ExprPtr mutate(const MaxTermPtr& v);
virtual ExprPtr mutate(const MinTermPtr& v);
virtual ExprPtr mutate(const ReduceOpPtr& v);
virtual StmtPtr mutate(const ForPtr& v);
virtual StmtPtr mutate(const BlockPtr& v);
virtual StmtPtr mutate(const StorePtr& v);
virtual StmtPtr mutate(const AtomicAddPtr& v);
virtual StmtPtr mutate(const SyncThreadsPtr& v);
virtual StmtPtr mutate(const ExternalCallPtr& v);
virtual StmtPtr mutate(const ExternalCallWithAllocPtr& v);
virtual StmtPtr mutate(const AllocatePtr& v);
virtual StmtPtr mutate(const FreePtr& v);
virtual StmtPtr mutate(const FreeExtPtr& v);
virtual StmtPtr mutate(const PlacementAllocatePtr& v);
virtual StmtPtr mutate(const LetPtr& v);
virtual StmtPtr mutate(const CondPtr& v);
};
} // namespace torch::jit::tensorexpr
```
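A minimal sketch of how a rewriting pass is typically layered on top of `IRMutator`; `AddCounter` is a hypothetical example, not a library pass:
```cpp
#include <cstddef>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>

using namespace torch::jit::tensorexpr;

// Override only the nodes of interest and delegate the rest of the traversal
// to the base class. This pass merely counts Add nodes while rewriting; a
// real pass would return a different ExprPtr to change the tree.
class AddCounter : public IRMutator {
 public:
  ExprPtr mutate(const AddPtr& v) override {
    ++count_;
    return IRMutator::mutate(v);  // keep the default child-rewriting behavior
  }
  size_t count() const {
    return count_;
  }

 private:
  size_t count_ = 0;
};
```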
|
============================================================================================================================================
SOURCE CODE FILE: ir_printer.h
LINES: 1
SIZE: 4.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\ir_printer.h
ENCODING: utf-8
```h
#pragma once
#include <ostream>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
namespace torch::jit::tensorexpr {
class Tensor;
class TORCH_API IRPrinter : public IRVisitor {
public:
explicit IRPrinter(std::ostream& os) : printer_os_(this, os) {}
void print(ExprHandle);
void print(Expr&);
void print(Stmt&);
void visit(const AddPtr& v) override;
void visit(const SubPtr& v) override;
void visit(const MulPtr& v) override;
void visit(const DivPtr& v) override;
void visit(const ModPtr& v) override;
void visit(const MaxPtr& v) override;
void visit(const MinPtr& v) override;
void visit(const AndPtr& v) override;
void visit(const OrPtr& v) override;
void visit(const XorPtr& v) override;
void visit(const LshiftPtr& v) override;
void visit(const RshiftPtr& v) override;
void visit(const CompareSelectPtr& v) override;
#define IMM_PRINT_VISIT(Type, Name) void visit(const Name##ImmPtr& v) override;
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_PRINT_VISIT)
#undef IMM_PRINT_VISIT
void visit(const CastPtr& v) override;
void visit(const BitCastPtr& v) override;
void visit(const VarPtr& v) override;
void visit(const BufPtr& v) override;
void visit(const RampPtr& v) override;
void visit(const LoadPtr& v) override;
void visit(const BroadcastPtr& v) override;
void visit(const IfThenElsePtr& v) override;
void visit(const IntrinsicsPtr& v) override;
void visit(const TermPtr& v) override;
void visit(const PolynomialPtr& v) override;
void visit(const RoundOffPtr& v) override;
void visit(const MaxTermPtr& v) override;
void visit(const MinTermPtr& v) override;
void visit(const ReduceOpPtr& v) override;
void visit(const AtomicAddPtr& v) override;
void visit(const SyncThreadsPtr& v) override;
void visit(const ExternalCallPtr& v) override;
void visit(const ExternalCallWithAllocPtr& v) override;
void visit(const StorePtr& v) override;
void visit(const ForPtr& v) override;
void visit(const CondPtr& v) override;
void visit(const BlockPtr& v) override;
void visit(const AllocatePtr& v) override;
void visit(const FreePtr& v) override;
void visit(const FreeExtPtr& v) override;
void visit(const PlacementAllocatePtr& v) override;
void visit(const LetPtr& v) override;
// A child class may have a different rule for generating the dtype
// string, e.g. CUDA needs int64_t to be generated as long long.
virtual std::string dtypeToCppString(const Dtype& dtype);
std::ostream& os() {
return printer_os_;
}
class PrinterStream : public std::ostream {
public:
PrinterStream(IRPrinter* printer, std::ostream& os)
: std::ostream(os.rdbuf()), printer_(printer) {
initialize_imbue();
}
void initialize_imbue();
IRPrinter* printer() {
return printer_;
}
private:
IRPrinter* printer_ = nullptr;
};
protected:
std::string to_string(CompareSelectOperation op);
UniqueNameManager* name_manager() {
return &name_manager_;
}
void emitIndent();
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
int indent_ = 0;
private:
PrinterStream printer_os_;
UniqueNameManager name_manager_;
};
TORCH_API std::ostream& operator<<(std::ostream& stream, const Expr&);
TORCH_API std::ostream& operator<<(std::ostream& stream, const ExprHandle&);
TORCH_API std::ostream& operator<<(std::ostream& stream, const Stmt&);
TORCH_API std::ostream& operator<<(std::ostream& stream, const Tensor&);
TORCH_API void print(const ExprPtr& expr);
TORCH_API void print(const StmtPtr& stmt);
TORCH_API void print(const Tensor& t);
} // namespace torch::jit::tensorexpr
namespace std {
using torch::jit::tensorexpr::Expr;
using torch::jit::tensorexpr::ExprPtr;
using torch::jit::tensorexpr::Stmt;
using torch::jit::tensorexpr::StmtPtr;
using torch::jit::tensorexpr::Tensor;
TORCH_API std::string to_string(const ExprPtr& expr);
TORCH_API std::string to_string(const StmtPtr& stmt);
TORCH_API std::string to_string(const Tensor& t);
} // namespace std
```
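A minimal sketch of the usual debugging entry points declared above; `dump_stmt` is a hypothetical helper:
```cpp
#include <sstream>
#include <string>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>

using namespace torch::jit::tensorexpr;

// Render a statement to a string for logging.
std::string dump_stmt(const StmtPtr& s) {
  std::ostringstream oss;
  IRPrinter printer(oss);
  printer.print(*s);  // equivalently: oss << *s;  or  std::to_string(s)
  return oss.str();
}
```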
|
===============================================================================================================================================
SOURCE CODE FILE: ir_simplifier.h
LINES: 1
SIZE: 15.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\ir_simplifier.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/bounds_overlap.h>
#include <torch/csrc/jit/tensorexpr/eval.h>
#include <torch/csrc/jit/tensorexpr/hash_provider.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/types.h>
#include <utility>
/* IR Simplification
*
* Simplifies expressions in two stages:
* 1. Recursively traverse the map combining similar operations into Terms
* (interacted via Multiplication) and Polynomials (interacted via Addition). We
* reorder the components of each Term or Polynomial into a consistent order to
* allow combination or cancelling of like terms.
* 2. Once the format of the tree is minimal, expand each Term into a sequence
* of Muls, and each Polynomial into a sequence of Adds.
*/
namespace torch::jit::tensorexpr {
// A bunch of helpers for determining the Dtype of the output of a multi-argument
// Term or Polynomial.
template <class ExprType>
Dtype promoteTypesVec(const ExprPtr& s, const std::vector<ExprType>& v) {
Dtype t = s->dtype();
bool first = true;
for (const auto& e : v) {
if (first) {
t = Dtype(t.scalar_type(), e->dtype().lanes());
first = false;
}
t = promoteTypes(t, e->dtype());
}
return t;
}
template <class ExprType>
Dtype promoteTypesVec(const std::vector<ExprType>& v) {
if (v.empty()) {
throw malformed_input("empty list of types");
}
Dtype t = v[0]->dtype();
for (const auto& e : v) {
t = promoteTypes(t, e->dtype());
}
return t;
}
template <class ExprType>
Dtype promoteTypesMap(
const ExprPtr& s,
std::unordered_map<SimplifierHashType, ExprType>& m) {
Dtype t = s->dtype();
bool first = true;
for (auto& e : m) {
if (first) {
t = Dtype(t.scalar_type(), e.second->dtype().lanes());
first = false;
}
t = promoteTypes(t, e.second->dtype());
}
return t;
}
template <class ExprType>
Dtype promoteTypesVar(ExprType e) {
return e->dtype();
}
template <class ExprType, class... Args>
Dtype promoteTypesVar(ExprType e, Args... es) {
Dtype lhs = e->dtype();
Dtype rhs = promoteTypesVar(es...);
if (e->isConstant()) {
lhs = Dtype(lhs.scalar_type(), rhs.lanes());
}
return promoteTypes(lhs, rhs);
}
// Uses the evaluator to fold an Expression with constant terms.
// E.g. evaluateOp(Add(3, 4)) => 7.
// Expr v must not have any unbound Vars.
inline ExprPtr evaluateOp(const ExprPtr& v) {
ExprHandle handle(v);
ExprEval<SimpleIREvaluator> eval(handle);
switch (v->dtype().scalar_type()) {
#define TYPE_CASE(Type, Name) \
case ScalarType::Name: { \
Type val = eval.value<Type>(); \
return getImmediateByType(v->dtype().scalar_type(), val); \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE)
#undef TYPE_CASE
default:
LOG(FATAL) << "Unsupported datatype: " << v->dtype();
return nullptr;
}
return nullptr;
}
// A Term represents a grouping of Exprs through multiplication.
// E.g. product(scalar, *variables).
class Term : public ExprNode<Term> {
public:
template <class... Args>
Term(HashProvider& hasher, ExprPtr s, Args... ts)
: ExprNodeBase(promoteTypesVar(s, ts...)), scalar_(s), hasher_(hasher) {
CHECK(s->isConstant());
addComponent(ts...);
sort();
}
Term(HashProvider& hasher, ExprPtr s, std::vector<ExprPtr> v)
: ExprNodeBase(promoteTypesVec(s, v)),
variables_(std::move(v)),
scalar_(std::move(s)),
hasher_(hasher) {
sort();
}
// Convenience constructor from a map of hash -> var, used when merging Terms.
Term(
HashProvider& hasher,
const ExprPtr& s,
std::unordered_map<SimplifierHashType, ExprPtr> varmap)
: ExprNodeBase(promoteTypesMap(s, varmap)), scalar_(s), hasher_(hasher) {
for (auto& p : varmap) {
addComponent(p.second);
}
sort();
}
ExprPtr scalar() const {
return scalar_;
}
const std::vector<ExprPtr>& variables() const {
return variables_;
}
HashProvider& hasher() const {
return hasher_;
}
// Produce a hash of just the variable components of this term, to determine
// if it can be combined with another term.
SimplifierHashType hashVars() const;
private:
std::vector<ExprPtr> variables_;
ExprPtr scalar_;
HashProvider& hasher_;
void addComponent() {}
void addComponent(ExprPtr e) {
variables_.push_back(std::move(e));
}
template <class... Es>
void addComponent(ExprPtr e, Es&&... es) {
addComponent(std::move(e));
addComponent(std::forward<Es>(es)...);
}
// Sort by hash to normalize order of components.
void sort();
};
// Polynomial represents a grouping of Exprs by addition.
// E.g. sum(*variables, scalar).
// This would better be called Expression, but, naming conflict...
class Polynomial : public ExprNode<Polynomial> {
public:
template <class... Args>
Polynomial(HashProvider& hasher, ExprPtr s, Args... ts)
: ExprNodeBase(promoteTypesVar(s, ts...)), scalar_(s), hasher_(hasher) {
CHECK(s->isConstant());
addTerm(ts...);
sort();
}
Polynomial(HashProvider& hasher, const ExprPtr& s, std::vector<TermPtr> v)
: ExprNodeBase(promoteTypesVec(s, v)),
variables_(std::move(v)),
scalar_(s),
hasher_(hasher) {
sort();
}
// Helper constructor for list of terms with no scalar component.
Polynomial(HashProvider& hasher, std::vector<TermPtr> terms)
: ExprNodeBase(promoteTypesVec(terms)),
variables_(std::move(terms)),
scalar_(getImmediateByType(dtype(), 0)),
hasher_(hasher) {
sort();
}
// Convenience constructor for map of hash -> var, used when merging
// Polynomials.
Polynomial(
HashProvider& hasher,
const ExprPtr& s,
std::unordered_map<SimplifierHashType, TermPtr> varmap)
: ExprNodeBase(promoteTypesMap(s, varmap)), scalar_(s), hasher_(hasher) {
for (auto& p : varmap) {
addTerm(p.second);
}
sort();
}
ExprPtr scalar() const {
return scalar_;
}
const std::vector<TermPtr>& variables() const {
return variables_;
}
HashProvider& hasher() const {
return hasher_;
}
SimplifierHashType hashVars() const;
private:
std::vector<TermPtr> variables_;
ExprPtr scalar_;
HashProvider& hasher_;
void addTerm(TermPtr t) {
variables_.push_back(std::move(t));
}
template <class... Ts>
void addTerm(TermPtr t, Ts&&... ts) {
addTerm(std::move(t));
addTerm(std::forward<Ts>(ts)...);
}
// Sort by hash to normalize order of terms.
void sort();
};
class RoundOff : public BinaryOpNode<RoundOff> {
public:
RoundOff(ExprPtr lhs, ExprPtr rhs)
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kOther) {}
};
class MaxTerm : public ExprNode<MaxTerm> {
public:
template <class... Args>
MaxTerm(HashProvider& hasher, ExprPtr s, bool p, Args... ts)
: ExprNodeBase(s ? promoteTypesVar(s, ts...) : promoteTypesVar(ts...)),
scalar_(s),
hasher_(hasher),
propagate_nans_(p) {
addComponent(ts...);
uniquefy();
}
MaxTerm(
HashProvider& hasher,
const ExprPtr& s,
bool p,
std::vector<ExprPtr> v)
: ExprNodeBase(s ? promoteTypesVec(s, v) : promoteTypesVec(v)),
variables_(std::move(v)),
scalar_(s),
hasher_(hasher),
propagate_nans_(p) {
uniquefy();
}
bool propagate_nans() const {
return propagate_nans_;
}
ExprPtr scalar() const {
return scalar_;
}
const std::vector<ExprPtr>& variables() const {
return variables_;
}
HashProvider& hasher() const {
return hasher_;
}
private:
std::vector<ExprPtr> variables_;
ExprPtr scalar_;
HashProvider& hasher_;
bool propagate_nans_;
void addComponent() {}
void addComponent(ExprPtr e) {
variables_.push_back(std::move(e));
}
template <class... Es>
void addComponent(ExprPtr e, Es&&... es) {
addComponent(std::move(e));
addComponent(std::forward<Es>(es)...);
}
// Uniquefy the terms using their hash.
void uniquefy();
};
class MinTerm : public ExprNode<MinTerm> {
public:
template <class... Args>
MinTerm(HashProvider& hasher, ExprPtr s, bool p, Args... ts)
: ExprNodeBase(s ? promoteTypesVar(s, ts...) : promoteTypesVar(ts...)),
scalar_(s),
hasher_(hasher),
propagate_nans_(p) {
addComponent(ts...);
uniquefy();
}
MinTerm(
HashProvider& hasher,
const ExprPtr& s,
bool p,
std::vector<ExprPtr> v)
: ExprNodeBase(s ? promoteTypesVec(s, v) : promoteTypesVec(v)),
variables_(std::move(v)),
scalar_(s),
hasher_(hasher),
propagate_nans_(p) {
uniquefy();
}
bool propagate_nans() const {
return propagate_nans_;
}
ExprPtr scalar() const {
return scalar_;
}
const std::vector<ExprPtr>& variables() const {
return variables_;
}
HashProvider& hasher() const {
return hasher_;
}
private:
std::vector<ExprPtr> variables_;
ExprPtr scalar_;
HashProvider& hasher_;
bool propagate_nans_;
void addComponent() {}
void addComponent(ExprPtr e) {
variables_.push_back(std::move(e));
}
template <class... Es>
void addComponent(ExprPtr e, Es&&... es) {
addComponent(std::move(e));
addComponent(std::forward<Es>(es)...);
}
// Uniquefy the terms using their hash.
void uniquefy();
};
// Context-sensitive IR simplification
using VarBoundInfo = std::unordered_map<VarPtr, analysis::Bound>;
class TORCH_API SimplifierUnderContext : public IRMutator {
public:
~SimplifierUnderContext() override = default;
// Add boundary info for index variables in for-loops
StmtPtr mutate(const ForPtr& v) override;
ExprPtr mutate(const DivPtr& v) override;
ExprPtr mutate(const ModPtr& v) override;
ExprPtr mutate(const CompareSelectPtr& v) override;
ExprPtr mutate(const IfThenElsePtr& v) override;
protected:
bool getLoopBoundInfo(const ExprPtr& expr, analysis::Bound* loop_bound_info);
protected:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
HashProvider hasher_;
VarBoundInfo var_bound_info_;
};
// Stmt simplification should occur in both modes.
class TORCH_API PolynomialBase : public IRMutator {
public:
~PolynomialBase() override = default;
StmtPtr mutate(const BlockPtr& v) override;
StmtPtr mutate(const CondPtr& v) override;
StmtPtr mutate(const ForPtr& v) override;
// Trivially factorize terms by GCD of scalar components.
TermPtr factorizePolynomial(const PolynomialPtr& poly);
HashProvider& hasher() {
return hasher_;
}
protected:
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
HashProvider hasher_;
};
// Simplify the IR by combining arithmetic expressions over common terms.
class TORCH_API PolynomialTransformer : public PolynomialBase {
public:
using PolynomialBase::mutate;
// Inserts a term into the provided map; in the case of a hash collision it
// combines the term with the existing one and updates the map.
void addOrUpdateTerm(
std::unordered_map<SimplifierHashType, TermPtr>& varmap,
const TermPtr& term);
// Add Polynomial expressions, combining Terms representing the same
// variables.
ExprPtr addPolynomials(const PolynomialPtr& lhs, const PolynomialPtr& rhs);
// Insert a new Term into the provided polynomial. If the new term has
// variables in common with an existing term, the two are combined.
ExprPtr insertTerm(const PolynomialPtr& poly, const TermPtr& term);
// Merge and simplify addition.
ExprPtr mutate(const AddPtr& v) override;
// Subtract one term from another, cancelling if necessary.
ExprPtr subTerms(const TermPtr& lhs, TermPtr rhs, bool negated);
// Subtract the RHS Polynomial from the LHS Polynomial, cancelling out where
// possible.
ExprPtr subPolynomials(const PolynomialPtr& lhs, const PolynomialPtr& rhs);
// Merge and simplify subtraction.
ExprPtr mutate(const SubPtr& v) override;
// Multiply two terms together, usually creating a new term with the variable
// lists concatenated.
TermPtr mulTerms(const TermPtr& lhs, const TermPtr& rhs);
// Multiply a Polynomial by a Term.
ExprPtr polyByTerm(const PolynomialPtr& poly, const TermPtr& term);
// Match a rounding pattern and create a RoundOff if found.
ExprPtr isRoundOff(const ExprPtr& lhs, const ExprPtr& rhs);
// Inserts a new component into a term, simplifying if possible.
ExprPtr insertIntoTerm(const TermPtr& term, const ExprPtr& expr);
// Merge and simplify multiplication.
ExprPtr mutate(const MulPtr& v) override;
ExprPtr mutate(const DivPtr& v) override;
ExprPtr mutate(const ModPtr& v) override;
ExprPtr mutate(const AndPtr& v) override;
ExprPtr mutate(const XorPtr& v) override;
ExprPtr mutate(const LshiftPtr& v) override;
ExprPtr mutate(const RshiftPtr& v) override;
ExprPtr mutate(const MaxPtr& v) override;
ExprPtr mutate(const MinPtr& v) override;
ExprPtr mutate(const CompareSelectPtr& v) override;
ExprPtr mutate(const IntrinsicsPtr& v) override;
ExprPtr mutate(const CastPtr& v) override;
ExprPtr mutate(const IfThenElsePtr& v) override;
static ExprPtr simplify(ExprPtr e);
static ExprHandle simplify(const ExprHandle& e);
static StmtPtr simplify(StmtPtr e);
};
// Expands Terms and Polynomial expressions into primitive operations.
// Does some simple factorization and reordering.
class TORCH_API TermExpander : public PolynomialBase {
PolynomialTransformer* simplifier_;
std::set<VarPtr> eliminated_allocations_;
public:
using PolynomialBase::mutate;
TermExpander(PolynomialTransformer* simplifier) : simplifier_(simplifier) {}
bool check_safe() {
return eliminated_allocations_.empty();
}
// Expand Terms out to a series of Muls.
ExprPtr mutate(const TermPtr& v) override;
// Expand Polynomials out to a series of Adds.
ExprPtr mutate(const PolynomialPtr& v) override;
// Expand MaxTerms to a series of Max ops.
ExprPtr mutate(const MaxTermPtr& v) override;
// Expand MinTerms to a series of Min ops.
ExprPtr mutate(const MinTermPtr& v) override;
// Expand RoundOff to its components: Mul(Div(lhs, rhs), rhs).
ExprPtr mutate(const RoundOffPtr& v) override;
// Eliminate zero length allocations.
StmtPtr mutate(const AllocatePtr& v) override;
StmtPtr mutate(const FreePtr& v) override;
// Override to enable condition fusing.
BlockPtr fuseConditions(BlockPtr v);
StmtPtr fuseSyncThreads(BlockPtr block);
StmtPtr mutate(const BlockPtr& v) override;
};
class TORCH_API IRSimplifier {
public:
static StmtPtr simplify(StmtPtr s);
static ExprPtr simplify(ExprPtr e);
static ExprHandle simplify(const ExprHandle& e) {
return ExprHandle(simplify(e.node()));
}
};
// Flattens the buf's dims and runs the simplifier on the flattened size.
ExprPtr buf_flat_size(const BufPtr& v);
// Returns true if expressions A and B can be simplified to an equal expression.
TORCH_API bool exprEquals(const ExprPtr& A, const ExprPtr& B);
} // namespace torch::jit::tensorexpr
```
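A minimal sketch of the two-stage pipeline described above, assuming the `VarHandle`/`ExprHandle` arithmetic overloads from `expr.h` and the `kInt` Dtype constant:
```cpp
#include <torch/csrc/jit/tensorexpr/expr.h>
#include <torch/csrc/jit/tensorexpr/ir_simplifier.h>

using namespace torch::jit::tensorexpr;

ExprHandle simplify_demo() {
  VarHandle x("x", kInt);
  // Stage 1 groups x*4 and x*6 into Terms of one Polynomial and combines the
  // like terms; stage 2 (TermExpander) lowers the result back to plain
  // Mul/Add nodes, so the expected output is equivalent to x * 10.
  return IRSimplifier::simplify(x * 4 + x * 6);
}
```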
|
=============================================================================================================================================
SOURCE CODE FILE: ir_verifier.h
LINES: 1
SIZE: 1.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\ir_verifier.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
namespace torch::jit::tensorexpr {
class Expr;
class ExprHandle;
class Mod;
class And;
class Or;
class Xor;
class Lshift;
class Rshift;
class CompareSelect;
class Ramp;
class Load;
class IfThenElse;
class Intrinsics;
class Stmt;
class ExternalCall;
class Store;
class For;
class Block;
class TORCH_API IRVerifier : public IRVisitor {
public:
IRVerifier() = default;
void visit(const ModPtr& v) override;
void visit(const AndPtr& v) override;
void visit(const OrPtr& v) override;
void visit(const XorPtr& v) override;
void visit(const LshiftPtr& v) override;
void visit(const RshiftPtr& v) override;
void visit(const CompareSelectPtr& v) override;
void visit(const RampPtr& v) override;
void visit(const LoadPtr& v) override;
void visit(const IfThenElsePtr& v) override;
void visit(const IntrinsicsPtr& v) override;
void visit(const ExternalCallPtr& v) override;
void visit(const StorePtr& v) override;
void visit(const ForPtr& v) override;
void visit(const BlockPtr& v) override;
};
TORCH_API void verify(const StmtPtr&);
TORCH_API void verify(const ExprPtr&);
TORCH_API void verify(const ExprHandle&);
} // namespace torch::jit::tensorexpr
```
|
============================================================================================================================================
SOURCE CODE FILE: ir_visitor.h
LINES: 1
SIZE: 2.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\ir_visitor.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
namespace torch::jit::tensorexpr {
class TORCH_API IRVisitor {
public:
virtual ~IRVisitor() = default;
virtual void visit(const AddPtr& v);
virtual void visit(const SubPtr& v);
virtual void visit(const MulPtr& v);
virtual void visit(const DivPtr& v);
virtual void visit(const ModPtr& v);
virtual void visit(const MaxPtr& v);
virtual void visit(const MinPtr& v);
virtual void visit(const AndPtr& v);
virtual void visit(const OrPtr& v);
virtual void visit(const XorPtr& v);
virtual void visit(const LshiftPtr& v);
virtual void visit(const RshiftPtr& v);
virtual void visit(const CompareSelectPtr& v);
#define IMM_PRINT_VISIT(Type, Name) virtual void visit(const Name##ImmPtr& v);
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_PRINT_VISIT)
#undef IMM_PRINT_VISIT
virtual void visit(const CastPtr& v);
virtual void visit(const BitCastPtr& v);
virtual void visit(const VarPtr& v);
virtual void visit(const BufPtr& v);
virtual void visit(const RampPtr& v);
virtual void visit(const LoadPtr& v);
virtual void visit(const ForPtr& v);
virtual void visit(const BlockPtr& v);
virtual void visit(const StorePtr& v);
virtual void visit(const BroadcastPtr& v);
virtual void visit(const IfThenElsePtr& v);
virtual void visit(const IntrinsicsPtr& v);
virtual void visit(const AllocatePtr& v);
virtual void visit(const FreePtr& v);
virtual void visit(const FreeExtPtr& v);
virtual void visit(const PlacementAllocatePtr& v);
virtual void visit(const LetPtr& v);
virtual void visit(const CondPtr& v);
virtual void visit(const TermPtr& v);
virtual void visit(const PolynomialPtr& v);
virtual void visit(const RoundOffPtr& v);
virtual void visit(const MaxTermPtr& v);
virtual void visit(const MinTermPtr& v);
virtual void visit(const ReduceOpPtr& v);
virtual void visit(const AtomicAddPtr& v);
virtual void visit(const SyncThreadsPtr& v);
virtual void visit(const ExternalCallPtr& v);
virtual void visit(const ExternalCallWithAllocPtr& v);
};
} // namespace torch::jit::tensorexpr
```
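A minimal sketch of a read-only analysis built on `IRVisitor`; `MaxLoopDepth` is a hypothetical example, not a library class:
```cpp
#include <algorithm>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/stmt.h>

using namespace torch::jit::tensorexpr;

// Measures the deepest For-loop nesting reachable from a statement.
class MaxLoopDepth : public IRVisitor {
 public:
  void visit(const ForPtr& v) override {
    ++depth_;
    max_depth_ = std::max(max_depth_, depth_);
    IRVisitor::visit(v);  // the base class recurses into the loop body
    --depth_;
  }
  int max_depth() const {
    return max_depth_;
  }

 private:
  int depth_ = 0;
  int max_depth_ = 0;
};

// Usage: MaxLoopDepth d; stmt->accept(&d); int levels = d.max_depth();
```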
|
========================================================================================================================================
SOURCE CODE FILE: kernel.h
LINES: 1
SIZE: 13.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\kernel.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h>
#include <torch/csrc/jit/passes/utils/subgraph_utils.h>
#include <torch/csrc/jit/runtime/interpreter.h>
#include <torch/csrc/jit/tensorexpr/analysis.h>
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/lowerings.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
namespace torch::jit::tensorexpr {
struct SmallSizeTPairHash {
public:
std::size_t operator()(const std::pair<size_t, size_t>& x) const {
// hashing input index and then dim index
return x.first * 128 + x.second;
}
};
// Returns true if the TE fuser supports this conv2d.
bool conv2dIsSupportedJit(const Node* node);
// Returns true if the TE fuser supports this conv2d with mkldnn prepacked conv.
bool mkldnnPrepackedConvIsSupportedJit(const Node* node);
// Returns true if the TE _convolution node is Conv2d.
bool isConv2d(const Node* node);
// Returns true if the TE fuser supports this matmul.
bool matmulIsSupported(const Node* node);
template <typename T>
inline std::vector<int64_t> bufferSizes(const T& t) {
std::vector<int64_t> sizes;
for (size_t i = 0; i < t->ndim(); i++) {
sizes.push_back(*intValue(t->dim(i)));
}
return sizes;
}
// Get the dimensions of a value.
std::vector<ExprHandle> valueShape(const ArgValue& v);
// If v is a tensor, broadcast it to match the shape of axes, or return
// directly if v is a constant.
ExprHandle tensorOrConstant(
const ArgValue& v,
const std::vector<ExprHandle>& axes);
int64_t normalizeAndCheckIndex(int64_t idx, int64_t list_size);
ExprHandle broadcast(const BufHandle& b, const std::vector<ExprHandle>& axes);
ExprHandle constant(const ArgValue& v);
std::vector<ExprHandle> computeIndicesToBroadcast(
const std::vector<ExprHandle>& outputAxes,
const std::vector<ExprHandle>& inputSizes);
inline std::string getArgValueName(const ArgValue& a) {
if (std::holds_alternative<tensorexpr::BufHandle>(a)) {
return "BufHandle";
} else if (std::holds_alternative<tensorexpr::VarHandle>(a)) {
return "VarHandle";
} else if (std::holds_alternative<double>(a)) {
return "double";
} else if (std::holds_alternative<int64_t>(a)) {
return "int64_t";
} else if (std::holds_alternative<bool>(a)) {
return "bool";
} else if (std::holds_alternative<BufList>(a)) {
return "BufList";
} else if (std::holds_alternative<DoubleList>(a)) {
return "DoubleList";
} else if (std::holds_alternative<IntList>(a)) {
return "IntList";
} else if (std::holds_alternative<ArgNone>(a)) {
return "None";
} else {
throw std::runtime_error("ArgValue type not handled in string conversion");
}
}
template <class T>
std::vector<T> convertVecArgValue(const std::vector<ArgValue>& v) {
std::vector<T> res;
for (auto& x : v) {
auto val = std::get_if<T>(&x);
if (val) {
res.push_back(*val);
} else {
throw std::runtime_error(
"vector type not homogeneous - found " + getArgValueName(x) +
", expected " + getArgValueName(v[0]));
}
}
return res;
}
class TORCH_API TensorExprKernel {
struct ConstantDescr {
BufPtr buf;
// Only one of ptr and node is used at a time
// 1) ptr for the constant tensors
// 2) node for the constant custom class objects
void* ptr = nullptr;
Node* node = nullptr;
};
public:
// Constructor Params:
// * subgraph
// - the graph that needs to be compiled.
// * kernel_func_name
// - the name that should be used for the generated kernel.
// * custom_lowerings
// - map that represents custom lowering definitions for a set of ops.
// * symbolic_shape_inputs
// - a list of symbolic graph inputs that represent the symbolic dims of
// the input tensors.
// * pre_alloc
// - a flag to control pre-allocation of buffers.
explicit TensorExprKernel(
const std::shared_ptr<Graph>& subgraph,
std::string kernel_func_name,
std::unordered_map<c10::Symbol, NNCLoweringFunction> custom_lowerings =
{},
std::vector<int64_t> symbolic_shape_inputs = {},
bool pre_alloc = false,
std::unordered_map<
const torch::jit::Value*,
std::vector<torch::jit::StrideInput>> symbolic_strides = {});
explicit TensorExprKernel(
const std::shared_ptr<Graph>& subgraph,
std::unordered_map<c10::Symbol, NNCLoweringFunction> custom_lowerings =
{},
std::vector<int64_t> symbolic_shape_inputs = {},
bool pre_alloc = false,
std::unordered_map<
const torch::jit::Value*,
std::vector<torch::jit::StrideInput>> symbolic_strides = {})
: TensorExprKernel(
subgraph,
SubgraphUtils::generateNameForGraph(subgraph),
std::move(custom_lowerings),
std::move(symbolic_shape_inputs),
pre_alloc,
std::move(symbolic_strides)) {}
void run(Stack& stack) const;
void runFast(
const std::vector<void*>& inputs,
const std::vector<void*>& outputs) const;
// Expected format of stack:
// ... <outputs> <inputs>
// i.e., output IValues must be below the input IValues in the stack.
void runWithAllocatedOutputs(Stack& stack) const;
void fallback(Stack& stack) const {
InterpreterState(code_).run(stack);
}
void recompile();
StmtPtr getCodeGenStmt();
std::string getCodeText(const std::string& attr = "") {
return codegen_->getCodeText(attr);
}
const std::shared_ptr<Graph> graph() {
return graph_;
}
const std::vector<ConstantDescr>& getConstantDescriptors() const {
return constants_;
}
const std::vector<CodeGen::BufferArg>& getBufferArgs() const {
return bufferArgs_;
}
const std::string& getKernelName() const {
return (codegen_ ? codegen_->kernel_func_name() : kernel_func_name_);
}
const std::vector<int64_t>& getSymbolicShapeInputs() const {
return symbolic_shape_inputs_;
}
private:
enum BackendType {
kUninitialized,
kSimpleIREval,
kLLVMCodeGen,
kCudaCodeGen,
kBlockCodeGen,
};
enum MemoryLayoutPolicy {
kContiguous,
kChannelsLastNdContiguous,
};
void compile();
void genInputDebugNames();
void runKernel(Stack& stack) const;
std::vector<ExprHandle> sizesForValue(const torch::jit::Value* v);
// These functions broadcast shapes and also set the `hasBroadcast_` flag.
std::vector<ExprHandle> broadcastShapesMut(
const std::vector<ExprHandle>& a,
const std::vector<ExprHandle>& b);
std::vector<ExprHandle> broadcastShapesMut(
std::vector<std::vector<ExprHandle>> shapes);
ArgValue toArg(const torch::jit::Value* v) const;
ExprHandle constant(const torch::jit::Value* v);
Tensor computeValue(const torch::jit::Value* v);
void bindConstant(const torch::jit::Value* v);
StmtPtr transformLoops(BackendType backendType, StmtPtr st);
std::string getCodeGenName(BackendType backendType);
void getStaticOutputSizesAndStrides(
const at::ArrayRef<IValue>& inputs,
std::vector<std::vector<int64_t>>* static_sizes,
std::vector<std::vector<int64_t>>* static_strides) const;
std::vector<CodeGen::CallArg> prepareRunArgs(
const at::ArrayRef<IValue>& inputs,
std::vector<at::Tensor>& outputs) const;
BackendType inferBackendTypeFromDevice(at::Device device);
Tensor bindInput(const torch::jit::Value* input);
BlockPtr bindAllInputs();
// Deduce the memory layout policy to be propagated within
// NNC fusion group. The memory layout policy could be `kContiguous`
// or `kChannelsLastNdContiguous`.
// `kContiguous`: Always convert the non-contiguous input tensors and
// internal buffers to contiguous.
// `kChannelsLastNdContiguous`: Always convert the input tensors and
// internal buffers to channels-last contiguous.
// Currently, the rule is simple.
// If all the input and output tensors of the NNC fusion group are channels-last
// contiguous, the policy is `kChannelsLastNdContiguous`. Otherwise, it
// is always `kContiguous`.
void deduceMemoryLayoutPolicy();
Tensor convertSymbolicOutputToCorrectStrides(torch::jit::Value* v);
Tensor convertStaticShapeOutputToCorrectStrides(torch::jit::Value* v);
Tensor convertSymbolicOutputToCorrectStrides(
const std::vector<ExprHandle>& sizes,
const std::vector<size_t>& sorted_stride_indices_descending,
const std::vector<ExprPtr>& strides,
BufPtr& buf);
NNCLoweringFunction getCustomLoweringFor(c10::Symbol op) const;
std::unordered_map<c10::Symbol, NNCLoweringFunction> getCustomLowerings()
const {
return custom_lowerings_;
}
// Allocate memory for intermediate buffers at compile time.
// Specifically, we pre-allocate memory for intermediate buffers with static
// size and manage these buffers in the way we manage JIT constant tensors:
// push the buf args into the stack so NNC IR can access them at runtime.
std::vector<BufPtr> preAllocIntermediateBufs(
const std::vector<BufPtr>& interm_bufs);
struct UnpackedTensorOptions {
std::optional<c10::ScalarType> dtype;
std::optional<c10::Layout> layout;
std::optional<c10::Device> device;
std::optional<bool> pinned_memory;
UnpackedTensorOptions(const c10::TensorOptions& opts)
: dtype(c10::optTypeMetaToScalarType(opts.dtype_opt())),
layout(opts.layout_opt()),
device(opts.device_opt()),
pinned_memory(opts.pinned_memory_opt()) {}
};
ExprHandle getVarForShape(const c10::ShapeSymbol& ss);
std::vector<ExprHandle> computeInputTensorDims(
const torch::jit::Value* input);
ExprHandle getStrideArg(size_t tensor_input, size_t stride_index);
std::vector<ExprHandle> sizesFromSymbolicShape(
const c10::SymbolicShape& shape);
std::vector<ExprHandle> getInputStrides(
const torch::jit::Value* input,
const std::vector<ExprHandle>& inputTensorDims);
std::vector<torch::jit::StrideInput>& getSymbolicStrideDesc(
const torch::jit::Value* value);
// Apply graph-level optimizations to the graph owned by the current fusion
// group, such as concatenation optimization and post-op fusion.
void optimizeOwningGraph();
int64_t nInputs_ = 0;
int64_t nOutputs_ = 0;
std::vector<CodeGen::BufferArg> bufferArgs_;
std::vector<std::vector<int64_t>> tensorOutputSizes_;
std::vector<std::vector<int64_t>> tensorOutputStrides_;
std::vector<torch::jit::StrideInput> tensorOutputStrideDesc_;
std::vector<bool> isOutputScalar_;
std::vector<UnpackedTensorOptions> tensorOutputTensorOptions_;
std::unordered_set<BufPtr> bufOutputs_;
std::unordered_set<BufPtr> bufsToBeParallelized_;
std::unordered_map<const torch::jit::Value*, BufPtr> bufs_;
std::unordered_map<const torch::jit::Value*, VarHandle> scalars_;
std::unordered_map<const torch::jit::Value*, std::string> input_name_map_;
std::unique_ptr<CodeGen> codegen_;
at::Device device_ = at::kCPU;
std::shared_ptr<Graph> graph_;
Code code_;
bool allow_fallback_{false};
bool use_fallback_{false};
bool hasRandom_{false};
bool hasBroadcast_{false};
std::unordered_map<const torch::jit::Value*, std::vector<ExprHandle>>
known_sizes_;
std::vector<std::vector<ExprHandle>> tensorOutputSymbolicSizes_;
// A map from ShapeSymbol.value() to the corresponding Var.
std::unordered_map<int64_t, VarHandle> shapeSymbolToVar_;
std::unordered_map<ExprPtr, size_t> shapeSymbolInputPos_;
// List of values corresponding to the ShapeSymbols that are inputs to
// kernel being compiled. The order of these values correspond to the order
// of the symbolic inputs at the end of the list of inputs to the kernel.
std::vector<int64_t> symbolic_shape_inputs_;
bool has_symbolic_shapes_{false};
std::vector<at::Tensor> unpacked_constant_tensors_;
std::vector<ConstantDescr> constants_;
std::unordered_map<c10::Symbol, NNCLoweringFunction> custom_lowerings_;
StmtPtr stmt_ = nullptr;
bool pre_alloc_{false};
std::string kernel_func_name_;
// Pairs of (input stack index, tensor stride index) that will be appended as
// codegen args
std::vector<std::pair<size_t, size_t>> input_stride_args_;
// map from <input index, tensor dimension> to stride as arg VarHandle
std::unordered_map<std::pair<size_t, size_t>, VarHandle, SmallSizeTPairHash>
strideArgToVar_;
std::unordered_map<
const torch::jit::Value*,
std::vector<torch::jit::StrideInput>>
symbolic_strides_;
// Memory layout to be propagated with fusion group
MemoryLayoutPolicy memory_layout_policy_ = MemoryLayoutPolicy::kContiguous;
};
TORCH_API int& getTECudaPointwiseLoopLevels();
TORCH_API int& getTECudaPointwiseBlockCount();
TORCH_API int& getTECudaPointwiseBlockSize();
TORCH_API bool& getTEGenerateBlockCode();
TORCH_API bool& getTEMustUseLLVMOnCPU();
TORCH_API bool fallbackAllowed();
TORCH_API bool setFallbackAllowed(bool value);
TORCH_API bool& getCatWoConditionals();
TORCH_API bool& getOptConditionals();
TORCH_API std::optional<at::Device> pickDeviceType(
const at::ArrayRef<torch::jit::Value*>& inputs);
bool isContiguous(
const torch::jit::Value* v,
at::MemoryFormat memory_format = at::MemoryFormat::Contiguous);
} // namespace torch::jit::tensorexpr
```
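A minimal end-to-end sketch, assuming `subgraph` is an already-fused TensorExpr subgraph with a single tensor input and output, and that `Stack` is the usual `std::vector<IValue>`; `run_fused` is a hypothetical helper:
```cpp
#include <torch/csrc/jit/tensorexpr/kernel.h>

using namespace torch::jit;

at::Tensor run_fused(
    const std::shared_ptr<Graph>& subgraph,
    const at::Tensor& input) {
  // Compilation (lowering, loop transforms, codegen) happens in the ctor.
  tensorexpr::TensorExprKernel kernel(subgraph);
  Stack stack;
  stack.emplace_back(input);
  kernel.run(stack);              // pops the inputs, pushes the outputs
  return stack.at(0).toTensor();  // single-output case
}
```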
|
==============================================================================================================================================
SOURCE CODE FILE: llvm_codegen.h
LINES: 1
SIZE: 3.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\llvm_codegen.h
ENCODING: utf-8
```h
#pragma once
#ifdef TORCH_ENABLE_LLVM
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <optional>
#include <unordered_map>
#include <vector>
namespace torch {
namespace jit {
namespace tensorexpr {
class LLVMCodeGenImpl;
class LLVMCodeGenCallee;
class TORCH_API LLVMCodeGen : public CodeGen {
public:
explicit LLVMCodeGen(
StmtPtr stmt,
const std::vector<BufferArg>& args,
at::Device device = at::kCPU,
const std::string& kernel_func_name = "func",
Dtype dtype = kInt,
std::optional<std::string> triple = std::nullopt,
std::optional<std::string> cpu = std::nullopt,
std::optional<std::string> attrs = std::nullopt);
explicit LLVMCodeGen(StmtPtr stmt);
LLVMCodeGen() = delete;
~LLVMCodeGen() override;
// Cleans up all the memory used during the LLVM code generation pass except
// the generated kernel. After calling this method, users should not call
// methods like `getCodeText` that require the LLVMCodeGenImpl data. However,
// users can continue to call this kernel using `call` and `call_raw`.
void cleanup_memory();
TORCH_API void call(const std::vector<CallArg>& args) override;
TORCH_API void call_raw(const std::vector<void*>& args) override;
TORCH_API void call_with_numel(void** args, int64_t numel) override;
at::Tensor empty_strided(
c10::IntArrayRef size,
c10::IntArrayRef stride,
std::optional<c10::ScalarType> dtype_opt,
std::optional<c10::Layout> layout_opt,
std::optional<c10::Device> device_opt,
std::optional<bool> pin_memory_opt) override;
template <typename T>
T value() {
return value<T>(nullptr);
}
template <typename T>
T value(std::vector<void*>& args) {
return value<T>(args.data());
}
template <typename T>
T value(void** args) {
T (*fp)(void**) = (T(*)(void**))getKernelAddress(callee_.get());
T rv = fp(args);
return rv;
}
std::string getCodeText(const std::string& attr = "") override;
private:
void* getKernelAddress(LLVMCodeGenCallee* callee);
std::unique_ptr<LLVMCodeGenCallee> callee_;
std::unique_ptr<LLVMCodeGenImpl> impl_;
};
struct TORCH_API LLVMCodeGenBuilder {
using BufferArg = CodeGen::BufferArg;
LLVMCodeGenBuilder(StmtPtr stmt, std::vector<BufferArg> args)
: stmt_(stmt), args_(std::move(args)) {}
LLVMCodeGenBuilder& device(at::Device device) {
device_ = device;
return *this;
}
LLVMCodeGenBuilder& kernelFuncName(std::string name) {
kernelFuncName_ = std::move(name);
return *this;
}
LLVMCodeGenBuilder& dtype(Dtype d) {
dtype_ = d;
return *this;
}
LLVMCodeGenBuilder& triple(std::string triple) {
triple_ = std::move(triple);
return *this;
}
LLVMCodeGenBuilder& cpu(std::string cpu) {
cpu_ = std::move(cpu);
return *this;
}
LLVMCodeGenBuilder& attrs(std::string attrs) {
attrs_ = std::move(attrs);
return *this;
}
std::unique_ptr<LLVMCodeGen> build() {
return std::make_unique<LLVMCodeGen>(
stmt_, args_, device_, kernelFuncName_, dtype_, triple_, cpu_, attrs_);
}
private:
StmtPtr stmt_;
std::vector<BufferArg> args_;
at::Device device_ = at::kCPU;
std::string kernelFuncName_ = "func";
Dtype dtype_ = kInt;
std::optional<std::string> triple_ = std::nullopt;
std::optional<std::string> cpu_ = std::nullopt;
std::optional<std::string> attrs_ = std::nullopt;
};
TORCH_API std::optional<std::string>& LLVMTargetTriple();
TORCH_API std::optional<std::string>& LLVMTargetCPU();
TORCH_API std::optional<std::string>& LLVMTargetAttrs();
TORCH_API bool& LLVMAOTWorkflow();
} // namespace tensorexpr
} // namespace jit
} // namespace torch
#endif // TORCH_ENABLE_LLVM
```
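A minimal sketch of the builder declared above (LLVM builds only); `make_cpu_kernel` is a hypothetical helper and assumes `stmt` and `args` come from an already-lowered LoopNest:
```cpp
#ifdef TORCH_ENABLE_LLVM
#include <memory>
#include <vector>
#include <torch/csrc/jit/tensorexpr/llvm_codegen.h>

using namespace torch::jit::tensorexpr;

std::unique_ptr<LLVMCodeGen> make_cpu_kernel(
    StmtPtr stmt,
    std::vector<CodeGen::BufferArg> args) {
  return LLVMCodeGenBuilder(std::move(stmt), std::move(args))
      .kernelFuncName("fused_kernel")
      .device(at::kCPU)
      .build();
}
// Later: cg->call_raw({in_ptr, out_ptr}); and cg->cleanup_memory() frees the
// codegen scaffolding while keeping the compiled kernel callable.
#endif // TORCH_ENABLE_LLVM
```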
|
==========================================================================================================================================
SOURCE CODE FILE: llvm_jit.h
LINES: 1
SIZE: 1.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\llvm_jit.h
ENCODING: utf-8
```h
#pragma once
#ifdef TORCH_ENABLE_LLVM
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <torch/csrc/Export.h>
#include <optional>
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
#include <llvm/ExecutionEngine/JITSymbol.h>
C10_DIAGNOSTIC_POP()
#include <llvm/ExecutionEngine/Orc/Core.h>
#include <llvm/ExecutionEngine/Orc/ThreadSafeModule.h>
#include <llvm/Target/TargetMachine.h>
#include <memory>
#include <string>
namespace torch {
namespace jit {
namespace tensorexpr {
inline std::string formatError(llvm::Error&& err, const char* msg) {
static constexpr const char* defaultErrorMsg =
"Unexpected failure in LLVM JIT";
std::string errorMsg(msg ? msg : defaultErrorMsg);
llvm::raw_string_ostream ss(errorMsg);
ss << ": " << err;
return ss.str();
}
template <typename T>
T assertSuccess(llvm::Expected<T> valOrErr, const char* msg = nullptr) {
TORCH_INTERNAL_ASSERT(valOrErr, formatError(valOrErr.takeError(), msg));
return std::move(*valOrErr);
}
inline void assertSuccess(llvm::Error err, const char* msg = nullptr) {
TORCH_INTERNAL_ASSERT(!err, formatError(std::move(err), msg));
}
} // namespace tensorexpr
} // namespace jit
} // namespace torch
namespace llvm {
namespace orc {
class PytorchLLVMJITImpl;
class TORCH_API PytorchLLVMJIT {
public:
PytorchLLVMJIT(
std::optional<std::string> triple,
std::optional<std::string> cpu,
std::optional<std::string> attrs);
~PytorchLLVMJIT();
void addModule(std::unique_ptr<Module> M, std::unique_ptr<LLVMContext> C);
JITSymbol findSymbol(const std::string Name);
bool hasSymbol(const std::string& Name);
TargetMachine& getTargetMachine();
const DataLayout& getDataLayout();
private:
// Use the PImpl idiom here to hide the no-rtti parts of the JIT structure.
std::unique_ptr<PytorchLLVMJITImpl> impl_;
};
} // end namespace orc
} // end namespace llvm
#endif // TORCH_ENABLE_LLVM
```
|
==========================================================================================================================================
SOURCE CODE FILE: loopnest.h
LINES: 1
SIZE: 21.80 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\loopnest.h
ENCODING: utf-8
```h
#pragma once
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
namespace torch::jit::tensorexpr {
class Expr;
class Var;
class Buf;
class Tensor;
class Function;
class Stmt;
class For;
class Block;
class Store;
class Dtype;
class TORCH_API LoopNest {
public:
// A constructor for building a LoopNest from a list of Tensors
LoopNest(
const std::vector<Tensor>& output_tensors,
const std::vector<Tensor>& tensors_to_compute);
// A convenience constructor for the case when all tensors are output tensors
LoopNest(const std::vector<Tensor>& output_tensors);
// A constructor for building a LoopNest from a Stmt and a list of output
// buffers.
LoopNest(StmtPtr stmt, std::unordered_set<BufPtr> output_bufs);
// A constructor for building a LoopNest from another loopnest. It clones the
// other loopnest's stmt.
LoopNest(const LoopNest& other);
StmtPtr root_stmt() const {
return root_stmt_;
}
std::vector<ForPtr> getLoopStmtsFor(const Tensor&) const;
std::vector<ForPtr> getLoopStmtsFor(const BufPtr&) const;
std::vector<ForPtr> getLoopStmtsFor(StmtPtr) const;
StmtPtr getLoopBodyFor(const Tensor&) const;
StmtPtr getLoopBodyFor(BufPtr) const;
// Returns the For stmt indexed by 'indices' in the 'root' For stmt.
// 'indices' indicates the path from 'root' to the returned loop in the AST, e.g.,
//
// root: for(int i...){
// j_loop: for (int j...){
// k1_loop: for (int k1...){
// A[i, j, k1] = ....
// }
// B[i, j] = ...
// k2_loop: for (int k2...){
// A[i, j, k2] = ...
// }
// }
// }
//
// the path from 'root' to 'j_loop' is [0]
// the path from 'root' to 'k1_loop' is [0, 0]
// the path from 'root' to 'k2_loop' is [0, 2]
ForPtr getLoopAt(ForPtr root, const std::vector<int>& indices) const;
// Returns the For stmt that is immediately enclosing the given stmt.
static ForPtr getParentLoop(const StmtPtr& st);
// Returns the list of For stmts corresponding to the loopnest that is
// enclosing the given stmt.
static std::vector<ForPtr> getEnclosingLoopNest(const StmtPtr& st);
// Returns a list of all Stmts that write to the given buf.
std::vector<StmtPtr> getAllWritesToBuf(BufPtr) const;
// The following methods return the For loops that contain writes to
// the given buf.
//
// For example, consider the following code:
// for i1
// for j1
// a[i1,j1] =
// for i2
// for j2
// for k2
// a[i2,j2] =
// for j3
// a[i2,j3] =
// Returns a list of For loops which directly contain a Stmt that writes
// to buf.
// For the above example:
// getAllInnermostLoopsWritingToBuf(a) => {j1, k2, j3}
std::vector<ForPtr> getAllInnermostLoopsWritingToBuf(BufPtr) const;
// Returns a list of For loopnests which contain a Stmt that writes to
// the given buf. Each loopnest here is a vector For loops.
// For the above example:
// getAllLoopNestsWritingToBuf(a) => {{i1,j1}, {i2,j2,k2}, {i2,j3}}
std::vector<std::vector<ForPtr>> getAllLoopNestsWritingToBuf(BufPtr) const;
StmtPtr simplify();
// Sanitize variables and buffer names.
// The pass assigns predefined names for loop index variables
// (i,j,k,l,m,n,o,p,i1,j1,k1,...) and ensures these names are not conflicting
// anywhere. It also removes duplicates from other Buf and Var names and
// replaces illegal characters in them with underscores.
//
// Note: since it's currently technically possible to use the same variable
// as index in two different loops, this transformation finds such cases and
// introduces new variables to avoid duplication.
static StmtPtr sanitizeNames(StmtPtr s);
bool computeInline(const StmtPtr& s);
bool computeInline(const BufPtr& b);
void inlineIntermediateBufs(bool allow_duplicated_work);
// Optimizes conditionals.
//
// Currently, only the following pattern of conditionals is optimized.
// This corresponds to the conditional format that is generated to handle
// `aten::cat` op.
//
// for (int i = 0; i < 20; i++) {
// A[i] = IfThenElse(i<5 ? 1 : 0, B[i], C[i-5])
// }
//
// Constraints that must be satisfied for this optimization:
// * All conditions should be of the form "var < expr".
// * All conditions should have the same variable, say v.
// * The condition variable found should be the same as the inner-most
// loop variable. TODO: Remove this constraint.
// * If there are multiple stores that contain conditionals using the same
// loop variable, only the first conditional will be optimized.
// TODO: Remove this constraint.
bool optimizeConditionals();
// Splits the given loop into 2 nested loops with the given factor as the
// inner loop bound. If the factor does not evenly divide the loop bound,
// then the remaining iterations are extracted into a tail loop that is
// added after the given loop.
//
// For example, consider the following code:
// for (int i = 0; i < 100; ++i) {
// A[i] =
// }
//
// splitWithTail(i, 8, ...) will result in:
// for (int i_outer = 0; i_outer < 12; ++i_outer) {
// for (int i_inner = 0; i_inner < 8; ++i_inner) {
// A[i_outer * 8 + i_inner] =
// }
// }
// for (int i_tail = 0; i_tail < 4; ++i_tail) {
// A[i_tail + 96] =
// }
//
// The given loop will be transformed to the outer loop after splitting.
// So, the pointer to the input loop should be valid after splitting and
// will point to the outer loop. The `inner` and `tail` parameters will be
// set to point to the inner and tail loops that are generated.
static void splitWithTail(
const ForPtr& f,
int factor,
ForPtr* inner,
ForPtr* tail);
// A convenience wrapper when the caller does not need to access the
// split loops.
static void splitWithTail(const ForPtr& f, int factor);
// Splits the given loop into 2 nested loops with the given factor as the
// inner loop bound. If the factor does not evenly divide the loop bound,
// then a conditional is inserted into the body to handle the remaining
// iterations appropriately.
//
// For example, consider the following code:
// for (int i = 0; i < 100; ++i) {
// A[i] =
// }
//
// splitWithMask(i, 8, ...) will result in:
// for (int i_outer = 0; i_outer < 13; ++i_outer) {
// for (int i_inner = 0; i_inner < 8; ++i_inner) {
// if (i_outer * 8 + i_inner < 100) {
// A[i_outer * 8 + i_inner] =
// }
// }
// }
//
// The given loop will be transformed to the outer loop after splitting.
// So, the pointer to the input loop should be valid after splitting and
// will point to the outer loop. The `inner` parameter will be set to point
// to the inner loop that is generated.
static void splitWithMask(const ForPtr& f, int factor, ForPtr* inner);
// A convenience wrapper when the caller does not need to access the
// split loops.
static void splitWithMask(const ForPtr& f, int factor);
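// A minimal usage sketch for the two split APIs above (assumption: `ln` is a
// LoopNest built from tensors and `t` is one of them):
//   std::vector<ForPtr> loops = ln.getLoopStmtsFor(t);
//   ForPtr inner, tail;
//   LoopNest::splitWithTail(loops[0], 8, &inner, &tail);
//   // or, to guard the remainder with a condition instead of a tail loop:
//   LoopNest::splitWithMask(loops[0], 8);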
// The following methods support loop distribution.
// For example, consider the following code. This will be used to
// demonstrate the methods below.
//
// S0: for m
// S1: for i
// S2: A[i] = 0
// S3: for j
// S4: A[i] = A[i] +
// S5: B[i] = A[i]
// S6: for k
// S7: B[i] = B[i] +
// This method distributes the given loop over its body by splitting
// after every given pivot stmt.
//
// NOTE: Pivot stmts that are not in the given loop's body will be ignored.
//
// For the above example:
// distributeLoop(S1, {S3, S5})
// will result in:
// S0: for m
// S1: for i
// S2: A[i] = 0
// S3: for j
// S4: A[i] = A[i] +
// : for i
// S5: B[i] = A[i]
// : for i
// S6: for k
// S7: B[i] = B[i] +
static std::vector<ForPtr> distributeLoop(
const ForPtr& loop,
const std::unordered_set<StmtPtr>& pivots);
// This method distributes the given loop over every stmt in its body.
//
// For the above example:
// distributeLoop(S1)
// will result in:
// S0: for m
// S1: for i
// S2: A[i] = 0
// : for i
// S3: for j
// S4: A[i] = A[i] +
// : for i
// S5: B[i] = A[i]
// : for i
// S6: for k
// S7: B[i] = B[i] +
static std::vector<ForPtr> distributeLoop(const ForPtr& loop);
// Same as above, but also distribute parent loops.
// Returns the result of distributing the outermost loop.
//
// For the above example:
// distributeLoopAndParents(S1) will result in:
// S0: for m
// S1: for i
// S2: A[i] = 0
// : for m
// : for i
// S3: for j
// S4: A[i] = A[i] +
// : for m
// : for i
// S5: B[i] = A[i]
// : for m
// : for i
// S6: for k
// S7: B[i] = B[i] +
static std::vector<ForPtr> distributeLoopAndParents(const ForPtr& loop);
// This method distributes the given loop over its body by splitting
// after every For stmt in its body.
//
// For the above example:
// distributeLoopOverInnerLoops(S1)
// will result in:
// S0: for m
// S1: for i
// S2: A[i] = 0
// S3: for j
// S4: A[i] = A[i] +
// : for i
// S5: B[i] = A[i]
// S6: for k
// S7: B[i] = B[i] +
static std::vector<ForPtr> distributeLoopOverInnerLoops(const ForPtr& loop);
// Same as above, but also distribute parent loops.
// Returns the result of distributing the outermost loop.
//
// For the above example:
// distributeLoopAndParentsOverInnerLoops(S1)
// will result in:
// S0: for m
// S1: for i
// S2: A[i] = 0
// S3: for j
// S4: A[i] = A[i] +
// : for m
// : for i
// S5: B[i] = A[i]
// S6: for k
// S7: B[i] = B[i] +
static std::vector<ForPtr> distributeLoopAndParentsOverInnerLoops(
const ForPtr& loop);
// This method performs loop fusion.
// For example, consider the following code.
//
// S1: for m
// S2: A[m] = 0
// S3: for j
// S4: A[m] = A[m] +
// S5: for n
// S6: B[n] = A[n]
// S7: for k
// S8: B[n] = B[n] +
//
// fuseLoops({S1, S5}), will return the following loop:
// S1: for m
// S2: A[m] = 0
// S3: for j
// S4: A[m] = A[m] +
// S6: B[m] = A[m]
// S7: for k
// S8: B[m] = B[m] +
//
// This transformation is unsafe as it simply adds all loops into the body of
// the first loop for fusion without correctness checks.
//
// Below are the two requirements to apply unsafeFuseLoops:
// * All the loops have the same parent.
// * There are no statements between these loops in their parent body.
static bool unsafeFuseLoops(const std::vector<ForPtr>& loops, ForPtr* fused);
// Loop fusion is done only when all the conditions below are satisfied.
// * All the loops have the same parent.
// * There are no statements between these loops in their parent body.
// * The start bounds are the same for all loops.
// * The stop bounds are the same for all loops.
// * Fusing the loops does not violate or add any dependencies.
static bool fuseLoops(const std::vector<ForPtr>& loops, ForPtr* fused);
static void reorderAxis(const ForPtr& a, const ForPtr& b);
// Reorder the given list of loops according to the permutation specified.
// Here `permutation[i]` represents the position of the loop in the input
// which will end up at position `i` after the reorder.
//
// For example, consider the following code:
// for p
// for q
// for r
// for s
// A[p,q,r,s] =
//
// reorder({p, q, r, s}, {2, 3, 0, 1}) will return the list of loops in the
// following form:
// for r
// for s
// for p
// for q
// A[p,q,r,s] =
static std::vector<ForPtr> reorder(
const std::vector<ForPtr>& loops,
const std::vector<size_t>& permutation);
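  // Illustrative usage sketch (not part of the original header): `loops` is
  // assumed to hold ForPtr handles to {p, q, r, s} from the example above.
  //
  //   std::vector<ForPtr> reordered =
  //       LoopNest::reorder(loops, /*permutation=*/{2, 3, 0, 1});
  //   // `reordered` now corresponds to the nest r, s, p, q.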
// Tile takes a 2d domain (x, y) and splits it into small rectangular blocks
// each with shape (x_factor, y_factor). The traversal over the domain turns
// into an outer iteration over the blocks and an inner traversal over all
// points in the block.
  // Note that if the x dimension is not divisible by x_factor, or the y
  // dimension by y_factor, corresponding tail loops are generated.
// The transformation is in-place and returns 'xtail'.
//
// For example, consider the following code:
// for i: [0, 64)
// for j: [0, 64)
// for k: [0, 32)
// A[i, j] = B[i, k] + C[j, k]
//
// tile(i, j, 4, 8) will transform "i" for-stmt into the following nested
// loop:
// for i_outer: [0, 16)
// for j_outer: [0, 8)
// for i_inner: [0, 4)
// for j_inner: [0, 8)
// for k: [0, 32)
// A[i_outer * 4 + i_inner, j_outer * 8 + j_inner] =
// B[i_outer * 4 + i_inner, k] + C[j_outer * 8 + j_inner, k]
//
// tile(i, j, 4, 9) will transform "i" for-stmt into the following nested
// loop:
// for i_outer: [0, 16)
// for j_outer: [0, 7)
// for i_inner: [0, 4)
// for j_inner: [0, 9)
  // for k: [0, 32)
// A[i_outer * 4 + i_inner, j_outer * 9 + j_inner] =
// B[i_outer * 4 + i_inner, k] + C[j_outer * 9 + j_inner, k]
// for j_tail: [0, 1)
// for i_inner: [0, 4)
  // for k: [0, 32)
// A[i_outer * 4 + i_inner, 7 * 9 + j_tail] =
// B[i_outer * 4 + i_inner, k] + C[7 * 9 + j_tail, k]
ForPtr tile(const ForPtr& x, const ForPtr& y, int x_factor, int y_factor);
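  // Illustrative usage sketch (not part of the original header): `ln` is the
  // enclosing LoopNest and `i`, `j` are ForPtr handles to the two outer loops
  // from the example above. Note that tile() is a member function, unlike
  // most transformations in this class.
  //
  //   ForPtr x_tail = ln.tile(i, j, /*x_factor=*/4, /*y_factor=*/8);
  //   // `x_tail` refers to the tail loop of the x dimension, if one was
  //   // generated.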
  // Returns true if the given loops are perfectly nested, i.e., every loop
  // except the innermost has exactly one statement in its body, and that
  // statement is the next inner loop.
static bool areLoopsPerfectlyNested(const std::vector<ForPtr>& loops);
// Returns true if the given loop has a loop-carried dependence.
static bool hasLoopCarriedDependence(const ForPtr& loop);
// Unrolls all the iterations of the given loop.
// Requires that the loop bounds are constant.
static void fullUnroll(const ForPtr& f, StmtPtr* unrolled);
static void fullUnroll(const ForPtr& f);
// Unrolls the given loop for the specified factor.
// This does not require constant bounds for the loop being unrolled.
static void unroll(const ForPtr& f, int factor, ForPtr* tail);
static void unroll(const ForPtr& f, int factor);
static bool normalize(const ForPtr& f);
static bool isNormalized(const ForPtr& f);
static bool flatten(const std::vector<ForPtr>& f, ForPtr* flattened);
static bool flatten(const std::vector<ForPtr>& f);
// Compresses the given buffer based on its use in the given Stmts.
//
// NOTE: This API assumes that there are no accesses to the given buffer
// outside the given statement. So, this should be called with the entire
// kernel statement to avoid incorrect buffer compressions.
//
// For example, given the input:
//
// for (int i = 0; i < 100; ++i) {
// for (int j = 0; j < 200; ++j) {
// A[i,j] = sin(i*j)
// }
// for (int j = 0; j < 199; ++j) {
// B[i,j] = A[i,j] + A[i, j+1]
// }
// }
//
// compressBuffer(A, ...) will compress buffer A from
// [100, 200] to [1, 200] and modify the code as follows:
//
// for (int i = 0; i < 100; ++i) {
// for (int j = 0; j < 200; ++j) {
// A[0,j] = sin(i*j)
// }
// for (int j = 0; j < 199; ++j) {
// B[i,j] = A[0,j] + A[0, j+1]
// }
// }
static void compressBuffer(const BufPtr& buf, const StmtPtr& stmt);
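  // Illustrative usage sketch (not part of the original header): `A_buf` is
  // the BufPtr for A and `root` is the complete kernel statement; passing
  // anything smaller than the whole kernel risks an incorrect compression, as
  // noted above.
  //
  //   LoopNest::compressBuffer(A_buf, root);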
// Compresses all buffers in the given statement.
//
// NOTE: This API assumes that there are no accesses to buffers outside
// the given statement. So, this should be called with the entire
// kernel statement to avoid incorrect buffer compressions.
//
// TODO: Add an IR verifier check to detect invalidly compressed buffers.
static void compressAllBuffers(const StmtPtr& stmt);
// Get 'num' loops from the loopnest starting at 'f'.
static std::vector<ForPtr> getLoopStmtsInLoopNest(
const ForPtr& f,
size_t num);
// LoopOptions are propagated to tail.
static void sliceHead(
const ForPtr& f,
int factor,
ForPtr* head,
ForPtr* tail);
static void sliceHead(const ForPtr& f, int factor);
// LoopOptions are propagated to head.
static void sliceTail(
const ForPtr& f,
int factor,
ForPtr* head,
ForPtr* tail);
static void sliceTail(const ForPtr& f, int factor);
using AccessResult = std::pair<BufPtr, StmtPtr>;
  // Inserts a cache for the consumer's usages of the producer buffer, and
  // redirects reads and writes in the consumer to that cache.
  // Returns a pair of the new cache buffer and the new rewritten consumer.
static AccessResult cacheAccesses(
const BufPtr& producer,
const std::string& name,
const StmtPtr& consumer);
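  // Illustrative usage sketch (not part of the original header): `A_buf` is
  // the producer BufPtr and `consumer` is the StmtPtr whose accesses should
  // be redirected; "A_local" is an arbitrary name for the cache buffer.
  //
  //   auto [cache_buf, new_consumer] =
  //       LoopNest::cacheAccesses(A_buf, "A_local", consumer);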
// Insert a temporary computation of statement S in the scope of loop AT.
// S is assumed to be a Store or a Block containing a Store. Along with the
// computation itself, this transformation inserts Alloc/Free statements for
// the temporary buffer used in the computation.
static void computeAt(const StmtPtr& s, const ForPtr& at);
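  // Illustrative usage sketch (not part of the original header): `store` is a
  // Store (or a Block containing one) and `at` is the loop in whose scope the
  // temporary computation should be inserted.
  //
  //   LoopNest::computeAt(store, at);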
// Rfactor a reduction axis into a normal axis.
//
// Requirements:
// * S is the reduction store
// * S is the only statement in the innermost loop
  // * There are at least two reduction arguments in S
// * OUTER_REDUCTION_FOR loop corresponds to the outermost reduction variable
// used in the store and all other reduction variables are index variables of
// children loops of OUTER_REDUCTION_FOR
// * OUTER_REDUCTION_FOR is a perfect loop nest, i.e. it has only loops
// corresponding to the other reduction variables and the store, nested into
// each other
//
// What it does:
// * Introduce a new buffer with an extra dimension of a size equal to the
// span of the loop OUTER_REDUCTION_FOR (the new buffer is returned via
// RFAC_BUF_PTR)
// * Insert an initialization store for the new buffer in
// OUTER_REDUCTION_FOR before its nested loop
// * Replace the reduction store to the original buffer with the reduction
// store to the temp buffer, removing the index var of OUTER_REDUCTION_FOR
// from reduction arguments
// * Insert a final reduction store over the extra dimension of the new
// buffer to the original buffer
// * Returns TRUE if the transformation succeeded and FALSE otherwise
//
// Example:
// Original IR:
// S1: for i # normal axis
// S2: X[i] = 0
// S3: for j # reduction axis
// S4: for k # reduction axis
// S5: X[i] = ReduceOp(X[i] + Y[i,j,k], reduce_axis={j,k})
//
// After RFACTOR(S5, S3)
// S1: for i # normal axis
// S2: X[i] = 0
// S3: for j # reduction axis for X, normal axis for X_rfac
// X_rfac[i,j] = 0
// S4: for k # reduction axis
// X_rfac[i,j] = ReduceOp(X_rfac[i,j] + Y[i,j,k], reduce_axis={k})
// X[i] = ReduceOp(X[i] + X_rfac[i,j], reduce_axis={j})
static bool rfactor(const StmtPtr& s, const ForPtr& outer_reduction_for);
static bool rfactor(
const StmtPtr& s,
const ForPtr& outer_reduction_for,
BufPtr* rfac_buf_ptr);
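  // Illustrative usage sketch (not part of the original header): `s5` is the
  // reduction store S5 and `loop_j` is the "for j" loop from the example
  // above.
  //
  //   BufPtr rfac_buf = nullptr;
  //   if (LoopNest::rfactor(s5, loop_j, &rfac_buf)) {
  //     // `rfac_buf` now holds X_rfac, carrying the extra `j` dimension.
  //   }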
// Vectorize the given loop. This method requires that the given loop
// does not perform a reduction.
// It returns true if vectorization is successful and false otherwise.
static bool vectorize(const ForPtr&);
// Find the inner-most loops and vectorize them. Currently, this only works
// for the LLVM backend, when no reductions are involved.
void vectorizeInnerLoops();
void eliminateDeadStores();
void prepareForCodegen();
const std::unordered_set<BufPtr> getInputBufs() const;
const std::unordered_set<BufPtr> getOutputBufs() const {
return output_bufs_;
}
std::vector<BufPtr> getIntermediateBufs() const;
  // Finds which of the two For loops, a or b, is the outer one. If neither is
  // an ancestor of the other, returns nullptr.
static ForPtr findOuterFor(ForPtr a, ForPtr b);
private:
void initialize(
const std::vector<Tensor>& output_tensors,
const std::vector<Tensor>& tensors_to_compute);
StmtPtr root_stmt_;
std::unordered_set<BufPtr> output_bufs_;
};
TORCH_API StmtPtr FlattenIndexes(const StmtPtr& s);
// TODO: Revisit this once we decide how dependency analysis should look.
// We may choose a different API and remove BufLoadOrStoreUse, or, if we keep
// it, we need to properly document its API.
struct BufLoadOrStoreUse {
StmtPtr s;
bool isStore;
};
/*
 * Returns a map (Buf -> uses of this Buf). Uses are represented as vectors of
 * BufLoadOrStoreUse elements, each holding a StmtPtr and a bool isStore flag.
 * The order of uses in the vectors reflects the order in which they appear in
 * the given statement.
 */
std::unordered_map<BufPtr, std::vector<BufLoadOrStoreUse>> findLoadOrStoreUses(
const StmtPtr& s);
// replaces all invalid characters with underscore
TORCH_API std::string sanitizeName(const std::string& input_name);
} // namespace torch::jit::tensorexpr
```
|
========================================================================================================================================================
SOURCE CODE FILE: loopnest_randomization.h
LINES: 1
SIZE: 0.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\loopnest_randomization.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <torch/csrc/jit/tensorexpr/loopnest.h>
namespace torch::jit::tensorexpr {
// Applies a series of randomly chosen loop optimizations. This is intended
// only for testing, enabling automatic stress testing of NNC loop
// transformations.
void loopnestRandomization(int64_t seed, LoopNest& l);
} // namespace torch::jit::tensorexpr
```
|
===========================================================================================================================================
SOURCE CODE FILE: lowerings.h
LINES: 1
SIZE: 1.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\lowerings.h
ENCODING: utf-8
```h
// This file defines classes for registering standard lowerings from JIT to TE
// IR.
#pragma once
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/runtime/interpreter.h>
#include <torch/csrc/jit/tensorexpr/analysis.h>
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
namespace torch::jit::tensorexpr {
using ArgNone = std::monostate;
using BufList = std::vector<tensorexpr::BufHandle>;
using DoubleList = std::vector<double>;
using IntList = std::vector<int64_t>;
using ArgValue = std::variant<
tensorexpr::BufHandle,
tensorexpr::VarHandle,
double,
int64_t,
bool,
BufList,
DoubleList,
IntList,
std::string,
ArgNone>;
using NNCLoweringFunction = std::function<Tensor(
const std::vector<ArgValue>&,
const std::vector<ExprHandle>&,
const std::vector<ExprHandle>&,
const std::optional<ScalarType>&,
at::Device)>;
TORCH_API FunctionSchemaMap<NNCLoweringFunction>& getNNCLoweringRegistry();
TORCH_API NNCLoweringFunction getStandardLoweringFor(const std::string& op);
struct RegisterNNCLoweringsFunction {
RegisterNNCLoweringsFunction(
const std::vector<std::string>& schemas,
const NNCLoweringFunction& fn);
};
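// Illustrative registration sketch (not part of the original header; the
// schema string is hypothetical): a static RegisterNNCLoweringsFunction object
// adds a lowering to the registry at program start-up. computeNoop here is
// just a stand-in NNCLoweringFunction declared in operators/pointwise.h.
//
//   static RegisterNNCLoweringsFunction register_example_op(
//       {"custom::my_op(Tensor self) -> Tensor"},
//       computeNoop);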
} // namespace torch::jit::tensorexpr
```
|
========================================================================================================================================================
SOURCE CODE FILE: mem_dependency_checker.h
LINES: 1
SIZE: 13.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\mem_dependency_checker.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <utility>
#include <vector>
#include <torch/csrc/jit/tensorexpr/bounds_overlap.h>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
#include <torch/csrc/jit/tensorexpr/ir_simplifier.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/stmt.h>
namespace torch::jit::tensorexpr::analysis {
enum class AccessType {
Input,
Output,
Load,
Store,
Call,
AtomicAdd,
Alloc,
Free
};
const char* AccessToString(AccessType a);
class AccessInfo;
using DependencySet = std::unordered_set<std::shared_ptr<AccessInfo>>;
/* AccessInfo
*
* Represents a single bounded memory access to a buffer, for instance a Load or
* a Store. Holds information relating to the specific access and links to
* connected accesses in the dependency graph.
*/
class TORCH_API AccessInfo {
public:
AccessInfo(
size_t id,
AccessType type,
StmtPtr stmt,
VarPtr var,
IndexBounds bounds)
: id_(id),
type_(type),
stmt_(std::move(stmt)),
expr_(nullptr),
var_(std::move(var)),
bounds_(std::move(bounds)) {}
AccessInfo(
size_t id,
AccessType type,
ExprPtr expr,
StmtPtr stmt,
VarPtr var,
IndexBounds bounds)
: id_(id),
type_(type),
stmt_(std::move(stmt)),
expr_(std::move(expr)),
var_(std::move(var)),
bounds_(std::move(bounds)) {}
// Id is a unique int representing the order this access occurred in the
// graph.
size_t id() const {
return id_;
}
// The type of the access (Load, Store, etc).
AccessType type() const {
return type_;
}
// The enclosing Stmt this access represents. E.g. if this is a Store then
// Stmt is the Store itself, while if the access is caused by an Expr, this is
// the most immediate parent Stmt.
StmtPtr stmt() const {
return stmt_;
}
// If the access is represented by an Expr (such as Load or Call) then this is
// it, otherwise it's nullptr.
ExprPtr expr() const {
return expr_;
}
// The Var representing the underlying Buffer.
VarPtr var() const {
return var_;
}
// A vector of Bounds representing the start and end expression for each
// dimension.
IndexBounds& bounds() {
return bounds_;
}
  // Each access that this access depends upon.
  // E.g. if this is a Load, it contains every Store that immediately
  // contributes to a load of the bounds; if this is a Store, it contains all
  // reads on the RHS of the Store.
const std::map<size_t, std::shared_ptr<AccessInfo>>& dependencies() const {
return dependencies_;
}
  // Each access that depends on this one, i.e. every access whose
  // dependencies map contains this access.
std::map<size_t, std::shared_ptr<AccessInfo>> dependents() const {
std::map<size_t, std::shared_ptr<AccessInfo>> res;
for (const auto& kv : dependents_) {
res.emplace(kv.first, kv.second.lock());
}
return res;
}
// Returns the symbolic expression of the indices of this access.
std::vector<ExprPtr> getIndices() const;
// Establishes a dependency or dependent relationship with another access.
void addDependency(const std::shared_ptr<AccessInfo>& write);
void addDependent(const std::shared_ptr<AccessInfo>& read);
// helper for checking dependencies.
bool hasDependency(const std::shared_ptr<AccessInfo>& info) const;
// Returns the set of all nodes that are direct (immediate) dependencies of
// this access.
DependencySet getDirectDependencies();
// likewise, returns all nodes that directly depend on this one.
DependencySet getDirectDependents();
// Returns the full list of all nodes in the graph that this access depends
// on, and all nodes they depend on, and so forth, back to the inputs.
DependencySet getIndirectDependencies();
// likewise, returns the full list of all nodes that depend on this node, and
// all nodes that depend on those nodes and so on down to the outputs.
DependencySet getIndirectDependents();
// Does this access represent a read of memory (Load, ReduceOp, Call, etc).
bool isRead() const;
// Does this access represent a write of memory (Store, etc).
bool isWrite() const;
// Helpers for dumping accesses in various formats.
void print() const;
void dumpDOT(std::ostream& os) const;
const char* AccessTypeColour() const;
private:
size_t id_;
AccessType type_;
StmtPtr stmt_;
ExprPtr expr_;
VarPtr var_;
IndexBounds bounds_;
// Yes these should be sorted.
std::map<size_t, std::shared_ptr<AccessInfo>> dependencies_;
std::map<size_t, std::weak_ptr<AccessInfo>> dependents_;
};
using VarBoundMap = std::unordered_map<VarPtr, Bound>;
/* MemDependencyChecker analyses an IR fragment and builds a dependency graph
 * of the accesses contained within.
 *
 * It's possible to retrieve the entire graph in node-object form, or the
 * checker can be used as an oracle for answering dependency questions, e.g.:
 *
 *  analyzer.dependsIndirectly(BufA, BufB); or,
 *  analyzer.dependsDirectly(LoadA, StoreB);
*/
class TORCH_API MemDependencyChecker : public IRVisitor {
struct Scope;
public:
MemDependencyChecker();
MemDependencyChecker(
const std::unordered_set<BufPtr>& inputs,
const std::unordered_set<BufPtr>& outputs);
MemDependencyChecker(
const std::vector<BufHandle>& inputs,
const std::vector<BufHandle>& outputs);
~MemDependencyChecker() override = default;
// Whether or not to allow loop execution order to influence dependency
// calculation. If the loop may later be parallelized you don't want this.
bool allowLoopExecutionOrderAnalysis(bool allow = true);
// Dependency Checking API.
// The goal is to have enough overloads here so you don't really have to think
// about it.
// Returns true if any read in A has a direct dependence on a write in B.
bool dependsDirectly(const StmtPtr& A, const StmtPtr& B);
bool dependsDirectly(const ExprPtr& A, const StmtPtr& B);
  // Returns true if the output depends directly on a write contained in B.
bool dependsDirectly(const BufPtr& output, const StmtPtr& B);
// Returns true if a read in A depends directly on the provided input.
bool dependsDirectly(const StmtPtr& A, const BufPtr& input);
bool dependsDirectly(const ExprPtr& A, const BufPtr& input);
// Outputs/inputs cannot depend directly.
// Returns true if the access A has B as an immediate dependency.
bool dependsDirectly(
const std::shared_ptr<AccessInfo>& A,
const std::shared_ptr<AccessInfo>& B);
// Returns true if any read in A has an ancestor write contained in B.
bool dependsIndirectly(const StmtPtr& A, const StmtPtr& B);
bool dependsIndirectly(const ExprPtr& A, const StmtPtr& B);
  // Returns true if the output depends indirectly on a write contained in B.
bool dependsIndirectly(const BufPtr& output, const StmtPtr& B);
// Returns true if a read in A depends indirectly on the provided input.
bool dependsIndirectly(const StmtPtr& A, const BufPtr& input);
bool dependsIndirectly(const ExprPtr& A, const BufPtr& input);
// returns true if the output uses any load of the input.
bool dependsIndirectly(const BufPtr& output, const BufPtr& input);
// Returns true if the access A has a dependency chain to access B.
bool dependsIndirectly(
const std::shared_ptr<AccessInfo>& A,
const std::shared_ptr<AccessInfo>& B);
// Returns the AccessInfo
std::shared_ptr<AccessInfo> accessFor(const StmtPtr& A) const;
std::shared_ptr<AccessInfo> accessFor(const ExprPtr& A) const;
// Returns all AccessInfos.
std::unordered_set<std::shared_ptr<AccessInfo>> accessesWithin(
const StmtPtr& A) const;
// TODO: this will return only the AccessInfo for A. It's included for
  // completeness, but be aware it won't return accesses used in the computation
// of A.
std::unordered_set<std::shared_ptr<AccessInfo>> accessesWithin(
const ExprPtr& A) const;
// Accesses relating to input and output buffers.
std::shared_ptr<AccessInfo> input(const BufPtr& B) const;
std::shared_ptr<AccessInfo> output(const BufPtr& B) const;
// Returns the full history of reads and writes.
const std::vector<std::shared_ptr<AccessInfo>>& getHistory() const;
// Dumps the dependency graph in DOT format.
void dumpDAG(const std::string& filename) const;
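  // Illustrative usage sketch (not part of the original header): `inputs` and
  // `outputs` are the kernel's input/output Bufs and `root` is the StmtPtr to
  // analyse.
  //
  //   MemDependencyChecker checker(inputs, outputs);
  //   root->accept(&checker);
  //   bool dep = checker.dependsIndirectly(some_output_buf, some_input_buf);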
private:
// Node visitors.
void visit(const StorePtr& v) override;
void visit(const LoadPtr& v) override;
void visit(const ForPtr& v) override;
void visit(const CondPtr& v) override;
void visit(const IfThenElsePtr& v) override;
void visit(const CompareSelectPtr& v) override;
void visit(const BlockPtr& v) override;
void visit(const LetPtr& v) override;
void visit(const AtomicAddPtr& v) override;
void visit(const AllocatePtr& v) override;
void visit(const FreePtr& v) override;
using BoundRelationship = std::pair<IndexBounds, std::shared_ptr<AccessInfo>>;
// An internal struct holding the accesses found within a scope Block.
struct Scope {
Scope(BlockPtr b, std::shared_ptr<Scope> p)
: block(std::move(b)), parent(std::move(p)) {}
BlockPtr block;
std::shared_ptr<Scope> parent;
std::unordered_map<VarPtr, Bound> shadowedVarBounds;
std::unordered_set<VarPtr> localVars;
std::vector<std::shared_ptr<AccessInfo>> accesses_;
std::unordered_map<VarPtr, std::list<BoundRelationship>> openWrites_;
};
std::shared_ptr<Scope> currentScope_;
bool allowExecutionOrderAnalysis_{false};
std::unordered_multimap<StmtPtr, std::shared_ptr<AccessInfo>> stmtToAccess_;
std::unordered_multimap<ExprPtr, std::shared_ptr<AccessInfo>> exprToAccess_;
std::unordered_map<StmtPtr, std::vector<std::shared_ptr<AccessInfo>>>
scopeToAccesses_;
VarBoundMap knownVarBounds_;
// Finds all accesses that are reads within the scope of v.
template <typename StmtOrExprPtr>
DependencySet getAllReadsWithin(const StmtOrExprPtr& v) {
DependencySet reads;
auto insertAllReads = [&](const auto& nodes) {
for (const auto& l : nodes) {
auto bound = exprToAccess_.equal_range(l);
for (auto it = bound.first; it != bound.second; ++it) {
if (it->second->isRead()) {
reads.insert(it->second);
}
}
}
};
// Look for and insert accesses belonging to all nodes that act like
// reads.
insertAllReads(NodeFinder<Load>::find(v));
insertAllReads(NodeFinder<ReduceOp>::find(v));
return reads;
}
// Finds all accesses that are writes within the scope of v.
// Writes cannot occur in Exprs, so this is a little simpler.
DependencySet getAllWritesWithin(const StmtPtr& v) {
DependencySet writes;
  // Currently only Store nodes are treated as writes.
auto stores = NodeFinder<Store>::find(v);
for (const auto& s : stores) {
auto bound = stmtToAccess_.equal_range(s);
for (auto it = bound.first; it != bound.second; ++it) {
if (it->second->isWrite()) {
writes.insert(it->second);
}
}
}
return writes;
}
// Templated helpers to work on either Exprs or Stmts.
template <typename StmtOrExprPtr>
bool dependsDirectlyHelper(const StmtOrExprPtr& A, const StmtPtr& B) {
auto aReads = getAllReadsWithin(A);
auto bWrites = getAllWritesWithin(B);
for (auto& read : aReads) {
for (auto& depPair : read->dependencies()) {
if (bWrites.count(depPair.second) != 0) {
return true;
}
}
}
return false;
}
template <typename StmtOrExprPtr>
bool dependsIndirectlyHelper(StmtOrExprPtr A, const StmtPtr& B) {
auto aReads = getAllReadsWithin(A);
auto bWrites = getAllWritesWithin(B);
auto aDeps = getAllWriteDependencies(aReads);
for (auto& dependency : aDeps) {
if (bWrites.count(dependency) != 0) {
return true;
}
}
return false;
}
DependencySet getAllWriteDependencies(const DependencySet& products);
// Maps for inputs and outputs, since they aren't present directly in the IR.
std::unordered_map<BufPtr, std::shared_ptr<AccessInfo>> inputs_;
std::unordered_map<BufPtr, std::shared_ptr<AccessInfo>> outputs_;
std::unordered_map<VarPtr, std::shared_ptr<AccessInfo>> intermediates_;
// Inserts accesses for Buf's: specifically for inputs and outputs.
void insertBuffers(
std::unordered_map<BufPtr, std::shared_ptr<AccessInfo>>& bufs,
AccessType type);
// Update the write history with a new write, adding dependencies and closing
// any overlapped writes (if possible).
void updateWriteHistory(
std::list<BoundRelationship>& writeHistory,
const std::shared_ptr<AccessInfo>& info,
size_t latestAccessToClose,
bool closeOverlapped = true,
bool insert = true);
// Merge a child scope into a parent scope, adding dependencies for open
// writes in the parent to accesses in the child.
void mergeScope(
const std::shared_ptr<Scope>& child,
const std::shared_ptr<Scope>& parent,
bool closeOverlapped = true);
// Binds symbolic vars in indices with the low and high bound for those vars.
std::vector<Bound> getIndicesBounds(const std::vector<ExprPtr>& indices);
size_t nextAccess_{0};
StmtPtr lastStmt_{nullptr};
};
} // namespace torch::jit::tensorexpr::analysis
```
|
==================================================================================================================================================
SOURCE CODE FILE: conv2d.h
LINES: 1
SIZE: 2.92 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\operators\conv2d.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/operators/misc.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
namespace torch::jit::tensorexpr {
// An API to compute 2D depthwise convolutions with bias.
TORCH_API Tensor conv2d_depthwise(
BufHandle input,
BufHandle weight,
BufHandle bias,
int stride,
int pad,
int groups);
// An API to compute 2D depthwise convolutions without bias.
TORCH_API Tensor conv2d_depthwise(
BufHandle input,
BufHandle weight,
int stride,
int pad,
int groups);
TORCH_API Tensor conv2d_depthwise(
BufHandle input,
BufHandle weight,
BufHandle bias,
ExprHandle N,
ExprHandle C,
ExprHandle H,
ExprHandle W,
ExprHandle K,
ExprHandle CperG,
ExprHandle R,
ExprHandle S,
ExprHandle stride,
ExprHandle pad,
ExprHandle groups);
TORCH_API Tensor conv2d_depthwise(
BufHandle input,
BufHandle weight,
ExprHandle N,
ExprHandle C,
ExprHandle H,
ExprHandle W,
ExprHandle K,
ExprHandle CperG,
ExprHandle R,
ExprHandle S,
ExprHandle stride,
ExprHandle pad,
ExprHandle groups);
bool conv2dIsSupported(
const TensorInfo& input,
const TensorInfo& weight,
const TensorInfo& bias,
const std::vector<int64_t>& stride,
const std::vector<int64_t>& pad,
const std::vector<int64_t>& dilation,
int64_t groups);
bool mkldnnPrepackedConvIsSupported(
const TensorInfo& input,
const TensorInfo& weight,
const std::vector<int64_t>& stride,
const std::vector<int64_t>& pad,
const std::vector<int64_t>& dilation,
int64_t groups);
Tensor computeConv2d(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeConv1d(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computePrepackedConv2dClampRun(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computePrepackedLinearClampRun(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeMkldnnPrepackedConvRun(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
} // namespace torch::jit::tensorexpr
```
|
==================================================================================================================================================
SOURCE CODE FILE: matmul.h
LINES: 1
SIZE: 0.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\operators\matmul.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/kernel.h>
namespace torch::jit::tensorexpr {
Tensor computeMatmul(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeAddMM(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
} // namespace torch::jit::tensorexpr
```
|
================================================================================================================================================
SOURCE CODE FILE: misc.h
LINES: 1
SIZE: 3.30 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\operators\misc.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/lowerings.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
namespace torch::jit::tensorexpr {
struct TensorInfo {
std::vector<int64_t> dims;
c10::ScalarType dtype;
};
std::optional<TensorInfo> getTensorInfo(const BufHandle& b);
int64_t normalizeAndCheckIndex(int64_t idx, int64_t list_size);
// Convert boolean to integer, if needed.
ExprHandle boolToInteger(const ExprHandle& x);
ExprHandle promoteToDtype(ExprHandle e, ScalarType dt);
void promoteInputs(
std::vector<ExprHandle>& inputs,
const int typeConstraints = kAllTypes);
ExprHandle promoteIntegerToDefaultType(const ExprHandle& e);
ExprHandle promoteHalfToFloat(const ExprHandle& e);
ExprHandle demoteOutput(
const ExprHandle& e,
const std::optional<ScalarType> type);
std::vector<ExprHandle> broadcastShapes(
std::vector<std::vector<ExprHandle>> shapes);
std::vector<ExprHandle> broadcastShapes(
const std::vector<ExprHandle>& a,
const std::vector<ExprHandle>& b);
std::vector<ExprHandle> valueShape(const ArgValue& v);
ExprHandle tensorOrConstant(
const ArgValue& v,
const std::vector<ExprHandle>& axes);
ExprHandle scalarOrConstant(const ArgValue& v);
ExprHandle broadcast(const BufHandle& b, const std::vector<ExprHandle>& axes);
ExprHandle constant(const ArgValue& v);
ExprHandle clamp(
const ExprHandle& cmin,
const ExprHandle& cmax,
const ExprHandle& input);
Tensor computeChunk(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeTranspose(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeExpand(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeReshape(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeFlatten(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeCatWoConditionals(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape);
Tensor computeCat(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeEmbedding(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
} // namespace torch::jit::tensorexpr
```
|
================================================================================================================================================
SOURCE CODE FILE: norm.h
LINES: 1
SIZE: 0.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\operators\norm.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/kernel.h>
namespace torch::jit::tensorexpr {
Tensor computeBatchNorm(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
} // namespace torch::jit::tensorexpr
```
|
=====================================================================================================================================================
SOURCE CODE FILE: operators.h
LINES: 1
SIZE: 0.47 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\operators\operators.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/operators/conv2d.h>
#include <torch/csrc/jit/tensorexpr/operators/matmul.h>
#include <torch/csrc/jit/tensorexpr/operators/misc.h>
#include <torch/csrc/jit/tensorexpr/operators/norm.h>
#include <torch/csrc/jit/tensorexpr/operators/pointwise.h>
#include <torch/csrc/jit/tensorexpr/operators/quantization.h>
#include <torch/csrc/jit/tensorexpr/operators/reduction.h>
#include <torch/csrc/jit/tensorexpr/operators/softmax.h>
```
|
=====================================================================================================================================================
SOURCE CODE FILE: pointwise.h
LINES: 1
SIZE: 3.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\operators\pointwise.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/kernel.h>
namespace torch::jit::tensorexpr {
TORCH_API Tensor computeSign(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::optional<std::vector<ExprHandle>>& outputStrides = std::nullopt);
Tensor computeOneOperand(
const std::string& name,
const std::vector<ArgValue>& inputValues,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
const std::function<ExprHandle(const ExprHandle&)>& innerExpr,
const int checkParamTypes = kAllTypes);
Tensor computeTwoOperand(
const std::string& name,
const std::vector<ArgValue>& inputValues,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
const std::function<ExprHandle(const ExprHandle&, const ExprHandle&)>&
innerExpr);
Tensor computeTwoOperandWithAlpha(
const std::string& name,
const std::vector<ArgValue>& inputValues,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
const std::function<ExprHandle(const ExprHandle&, const ExprHandle&)>&
innerExpr);
Tensor computeConditionWithTwoOperand(
const std::string& name,
const std::vector<ArgValue>& inputValues,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
const std::function<
ExprHandle(const ExprHandle&, const ExprHandle&, const ExprHandle&)>&
innerExpr);
Tensor computeThreeOperand(
const std::string& name,
const std::vector<ArgValue>& inputValues,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
const std::function<
ExprHandle(const ExprHandle&, const ExprHandle&, const ExprHandle&)>&
innerExpr,
bool promote_inputs = true);
Tensor computeFourOperand(
const std::string& name,
const std::vector<ArgValue>& inputValues,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
const std::function<ExprHandle(
const ExprHandle&,
const ExprHandle&,
const ExprHandle&,
const ExprHandle&)>& innerExpr);
Tensor computeNoop(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeScalar(
const std::string& name,
const std::vector<ArgValue>& inputValues,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
const std::function<ExprHandle(const ExprHandle&, const ExprHandle&)>&
innerExpr);
} // namespace torch::jit::tensorexpr
```
|
========================================================================================================================================================
SOURCE CODE FILE: quantization.h
LINES: 1
SIZE: 5.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\operators\quantization.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/kernel.h>
namespace torch::jit::tensorexpr {
TORCH_API ExprHandle quantizePerTensorQParamFromArg(ArgValue arg);
TORCH_API double immQScale(const BufHandle& qx);
TORCH_API int64_t immQZero(const BufHandle& qx);
TORCH_API ScalarType immQDType(const BufHandle& qx);
TORCH_API bool isQuantized(const BufHandle& qx);
TORCH_API Tensor computeQuantizePerTensor(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizePerTensorExternalCall(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedConv1d(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedConv2dPrepack(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedConv2d(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedConv2dRelu(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedLinear(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedLinearRelu(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedAdd(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeQuantizedAddExternalCall(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedMul(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedMulScalar(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedCat(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedRelu(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeDequantize(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeDequantizeExternalCall(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeUpsampleNearest2d(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeUpsampleNearest2dExternalCall(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeQuantizedSigmoidExternalCall(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device);
} // namespace torch::jit::tensorexpr
```
|
=====================================================================================================================================================
SOURCE CODE FILE: reduction.h
LINES: 1
SIZE: 1.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\operators\reduction.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/kernel.h>
namespace torch::jit::tensorexpr {
TORCH_API Tensor computeSum(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeMean(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
TORCH_API Tensor computeAdaptiveAvgPool2d(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
Tensor computeMax(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
const std::optional<ScalarType>& outputType,
at::Device device);
} // namespace torch::jit::tensorexpr
```
|
===================================================================================================================================================
SOURCE CODE FILE: softmax.h
LINES: 1
SIZE: 0.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\operators\softmax.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/kernel.h>
namespace torch::jit::tensorexpr {
Tensor computeSoftmax(
const std::vector<ArgValue>& inputs,
const std::vector<ExprHandle>& outputShape,
const std::vector<ExprHandle>& outputStrides,
bool log_softmax);
} // namespace torch::jit::tensorexpr
```
|
===========================================================================================================================================
SOURCE CODE FILE: reduction.h
LINES: 1
SIZE: 8.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\reduction.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/tensorexpr/expr.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
#include <torch/csrc/jit/tensorexpr/stmt.h>
#include <torch/csrc/jit/tensorexpr/types.h>
#include <functional>
#include <utility>
#include <vector>
namespace torch::jit::tensorexpr {
using ParameterList = const std::vector<VarHandle>;
using ReduceInteraction = std::function<ExprHandle(ExprHandle, ExprHandle)>;
// A Reducer is a user interface describing a particular reduction
// operation. It has three components: An initialization value, a way of
// interacting each value with the accumulation, and a method for obtaining the
// current value to be reduced. It is materialized into a ReduceOp when loop
// variables are known.
class TORCH_API Reducer {
public:
Reducer(ExprHandle init, ReduceInteraction& interaction)
: init_(init.node()), interaction_(interaction) {}
template <typename RI>
Reducer(ExprHandle init, RI interaction)
: init_(init.node()), interaction_(std::move(interaction)) {}
ExprPtr initializer() const {
return init_;
}
ExprHandle operator()(
const BufHandle& result_buf,
ExprHandle body,
const std::vector<ExprHandle>& output,
const std::vector<VarHandle>& inner) const;
ReduceOpPtr operator()(
const BufPtr& result_buf,
ExprPtr body,
const std::vector<ExprPtr>& output,
const std::vector<VarPtr>& inner) const;
ExprHandle operator()(
const BufHandle& result_buf,
BufHandle acc_buf,
const ExprHandle& body,
const std::vector<ExprHandle>& output,
const std::vector<VarHandle>& inner) const;
// Polymorphic handling of Body functions with a variety of parameters.
static ExprHandle getReduceBody(
const std::function<ExprHandle(ParameterList&)>& func,
const std::vector<VarHandle>& vars) {
return func(vars);
}
static ExprHandle getReduceBody(
const std::function<ExprHandle(const VarHandle&)>& func,
const std::vector<VarHandle>& vars) {
if (vars.size() != 1) {
throw malformed_input("mismatch between reduce body and arg size (1)");
}
return func(vars[0]);
}
static ExprHandle getReduceBody(
const std::function<ExprHandle(const VarHandle&, const VarHandle&)>& func,
const std::vector<VarHandle>& vars) {
if (vars.size() != 2) {
throw malformed_input("mismatch between reduce body and arg size (2)");
}
return func(vars[0], vars[1]);
}
static ExprHandle getReduceBody(
const std::function<
ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>&
func,
const std::vector<VarHandle>& vars) {
if (vars.size() != 3) {
throw malformed_input("mismatch between reduce body and arg size (3)");
}
return func(vars[0], vars[1], vars[2]);
}
static ExprHandle getReduceBody(
const std::function<ExprHandle(
const VarHandle&,
const VarHandle&,
const VarHandle&,
const VarHandle&)>& func,
const std::vector<VarHandle>& vars) {
if (vars.size() != 4) {
throw malformed_input("mismatch between reduce body and arg size (4)");
}
return func(vars[0], vars[1], vars[2], vars[3]);
}
// Completes the reduction operator by applying the interaction function to
// the accumulation and the body expression.
static ExprPtr complete(
const BufPtr& accumulator,
const ReduceInteraction& interaction,
ExprHandle body,
const std::vector<ExprPtr>& output_args,
const std::vector<VarPtr>& reduce_args) {
ExprHandle accum =
ExprHandle(alloc<Load>(body.dtype(), accumulator, output_args));
auto e = interaction(std::move(accum), std::move(body));
return e.node();
}
static ExprHandle complete(
const BufHandle& accumulator,
const ReduceInteraction& interaction,
ExprHandle body,
const std::vector<ExprHandle>& output_args,
const std::vector<VarHandle>& reduce_args) {
ExprHandle accum = Load::make(body.dtype(), accumulator, output_args);
auto e = interaction(std::move(accum), std::move(body));
return e;
}
private:
ExprPtr init_;
ReduceInteraction interaction_;
};
// An expression representing a Reduction operation (e.g. Sum, Max) broken into
// its component parts: initialization, accumulation var, acquisition of value
// to be reduced and interaction.
//
// This is intended to be expanded in the loopnest and not make it to codegen.
class TORCH_API ReduceOp : public ExprNode<ReduceOp> {
public:
ReduceOp(
const ExprPtr& body,
std::vector<VarPtr> reduce_args,
Reducer reducer)
: ExprNodeBase(body->dtype()),
body_(body),
reduce_args_(std::move(reduce_args)),
reducer_(std::move(reducer)) {
result_buf_ = nullptr;
acc_buf_ = nullptr;
ri_operand_ = nullptr;
}
ReduceOp(
const ExprPtr& body,
std::vector<VarPtr> reduce_args,
BufPtr result_buf,
BufPtr acc_buf,
ExprPtr ri_operand,
Reducer reducer)
: ExprNodeBase(body->dtype()),
body_(body),
reduce_args_(std::move(reduce_args)),
result_buf_(std::move(result_buf)),
acc_buf_(std::move(acc_buf)),
ri_operand_(std::move(ri_operand)),
reducer_(std::move(reducer)) {}
static ExprHandle make(
ExprHandle body,
const std::vector<VarHandle>& reduce_args,
const Reducer& reducer);
static ExprHandle make(
ExprHandle body,
const std::vector<VarHandle>& reduce_args,
BufHandle result_buf,
BufHandle acc_buf,
ExprHandle ri_operand,
const Reducer& reducer);
// return the body expression which obtains the value to be reduced.
ExprPtr body() const {
return body_;
}
// Returns the original Reducer factory that can create ReduceOps.
const Reducer& reducer() const {
return reducer_;
}
// returns variables associated with the axes of reduction.
const std::vector<VarPtr>& reduce_args() const {
return reduce_args_;
}
void setAccBuf(BufHandle acc_buf) {
acc_buf_ = acc_buf.node();
}
BufPtr getAccBuf() {
return acc_buf_;
}
void setResultBuf(BufHandle buf) {
result_buf_ = buf.node();
}
BufPtr getResultBuf() {
return result_buf_;
}
void setRiOperand(ExprHandle ri_operand) {
ri_operand_ = ri_operand.node();
}
ExprPtr getRiOperand() {
return ri_operand_;
}
private:
// body_ = reducer_->interaction_(result_buf_, ri_operand_)
ExprPtr body_;
std::vector<VarPtr> reduce_args_;
BufPtr result_buf_;
BufPtr acc_buf_;
ExprPtr ri_operand_;
const Reducer reducer_;
};
class Sum : public Reducer {
public:
Sum()
: Reducer(ExprHandle(0), [](const ExprHandle& a, const ExprHandle& b) {
return a + b;
}) {}
};
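// Illustrative sketch (not part of the original header): a custom Reducer is
// just an initial value plus an interaction lambda, exactly like Sum above.
// A product reduction, for example, could be written as:
//
//   class Product : public Reducer {
//    public:
//     Product()
//         : Reducer(ExprHandle(1), [](const ExprHandle& a, const ExprHandle& b) {
//             return a * b;
//           }) {}
//   };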
inline ExprHandle maximumVal(ScalarType type) {
switch (type) {
#define MAX_BY_TYPE_CASE(Type, Name) \
case ScalarType::Name: \
return ExprHandle(std::numeric_limits<Type>::max());
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, MAX_BY_TYPE_CASE)
#undef MAX_BY_TYPE_CASE
default:
throw unsupported_dtype();
}
return ExprHandle();
}
inline ExprHandle minimumVal(ScalarType type) {
switch (type) {
#define MAX_BY_TYPE_CASE(Type, Name) \
case ScalarType::Name: \
return ExprHandle(std::numeric_limits<Type>::min());
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, MAX_BY_TYPE_CASE)
#undef MAX_BY_TYPE_CASE
default:
throw unsupported_dtype();
}
}
class Maximum : public Reducer {
public:
// TODO possible to remove this arg by deferring the init value until we
// know the dtype of the body.
Maximum(Dtype dtype)
: Reducer(
minimumVal(dtype.scalar_type()),
[](const ExprHandle& a, const ExprHandle& b) {
return Max::make(a, b, true);
}) {}
Maximum(ExprHandle initializer)
: Reducer(
std::move(initializer),
[](const ExprHandle& a, const ExprHandle& b) {
return Max::make(a, b, true);
}) {}
};
class Minimum : public Reducer {
public:
Minimum(Dtype dtype)
: Reducer(
maximumVal(dtype.scalar_type()),
[](const ExprHandle& a, const ExprHandle& b) {
return Min::make(a, b, true);
}) {}
Minimum(const ExprHandle& initializer)
: Reducer(initializer, [](const ExprHandle& a, const ExprHandle& b) {
return Min::make(a, b, true);
}) {}
};
class ReductionExpander : public IRMutator {
public:
StmtPtr expand(const StmtPtr& s) {
return s->accept_mutator(this);
}
ExprPtr mutate(const ReduceOpPtr& v) override {
return v->body();
}
};
} // namespace torch::jit::tensorexpr
```
|
==============================================================================================================================================
SOURCE CODE FILE: registerizer.h
LINES: 1
SIZE: 12.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\registerizer.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/ScalarType.h>
#include <c10/util/irange.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/hash_provider.h>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
#include <torch/csrc/jit/tensorexpr/ir_simplifier.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <utility>
#include <vector>
namespace torch::jit::tensorexpr {
namespace registerizer {
/* The Registerizer performs scalar replacement by looking for common Stores and
Loads to a single item in a buffer and replacing them with a local temporary
scalar which is cheaper to write.
For example it can replace:
{
A[0] = 0;
for(const auto x : c10::irange(10)) {
A[0] = (A[0]) + x;
}
}
with:
{
int A_ = 0;
for(const auto x : c10::irange(10)) {
A_ = x + A_;
}
A[0] = A_;
}
This is particularly useful on GPUs when parallelizing, since after replacing
loops with metavars we have a lot of accesses like this. */
class Scope;
/* Holds analysis information about accesses to a specific range of a
buffer, including the number of loads and stores and the lowest common parent
Block.
*/
class AccessInfo {
public:
AccessInfo() = default;
AccessInfo(
SimplifierHashType h,
BufPtr b,
std::vector<ExprPtr> i,
size_t accessOrder)
: hash_(h),
buf_(std::move(b)),
indices_(std::move(i)),
store_cost_(alloc<IntImm>(0)),
load_cost_(alloc<IntImm>(0)),
accessOrder_(accessOrder) {}
// Adds a Store to this access, which is in the provided scope.
void addStore(const StorePtr& store, const std::shared_ptr<Scope>& scope);
// Adds a Load to this access, which occurs in the usage Stmt in the provided
// scope.
void addLoad(
const LoadPtr& load,
const std::shared_ptr<Scope>& scope,
const StmtPtr& usage);
// Merge another AccessInfo into this one.
void merge(const std::shared_ptr<AccessInfo>& other);
// Returns true if the other AccessInfo's bounds may overlap this one.
bool overlaps(const std::shared_ptr<AccessInfo>& other);
// Returns true if the indices of this access depend on the provided Var.
bool dependsOnVar(const VarPtr& v);
// Clone this AccessInfo, and set this as the new accesses' hiddenAccess.
static std::shared_ptr<AccessInfo> cloneWithHiddenInfo(
const std::shared_ptr<AccessInfo>& orig);
// print for debugging.
void print() const;
SimplifierHashType hash() const {
return hash_;
}
BufPtr buf() const {
return buf_;
}
const std::vector<ExprPtr>& indices() const {
return indices_;
}
BlockPtr block() const {
return block_;
}
void setEnclosingBlock(BlockPtr b) {
block_ = std::move(b);
}
StmtPtr first_usage() const {
return first_usage_;
}
StmtPtr last_usage() const {
return last_usage_;
}
void setUsageMarks(StmtPtr first, StmtPtr last) {
first_usage_ = std::move(first);
last_usage_ = std::move(last);
}
bool firstUsageOverlapped() const {
return firstUsageOverlapped_;
}
ExprPtr store_cost() const {
return store_cost_;
}
ExprPtr load_cost() const {
return load_cost_;
}
const std::vector<StorePtr>& stores() const {
return stores_;
}
const std::vector<LoadPtr>& loads() const {
return loads_;
}
void hoistCosts(const ExprPtr& extent) {
store_cost_ = IRSimplifier::simplify(alloc<Mul>(store_cost_, extent));
load_cost_ = IRSimplifier::simplify(alloc<Mul>(load_cost_, extent));
}
size_t conditionId() const {
return conditionId_;
}
void setConditionId(size_t c) {
conditionId_ = c;
}
size_t accessOrder() const {
return accessOrder_;
}
std::shared_ptr<AccessInfo> hiddenAccess() const {
return hiddenAccess_;
}
// Holds state relating to the scalar variable we will insert to replace some
// number of loads and stores.
struct ScalarReplacement {
VarPtr var{nullptr};
BufPtr var_wrapper{nullptr};
LetPtr initializer{nullptr};
};
ScalarReplacement& replacement() {
return replacement_;
}
private:
SimplifierHashType hash_;
BufPtr buf_;
std::vector<ExprPtr> indices_;
BlockPtr block_{nullptr};
StmtPtr first_usage_{nullptr};
StmtPtr last_usage_{nullptr};
  // Whether or not this access is overlapped in the first Stmt in which it
  // appears. This means we cannot use its first Store as the initializer.
bool firstUsageOverlapped_{false};
// The cost in real ops that this access represents, to enable
  // filtering accesses that won't save any loads or stores.
ExprPtr store_cost_;
ExprPtr load_cost_;
// The actual Stores and Loads which represent this access.
// Be careful with these, any mutator will invalidate these pointers.
std::vector<StorePtr> stores_;
std::vector<LoadPtr> loads_;
// An identifier representing the conditional block, if any, this access
// depends on.
size_t conditionId_{0};
// An identifier representing the order this access was first encountered, for
// sorting returned results.
size_t accessOrder_{0};
// Sometimes when traversing the tree we need to record what would happen if
// we hoisted an access, but sometimes it doesn't work out. This lets us
// "undo" some mutation and return to the internal hidden AccessInfo.
// It will be removed after any further additions to this AccessInfo.
std::shared_ptr<AccessInfo> hiddenAccess_;
ScalarReplacement replacement_;
};
using AccessHashMap =
std::unordered_map<SimplifierHashType, std::shared_ptr<AccessInfo>>;
// Represents a scope block and holds all accesses contained within it.
class Scope {
public:
Scope(BlockPtr b, std::shared_ptr<Scope> parent, size_t conditionId = 0)
: block_(std::move(b)),
parent_(std::move(parent)),
conditionId_(conditionId) {}
AccessHashMap& getAccessMapByBuf(const BufPtr& b);
std::unordered_map<BufPtr, AccessHashMap>& openAccesses() {
return openAccesses_;
}
std::vector<std::shared_ptr<AccessInfo>>& closedAccesses() {
return closedAccesses_;
}
BlockPtr block() const {
return block_;
}
std::shared_ptr<Scope> parent() const {
return parent_;
}
size_t conditionId() const {
return conditionId_;
}
const std::unordered_set<VarPtr>& localVars() const {
return localVars_;
}
void addLocalVar(VarPtr v) {
localVars_.insert(std::move(v));
}
void closeAccess(const std::shared_ptr<AccessInfo>& info);
void filterClosed();
private:
// Map of maps of accesses, narrowing first by Buf and then by
// hash(Buf+Indices). This allows us to find a candidate access easily, and
// also to check for overlap with other accesses to the same buf:
//   Buf -> Hash -> Access
std::unordered_map<BufPtr, AccessHashMap> openAccesses_;
std::vector<std::shared_ptr<AccessInfo>> closedAccesses_;
// The Block object this scope represents.
BlockPtr block_;
// The enclosing scope object.
std::shared_ptr<Scope> parent_;
// An identifier representing the condition block this scope depends on.
size_t conditionId_;
// A set of variables local to this scope (e.g. loop vars).
std::unordered_set<VarPtr> localVars_;
};
/* Analyzes the graph and collects accesses to the same symbolic tensor element
* which can be replaced by a single local scalar.
*
* This works by recursively walking the tree in postfix order, building sets of
* accesses to the same symbolic element by scope and then merging lower scopes
* into their enclosing scope.
*
* It is safe to move two accesses of the same Tensor element to a local scalar
* Var if between all usages of the element there are no other Loads or Stores
* that may refer to it. In the comments I refer to this as overlapping the
* access, or "cutting" the existing AccessInfo. In the case where a candidate
* for registerization is cut, it may be possible to finalize the access early
* by writing it back to the Tensor and then create a new scalar variable after
* the overlapping access is complete. We will attempt to do this when it saves
* memory accesses.
*
* There are a few cases that make this more challenging:
*
* - For: Loops change the number of real usages of a buffer by the loop
* extent, but only if we can pull the definition and finalization of the scalar
* variable out of the loop block.
*
* - Cond: Conditions complicate lifting scalars out of internal scopes.
* Generally we cannot lift an access outside of a conditional scope unless
* there is already a reference to that same access at the higher scope, since
* we don't know if the condition was guarding an array access not safe at the
* higher scope. In the comments I refer to this as the condition "hiding" the
* access, and the outer access "unhiding" it.
*
* - IfThenElse: Same situation as Cond, except since IfThenElse is an Expr
* rather than a Stmt we cannot insert the scalar definition or finalizer
* within the conditional scope. Accesses inside an IfThenElse can be safely
* combined with external accesses but cannot exist completely within.
*
* - Let: Accesses dependent on local variables via Let Stmts, or loop vars,
* cannot be raised outside of the scope of the dependent var.
*/
class TORCH_API RegisterizerAnalysis : public IRVisitor {
public:
RegisterizerAnalysis()
: currentScope_(std::make_shared<Scope>(nullptr, nullptr, 0)) {}
~RegisterizerAnalysis() override = default;
void visit(const ForPtr& v) override;
void visit(const CondPtr& v) override;
void visit(const BlockPtr& v) override;
void visit(const StorePtr& v) override;
void visit(const LoadPtr& v) override;
void visit(const IfThenElsePtr& v) override;
void visit(const LetPtr& v) override;
#define STMT_ON_STACK(Op) \
void visit(const Op##Ptr& v) override { \
stmtStack_.push_front(v); \
IRVisitor::visit(v); \
stmtStack_.pop_front(); \
}
STMT_ON_STACK(AtomicAdd)
STMT_ON_STACK(Allocate)
STMT_ON_STACK(Free)
#undef STMT_ON_STACK
std::vector<std::shared_ptr<AccessInfo>> getCandidates();
private:
void mergeCurrentScopeIntoParent();
void mergeHiddenScope(bool allowClosed);
void closeAccessIntoScope(
const std::shared_ptr<AccessInfo>& info,
const std::shared_ptr<Scope>& scope);
std::unordered_set<size_t> exprConditionals_;
// A stack of enclosing Stmts for tracking the usage Stmt of Loads.
std::deque<StmtPtr> stmtStack_;
// The current scope being analyzed.
std::shared_ptr<Scope> currentScope_;
HashProvider hasher_;
size_t conditionId_{0};
size_t accessOrder_{0};
};
/* Replaces each registerizable access with a Scalar variable, including
* definition, initializer and finalizer.
*/
class TORCH_API RegisterizerReplacer : public IRMutator {
public:
RegisterizerReplacer(std::vector<std::shared_ptr<AccessInfo>>& vec)
: infoSet_(vec) {
buildReplacements();
}
ExprPtr mutate(const LoadPtr& v) override;
StmtPtr mutate(const StorePtr& v) override;
StmtPtr mutate(const BlockPtr& v) override;
private:
struct ReplacerScope {
std::unordered_map<StmtPtr, std::deque<std::shared_ptr<AccessInfo>>>
initializerPoints_;
std::unordered_map<StmtPtr, std::deque<std::shared_ptr<AccessInfo>>>
finalizePoints_;
};
// Creates the various ReplacerScope objects and builds internal maps.
void buildReplacements();
// State relating to the accesses yet to be replaced.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
std::vector<std::shared_ptr<AccessInfo>>& infoSet_;
std::unordered_map<StorePtr, std::shared_ptr<AccessInfo>> storeToAccess_;
std::unordered_map<LoadPtr, std::shared_ptr<AccessInfo>> loadToAccess_;
std::unordered_map<BlockPtr, ReplacerScope> parentToAccesses_;
// Holds the set of Stores that should be pulled into an initializer, so they
// can be eliminated.
std::set<StorePtr> eliminatedIntializers_;
// Tracks the number of times we've seen each buffer, so we can name the
// scalar Vars appropriately.
std::unordered_map<BufPtr, unsigned int> bufferAccessCounts_;
unsigned int getBufferAccessCount(const BufPtr& b) {
return ++bufferAccessCounts_[b];
}
};
} // namespace registerizer
// Apply scalar replacement to all accesses in s.
// To produce safe code, this must occur after handling parallelized axes and
// atomics.
TORCH_API StmtPtr registerize(StmtPtr s);
} // namespace torch::jit::tensorexpr
```
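A minimal usage sketch for the registerizer declared above, assuming the usual tensorexpr include paths; the buffer names and the `registerize_example` wrapper are illustrative, not taken from the header. The analysis finds the repeated `A[0]` access inside the loop, and the replacer rewrites it to a local scalar with an initializer before the loop and a write-back after it.

```cpp
#include <torch/csrc/jit/tensorexpr/registerizer.h>
#include <torch/csrc/jit/tensorexpr/stmt.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>

using namespace torch::jit::tensorexpr;

StmtPtr registerize_example() {
  BufHandle A("A", {1}, kInt);
  BufHandle B("B", {100}, kInt);
  VarHandle i("i", kInt);
  // for i in [0, 100): A[0] = A[0] + B[i]
  StmtPtr body = Store::make(A, {0}, A.load(0) + B.load(i));
  StmtPtr loop = For::make(i, 0, 100, body);
  // After registerization the loop accumulates into a scalar variable, and
  // A[0] is read once before the loop and written back once after it.
  return registerize(loop);
}
```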
|
======================================================================================================================================
SOURCE CODE FILE: stmt.h
LINES: 1
SIZE: 24.20 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\stmt.h
ENCODING: utf-8
```h
#pragma once
#include <algorithm>
#include <list>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include <torch/csrc/jit/tensorexpr/expr.h>
namespace torch::jit::tensorexpr {
// The common base class for all statement nodes.
class TORCH_API Stmt : public std::enable_shared_from_this<Stmt> {
public:
Stmt() = default;
virtual ~Stmt() = default;
virtual void accept(IRVisitor* visitor) = 0;
virtual StmtPtr accept_mutator(IRMutator* mutator) = 0;
StmtPtr get_parent() const {
return parent_ ? parent_->getptr() : nullptr;
}
/*
* Make a deep copy of the given statement.
*
* All statements and expressions used in children of the statement are
* cloned. Note that the variables are not deep-copied since they are
* immutable.
*/
static StmtPtr clone(const StmtPtr& s);
protected:
static void set_parent(const StmtPtr& s, Stmt* new_parent) {
s->parent_ = new_parent;
}
std::shared_ptr<Stmt> getptr() {
return shared_from_this();
}
private:
Stmt* parent_ = nullptr;
};
template <class Op>
class StmtNode : public Stmt {
public:
using StmtNodeBase = StmtNode<Op>;
void accept(IRVisitor* visitor) override {
visitor->visit(static_to<Op>(getptr()));
}
StmtPtr accept_mutator(IRMutator* mutator) override;
StmtNode() = default;
};
template <class Op>
StmtPtr StmtNode<Op>::accept_mutator(IRMutator* mutator) {
return mutator->mutate(static_to<Op>(getptr()));
}
// Concrete Stmt classes
class TORCH_API Block : public StmtNode<Block> {
public:
static BlockPtr make(const std::vector<StmtPtr>& stmts) {
std::vector<StmtPtr> valid_stmts;
for (auto& stmt : stmts) {
if (!stmt) {
continue;
}
valid_stmts.push_back(stmt);
}
if (valid_stmts.empty()) {
return nullptr;
}
return alloc<Block>(valid_stmts);
}
size_t nstmts() const {
return stmts_.size();
}
bool empty() const {
return stmts_.empty();
}
void prepend_stmt(const StmtPtr& s) {
if (s->get_parent()) {
throw malformed_input("Block prepend Stmt with existing parent", s);
}
stmts_.push_front(s);
set_parent(s, this);
}
void append_stmt(const StmtPtr& s) {
if (s->get_parent()) {
throw malformed_input("Block append Stmt with existing parent", s);
}
stmts_.push_back(s);
set_parent(s, this);
}
void insert_stmt_before(const StmtPtr& s, const StmtPtr& before) {
if (s->get_parent()) {
throw malformed_input("Block append Stmt with existing parent", s);
}
auto pos = std::find(stmts_.begin(), stmts_.end(), before);
if (pos == stmts_.end()) {
throw malformed_input(
"Inserting before statement that is not in block", s);
}
stmts_.insert(pos, s);
set_parent(s, this);
}
void insert_stmt_after(const StmtPtr& s, const StmtPtr& after) {
if (s->get_parent()) {
throw malformed_input("Block append Stmt with existing parent", s);
}
auto pos = std::find(stmts_.begin(), stmts_.end(), after);
if (pos == stmts_.end()) {
throw malformed_input(
"Inserting after statement that is not in block", s);
}
++pos;
stmts_.insert(pos, s);
set_parent(s, this);
}
bool replace_stmt(const StmtPtr& old_stmt, const StmtPtr& new_stmt) {
if (new_stmt->get_parent()) {
throw malformed_input(
"Block replace Stmt with existing parent", new_stmt);
}
auto pos = std::find(stmts_.begin(), stmts_.end(), old_stmt);
if (pos == stmts_.end()) {
return false;
}
stmts_.insert(pos, new_stmt);
stmts_.erase(pos);
set_parent(old_stmt, nullptr);
set_parent(new_stmt, this);
return true;
}
// Creates a new block by cloning `this` block and replacing the given
// statement with a new statement. Note that `old_stmt` refers to a statement
// in `this` block. If the `old_stmt` is not found, it will return `nullptr`.
BlockPtr clone_and_replace(const StmtPtr& old_stmt, const StmtPtr& new_stmt) {
if (new_stmt->get_parent()) {
throw malformed_input(
"Block replace Stmt with existing parent", new_stmt);
}
std::vector<StmtPtr> stmts(stmts_.begin(), stmts_.end());
std::vector<StmtPtr> cloned_stmts(stmts.size());
bool found = false;
for (int i = 0; i < static_cast<int>(stmts.size()); ++i) {
if (stmts[i] == old_stmt) {
found = true;
cloned_stmts[i] = new_stmt;
} else {
cloned_stmts[i] = Stmt::clone(stmts[i]);
}
}
if (!found) {
return nullptr;
}
return alloc<Block>(cloned_stmts);
}
bool remove_stmt(const StmtPtr& stmt) {
auto pos = std::find(stmts_.begin(), stmts_.end(), stmt);
if (pos == stmts_.end()) {
return false;
}
set_parent(stmt, nullptr);
stmts_.erase(pos);
return true;
}
std::list<StmtPtr> stmts() const {
return stmts_;
}
void clear() {
for (const auto& s : stmts_) {
set_parent(s, nullptr);
}
stmts_.clear();
}
void set_stmts(const std::vector<StmtPtr>& stmts) {
clear();
init(stmts);
}
explicit Block(const std::vector<StmtPtr>& stmts) {
init(stmts);
}
typedef std::list<StmtPtr>::iterator iterator;
typedef std::list<StmtPtr>::const_iterator const_iterator;
iterator begin() {
return stmts_.begin();
}
const_iterator begin() const {
return stmts_.begin();
}
iterator end() {
return stmts_.end();
}
const_iterator end() const {
return stmts_.end();
}
StmtPtr front() {
return stmts_.front();
}
StmtPtr front() const {
return stmts_.front();
}
StmtPtr back() {
return stmts_.back();
}
StmtPtr back() const {
return stmts_.back();
}
void splice(Block::iterator it, const BlockPtr& other) {
for (const StmtPtr& s : *other) {
set_parent(s, this);
}
stmts_.splice(it, other->stmts_);
}
static BlockPtr getSharedParent(StmtPtr p1, StmtPtr p2) {
std::unordered_set<BlockPtr> enclosing;
StmtPtr p1_p = std::move(p1);
while (p1_p) {
if (BlockPtr b = to<Block>(p1_p)) {
enclosing.insert(b);
}
p1_p = p1_p->get_parent();
}
StmtPtr p2_p = std::move(p2);
while (p2_p) {
if (BlockPtr b = to<Block>(p2_p)) {
if (enclosing.count(b) != 0) {
return b;
}
}
p2_p = p2_p->get_parent();
}
return nullptr;
}
// returns the immediate child containing statement s.
StmtPtr getEnclosedRoot(StmtPtr s) const {
while (s && s->get_parent().get() != this) {
s = s->get_parent();
}
return s;
}
private:
std::list<StmtPtr> stmts_;
void init(const std::vector<StmtPtr>& stmts) {
for (const StmtPtr& s : stmts) {
if (!s) {
continue;
}
if (!s->get_parent()) {
set_parent(s, this);
}
// If the Stmt already had a parent, that's a bug, but we cannot throw an
// error from a constructor; the IR verifier would catch it.
stmts_.push_back(s);
}
}
};
class TORCH_API Store : public StmtNode<Store> {
public:
VarPtr base_handle() const {
return buf_->base_handle();
}
std::vector<ExprPtr> indices() const {
return indices_;
}
ExprPtr flat_index() const {
TORCH_CHECK(indices_.size() == 1, "Indices haven't been flattened.");
return indices_[0];
}
ExprPtr value() const {
return value_;
}
BufPtr buf() const {
return buf_;
}
void set_buf(BufPtr buf) {
buf_ = std::move(buf);
}
void set_indices(std::vector<ExprPtr> indices) {
indices_ = std::move(indices);
}
void set_value(ExprPtr value) {
value_ = std::move(value);
}
static StorePtr make(
const BufHandle& buf,
const std::vector<ExprHandle>& indices,
const ExprHandle& value);
Store(BufPtr buf, std::vector<ExprPtr> indices, ExprPtr value);
private:
BufPtr buf_;
std::vector<ExprPtr> indices_;
ExprPtr value_;
};
// Allocate a buffer with the given shape and dtype and bind it to the given
// buffer var. Its lifespan is at most the duration of the current program,
// until it is explicitly freed; leaving memory unfreed is likely an error.
class TORCH_API Allocate : public StmtNode<Allocate> {
public:
static AllocatePtr make(const BufHandle& buf_handle) {
return alloc<Allocate>(buf_handle.node());
}
VarPtr buffer_var() const {
return buf_->base_handle();
}
Dtype dtype() const {
return buf_->dtype();
}
const std::vector<ExprPtr> dims() const {
return buf_->dims();
}
BufPtr buf() const {
return buf_;
}
void set_buf(BufPtr buf) {
buf_ = std::move(buf);
}
explicit Allocate(BufPtr buf) : buf_(std::move(buf)) {}
private:
BufPtr buf_;
// TODO: add memory types.
};
// PlacementAllocate is a variation of the Allocate operator in NNC IR. It does
// not allocate memory but reuses the memory of another buffer for the given
// buffer.
class TORCH_API PlacementAllocate : public StmtNode<PlacementAllocate> {
public:
static PlacementAllocatePtr make(
const BufHandle& buf_handle,
const BufHandle& buf_handle_to_reuse) {
return alloc<PlacementAllocate>(
buf_handle.node(), buf_handle_to_reuse.node());
}
BufPtr buf() const {
return buf_;
}
BufPtr buf_to_reuse() const {
return buf_to_reuse_;
}
void set_buf(BufPtr buf) {
buf_ = std::move(buf);
}
void set_buf_to_reuse(BufPtr buf) {
buf_to_reuse_ = std::move(buf);
}
explicit PlacementAllocate(BufPtr buf, BufPtr buf_to_reuse)
: buf_(std::move(buf)), buf_to_reuse_(std::move(buf_to_reuse)) {}
private:
BufPtr buf_;
BufPtr buf_to_reuse_;
};
// Free the specified buffer (see Allocate: leaving a buffer unfreed is likely
// considered an error).
class TORCH_API Free : public StmtNode<Free> {
public:
static FreePtr make(const BufHandle& buf_handle) {
return alloc<Free>(buf_handle.node());
}
VarPtr buffer_var() const {
return buf_->base_handle();
}
BufPtr buf() const {
return buf_;
}
void set_buf(BufPtr buf) {
buf_ = std::move(buf);
}
explicit Free(BufPtr buf) : buf_(std::move(buf)) {}
private:
BufPtr buf_;
};
class TORCH_API FreeExt : public StmtNode<FreeExt> {
public:
static FreeExtPtr make(const std::vector<BufHandle>& bufs);
std::vector<BufPtr> bufs() const {
return bufs_;
}
void set_bufs(std::vector<BufPtr> bufs) {
bufs_ = std::move(bufs);
}
explicit FreeExt(std::vector<BufPtr> bufs) : bufs_(std::move(bufs)) {}
private:
std::vector<BufPtr> bufs_;
};
class TORCH_API Let : public StmtNode<Let> {
public:
static LetPtr make(const VarHandle& var, const ExprHandle& val) {
return alloc<Let>(var.node(), val.node());
}
Let(VarPtr var, ExprPtr val) : var_(std::move(var)), val_(std::move(val)) {}
VarPtr var() const {
return var_;
}
ExprPtr value() const {
return val_;
}
void set_var(VarPtr var) {
var_ = std::move(var);
}
void set_val(ExprPtr val) {
val_ = std::move(val);
}
private:
VarPtr var_;
ExprPtr val_;
};
class TORCH_API Cond : public StmtNode<Cond> {
public:
static CondPtr make(
const ExprHandle& condition,
const StmtPtr& true_stmt,
const StmtPtr& false_stmt) {
return alloc<Cond>(condition.node(), true_stmt, false_stmt);
}
ExprPtr condition() const {
return condition_;
}
BlockPtr true_stmt() const {
return true_stmt_;
}
BlockPtr false_stmt() const {
return false_stmt_;
}
void set_condition(ExprPtr condition) {
condition_ = std::move(condition);
}
void set_true_stmt(StmtPtr true_stmt) {
if (true_stmt) {
BlockPtr b = to<Block>(true_stmt);
if (!b) {
b = alloc<Block>(std::vector<StmtPtr>({std::move(true_stmt)}));
}
true_stmt_ = b;
set_parent(true_stmt_, this);
}
}
void set_false_stmt(StmtPtr false_stmt) {
if (false_stmt) {
BlockPtr b = to<Block>(false_stmt);
if (!b) {
b = alloc<Block>(std::vector<StmtPtr>({std::move(false_stmt)}));
}
false_stmt_ = b;
set_parent(false_stmt_, this);
}
}
Cond(ExprPtr condition, StmtPtr true_stmt, StmtPtr false_stmt)
: condition_(std::move(condition)) {
set_true_stmt(std::move(true_stmt));
set_false_stmt(std::move(false_stmt));
}
CondPtr cloneWithNewBodies(
const StmtPtr& true_stmt,
const StmtPtr& false_stmt) {
return alloc<Cond>(condition_, true_stmt, false_stmt);
}
CondPtr cloneWithNewBody(const StmtPtr& true_stmt) {
return alloc<Cond>(condition_, true_stmt, nullptr);
}
private:
ExprPtr condition_;
BlockPtr true_stmt_ = nullptr;
BlockPtr false_stmt_ = nullptr;
};
class TORCH_API LoopOptions {
public:
enum {
IDX_UNSET = -1,
IDX_X = 0,
IDX_Y = 1,
IDX_Z = 2,
IDX_W = 3,
IDX_MAX = IDX_W,
};
// GPU Block Index
bool is_gpu_block_index() const {
return gpu_block_index_ != IDX_UNSET;
}
int gpu_block_index() const {
return gpu_block_index_;
}
std::string gpu_block_index_str() const {
if (!is_gpu_block_index()) {
throw malformed_input("Has no GPU block index");
}
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
static const char* kBlockIndexNames[] = {
"blockIdx.x",
"blockIdx.y",
"blockIdx.z",
"blockIdx.w",
};
if (gpu_block_index_ < IDX_X || gpu_block_index_ > IDX_MAX) {
throw malformed_input("invalid GPU block index");
}
return kBlockIndexNames[gpu_block_index_];
}
void set_gpu_block_index(int index) {
if (index == IDX_UNSET) {
gpu_block_index_ = IDX_UNSET;
}
if (is_gpu_thread_index()) {
throw std::runtime_error("Cannot set both gpu block and thread index");
}
if (is_gpu_block_index() && gpu_block_index() != index) {
throw std::runtime_error("Cannot set a previously set block index");
}
gpu_block_index_ = index;
}
// GPU Thread Index
bool is_gpu_thread_index() const {
return gpu_thread_index() != IDX_UNSET;
}
int gpu_thread_index() const {
return gpu_thread_index_;
}
std::string gpu_thread_index_str() const {
if (!is_gpu_thread_index()) {
throw malformed_input("has no GPU thread index");
}
// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
static const char* kThreadIndexNames[] = {
"threadIdx.x", "threadIdx.y", "threadIdx.z", "threadIdx.w"};
if (gpu_thread_index_ < IDX_X || gpu_thread_index_ > IDX_MAX) {
throw malformed_input("invalid GPU thread index");
}
return kThreadIndexNames[gpu_thread_index_];
}
void set_gpu_thread_index(int index) {
if (index == IDX_UNSET) {
gpu_thread_index_ = IDX_UNSET;
}
if (is_gpu_block_index()) {
throw std::runtime_error("Cannot set both gpu thread and block index");
}
if (is_gpu_thread_index() && gpu_thread_index() != index) {
throw std::runtime_error("Cannot set a previously set thread index");
}
gpu_thread_index_ = index;
}
void set_parallel() {
is_parallel_ = true;
}
bool is_parallel() const {
return is_parallel_;
}
std::string ToString() const {
if (is_gpu_block_index()) {
return gpu_block_index_str();
} else if (is_gpu_thread_index()) {
return gpu_thread_index_str();
} else if (is_parallel()) {
return "parallel";
}
return "";
}
bool isDefault() const {
return gpu_block_index_ == IDX_UNSET && gpu_thread_index_ == IDX_UNSET &&
!is_parallel_;
}
void set_buffer_mapping(const std::unordered_map<std::string, BufPtr>& map) {
map_input_to_tensor_bufs_ = map;
}
std::unordered_map<std::string, BufPtr> get_buffer_mapping() const {
return map_input_to_tensor_bufs_;
}
private:
int gpu_block_index_{IDX_UNSET};
int gpu_thread_index_{IDX_UNSET};
bool is_parallel_{false};
std::unordered_map<std::string, BufPtr> map_input_to_tensor_bufs_;
};
class TORCH_API For : public StmtNode<For> {
public:
VarPtr var() const {
return var_;
}
ExprPtr start() const {
return start_;
}
ExprPtr stop() const {
return stop_;
}
BlockPtr body() const {
return body_;
}
static ForPtr make(
const VarHandle& var,
const ExprHandle& start,
const ExprHandle& stop,
const StmtPtr& body) {
if (!body) {
return nullptr;
}
return alloc<For>(var.node(), start.node(), stop.node(), body);
}
static ForPtr make(
const VarHandle& var,
const ExprHandle& start,
const ExprHandle& stop,
const StmtPtr& body,
const LoopOptions& loop_options) {
if (!body) {
return nullptr;
}
return alloc<For>(
var.node(), start.node(), stop.node(), body, loop_options);
}
const LoopOptions loop_options() const {
return loop_options_;
}
For(VarPtr var, ExprPtr start, ExprPtr stop, StmtPtr body)
: var_(std::move(var)), start_(std::move(start)), stop_(std::move(stop)) {
BlockPtr b = to<Block>(body);
if (!b) {
b = alloc<Block>(std::vector<StmtPtr>({std::move(body)}));
}
body_ = b;
set_parent(body_, this);
}
For(VarPtr var,
ExprPtr start,
ExprPtr stop,
StmtPtr body,
LoopOptions loop_options)
: var_(std::move(var)),
start_(std::move(start)),
stop_(std::move(stop)),
loop_options_(std::move(loop_options)) {
if (!var_) {
throw malformed_input("invalid Var in For loop");
} else if (!start_) {
throw malformed_input("invalid Start in For loop");
} else if (!stop_) {
throw malformed_input("invalid Stop in For loop");
} else if (!body || body->get_parent()) {
throw malformed_input("invalid Body in For loop");
}
BlockPtr b = to<Block>(body);
if (!b) {
b = alloc<Block>(std::vector<StmtPtr>({std::move(body)}));
}
body_ = b;
set_parent(body_, this);
}
void set_gpu_block_index(int block_index) {
loop_options_.set_gpu_block_index(block_index);
}
void set_gpu_thread_index(int thread_index) {
loop_options_.set_gpu_thread_index(thread_index);
}
void set_parallel() {
loop_options_.set_parallel();
}
bool is_parallel() const {
return loop_options_.is_parallel();
}
void set_buffer_map(const std::unordered_map<std::string, BufPtr>& map) {
loop_options_.set_buffer_mapping(map);
}
ForPtr cloneWithNewBody(const StmtPtr& body) const {
return alloc<For>(var_, start_, stop_, body, loop_options_);
}
BlockPtr removeBody() {
auto res = body_;
set_parent(res, nullptr);
body_ = nullptr;
return res;
}
void set_body(StmtPtr body) {
BlockPtr b = to<Block>(body);
if (!b) {
b = alloc<Block>(std::vector<StmtPtr>({std::move(body)}));
}
body_ = b;
set_parent(body_, this);
}
void set_start(ExprPtr start) {
start_ = std::move(start);
}
void set_stop(ExprPtr stop) {
stop_ = std::move(stop);
}
void set_var(VarPtr var) {
var_ = std::move(var);
}
private:
VarPtr var_;
ExprPtr start_;
ExprPtr stop_;
BlockPtr body_;
LoopOptions loop_options_;
};
// A backend-specific IR Node that implements atomic-add.
// This node should only show up internally with GPU backends.
// TODO: move this to an internal IR.
// TODO: make IR nodes extensible.
class TORCH_API AtomicAdd : public StmtNode<AtomicAdd> {
public:
AtomicAdd(BufPtr buf, std::vector<ExprPtr> indices, ExprPtr value)
: buf_(std::move(buf)),
indices_(std::move(indices)),
value_(std::move(value)) {}
VarPtr base_handle() const {
return buf_->base_handle();
}
BufPtr buf() const {
return buf_;
}
ExprPtr flat_index() const {
TORCH_CHECK(indices_.size() == 1, "Indices haven't been flattened.");
return indices_[0];
}
ExprPtr value() const {
return value_;
}
const std::vector<ExprPtr>& indices() const {
return indices_;
}
void set_buf(BufPtr buf) {
buf_ = std::move(buf);
}
void set_indices(std::vector<ExprPtr> indices) {
indices_ = std::move(indices);
}
void set_value(ExprPtr value) {
value_ = std::move(value);
}
private:
BufPtr buf_;
std::vector<ExprPtr> indices_;
ExprPtr value_;
};
class TORCH_API SyncThreads : public StmtNode<SyncThreads> {
public:
SyncThreads() = default;
};
/*
* ExternalCall statement represents a call to an external function that would
* compute the contents of the output buffer. An ExternalCall statement consists
* of:
* 1) output buffer - the buffer that'll be initialized by the call
* 2) external function name - a key from the NNC function registry to lookup
* the actual function to call
* 3) buffer arguments - the input buffers used by the function
* 4) non-buffer arguments - scalar arguments to pass to the function
*
* An example:
* A = nnc_conv2d(buf_args={Input, Weight, Bias}, args={1})
* Here 'A' is the output buffer, "nnc_conv2d" is the function name, the buffer
* arguments are 'Input', 'Weight', and 'Bias', and there is a single non-buffer
* argument - 1.
*
* The semantics of the scalar arguments is defined solely by the implementation
* of the external function.
*/
class TORCH_API ExternalCall : public StmtNode<ExternalCall> {
public:
static ExternalCallPtr make(
BufHandle buf,
const std::string& func_name,
const std::vector<BufHandle>& buf_args,
const std::vector<ExprHandle>& args);
BufPtr buf() const {
return buf_;
}
std::string func_name() const {
return func_name_;
}
std::vector<BufPtr> buf_args() const {
return buf_args_;
}
std::vector<ExprPtr> args() const {
return args_;
}
void set_buf(BufPtr buf) {
buf_ = std::move(buf);
}
void set_buf_args(std::vector<BufPtr> buf_args) {
buf_args_ = std::move(buf_args);
}
void set_args(std::vector<ExprPtr> args) {
args_ = std::move(args);
}
ExternalCall(
BufPtr buf,
std::string func_name,
std::vector<BufPtr> buf_args,
std::vector<ExprPtr> args)
: buf_(std::move(buf)),
func_name_(std::move(func_name)),
buf_args_(std::move(buf_args)),
args_(std::move(args)) {}
private:
BufPtr buf_;
std::string func_name_;
std::vector<BufPtr> buf_args_;
std::vector<ExprPtr> args_;
};
class TORCH_API ExternalCallWithAlloc : public StmtNode<ExternalCallWithAlloc> {
public:
static ExternalCallWithAllocPtr make(
const std::string& func_name,
const std::vector<BufHandle>& buf_out_args,
const std::vector<BufHandle>& buf_args,
const std::vector<ExprHandle>& args);
std::vector<BufPtr> buf_out_args() const {
return buf_out_args_;
}
std::string func_name() const {
return func_name_;
}
std::vector<BufPtr> buf_args() const {
return buf_args_;
}
std::vector<ExprPtr> args() const {
return args_;
}
void set_buf_out_args(std::vector<BufPtr> buf_out_args) {
buf_out_args_ = std::move(buf_out_args);
}
void set_buf_args(std::vector<BufPtr> buf_args) {
buf_args_ = std::move(buf_args);
}
void set_args(std::vector<ExprPtr> args) {
args_ = std::move(args);
}
ExternalCallWithAlloc(
std::string func_name,
std::vector<BufPtr> buf_out_args,
std::vector<BufPtr> buf_args,
std::vector<ExprPtr> args)
: func_name_(std::move(func_name)),
buf_out_args_(std::move(buf_out_args)),
buf_args_(std::move(buf_args)),
args_(std::move(args)) {}
private:
std::string func_name_;
std::vector<BufPtr> buf_out_args_;
std::vector<BufPtr> buf_args_;
std::vector<ExprPtr> args_;
};
} // namespace torch::jit::tensorexpr
```
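A minimal sketch of building the statements declared above by hand; the buffer names, shapes, and the `build_example_kernel` wrapper are illustrative, and the include paths are the usual tensorexpr ones. It constructs a two-level loop nest around a Store, binds the loops to GPU axes via the per-loop LoopOptions, and wraps the result in a Block.

```cpp
#include <torch/csrc/jit/tensorexpr/stmt.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>

using namespace torch::jit::tensorexpr;

StmtPtr build_example_kernel() {
  BufHandle A("A", {64, 32}, kFloat);
  BufHandle B("B", {64, 32}, kFloat);
  VarHandle i("i", kInt), j("j", kInt);
  // B[i, j] = A[i, j] * 2
  StmtPtr store = Store::make(B, {i, j}, A.load(i, j) * 2.f);
  ForPtr inner = For::make(j, 0, 32, store);
  ForPtr outer = For::make(i, 0, 64, inner);
  // LoopOptions bind a For loop to a GPU axis (or flag it as parallel).
  outer->set_gpu_block_index(LoopOptions::IDX_X);
  inner->set_gpu_thread_index(LoopOptions::IDX_X);
  // Block::make filters out null statements and parents the rest.
  return Block::make({outer});
}
```

Note that append_stmt, prepend_stmt, and the insert_stmt_* helpers all require the inserted statement to have no existing parent; clone a statement first if it already belongs to another Block.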
|
========================================================================================================================================
SOURCE CODE FILE: tensor.h
LINES: 1
SIZE: 10.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\tensor.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <functional>
#include <utility>
#include <vector>
#include <torch/csrc/jit/tensorexpr/expr.h>
#include <torch/csrc/jit/tensorexpr/reduction.h>
namespace torch::jit::tensorexpr {
class TORCH_API Tensor {
public:
Tensor(BufPtr buf, const std::vector<VarPtr>& args, const ExprPtr& body)
: buf_(std::move(buf)) {
stmt_ = constructStmt(args, body, {}, {});
}
Tensor(BufHandle buf, const std::vector<VarHandle>& args, ExprHandle body)
: Tensor(buf.node(), VarHandleVectorToVarVector(args), body.node()) {}
Tensor(
BufPtr buf,
const std::vector<VarPtr>& args,
const std::vector<ExprPtr>& reduce_dims,
const std::vector<VarPtr>& reduce_args,
const ExprPtr& body)
: buf_(std::move(buf)) {
stmt_ = constructStmt(args, body, reduce_dims, reduce_args);
}
Tensor(
BufHandle buf,
const std::vector<VarHandle>& args,
const std::vector<ExprHandle>& reduce_dims,
const std::vector<VarHandle>& reduce_args,
ExprHandle body)
: Tensor(
buf.node(),
VarHandleVectorToVarVector(args),
ExprHandleVectorToExprVector(reduce_dims),
VarHandleVectorToVarVector(reduce_args),
body.node()) {}
Tensor(BufPtr buf, StmtPtr stmt)
: buf_(std::move(buf)), stmt_(std::move(stmt)) {}
BufPtr buf() const {
return buf_;
}
StmtPtr stmt() const {
return stmt_;
}
template <typename T>
inline ExprHandle load(const std::vector<T>& args) const;
template <typename... Ts>
inline ExprHandle load(const Ts&... ts) const;
private:
StmtPtr constructStmt(
const std::vector<VarPtr>& args,
const ExprPtr& body,
const std::vector<ExprPtr>& reduce_dims,
const std::vector<VarPtr>& reduce_args) const;
BufPtr buf_;
StmtPtr stmt_;
};
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::optional<std::vector<ExprHandle>>& strides,
const std::function<ExprHandle(const VarHandle&)>& body_func);
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::function<ExprHandle(const VarHandle&)>& body_func);
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::optional<std::vector<ExprHandle>>& strides,
const std::function<ExprHandle(const VarHandle&, const VarHandle&)>&
body_func);
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::function<ExprHandle(const VarHandle&, const VarHandle&)>&
body_func);
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::optional<std::vector<ExprHandle>>& strides,
const std::function<
ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>&
body_func);
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::function<
ExprHandle(const VarHandle&, const VarHandle&, const VarHandle&)>&
body_func);
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::optional<std::vector<ExprHandle>>& strides,
const std::function<ExprHandle(
const VarHandle&,
const VarHandle&,
const VarHandle&,
const VarHandle&)>& body_func);
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::function<ExprHandle(
const VarHandle&,
const VarHandle&,
const VarHandle&,
const VarHandle&)>& body_func);
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::optional<std::vector<ExprHandle>>& strides,
const std::function<ExprHandle(const std::vector<VarHandle>&)>& body_func);
TORCH_API Tensor Compute(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::function<ExprHandle(const std::vector<VarHandle>&)>& body_func);
inline std::vector<VarHandle> create_index_vars(
const std::vector<ExprHandle>& dims) {
std::vector<VarHandle> vars;
vars.reserve(dims.size());
for (const ExprHandle& dim : dims) {
vars.emplace_back(alloc<Var>(
"i", dim.dtype().scalar_type() == ScalarType::Long ? kLong : kInt));
}
return vars;
}
// Handle reductions over a Reducer and a body_func which produces values.
template <typename InitFunc, typename BodyFunc>
Tensor Reduce(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::optional<std::vector<ExprHandle>>& strides,
const Reducer& reducer,
const InitFunc& init_func,
const BodyFunc& body_func,
const std::vector<ExprHandle>& reduce_dims) {
std::vector<VarHandle> vars = create_index_vars(dims);
std::vector<VarHandle> reduce_vars = create_index_vars(reduce_dims);
// If reduce_vars is empty, then it's not a reduction, but rather a simple
// copy
if (reduce_vars.empty()) {
ExprHandle body = Reducer::getReduceBody(body_func, vars);
BufHandle func_result =
Buf::make(func_name, dims, body.dtype(), std::nullopt, strides);
return Tensor(std::move(func_result), vars, std::move(body));
}
std::vector<VarHandle> all_vars;
all_vars.insert(all_vars.end(), vars.begin(), vars.end());
all_vars.insert(all_vars.end(), reduce_vars.begin(), reduce_vars.end());
ExprHandle body = Reducer::getReduceBody(body_func, all_vars);
std::vector<ExprHandle> output_args(vars.begin(), vars.end());
ExprHandle init_expr = Cast::make(body.dtype(), init_func(vars));
BufHandle func_result = Buf::make(func_name, dims, body.dtype(), init_expr);
ExprHandle reduce_op = reducer(func_result, body, output_args, reduce_vars);
if (body.dtype() == kBFloat16) {
ExprHandle init_expr_acc = Cast::make(kFloat, init_func(vars));
BufHandle func_result_acc =
Buf::make(func_name + "_acc", dims, kFloat, init_expr_acc);
reduce_op = reducer(
func_result,
std::move(func_result_acc),
body,
output_args,
reduce_vars);
}
Tensor t = Tensor(
std::move(func_result),
vars,
reduce_dims,
reduce_vars,
std::move(reduce_op));
return t;
}
template <typename InitFunc, typename BodyFunc>
Tensor Reduce(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const Reducer& reducer,
const InitFunc& init_func,
const BodyFunc& body_func,
const std::vector<ExprHandle>& reduce_dims) {
return Reduce<InitFunc, BodyFunc>(
func_name,
dims,
std::nullopt,
reducer,
init_func,
body_func,
reduce_dims);
}
template <typename BodyFunc>
Tensor Reduce(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::optional<std::vector<ExprHandle>>& strides,
const Reducer& reducer,
const BodyFunc& body_func,
const std::vector<ExprHandle>& reduce_dims) {
return Reduce(
func_name,
dims,
strides,
reducer,
[&](ParameterList& p [[maybe_unused]]) {
return ExprHandle(reducer.initializer());
},
body_func,
reduce_dims);
}
template <typename BodyFunc>
Tensor Reduce(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const Reducer& reducer,
const BodyFunc& body_func,
const std::vector<ExprHandle>& reduce_dims) {
return Reduce<BodyFunc>(
func_name, dims, std::nullopt, reducer, body_func, reduce_dims);
}
// Overload which allows inline lambda functions for the body_func.
template <typename BodyFunc>
Tensor Reduce(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::optional<std::vector<ExprHandle>>& strides,
const Reducer& reducer,
const BodyFunc&& body_func,
const std::vector<ExprHandle>& reduce_dims) {
return Reduce(func_name, dims, strides, reducer, body_func, reduce_dims);
}
template <typename BodyFunc>
Tensor Reduce(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const Reducer& reducer,
const BodyFunc&& body_func,
const std::vector<ExprHandle>& reduce_dims) {
return Reduce(func_name, dims, std::nullopt, reducer, body_func, reduce_dims);
}
TORCH_API Tensor Reduce(
const std::string& name,
const std::vector<ExprHandle>& dims,
const std::optional<std::vector<ExprHandle>>& strides,
const Reducer& reducer,
const BufHandle& buffer,
const std::vector<ExprHandle>& reduce_dims);
TORCH_API Tensor Reduce(
const std::string& name,
const std::vector<ExprHandle>& dims,
const Reducer& reducer,
const BufHandle& buffer,
const std::vector<ExprHandle>& reduce_dims);
// Overload for the common case of all dimensions of a previously Computed
// Tensor.
TORCH_API Tensor Reduce(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const std::optional<std::vector<ExprHandle>>& strides,
const Reducer& reducer,
const Tensor& tensor,
const std::vector<ExprHandle>& reduce_dims);
TORCH_API Tensor Reduce(
const std::string& func_name,
const std::vector<ExprHandle>& dims,
const Reducer& reducer,
const Tensor& tensor,
const std::vector<ExprHandle>& reduce_dims);
template <typename... Ts>
inline ExprHandle Tensor::load(const Ts&... ts) const {
std::vector<ExprHandle> params({ExprHandle(ts)...});
return Load::make(BufHandle(this->buf()), params);
}
template <typename T>
inline ExprHandle Tensor::load(const std::vector<T>& args) const {
std::vector<ExprHandle> params(args.begin(), args.end());
return Load::make(BufHandle(this->buf()), params);
}
template <typename... Ts>
inline ExprHandle BufHandle::load(const Ts&... ts) const {
std::vector<ExprHandle> params({ExprHandle(ts)...});
return ExprHandle(alloc<Load>(node(), ExprHandleVectorToExprVector(params)));
}
template <typename T>
inline ExprHandle BufHandle::load(const std::vector<T>& args) const {
std::vector<ExprHandle> params(args.begin(), args.end());
return ExprHandle(alloc<Load>(node(), ExprHandleVectorToExprVector(params)));
}
inline ExprHandle BufHandle::load(const std::vector<ExprHandle>& args) const {
return this->template load<ExprHandle>(args);
}
} // namespace torch::jit::tensorexpr
```
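A minimal sketch of the Compute and Reduce entry points declared above (buffer names and the wrapper function are illustrative): a pointwise Tensor and a full sum reduction, each pairing an output Buf with the Stmt that computes it.

```cpp
#include <torch/csrc/jit/tensorexpr/reduction.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>

using namespace torch::jit::tensorexpr;

void compute_and_reduce_example() {
  BufHandle A("A", {64}, kFloat);
  BufHandle B("B", {64}, kFloat);
  // Pointwise: C[i] = A[i] + B[i]
  Tensor C = Compute("C", {64}, [&](const VarHandle& i) {
    return A.load(i) + B.load(i);
  });
  // Full reduction over 64 elements: S = sum_i A[i]
  Tensor S = Reduce(
      "S", {}, Sum(), [&](const VarHandle& i) { return A.load(i); }, {64});
  // Each Tensor pairs an output Buf with the loop-nest Stmt computing it.
  StmtPtr c_stmt = C.stmt();
  (void)c_stmt;
  (void)S;
}
```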
|
=================================================================================================================================================
SOURCE CODE FILE: tensorexpr_init.h
LINES: 1
SIZE: 0.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\tensorexpr_init.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/python/pybind.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::jit {
// Initialize Python bindings for Tensor Expressions
void initTensorExprBindings(PyObject* module);
} // namespace torch::jit
```
|
=======================================================================================================================================
SOURCE CODE FILE: types.h
LINES: 1
SIZE: 4.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\types.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <iosfwd>
#include <c10/core/ScalarType.h>
#include <c10/util/Logging.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/exceptions.h>
namespace torch::jit::tensorexpr {
using int32 = std::int32_t;
class Dtype;
TORCH_API std::ostream& operator<<(std::ostream& stream, const Dtype& dtype);
using ScalarType = c10::ScalarType;
enum ElementType {
kAllTypes = 0,
kIntegralTypes = 1 << 0,
kFloatingPointTypes = 1 << 1,
kBoolType = 1 << 2,
kComplexTypes = 1 << 3,
kQintTypes = 1 << 4,
kNonComplexOrQintTypes = kIntegralTypes | kBoolType | kFloatingPointTypes,
};
// Data types for scalar and vector elements.
class TORCH_API Dtype {
public:
explicit Dtype(int8_t type)
: scalar_type_(static_cast<ScalarType>(type)), lanes_(1) {}
explicit Dtype(ScalarType type) : scalar_type_(type), lanes_(1) {}
Dtype(int8_t type, int64_t lanes)
: scalar_type_(static_cast<ScalarType>(type)), lanes_(lanes) {}
Dtype(ScalarType type, int64_t lanes) : scalar_type_(type), lanes_(lanes) {}
Dtype(Dtype type, int64_t lanes)
: scalar_type_(type.scalar_type_), lanes_(lanes) {
if (type.lanes() != 1) {
throw malformed_input("dtype lanes dont match");
}
}
int64_t lanes() const {
return lanes_;
}
ScalarType scalar_type() const {
return scalar_type_;
}
Dtype scalar_dtype() const;
bool operator==(const Dtype& other) const {
return scalar_type_ == other.scalar_type_ && lanes_ == other.lanes_;
}
bool operator!=(const Dtype& other) const {
return !(*this == other);
}
int byte_size() const;
std::string ToCppString() const;
bool is_integral() const {
return c10::isIntegralType(scalar_type_, true);
}
bool is_floating_point() const {
return c10::isFloatingType(scalar_type_);
}
bool is_signed() const {
return c10::isSignedType(scalar_type_);
}
Dtype cloneWithScalarType(ScalarType nt) const {
return Dtype(nt, lanes_);
}
private:
friend TORCH_API std::ostream& operator<<(
std::ostream& stream,
const Dtype& dtype);
ScalarType scalar_type_;
int64_t lanes_; // the vector width (number of lanes) for a vector type
};
extern TORCH_API Dtype kHandle;
#define NNC_DTYPE_DECLARATION(ctype, name) extern TORCH_API Dtype k##name;
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, NNC_DTYPE_DECLARATION)
NNC_DTYPE_DECLARATION(c10::quint8, QUInt8)
NNC_DTYPE_DECLARATION(c10::qint8, QInt8)
#undef NNC_DTYPE_DECLARATION
template <typename T>
TORCH_API Dtype ToDtype();
#define NNC_TODTYPE_DECLARATION(ctype, name) \
template <> \
inline Dtype ToDtype<ctype>() { \
return k##name; \
}
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, NNC_TODTYPE_DECLARATION)
NNC_TODTYPE_DECLARATION(c10::quint8, QUInt8)
NNC_TODTYPE_DECLARATION(c10::qint8, QInt8)
#undef NNC_TODTYPE_DECLARATION
TORCH_API Dtype ToDtype(ScalarType type);
inline Dtype promoteTypes(Dtype a, Dtype b) {
if (a.lanes() != b.lanes()) {
throw malformed_input("promoting types with different lanes");
}
return Dtype(
static_cast<ScalarType>(c10::promoteTypes(
static_cast<c10::ScalarType>(a.scalar_type()),
static_cast<c10::ScalarType>(b.scalar_type()))),
a.lanes());
}
inline Dtype BinaryOpDtype(
Dtype op1_dtype,
Dtype op2_dtype,
ScalarType ret_type = ScalarType::Undefined) {
if (op1_dtype == op2_dtype) {
if (ret_type == ScalarType::Undefined) {
return op1_dtype;
}
return ToDtype(ret_type);
}
if (op1_dtype.lanes() != op2_dtype.lanes()) {
throw malformed_input("lanes dont match");
}
int64_t lanes = op1_dtype.lanes();
Dtype resultType = promoteTypes(op1_dtype, op2_dtype);
if (resultType.scalar_type() == ScalarType::Undefined) {
throw malformed_input("scalar type doesn't match");
}
if (lanes == 1) {
// Use the fixed scalar Dtypes.
return ToDtype(resultType.scalar_type());
}
return resultType;
}
} // namespace torch::jit::tensorexpr
namespace std {
using torch::jit::tensorexpr::Dtype;
std::string to_string(const Dtype& dtype);
using torch::jit::tensorexpr::ScalarType;
std::string to_string(const ScalarType& dtype);
} // namespace std
```
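A small sketch of how the Dtype helpers above behave; the results noted in the comments follow from the promotion rules in this header, and the wrapper function name is illustrative.

```cpp
#include <torch/csrc/jit/tensorexpr/types.h>

using namespace torch::jit::tensorexpr;

void dtype_example() {
  Dtype float_x4(kFloat, 4);                    // 4-lane vector of float
  Dtype promoted = promoteTypes(kInt, kFloat);  // kFloat; lanes must match
  Dtype result = BinaryOpDtype(kInt, kFloat);   // kFloat, via promoteTypes
  (void)float_x4;
  (void)promoted;
  (void)result;
}
```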
|
=====================================================================================================================================================
SOURCE CODE FILE: unique_name_manager.h
LINES: 1
SIZE: 0.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\unique_name_manager.h
ENCODING: utf-8
```h
#pragma once
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
namespace torch::jit::tensorexpr {
class VarHandle;
class Var;
using VarNameMap = std::unordered_map<VarPtr, std::string>;
// A manager to get unique names from vars.
// It starts with the name hint of the var and appends "_" + $counter until it
// hits a unique name.
class TORCH_API UniqueNameManager {
public:
const std::string& get_unique_name(const VarHandle& v);
const std::string& get_unique_name(const VarPtr& v);
private:
friend class ScopedVarName;
VarNameMap unique_name_mapping_;
std::unordered_map<std::string, int> unique_name_count_;
std::unordered_set<std::string> all_unique_names_;
};
} // namespace torch::jit::tensorexpr
```
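A small sketch of the naming scheme described in the comment above; the exact strings depend on the implementation, but two Vars sharing a name hint are expected to resolve to the hint itself and a "_&lt;counter&gt;" variant.

```cpp
#include <torch/csrc/jit/tensorexpr/expr.h>
#include <torch/csrc/jit/tensorexpr/unique_name_manager.h>

using namespace torch::jit::tensorexpr;

void unique_name_example() {
  UniqueNameManager names;
  VarHandle a("x", kInt);
  VarHandle b("x", kInt);  // distinct Var with the same name hint
  const std::string& name_a = names.get_unique_name(a);  // typically "x"
  const std::string& name_b = names.get_unique_name(b);  // typically "x_1"
  (void)name_a;
  (void)name_b;
}
```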
|
=================================================================================================================================================
SOURCE CODE FILE: var_substitutor.h
LINES: 1
SIZE: 1.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\tensorexpr\var_substitutor.h
ENCODING: utf-8
```h
#pragma once
#include <unordered_map>
#include <utility>
#include <vector>
#include <torch/csrc/jit/tensorexpr/analysis.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/reduction.h>
namespace torch::jit::tensorexpr {
using VarMapping = std::vector<std::pair<VarPtr, ExprPtr>>;
class VarSubMutator : public IRMutator {
public:
VarSubMutator(const VarMapping& var_mapping) {
for (auto& entry : var_mapping) {
VarPtr key_var = entry.first;
ExprPtr value = entry.second;
if (!key_var) {
throw malformed_input("missing key in VarSubMutator");
}
var_mapping_[std::move(key_var)] = std::move(value);
}
}
ExprPtr mutate(const VarPtr& var) override {
auto iter = var_mapping_.find(var);
if (iter == var_mapping_.end()) {
return var;
}
return iter->second;
}
ExprPtr mutate(const ReduceOpPtr& var) override {
auto body = var->body()->accept_mutator(this);
std::vector<VarPtr> new_inner;
for (const auto& v : var->reduce_args()) {
ExprPtr e = v->accept_mutator(this);
if (VarPtr new_var = to<Var>(e)) {
new_inner.push_back(std::move(new_var));
} else {
VarFinder varFinder;
e->accept(&varFinder);
auto varlist = varFinder.vars();
new_inner.insert(new_inner.end(), varlist.begin(), varlist.end());
}
}
return alloc<ReduceOp>(body, new_inner, var->reducer());
}
private:
std::unordered_map<VarPtr, ExprPtr> var_mapping_;
};
} // namespace torch::jit::tensorexpr
```
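A minimal sketch of applying VarSubMutator to an expression (variable names and the wrapper function are illustrative): every occurrence of one Var is rewritten to a replacement expression.

```cpp
#include <torch/csrc/jit/tensorexpr/expr.h>
#include <torch/csrc/jit/tensorexpr/var_substitutor.h>

using namespace torch::jit::tensorexpr;

ExprPtr substitute_example() {
  VarHandle x("x", kInt);
  VarHandle y("y", kInt);
  ExprHandle e = x * 2 + 1;
  // Rewrite every occurrence of x with y.
  VarMapping mapping = {{to<Var>(x.node()), y.node()}};
  VarSubMutator sub(mapping);
  return e.node()->accept_mutator(&sub);  // (y * 2) + 1
}
```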
|
=========================================================================================================================================
SOURCE CODE FILE: file_check.h
LINES: 1
SIZE: 2.56 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\testing\file_check.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <memory>
#include <string>
namespace torch::jit {
struct Graph;
namespace testing {
struct FileCheckImpl;
struct FileCheck {
public:
TORCH_API explicit FileCheck();
TORCH_API ~FileCheck();
// Run FileCheck against test string
TORCH_API void run(const std::string& test_string);
// Run FileCheck against dump of graph IR
TORCH_API void run(const Graph& graph);
// Parsing input checks string and run against test string / dump of graph IR
TORCH_API void run(
const std::string& input_checks_string,
const std::string& test_string);
TORCH_API void run(
const std::string& input_checks_string,
const Graph& graph);
// Checks that the string occurs, starting at the end of the most recent match
TORCH_API FileCheck* check(const std::string& str);
// Checks that the string does not occur between the previous match and next
// match. Consecutive check_nots test against the same previous match and next
// match
TORCH_API FileCheck* check_not(const std::string& str);
// Checks that the string occurs on the same line as the previous match
TORCH_API FileCheck* check_same(const std::string& str);
// Checks that the string occurs on the line immediately following the
// previous match
TORCH_API FileCheck* check_next(const std::string& str);
// Checks that the string occurs count number of times, starting at the end
// of the previous match. If exactly is true, checks that there are exactly
// count many matches
TORCH_API FileCheck* check_count(
const std::string& str,
size_t count,
bool exactly = false);
// A series of consecutive check_dags get turned into a group of checks
// which can appear in any order relative to each other. The checks begin
// at the end of the previous match, and the match for the check_dag group
// is the minimum match of all individual checks to the maximum match of all
// individual checks.
TORCH_API FileCheck* check_dag(const std::string& str);
// Checks that source token is highlighted in str (usually an error message).
TORCH_API FileCheck* check_source_highlighted(const std::string& str);
// Checks that the regex matched string occurs, starting at the end of the
// most recent match
TORCH_API FileCheck* check_regex(const std::string& str);
// reset checks
TORCH_API void reset();
private:
bool has_run = false;
std::unique_ptr<FileCheckImpl> fcImpl;
};
} // namespace testing
} // namespace torch::jit
```
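A minimal sketch of chaining the checks above against an IR dump (the check strings are illustrative): each check starts matching at the end of the previous match, and check_not is scoped between the neighbouring matches.

```cpp
#include <torch/csrc/jit/testing/file_check.h>
#include <string>

void filecheck_example(const std::string& ir_dump) {
  using torch::jit::testing::FileCheck;
  FileCheck()
      .check("graph(")
      ->check_count("aten::add", 2, /*exactly=*/true)
      ->check_not("aten::mul")
      ->run(ir_dump);
}
```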
|
================================================================================================================================================
SOURCE CODE FILE: hooks_for_testing.h
LINES: 1
SIZE: 0.58 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\testing\hooks_for_testing.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/api/compilation_unit.h>
#include <functional>
#include <memory>
namespace torch::jit {
struct Module;
using ModuleHook = std::function<void(Module module)>;
using FunctionHook = std::function<void(StrongFunctionPtr function)>;
TORCH_API void didFinishEmitModule(Module module);
TORCH_API void didFinishEmitFunction(StrongFunctionPtr defined);
TORCH_API void setEmitHooks(ModuleHook for_module, FunctionHook for_fn);
TORCH_API std::pair<ModuleHook, FunctionHook> getEmitHooks();
} // namespace torch::jit
```
|
============================================================================================================================================
SOURCE CODE FILE: backend_data.h
LINES: 1
SIZE: 1.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\backend\backend_data.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/lazy/backend/backend_device.h>
#include <torch/csrc/lazy/core/shape.h>
#include <cstring>
namespace torch::lazy {
class TORCH_API BackendData {
public:
struct Info {
/**
* Used by Lazy Graph Executor to tag info on BackendData objs
* */
virtual ~Info() = default;
};
/**
* Represents (Tensor) data stored on a backend device
* in its native format.
* */
using Handle = int64_t;
BackendData(BackendDevice device, Shape shape)
: device_(std::move(device)), shape_(std::move(shape)) {}
virtual ~BackendData() = default;
const BackendDevice& device() const {
return device_;
}
const Shape& shape() const {
return shape_;
}
Info* info() const {
return info_.get();
}
std::shared_ptr<Info> SetInfo(std::shared_ptr<Info> info) {
std::swap(info, info_);
return info;
}
virtual Handle GetHandle() = 0;
virtual void Assign(const BackendData& data) = 0;
virtual bool HasValue() const = 0;
private:
BackendDevice device_;
Shape shape_;
std::shared_ptr<Info> info_;
};
using BackendDataPtr = std::shared_ptr<BackendData>;
} // namespace torch::lazy
```
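A minimal sketch of the Info tagging hook above; DebugTag and the helper function are hypothetical, while SetInfo and info() are the accessors declared on BackendData.

```cpp
#include <torch/csrc/lazy/backend/backend_data.h>
#include <memory>
#include <string>
#include <utility>

// Hypothetical metadata attached to a BackendData by deriving from Info.
struct DebugTag : torch::lazy::BackendData::Info {
  explicit DebugTag(std::string note) : note(std::move(note)) {}
  std::string note;
};

void tag_backend_data(const torch::lazy::BackendDataPtr& data) {
  data->SetInfo(std::make_shared<DebugTag>("created during tracing"));
  // info() returns a non-owning pointer to the currently attached Info.
  auto* tag = static_cast<DebugTag*>(data->info());
  (void)tag;
}
```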
|
==============================================================================================================================================
SOURCE CODE FILE: backend_device.h
LINES: 1
SIZE: 2.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\backend\backend_device.h
ENCODING: utf-8
```h
#pragma once
#include <memory>
#include <ostream>
#include <string>
#include <ATen/Tensor.h>
#include <c10/macros/Export.h>
#include <c10/util/Deprecated.h>
#include <optional>
namespace c10 {
struct Device;
}
namespace torch::lazy {
// Backends should extend this and define their own supported hardware types.
struct TORCH_API BackendDeviceType {
int8_t type{(int8_t)at::kCPU};
// Note: the previous default value was '0', which also maps to at::kCPU; at
// least now it is explicit. We may want to make the default/undefined
// semantics clearer, though.
BackendDeviceType() : type((int8_t)at::kCPU) {}
BackendDeviceType(int8_t type) : type(type) {}
virtual ~BackendDeviceType() = default;
virtual std::string toString() const {
return "Unknown";
}
};
class TORCH_API BackendDevice {
public:
// The default constructor will set both the device type and ordinal
// to backend specific defaults.
BackendDevice();
BackendDevice(std::shared_ptr<BackendDeviceType>&& type, int64_t ordinal);
int8_t type() const;
int64_t ordinal() const {
return ordinal_;
}
bool operator==(const BackendDevice& other) const {
return compare(other) == 0;
}
bool operator!=(const BackendDevice& other) const {
return compare(other) != 0;
}
bool operator<(const BackendDevice& rhs) const {
return compare(rhs) < 0;
}
std::string toString() const;
private:
int compare(const BackendDevice& rhs) const;
// Use shared_ptr instead of unique_ptr so that BackendDevice can be copied.
std::shared_ptr<BackendDeviceType> type_;
int64_t ordinal_;
};
TORCH_API std::ostream& operator<<(
std::ostream& os,
const BackendDevice& device);
// Helpers for converting a c10::Device to BackendDevice and vice versa.
TORCH_API BackendDevice atenDeviceToBackendDevice(const c10::Device& device);
TORCH_API c10::Device backendDeviceToAtenDevice(const BackendDevice& device);
// Tries to extract the backend device out of the lazy tensor. Returns nullopt
// if the input is not a lazy tensor.
TORCH_API std::optional<BackendDevice> GetBackendDevice(
const at::ITensorListRef tensors);
TORCH_API std::optional<BackendDevice> GetBackendDevice(
const at::TensorList tensors);
TORCH_API std::optional<BackendDevice> GetBackendDevice(
const at::Tensor& tensor);
TORCH_API std::optional<BackendDevice> GetBackendDevice(
const std::optional<c10::Device>& device);
// For variadic template.
TORCH_API std::optional<BackendDevice> GetBackendDevice();
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Winfinite-recursion")
template <typename T, typename... Args>
std::optional<BackendDevice> GetBackendDevice(
const T& tensor,
const Args&... forward_tensors) {
auto optional_device = GetBackendDevice(tensor);
if (optional_device) {
return optional_device;
}
return GetBackendDevice(forward_tensors...);
}
C10_DIAGNOSTIC_POP()
} // namespace torch::lazy
```
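A small sketch of the variadic GetBackendDevice overload above: arguments are inspected left to right and the device of the first lazy tensor found is returned. The wrapper function name is illustrative.

```cpp
#include <ATen/Tensor.h>
#include <torch/csrc/lazy/backend/backend_device.h>
#include <optional>

std::optional<torch::lazy::BackendDevice> pick_device(
    const at::Tensor& a,
    const at::Tensor& b) {
  // Falls through to the zero-argument overload (nullopt) if neither tensor
  // is a lazy tensor.
  return torch::lazy::GetBackendDevice(a, b);
}
```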
|
=================================================================================================================================================
SOURCE CODE FILE: backend_interface.h
LINES: 1
SIZE: 4.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\backend\backend_interface.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Tensor.h>
#include <torch/csrc/lazy/backend/backend_data.h>
#include <torch/csrc/lazy/backend/backend_device.h>
#include <torch/csrc/lazy/backend/lowering_context.h>
#include <torch/csrc/lazy/core/lazy_graph_executor.h>
#include <torch/csrc/lazy/core/shape.h>
#include <torch/csrc/lazy/core/tensor.h>
namespace torch::lazy {
struct IrBuilder;
/**
* Work in progress- don't treat this as a stable interface yet!
*/
class TORCH_API BackendImplInterface {
public:
virtual ~BackendImplInterface() = default;
/**
* Initialization/Teardown
* */
// No-op by default. Allows custom functionality to be exposed through
// extension bindings.
virtual void InitializeAtenBindings() const {}
virtual void PrepareToExit() const = 0;
/**
* Configuration
* */
virtual void SetRngSeed(size_t seed) const = 0;
/**
* IR Tracing
* */
virtual const IrBuilder* GetIrBuilder() const = 0;
/**
* Data Transfer
* */
virtual BackendDataPtr MakeComputationDataFromTensor(
const at::Tensor& tensor,
const Shape& shape,
const BackendDevice& device) const = 0;
virtual BackendDataPtr MakeComputationDataFromScalar(
const at::Scalar& scalar,
const torch::lazy::BackendDevice& device) const = 0;
virtual BackendDataPtr CreateDataPlaceholder(
const BackendDevice& device,
const Shape& shape) const = 0;
// Gets backend data if the node is a device data node. Otherwise returns
// nullptr
virtual BackendDataPtr GetComputationDataFromNode(const Node*) const = 0;
virtual at::Tensor MakeTensorFromComputationData(
const BackendDataPtr data,
std::optional<at::ScalarType> logical_scalar_type) const = 0;
/**
* Lowering, Compilation, Execution
* */
virtual std::unique_ptr<LoweringContext> CreateLoweringContext(
const std::string& name,
BackendDevice device,
c10::ArrayRef<const torch::lazy::Node*> post_order,
Util::EmissionMap emit_status) const = 0;
virtual std::unique_ptr<LoweringContext> CreateLoweringContext(
const std::string& name,
BackendDevice device) const = 0;
// TODO(whc) need to keep this?
virtual std::vector<std::string> GetCompilationDevices(
const std::string& device,
c10::ArrayRef<std::string> devices) const = 0;
virtual std::vector<ComputationPtr> Compile(
std::vector<ComputationPtr> instances) const = 0;
virtual std::vector<BackendDataPtr> ExecuteComputation(
torch::lazy::ComputationPtr computation,
c10::ArrayRef<BackendDataPtr> arguments,
const BackendDevice& device) const = 0;
/**
* Device Configuration
* */
// Set or get the default device type.
// For backends used with virtual c10::Devices, this configures what real
// device type the backend should use, and matters if the backend supports
// more than one type of real device.
virtual std::shared_ptr<BackendDeviceType> GetDefaultDeviceType() const = 0;
virtual void SetDefaultDeviceType(int8_t type) = 0;
// Set or get the default device ordinal.
// For backends that supports multi-device, this configures what the
// default device the backend should use.
virtual int64_t GetDefaultDeviceOrdinal() const = 0;
virtual void SetDefaultDeviceOrdinal(int64_t) = 0;
// Specify which aten device should be used for eager fallback;
// this may change depending on the current 'Default' DeviceType.
virtual at::DeviceType EagerFallbackDeviceType() const = 0;
// Query all available backend devices
virtual std::vector<BackendDevice> GetBackendDevices() const = 0;
virtual std::string CreateMetricReport() const {
return "";
}
// Map a particular c10:: device to a concrete backend device
// Note: c10:: devices may be virtual or concrete. xla:: and lazy:: are
// virtual devices, meaning they may map to a gpu, tpu, etc. behind the
// scenes. In the future, non-virtual c10:: devices may also use lazy tensors
// through a mode, in which case these APIs should still work, but should be
// identity mappings.
virtual BackendDevice GetBackendDevice(c10::Device device) const = 0;
// TODO(whc)
// Additional APIs expected for supporting distributed training, to be
// designed
/**
* Debug/Metrics
* */
// virtual std::map<std::string, Metric> GetMetrics() const = 0;
// virtual MemoryInfo GetMemoryInfo(const std::string& device) = 0;
virtual std::string GetComputationBackendText(
const ComputationPtr computation) const = 0;
};
class TORCH_API BackendRegistrar {
public:
BackendRegistrar(const BackendImplInterface* backend_impl_interface);
};
TORCH_API bool hasBackend();
TORCH_API const BackendImplInterface* getBackend();
TORCH_API const IrBuilder* getIrBuilder();
} // namespace torch::lazy
```
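A hedged usage sketch (not part of the header above): how client code might query whatever backend has been registered through `BackendRegistrar`. It assumes some backend registered itself at static-initialization time; the function name is illustrative only.

```cpp
#include <iostream>

#include <torch/csrc/lazy/backend/backend_interface.h>

// Minimal sketch: report on the backend registered via BackendRegistrar.
void report_lazy_backend() {
  if (!torch::lazy::hasBackend()) {
    std::cout << "no lazy backend registered\n";
    return;
  }
  const torch::lazy::BackendImplInterface* backend = torch::lazy::getBackend();
  std::cout << "backend devices: " << backend->GetBackendDevices().size()
            << "\n"
            << backend->CreateMetricReport();
}
```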
|
================================================================================================================================================
SOURCE CODE FILE: lowering_context.h
LINES: 1
SIZE: 3.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\backend\lowering_context.h
ENCODING: utf-8
```h
#pragma once
#include <memory>
#include <string>
#include <vector>
#include <torch/csrc/lazy/backend/backend_data.h>
#include <torch/csrc/lazy/backend/backend_device.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/ir_util.h>
namespace torch::lazy {
class TORCH_API Computation {
public:
virtual int parameters_size() const = 0;
virtual const std::vector<Shape>& parameter_shapes() const = 0;
virtual const std::vector<std::string>& parameter_names() const = 0;
virtual const Shape& result_shape() const = 0;
virtual const std::string to_string() const = 0;
virtual ~Computation() = default;
  // Indicates whether this computation is being executed inside a mark step.
  // Assume false unless set otherwise.
bool in_mark_step = false;
};
using ComputationPtr = std::shared_ptr<Computation>;
// Keeps track of the code generation state.
class TORCH_API LoweringContext {
public:
LoweringContext(const std::string& name, BackendDevice device);
LoweringContext(
const std::string& name,
BackendDevice device,
c10::ArrayRef<const torch::lazy::Node*> post_order,
Util::EmissionMap emit_status);
virtual ~LoweringContext() = default;
static std::unique_ptr<LoweringContext> Create(
const std::string& name,
BackendDevice device,
c10::ArrayRef<const torch::lazy::Node*> post_order,
Util::EmissionMap emit_status);
static std::unique_ptr<LoweringContext> Create(
const std::string& name,
BackendDevice device);
const BackendDevice& device() const {
return device_;
}
// Retrieves the vector holding all the tensors associated with the parameter
// instructions which have been created.
const std::vector<BackendDataPtr>& GetParametersData() const;
// Adds a new input/output alias.
virtual void SetUpAlias(
const std::vector<int64_t>& output_index,
int64_t param_number,
const std::vector<int64_t>& param_index,
bool must_alias = false) {
// Dummy default implementation to do nothing.
}
// Check if parameter shape matches result at index.
virtual bool CheckResultShape(
const BackendDataPtr& parameter_data,
size_t result_idx) {
// Dummy default implementation to do nothing.
return false;
}
// Adds the given output as a component of the result tuple and returns its
// assigned position within the tuple.
virtual size_t AddResult(const torch::lazy::Output& output) = 0;
// Associates the given output with the input parameter of the given index and
// shape. Only used for the operator-by-operator execution, mostly for
// debugging purposes.
virtual void AddParameter(
const torch::lazy::Output& output,
size_t index,
const Shape& shape,
const std::string& name) = 0;
// Build the computation capturing all the operations created with the
// embedded builder (returned by the builder() API).
virtual ComputationPtr Build() = 0;
size_t GetEmittedNodeCount() const {
return emit_status_.size();
}
protected:
BackendDevice device_;
std::vector<BackendDataPtr> parameters_;
std::vector<size_t> parameter_sequence_;
Util::EmissionMap emit_status_;
};
} // namespace torch::lazy
```
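A minimal, hedged sketch of how a lowering context is typically obtained and finalized. It assumes a backend (and therefore a concrete LoweringContext subclass) is registered; the helper name is illustrative only.

```cpp
#include <torch/csrc/lazy/backend/lowering_context.h>

// Illustrative helper: build an (empty) computation for `device`.
torch::lazy::ComputationPtr build_empty_computation(
    const torch::lazy::BackendDevice& device) {
  auto ctx = torch::lazy::LoweringContext::Create("example_graph", device);
  // Real callers would register graph outputs via AddResult() before building.
  return ctx->Build();
}
```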
|
==================================================================================================================================
SOURCE CODE FILE: cache.h
LINES: 1
SIZE: 3.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\cache.h
ENCODING: utf-8
```h
/**
* Cache utils in this file is adapted from PyTorch/XLA
* https://github.com/pytorch/xla/blob/master/third_party/xla_client/cache.h
*/
#pragma once
#include <functional>
#include <list>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
namespace torch::lazy {
// Generic key and object cache with LRU expiration policy. The objects of type
// T will be stored as std::shared_ptr<T> and taken and returned as such, by the
// cache API.
template <
typename K,
typename T,
typename H = std::hash<K>,
typename E = std::equal_to<K>>
class Cache {
public:
using TypePtr = std::shared_ptr<T>;
using Element = std::pair<K, TypePtr>;
explicit Cache(size_t max_size) : max_size_(max_size) {}
// Adds an object to the cache, unless it already exists. If the cache grows
// beyond the limit set during construction, the oldest used object will be
// removed from the cache.
TypePtr Add(K key, TypePtr object) {
if (!max_size_) {
return object;
}
std::lock_guard<std::mutex> slock(lock_);
element_list_.emplace_front(Element(std::move(key), std::move(object)));
auto it = element_list_.begin();
auto emplace_result = element_map_.emplace(&it->first, it);
if (!emplace_result.second) {
element_list_.erase(it);
DoLRU(emplace_result.first->second);
} else if (element_list_.size() > max_size_) {
Element* last = &element_list_.back();
element_map_.erase(&last->first);
element_list_.pop_back();
}
return emplace_result.first->second->second;
}
// Retrieves the existing object if it exists. If it does, its position in
// the LRU list gets moved to the head of the list.
// Returns nullptr if no object with the specified key is found within the
// cache.
TypePtr Get(const K& key) {
if (!max_size_) {
return nullptr;
}
std::lock_guard<std::mutex> slock(lock_);
auto it = element_map_.find(&key);
if (it == element_map_.end()) {
return nullptr;
}
DoLRU(it->second);
return it->second->second;
}
TypePtr GetLatest() {
std::lock_guard<std::mutex> g(lock_);
TORCH_CHECK(!element_list_.empty());
return element_list_.front().second;
}
bool Erase(const K& key) {
if (!max_size_) {
return false;
}
std::lock_guard<std::mutex> slock(lock_);
auto it = element_map_.find(&key);
if (it == element_map_.end()) {
return false;
}
auto lit = it->second;
element_map_.erase(it);
element_list_.erase(lit);
return true;
}
void Clear() {
if (!max_size_) {
return;
}
std::lock_guard<std::mutex> slock(lock_);
element_map_.clear();
element_list_.clear();
}
int Numel() const {
if (!max_size_) {
return 0;
}
std::lock_guard<std::mutex> g(lock_);
TORCH_CHECK(element_map_.size() == element_list_.size());
return element_map_.size();
}
private:
using ElementList = std::list<Element>;
struct Hasher {
size_t operator()(const K* key) const {
return hasher(*key);
}
H hasher;
};
struct Equaler {
bool operator()(const K* k1, const K* k2) const {
return equaler(*k1, *k2);
}
E equaler;
};
using ElementMap = std::
unordered_map<const K*, typename ElementList::iterator, Hasher, Equaler>;
void DoLRU(typename ElementList::iterator it) {
element_list_.splice(element_list_.begin(), element_list_, it);
}
mutable std::mutex lock_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const size_t max_size_ = 0;
ElementList element_list_;
ElementMap element_map_;
};
} // namespace torch::lazy
```
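A small, hedged usage sketch of the LRU cache above. The key/value types are chosen purely for illustration, and <c10/util/Exception.h> is included explicitly since the header relies on TORCH_CHECK.

```cpp
#include <cassert>
#include <memory>
#include <string>

#include <c10/util/Exception.h> // provides TORCH_CHECK used by cache.h
#include <torch/csrc/lazy/core/cache.h>

void lru_cache_example() {
  torch::lazy::Cache<int, std::string> cache(/*max_size=*/2);
  cache.Add(1, std::make_shared<std::string>("one"));
  cache.Add(2, std::make_shared<std::string>("two"));
  cache.Get(1);                                         // key 1 becomes most recently used
  cache.Add(3, std::make_shared<std::string>("three")); // evicts key 2 (least recently used)
  assert(cache.Get(2) == nullptr);
  assert(cache.Get(1) != nullptr);
  assert(cache.Numel() == 2);
}
```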
|
===================================================================================================================================
SOURCE CODE FILE: config.h
LINES: 1
SIZE: 0.92 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\config.h
ENCODING: utf-8
```h
#pragma once
#include <c10/macros/Export.h>
#include <c10/util/Flags.h>
TORCH_DECLARE_bool(torch_lazy_ir_debug);
TORCH_DECLARE_bool(torch_lazy_handle_special_scalars);
TORCH_DECLARE_bool(torch_lazy_all_numbers_special_scalars);
TORCH_DECLARE_bool(torch_lazy_param_aliasing);
TORCH_DECLARE_bool(torch_lazy_reuse_ir);
TORCH_DECLARE_bool(torch_lazy_use_thread_pool);
TORCH_DECLARE_bool(torch_lazy_enable_device_data_cache);
TORCH_DECLARE_int(torch_lazy_compilation_cache_size);
TORCH_DECLARE_int(torch_lazy_device_data_cache_size);
TORCH_DECLARE_int(torch_lazy_io_thread_pool_size);
TORCH_DECLARE_int(torch_lazy_metrics_samples);
TORCH_DECLARE_int(torch_lazy_trim_graph_check_frequency);
TORCH_DECLARE_int(torch_lazy_trim_graph_size);
TORCH_DECLARE_string(torch_lazy_metrics_percentiles);
TORCH_DECLARE_int(torch_lazy_shape_cache_size);
namespace torch::lazy {
TORCH_API std::string& getLTCForceFallback();
}
```
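The flags declared above are defined in the library itself; once this header is included they can be read like ordinary globals. A tiny, hedged sketch:

```cpp
#include <torch/csrc/lazy/core/config.h>

// Returns true when IR node reuse through the trie cache is enabled.
bool ir_reuse_enabled() {
  return FLAGS_torch_lazy_reuse_ir;
}
```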
|
=======================================================================================================================================
SOURCE CODE FILE: debug_util.h
LINES: 1
SIZE: 1.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\debug_util.h
ENCODING: utf-8
```h
#pragma once
#include <string>
#include <vector>
#include <torch/csrc/lazy/core/tensor.h>
namespace torch::lazy {
TORCH_API std::function<std::vector<SourceLocation>()>&
GetPythonFramesFunction();
TORCH_API std::string GetFirstUserFrameInPython();
class TORCH_API DebugUtil {
public:
enum GraphFormat {
kText,
kDot,
kBackend,
};
static GraphFormat GetDefaultGraphFormat();
// Dumps the current Python frame and the IR Graph whose roots are the IR
// values held at the tensors. If indices is not nullptr, it selects the
// indices of the tensors whose graph will be emitted.
static std::string GetTensorsGraphInfo(
c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors,
const std::vector<size_t>* indices,
GraphFormat format = GetDefaultGraphFormat());
// If the environment variable LTC_SAVE_TENSORS_FILE is set to the proper
// output path, an instance of the report returned by GetTensorsGraphInfo() is
// saved.
static void SaveTensorsGraphInfo(
const char* name,
c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors,
const std::vector<size_t>* indices,
GraphFormat format = GetDefaultGraphFormat());
static bool ExperimentEnabled(const std::string& name);
};
} // namespace torch::lazy
```
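A hedged sketch of the most common call into DebugUtil: rendering the IR behind a set of lazy tensors as text. Passing nullptr for indices selects all of them; the wrapper name is illustrative.

```cpp
#include <string>

#include <torch/csrc/lazy/core/debug_util.h>

std::string dump_tensors_graph(
    c10::ArrayRef<torch::lazy::LazyTensorPtr> tensors) {
  return torch::lazy::DebugUtil::GetTensorsGraphInfo(
      tensors, /*indices=*/nullptr, torch::lazy::DebugUtil::kText);
}
```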
|
=======================================================================================================================================
SOURCE CODE FILE: dynamic_ir.h
LINES: 1
SIZE: 1.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\dynamic_ir.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/symbol.h>
#include <c10/core/ScalarType.h>
#include <c10/util/Flags.h>
#include <torch/csrc/lazy/core/hash.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/ir_metadata.h>
#include <torch/csrc/lazy/ts_backend/ts_node.h>
namespace torch::lazy {
/**
* The goal of "dynamic" Nodes is to patch a hole in our tracing.
* Previously, if a user called `sizes` on a Tensor, it would leak out
* of our tracing system, as `sizes` returns a torch.Size or an int. To
* prevent this from happening, we introduce DimensionNode, a new type
* of Node that abstracts the operation of getting the dimensions of a
* Tensor.
*
* Consider the following example:
* ```
* numel = x.shape()[0] * x.shape()[1]
* ```
*
* Here, `x.shape()[i]` will be a SizeNode (subclass of DimensionNode),
* and the multiplication of the two SizeNodes will be represented by
* a SizeMul (also a subclass of DimensionNode). Through this, we can
* prevent `numel` from being represented as a Python int and thus
* burned into the Graph.
*/
class TORCH_API DimensionNode {
public:
virtual bool isSymbolic() const {
return false;
}
virtual int64_t getDynamicValue() const {
TORCH_CHECK(false, "NYI");
}
virtual int64_t getStaticValue() const {
TORCH_CHECK(false, "NYI");
}
virtual ~DimensionNode() = default;
};
} // namespace torch::lazy
```
|
=================================================================================================================================
SOURCE CODE FILE: hash.h
LINES: 1
SIZE: 7.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\hash.h
ENCODING: utf-8
```h
/**
* Hash utils in this file is adapted from PyTorch/XLA
* https://github.com/pytorch/xla/blob/e0e5f937a0ba8d904f9608137dc8c51ba439df2d/third_party/xla_client/util.h
*/
#pragma once
#include <ATen/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/util/int128.h>
#include <torch/csrc/Export.h>
#include <cstring>
#include <set>
#include <string>
#include <string_view>
#include <vector>
namespace torch::lazy {
using size_t = std::size_t;
class TORCH_API hash_t : public c10::uint128 {
public:
  // Switch from typedef hash_t = uint128 to provide explicit casters
hash_t(int8_t val) : uint128(static_cast<uint32_t>(val)) {}
hash_t(int16_t val) : uint128(static_cast<uint32_t>(val)) {}
hash_t(int32_t val) : uint128(static_cast<uint32_t>(val)) {}
hash_t(int64_t val) : uint128(static_cast<uint64_t>(val)) {}
hash_t(uint32_t val) : uint128(val) {}
hash_t(uint64_t val) : uint128(val) {}
hash_t(uint128 val) : uint128(val) {}
hash_t(uint64_t top, uint64_t bottom) : uint128(top, bottom) {}
hash_t() = default;
};
// Std* functions use 64-bit hash
size_t TORCH_API StdDataHash(const void* data, size_t size);
size_t TORCH_API StdHashCombine(uintmax_t a, uintmax_t b);
// Other functions are all 128-bit
hash_t TORCH_API HashBlock(const void* data, size_t n, const hash_t& seed);
hash_t TORCH_API DataHash(const void* data, size_t size);
hash_t TORCH_API HashCombine(const hash_t& a, const hash_t& b);
size_t TORCH_API HashReduce(const hash_t& a);
// Returns a string representation of a hash
std::string TORCH_API HashToString(const hash_t& a);
struct HashReducer {
size_t operator()(const hash_t& value) const {
return HashReduce(value);
}
};
static inline hash_t StringHash(const char* data) {
return DataHash(data, std::strlen(data));
}
// Automatic templated implementation for 'arithmetic' types
template <typename T, std::enable_if_t<std::is_arithmetic_v<T>>* = nullptr>
hash_t Hash(const T& value) {
return DataHash(&value, sizeof(value));
}
// Added because on macOS builds the vector<bool> specialization
// breaks falling through to the templated arithmetic types above
hash_t TORCH_API Hash(const std::vector<bool>& value);
// Specialized implementations for proprietary types
static inline hash_t Hash(const c10::ScalarType& value) {
return DataHash(&value, sizeof(value));
}
static inline hash_t Hash(const c10::MemoryFormat& value) {
return DataHash(&value, sizeof(value));
}
static inline hash_t Hash(const c10::DeviceType& value) {
return DataHash(&value, sizeof(value));
}
static inline hash_t Hash(const c10::Device& value) {
return HashCombine(Hash(value.type()), Hash(value.index()));
}
static inline hash_t Hash(const c10::Layout& value) {
return DataHash(&value, sizeof(value));
}
static inline hash_t Hash(const c10::Scalar& value) {
switch (value.type()) {
case c10::ScalarType::ComplexDouble:
return Hash(value.toComplexDouble());
case c10::ScalarType::Double:
return Hash(value.toDouble());
case c10::ScalarType::Long:
return Hash(value.toLong());
case c10::ScalarType::Bool:
return Hash(value.toBool());
default:
TORCH_INTERNAL_ASSERT(false, "Unknown scalar type.", value.type());
}
}
static inline hash_t TensorHash(const at::Tensor& tensor) {
at::Tensor ctensor = tensor.contiguous();
int64_t size = ctensor.numel() * ctensor.element_size();
switch (ctensor.scalar_type()) {
case at::ScalarType::Bool:
return DataHash(ctensor.const_data_ptr<bool>(), size);
case at::ScalarType::Byte:
return DataHash(ctensor.const_data_ptr<uint8_t>(), size);
case at::ScalarType::Char:
return DataHash(ctensor.const_data_ptr<int8_t>(), size);
case at::ScalarType::Short:
return DataHash(ctensor.const_data_ptr<int16_t>(), size);
case at::ScalarType::Int:
return DataHash(ctensor.const_data_ptr<int32_t>(), size);
case at::ScalarType::Long:
return DataHash(ctensor.const_data_ptr<int64_t>(), size);
case at::ScalarType::Float:
return DataHash(ctensor.const_data_ptr<float>(), size);
case at::ScalarType::Double:
return DataHash(ctensor.const_data_ptr<double>(), size);
case at::ScalarType::BFloat16:
return DataHash(ctensor.const_data_ptr<at::BFloat16>(), size);
case at::ScalarType::Half:
return DataHash(ctensor.const_data_ptr<at::Half>(), size);
case at::ScalarType::ComplexFloat:
return DataHash(ctensor.const_data_ptr<c10::complex<float>>(), size);
case at::ScalarType::ComplexDouble:
return DataHash(ctensor.const_data_ptr<c10::complex<double>>(), size);
case at::ScalarType::UInt16:
return DataHash(ctensor.const_data_ptr<uint16_t>(), size);
case at::ScalarType::UInt32:
return DataHash(ctensor.const_data_ptr<uint32_t>(), size);
case at::ScalarType::UInt64:
return DataHash(ctensor.const_data_ptr<uint64_t>(), size);
default:
TORCH_INTERNAL_ASSERT(
false, "Unsupported scalar type:", ctensor.scalar_type());
}
}
static inline hash_t Hash(const std::string& value) {
return DataHash(value.data(), value.size());
}
static inline hash_t Hash(const std::string_view& value) {
return DataHash(value.data(), value.size());
}
static inline hash_t Hash(const at::Generator& value) {
return TensorHash(value.get_state());
}
// Taken from glibc's implementation of hashing optionals,
// we want to include a contribution to the hash to distinguish
// cases where one or another option was null, but we hope it doesn't
// collide with an actual scalar value.
//
// Use an arbitrary randomly-selected 64-bit integer rather than a
// small constant that we then hash at runtime so we don't have to
// repeatedly hash a constant at runtime.
// NOLINTNEXTLINE(*-narrowing-conversions)
static const int64_t kNullOpt = 0x8655d738f3678dda;
// Hashing for std::optional types contributes to hash
// for optionals with null value, important to distinguish
// between <nullopt, non-nullopt> and <non-nullopt, nullopt> cases
template <typename T>
hash_t Hash(const std::optional<T>& value) {
if (value.has_value()) {
return Hash(value.value());
} else {
return kNullOpt;
}
}
// Hashing of containers
// Forward declare to allow hashes of vectors of vectors to work.
template <typename T>
hash_t ContainerHash(const T& values);
template <typename T>
hash_t Hash(const std::vector<T>& values) {
return ContainerHash(values);
}
// Need a special case for std::optional<container>?
template <typename T>
hash_t Hash(const std::optional<std::vector<T>>& value) {
if (value.has_value()) {
return ContainerHash(value.value());
} else {
return kNullOpt;
}
}
template <typename T>
hash_t Hash(const std::set<T>& values) {
return ContainerHash(values);
}
template <typename T, typename S>
hash_t Hash(const std::pair<T, S>& values) {
return HashCombine(Hash(values.first), Hash(values.second));
}
static inline hash_t Hash(const hash_t& value) {
return value;
}
template <typename T>
hash_t Hash(c10::ArrayRef<T> values) {
return ContainerHash(values);
}
template <typename T>
hash_t ContainerHash(const T& values) {
hash_t h(static_cast<uint64_t>(0x85ebca77c2b2ae63));
for (const auto& value : values) {
h = HashCombine(h, Hash(value));
}
return h;
}
// Varargs hashing
template <typename T = void>
hash_t MHash() {
return hash_t(static_cast<uint64_t>(0x165667b19e3779f9));
}
template <typename T, typename... Targs>
hash_t MHash(T value, Targs... Fargs) {
return HashCombine(Hash(value), MHash(Fargs...));
}
} // namespace torch::lazy
```
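A hedged sketch showing how the helpers above compose: MHash folds heterogeneous arguments, with containers and optionals dispatching to their dedicated overloads. The argument names are illustrative.

```cpp
#include <cstdint>
#include <optional>
#include <vector>

#include <torch/csrc/lazy/core/hash.h>

torch::lazy::hash_t hash_op_signature(
    int64_t opcode,
    const std::vector<int64_t>& sizes,
    const std::optional<double>& alpha) {
  // Equivalent to HashCombine(Hash(opcode),
  //                           HashCombine(Hash(sizes), MHash(alpha))).
  return torch::lazy::MHash(opcode, sizes, alpha);
}
```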
|
====================================================================================================================================
SOURCE CODE FILE: helpers.h
LINES: 1
SIZE: 2.24 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\helpers.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/Scalar.h>
#include <c10/util/BFloat16.h>
#include <c10/util/Half.h>
#include <torch/csrc/lazy/core/permutation_util.h>
#include <torch/csrc/lazy/core/shape.h>
#include <torch/csrc/lazy/core/util.h>
#include <complex>
#include <functional>
#include <optional>
#include <tuple>
#include <vector>
// TODO: Consolidate this file with util.h
namespace torch::lazy {
// Converts an iterable container to a vector of int64's.
template <typename S>
static std::vector<int64_t> ToI64Vector(const S& input) {
return ToVector<int64_t>(input);
}
// Creates a set of dimensions by dropping the drop_dims ones.
TORCH_API std::vector<int64_t> DropDimensions(
c10::ArrayRef<int64_t> sizes,
c10::ArrayRef<int64_t> drop_dims);
// Get the canonical dimension index in the [0, rank) interval. Negative
// indices are interpreted as follows: -1 is rank-1, -2 is rank-2 etc.
TORCH_API int64_t GetCanonicalDimensionIndex(int64_t dim, int64_t rank);
// Same as above, for multiple dimensions.
TORCH_API std::vector<int64_t> GetCanonicalDimensionIndices(
c10::ArrayRef<int64_t> dimensions,
int64_t rank);
// Returns the canonical position in the dim dimension, handling negative
// values for the position.
TORCH_API int64_t GetCanonicalPosition(
c10::ArrayRef<int64_t> dimensions,
int64_t dim,
int64_t pos);
// Creates a transposition from the given input and dimensions.
TORCH_API std::vector<int64_t> MakeTransposePermutation(
int64_t dim0,
int64_t dim1,
int64_t rank);
// Calculates the promoted shape to which the input shapes should be
// broadcasted for an elementwise operation. The sizes of the common dimensions
// (2,3,4 for shape1, and 0,1,2 for shape2) must either match, or one of the
// two must be 1.
// Example:
// shape1 = [9, 7, 6, 1, 2]
// shape2 = [6, 5, 2]
// result_shape = [9, 7, 6, 5, 2]
TORCH_API std::vector<int64_t> GetPromotedShape(
c10::ArrayRef<int64_t> shape1_dims,
c10::ArrayRef<int64_t> shape2_dims);
TORCH_API Shape
GetPromotedBinaryOpShape(const Shape& shape1, const Shape& shape2);
TORCH_API std::vector<std::string> StrSplit(std::string_view text, char delim);
} // namespace torch::lazy
```
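A quick, hedged check of the broadcasting helper, reusing the exact example given in the comment above.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

#include <torch/csrc/lazy/core/helpers.h>

void promoted_shape_example() {
  std::vector<int64_t> shape1 = {9, 7, 6, 1, 2};
  std::vector<int64_t> shape2 = {6, 5, 2};
  auto result = torch::lazy::GetPromotedShape(shape1, shape2);
  assert((result == std::vector<int64_t>{9, 7, 6, 5, 2}));
}
```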
|
=================================================================================================================================================
SOURCE CODE FILE: ltc_ops.h
LINES: 1
SIZE: 1.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\internal_ops\ltc_ops.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/lazy/core/ir.h>
#include <c10/util/CallOnce.h>
#include <string>
namespace torch::lazy {
class TORCH_API OpKindWrapper {
public:
explicit OpKindWrapper(const char* name) : name_(name) {}
const OpKind& operator*() const {
return get();
}
operator OpKind() const {
return get();
}
private:
const OpKind& get() const {
c10::call_once(once_, [this]() { op_kind_ = OpKind::Get(name_); });
return op_kind_;
}
const char* name_;
mutable OpKind op_kind_;
mutable c10::once_flag once_;
};
const OpKindWrapper ltc_all_to_all("lazy_tensors::all_to_all");
const OpKindWrapper ltc_cast("lazy_tensors::cast");
const OpKindWrapper ltc_collective_permute("lazy_tensors::collective_permute");
const OpKindWrapper ltc_cross_replica_sum("lazy_tensors::cross_replica_sum");
const OpKindWrapper ltc_device_data("lazy_tensors::device_data");
const OpKindWrapper ltc_get_dimensions_size(
"lazy_tensors::ltc_get_dimensions_size");
const OpKindWrapper ltc_moving_average("lazy_tensors::moving_average");
const OpKindWrapper ltc_nms("lazy_tensors::nms");
const OpKindWrapper ltc_not_supported("lazy_tensors::not_supported");
const OpKindWrapper ltc_replication_pad("lazy_tensors::replication_pad");
const OpKindWrapper ltc_replication_pad_backward(
"lazy_tensors::replication_pad_backward");
const OpKindWrapper ltc_tensor_data("lazy_tensors::tensor_data");
} // namespace torch::lazy
```
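A hedged sketch of how these wrappers are consumed: dereferencing (or implicitly converting) an OpKindWrapper resolves and caches the underlying OpKind on first use via c10::call_once.

```cpp
#include <torch/csrc/lazy/core/internal_ops/ltc_ops.h>
#include <torch/csrc/lazy/core/ir.h>

// Returns true when `node` is a device-data node.
bool is_device_data(const torch::lazy::Node& node) {
  return node.op() == *torch::lazy::ltc_device_data;
}
```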
|
===============================================================================================================================
SOURCE CODE FILE: ir.h
LINES: 1
SIZE: 7.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\ir.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/symbol.h>
#include <functional>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include <c10/core/ScalarType.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Flags.h>
#include <torch/csrc/lazy/core/hash.h>
#include <torch/csrc/lazy/core/ir_metadata.h>
#include <torch/csrc/lazy/core/shape.h>
TORCH_DECLARE_bool(ltc_enable_dynamic_shapes);
namespace torch::lazy {
static const hash_t kHashSeed(static_cast<uint32_t>(0x5a2d296e9));
class Node;
struct Output;
struct Value;
using NodePtr = std::shared_ptr<Node>;
// The Kind of operation a Node can be associated to.
struct TORCH_API OpKind {
OpKind() = default;
explicit OpKind(c10::Symbol op) : op(op) {}
bool operator==(const OpKind& rhs) const {
return op == rhs.op;
}
bool operator!=(const OpKind& rhs) const {
return !operator==(rhs);
}
bool operator<(const OpKind& rhs) const {
return c10::unique_t(op) < c10::unique_t(rhs.op);
}
hash_t hash() const;
std::string ToString() const {
return op.toQualString();
}
  // Retrieves an existing operation object, or creates a new one. Operations
  // that are specific to lazy tensors should live within the 'lazy_tensors::'
  // namespace.
static OpKind Get(const std::string& name);
c10::Symbol op;
};
inline std::ostream& operator<<(std::ostream& stream, const OpKind& op) {
stream << op.ToString();
return stream;
}
using OpList = c10::ArrayRef<Value>;
hash_t OperandHashes(
const OpList& operands,
const hash_t& seed,
bool bakeInSizes);
// A node in the graph. Nodes for operations which require extra data to be
// stored for lowering should inherit from this class and add an operation
// specific member there. For example, a constant might create a new
// NodeConstant class (inheriting from Node) with an extra lazy_tensors::Literal
// field, or a tensor value might create a new NodeTensor with a computation
// client data handle in it.
class TORCH_API Node {
public:
static bool enableDynamicShape();
// Creates a new node with the given op name. The op is a unique identifier
// for the operation. The num_outputs tells how many outputs a given operation
// generates.
//
  // A non-leaf node's node_hash does not always contain shape information,
  // so we pass in the hash value rather than a function.
Node(OpKind op, size_t num_outputs);
// Construct node with operands and shapes
Node(
OpKind op,
OpList operands,
std::vector<Shape>&& shapes,
size_t num_outputs = 1);
// Construct node with operands and no shape
Node(OpKind op, OpList operands, size_t num_outputs = 1);
// Construct node with shape and no operands
Node(OpKind op, Shape shape, size_t num_outputs = 1);
virtual ~Node() = default;
const OpKind& op() const {
return op_;
}
size_t num_outputs() const {
return num_outputs_;
}
// Retrieves the full shape of the IR Node.
virtual c10::ArrayRef<Shape> shapes() const;
virtual const Shape& shape(size_t output_index = 0) const;
// Add the shape computed by the shape_fn
void addComputedShape(const std::function<Shape()>& shape_fn);
// Compute the shape using the provided shape_fn if not previously cached
Shape computeShape(const std::function<Shape()>& shape_fn);
virtual const std::vector<Output>& operands() const;
virtual const Output& operand(size_t i) const;
// Gets operand at index i if index is valid, or kNullOutput otherwise.
virtual const Output& nullable_operand(size_t i) const;
// Returns the hash of the dag used to look up the compiled graph
virtual hash_t hash() const = 0;
  // Returns the hash of the dag used for shape caching
virtual hash_t shapeHash() const = 0;
const MetaData& metadata() const {
return metadata_;
}
UserMetaData* user_metadata() const {
return user_metadata_.get();
}
std::shared_ptr<UserMetaData> SetUserMetadata(
std::shared_ptr<UserMetaData> user_meta) {
std::swap(user_metadata_, user_meta);
return user_meta;
}
virtual std::string ToString() const;
private:
// The ID of the operation captured by this node.
OpKind op_;
size_t num_outputs_ = 1;
// The IR specific metadata attached to the IR node.
MetaData metadata_;
// The IR framework user can attach a user defined metadata object deriving
// from UserMetaData.
std::shared_ptr<UserMetaData> user_metadata_;
protected:
  // Adds the output at the given index of the given node as an operand.
void AddOperand(const NodePtr& node, size_t index = 0);
std::vector<Shape> shapes_;
// A node holds a real reference to its operands.
std::vector<NodePtr> operands_;
// Outputs do not hold references on the nodes, and neither do the uses, since
// otherwise we get into circular reference counting.
std::vector<Output> operands_as_outputs_;
};
inline std::ostream& operator<<(std::ostream& stream, const Node& node) {
stream << node.ToString();
return stream;
}
// Note: Keep this version of NodeCast for smooth PyTorch/XLA migration, and
// clean up once the migration is done.
template <typename T>
const T* NodeCast(const Node* node, OpKind op) {
if (op != node->op()) {
return nullptr;
}
#ifdef NDEBUG
return static_cast<const T*>(node);
#else
return &dynamic_cast<const T&>(*node);
#endif
}
template <typename T>
const T* NodeCast(const Node* node) {
if (T::ClassOpKind() != node->op()) {
return nullptr;
}
// TODO: Some IR classes share the same opkind, such as Mean and MeanDim, so
// static_cast is not safe here. Unless we have opkind unique for each class,
// we have to use dynamic_cast here.
return dynamic_cast<const T*>(node);
}
// Represents a specific output produced by a node. Since the output of a node
// can be composed by multiple outputs, the node+index coordinates fully qualify
// each single output.
struct TORCH_API Output {
struct Hasher {
size_t operator()(const Output& output) const;
};
Output() = default;
explicit Output(const Node* node, size_t index = 0)
: node(node), index(index) {}
hash_t hash() const;
hash_t shapeHash() const;
bool operator==(const Output& rhs) const {
return node == rhs.node && index == rhs.index;
}
  // Used to compare the operands of a to-be-constructed node against those of
  // a to-be-reused node
bool operator==(const Value& rhs) const;
bool operator!=(const Output& rhs) const {
return !operator==(rhs);
}
const Shape& shape() const {
return node->shape(index);
}
std::string ToString() const;
// The node providing the output.
const Node* node{nullptr};
// The index in the node's output this output refers to.
size_t index{0};
};
inline std::ostream& operator<<(std::ostream& stream, const Output& output) {
stream << output.ToString();
return stream;
}
template <typename T>
using OutputMap = std::unordered_map<Output, T, Output::Hasher>;
// Represents an input/operand for a Node object.
struct TORCH_API Value {
Value() = default;
/* implicit */ Value(NodePtr&& node, size_t index = 0)
: node(std::move(node)), index(index) {}
/* implicit */ Value(const NodePtr& node, size_t index = 0)
: node(node), index(index) {}
hash_t hash() const;
hash_t shapeHash() const;
operator bool() const {
return node != nullptr;
}
operator Output() const {
return Output(node.get(), index);
}
const Shape& shape() const {
return node->shape(index);
}
Node* operator->() const {
return node.get();
}
NodePtr node;
size_t index = 0;
};
} // namespace torch::lazy
namespace c10 {
// Explicit template instantiation to make ArrayRef<Value> work
template class at::ArrayRef<torch::lazy::Value>;
} // namespace c10
```
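Two small, hedged sketches built only on what this header declares: interning an OpKind, and testing the dynamic type of an Output's producer with NodeCast. Any concrete Node subclass exposing a static ClassOpKind() can be substituted for NodeT.

```cpp
#include <torch/csrc/lazy/core/ir.h>

// OpKind::Get interns the qualified symbol; equal strings yield equal kinds.
torch::lazy::OpKind relu_kind() {
  return torch::lazy::OpKind::Get("aten::relu");
}

// NodeCast returns nullptr when the op kind (and, in debug builds, the
// dynamic type) of the producing node does not match NodeT.
template <typename NodeT>
bool output_is(const torch::lazy::Output& output) {
  return torch::lazy::NodeCast<NodeT>(output.node) != nullptr;
}
```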
|
=======================================================================================================================================
SOURCE CODE FILE: ir_builder.h
LINES: 1
SIZE: 4.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\ir_builder.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/ScalarType.h>
#include <torch/csrc/lazy/backend/backend_interface.h>
#include <torch/csrc/lazy/core/config.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/tensor.h>
#include <torch/csrc/lazy/core/trie.h>
#include <optional>
#include <vector>
// This file is part of the backend interface, so ops shouldn't be added or
// removed without due process. The exception to this is the view ops, which
// will be removed soon pending functionalization.
namespace torch::lazy {
template <typename T, typename... Args>
NodePtr ReuseNode(Args&&... args) {
if (FLAGS_torch_lazy_reuse_ir) {
return LookupNodeFromTrieCache<T>(std::forward<Args>(args)...);
}
return nullptr;
}
// Caching an IR node into TrieCache
static inline void CacheNode(NodePtr node) {
if (FLAGS_torch_lazy_reuse_ir) {
TrieCache::Get()->Insert(std::move(node));
}
}
template <typename T, typename... Args>
NodePtr MakeNode(Args&&... args) {
return std::make_shared<T>(std::forward<Args>(args)...);
}
// op is passed in for a more efficient node casting, see the implementation of
// NodeCast
template <typename T, typename... Args>
NodePtr ReuseOrMakeNode(Args&&... args) {
NodePtr node = ReuseNode<T>(std::forward<Args>(args)...);
if (!node) {
node = MakeNode<T>(std::forward<Args>(args)...);
CacheNode(node);
}
return node;
}
struct IrBuilder {
virtual NodePtr MakeDeviceData(
const std::shared_ptr<BackendData>& data) const = 0;
virtual NodePtr MakeScalar(
const at::Scalar& value,
const at::ScalarType& type) const = 0;
virtual NodePtr MakeExpand(
const Value& input0,
const std::vector<int64_t>& size,
const bool& is_scalar_expand) const = 0;
virtual NodePtr MakeCast(
const Value& input0,
const at::ScalarType& dtype,
const std::optional<at::ScalarType>& stype = std::nullopt) const = 0;
virtual NodePtr MakeTensorList(const OpList& inputs) const = 0;
virtual NodePtr MakeGeneric(
const OpKind& op,
const OpList& operands,
const Shape& shape,
const size_t& num_outputs = 1,
const hash_t& hash_seed = static_cast<uint32_t>(0x5a2d296e9)) const = 0;
// dynamic ir nodes
virtual NodePtr MakeSizeNode(const Value& input, size_t dim) const = 0;
virtual NodePtr MakeSizeAdd(const Value& a, const Value& b) const = 0;
virtual NodePtr MakeSizeMul(const Value& a, const Value& b) const = 0;
virtual NodePtr MakeSizeDiv(const Value& a, const Value& b) const = 0;
virtual ~IrBuilder() = default;
};
static inline NodePtr MakeDeviceData(const std::shared_ptr<BackendData>& data) {
return getIrBuilder()->MakeDeviceData(data);
}
static inline NodePtr MakeScalar(
const at::Scalar& value,
const at::ScalarType& type) {
return getIrBuilder()->MakeScalar(value, type);
}
static inline NodePtr MakeExpand(
const Value& input0,
const std::vector<int64_t>& size,
const bool& is_scalar_expand) {
return getIrBuilder()->MakeExpand(input0, size, is_scalar_expand);
}
static inline NodePtr MakeCast(
const Value& input0,
const at::ScalarType& dtype,
const std::optional<at::ScalarType>& stype = std::nullopt) {
return getIrBuilder()->MakeCast(input0, dtype, stype);
}
static inline NodePtr MakeTensorList(const OpList& inputs) {
return getIrBuilder()->MakeTensorList(inputs);
}
static inline NodePtr MakeGeneric(
const OpKind& op,
const OpList& operands,
const Shape& shape,
const size_t& num_outputs = 1,
const hash_t& hash_seed = static_cast<uint32_t>(0x5a2d296e9)) {
return getIrBuilder()->MakeGeneric(
op, operands, shape, num_outputs, hash_seed);
}
// dynamic ir nodes
static inline NodePtr MakeSizeNode(const Value& input, size_t dim) {
return getIrBuilder()->MakeSizeNode(input, dim);
}
static inline NodePtr MakeSizeAdd(const Value& a, const Value& b) {
return getIrBuilder()->MakeSizeAdd(a, b);
}
static inline NodePtr MakeSizeMul(const Value& a, const Value& b) {
  return getIrBuilder()->MakeSizeMul(a, b);
}
static inline NodePtr MakeSizeDiv(const Value& a, const Value& b) {
return getIrBuilder()->MakeSizeDiv(a, b);
}
inline Value GetSymIntValue(const c10::SymInt& a) {
if (auto ma = a.maybe_as_int()) {
return Value(MakeScalar(*ma, at::kLong), 0);
} else {
return Value(
dynamic_cast<torch::lazy::SymNodeImpl*>(a.toSymNodeImplUnowned())
->node_,
0);
}
}
// TODO: this should return Value
inline std::vector<int64_t> GetSymIntArrayRefValue(c10::SymIntArrayRef arr) {
std::vector<int64_t> r;
for (const auto& a : arr) {
r.emplace_back(a.guard_int(__FILE__, __LINE__));
}
return r;
}
} // namespace torch::lazy
```
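A hedged sketch of the free helpers above, building size(dim) * size(dim) symbolically. It assumes a backend (and therefore an IrBuilder) is registered; whether nodes are actually reused depends on FLAGS_torch_lazy_reuse_ir and on the backend's IrBuilder using ReuseOrMakeNode.

```cpp
#include <torch/csrc/lazy/core/ir_builder.h>

// Builds the symbolic expression size(dim) * size(dim) for `input`.
torch::lazy::Value squared_dim(const torch::lazy::Value& input, size_t dim) {
  torch::lazy::NodePtr size = torch::lazy::MakeSizeNode(input, dim);
  torch::lazy::NodePtr prod = torch::lazy::MakeSizeMul(
      torch::lazy::Value(size), torch::lazy::Value(size));
  return torch::lazy::Value(prod, 0);
}
```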
|
=========================================================================================================================================
SOURCE CODE FILE: ir_dump_util.h
LINES: 1
SIZE: 0.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\ir_dump_util.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/lazy/core/ir.h>
#include <string>
namespace torch::lazy {
class BackendDevice;
class TORCH_API DumpUtil {
public:
static std::string ToDot(c10::ArrayRef<const Node*> nodes);
static std::string PostOrderToDot(
c10::ArrayRef<const Node*> post_order,
c10::ArrayRef<const Node*> roots);
static std::string ToText(c10::ArrayRef<const Node*> nodes);
static std::string PostOrderToText(
c10::ArrayRef<const Node*> post_order,
c10::ArrayRef<const Node*> roots);
static std::string ToBackend(
c10::ArrayRef<Value> values,
const BackendDevice& device);
};
} // namespace torch::lazy
```
|
========================================================================================================================================
SOURCE CODE FILE: ir_metadata.h
LINES: 1
SIZE: 1.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\ir_metadata.h
ENCODING: utf-8
```h
#pragma once
#include <c10/macros/Macros.h>
#include <string>
#include <vector>
namespace torch::lazy {
struct SourceLocation {
std::string file;
std::string function;
int line = -1;
};
TORCH_API void EmitShortFrameInfo(
std::ostream& stream,
const std::vector<SourceLocation>& frames);
TORCH_API std::ostream& operator<<(
std::ostream& stream,
const std::vector<SourceLocation>& frames);
// The base class for user defined metadata which is possible to attach to IR
// nodes.
struct TORCH_API UserMetaData {
virtual ~UserMetaData() = default;
};
struct TORCH_API MetaData {
std::string scope;
std::vector<SourceLocation> frame_info;
};
// TODO(whc) is this going to be used outside of IR decompositions?
// RAII data structure to be used as a stack variable to enter a new IR scope.
// IR scope names will appear in the IR and help identify the source of
// individual IR nodes.
struct TORCH_API ScopePusher {
explicit ScopePusher(const std::string& name);
~ScopePusher();
ScopePusher(ScopePusher&& other) = delete;
ScopePusher(const ScopePusher&) = delete;
ScopePusher& operator=(const ScopePusher&) = delete;
ScopePusher& operator=(ScopePusher&&) = delete;
static void ResetScopes();
};
TORCH_API MetaData GetMetaDataIfDebugging();
} // namespace torch::lazy
```
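A hedged RAII sketch for ScopePusher: while the pusher is alive, newly created IR nodes record the scope name in their metadata (when IR debugging/metadata collection is enabled), which helps attribute nodes in graph dumps.

```cpp
#include <torch/csrc/lazy/core/ir_metadata.h>

void traced_block() {
  torch::lazy::ScopePusher scope("my_module.forward");
  // ... build lazy IR here; nodes created in this block carry the scope name.
} // the scope is popped automatically when `scope` is destroyed
```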
|
====================================================================================================================================
SOURCE CODE FILE: ir_util.h
LINES: 1
SIZE: 1.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\ir_util.h
ENCODING: utf-8
```h
#pragma once
#include <unordered_map>
#include <vector>
#include <torch/csrc/lazy/core/ir.h>
namespace torch::lazy {
class TORCH_API Util {
public:
  // Tracks the emission status of the nodes during the post-order generation.
  // It helps track loops within the computation graphs.
enum EmitStatus {
kNotEmitted,
kEmitting,
kEmitted,
};
using EmissionMap = std::unordered_map<const Node*, EmitStatus>;
// Computes the post order from the given node, without using recursion. The
// emission map can be used as saved state, for multiple separate calls to
// this API. The returned post-order can be empty if the node has already been
// emitted inside the emission map. An error is generated if a loop is
// detected.
static std::vector<const Node*> ComputePostOrder(
const Node* node,
EmissionMap* emap);
static std::vector<const Node*> ComputePostOrder(
c10::ArrayRef<const Node*> nodes,
EmissionMap* emap);
// Same as above, but computes the post order on the set of nodes specified as
// argument.
static std::vector<const Node*> ComputePostOrder(
c10::ArrayRef<const Node*> nodes);
  // Retrieves the number of nodes within the graph whose sinks are passed in
  // the nodes argument.
static size_t GetGraphSize(c10::ArrayRef<const Node*> nodes);
};
} // namespace torch::lazy
```
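A hedged sketch of the post-order utilities; the roots would typically be the nodes behind the IR values held by the tensors being synced.

```cpp
#include <utility>
#include <vector>

#include <torch/csrc/lazy/core/ir_util.h>

// Returns the nodes reachable from `roots` in post order, plus the graph size.
std::pair<std::vector<const torch::lazy::Node*>, size_t> analyze_graph(
    const std::vector<const torch::lazy::Node*>& roots) {
  auto post_order = torch::lazy::Util::ComputePostOrder(roots);
  size_t size = torch::lazy::Util::GetGraphSize(roots);
  return {std::move(post_order), size};
}
```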
|
================================================================================================================================================
SOURCE CODE FILE: lazy_graph_executor.h
LINES: 1
SIZE: 15.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\lazy_graph_executor.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/ArrayRef.h>
#include <torch/csrc/lazy/backend/lowering_context.h>
#include <torch/csrc/lazy/core/cache.h>
#include <torch/csrc/lazy/core/ir_util.h>
#include <torch/csrc/lazy/core/multi_wait.h>
#include <torch/csrc/lazy/core/tensor.h>
#include <torch/csrc/lazy/core/util.h>
namespace torch::lazy {
class TORCH_API LazyGraphExecutor {
public:
struct DeviceDataInfo : public BackendData::Info {
DeviceDataInfo(int64_t tensor_id, bool read_only)
: tensor_id(tensor_id), read_only(read_only) {}
int64_t tensor_id = 0;
bool read_only = false;
};
// Register a lazy graph executor instance that can be retrieved using Get()
static void Register(LazyGraphExecutor*);
static LazyGraphExecutor* Get();
virtual ~LazyGraphExecutor() = default;
  // Override these methods to perform custom tensor registration and
  // unregistration. Note: it is vital that the parent implementations are also
  // called in order for the tensors to show up in the live tensor list.
virtual void RegisterTensor(std::shared_ptr<LazyTensor::Data> data);
virtual void UnregisterTensor(LazyTensor::Data* data);
// Seed for random generator.
// Override to supply your own DeviceContextArena.
virtual Value GetRngSeed(const BackendDevice& device);
virtual uint64_t GetRunningSeed(const BackendDevice& device);
virtual void SetRngSeed(const BackendDevice& device, uint64_t seed);
void DeviceBarrier(const BackendDevice& device);
BackendDataPtr GetDeviceData(
const at::Tensor& tensor,
const BackendDevice& device);
BackendDataPtr GetDeviceData(
const at::Scalar& value,
at::ScalarType scalar_type,
const BackendDevice& device);
// Retrieves the set of lazy tensors which are currently live in the system,
// for the given device. If device is nullptr, the live tensors for all
// devices will be returned. Returned tensors are sorted by device as primary
// key, and by unique ID as secondary key.
std::vector<LazyTensorPtr> GetLiveTensors(const BackendDevice* device);
  // Makes sure that any outstanding IR operations accumulated over live tensors
  // get turned into device data. If wait is true, the sync operation will be
  // run synchronously. The devices argument, if not empty, tells which devices
  // should be participating in the replicated computation.
virtual void SyncLiveTensorsGraph(
const BackendDevice* device,
c10::ArrayRef<std::string> devices,
bool wait);
  // Applies all the pending IR operations queued over the input tensors. All
  // the tensors must be on the same device. If wait is true, the sync operation
  // will be run synchronously. The devices argument, if not empty, tells which
  // devices should be participating in the replicated computation.
void SyncTensorsGraph(
std::vector<LazyTensorPtr>* tensors,
c10::ArrayRef<std::string> devices,
bool wait,
bool sync_ltc_data);
// Marks an execution step, which allows the tensor framework to understand
// the computation boundaries.
// Override to supply your own DeviceContextArena.
virtual void MarkStep(const BackendDevice& device);
// Waits for all the outstanding operations on all the supplied devices.
// If devices is empty, the wait will happen for all local devices.
void WaitDeviceOps(c10::ArrayRef<BackendDevice> devices);
// Retrieves the PyTorch CPU tensors behind the lazy tensors IR operations.
// All the tensors must be on the same device.
std::vector<at::Tensor> GetTensors(std::vector<LazyTensorPtr>* tensors);
size_t IncTrimCounter() const;
  // Dumps the backend-specific text of the computation accumulated in the graph
  // which is attached to the tensors.
std::string DumpBackendComputation(const std::vector<LazyTensorPtr>& tensors);
Value GetDeviceDataIrValue(
const at::Scalar& value,
c10::ScalarType type,
const BackendDevice& device);
Value GetIrValueForScalar(
const at::Scalar& value,
c10::ScalarType type,
const BackendDevice& device);
Value GetIrValueForScalar(
const at::Scalar& value,
const BackendDevice& device);
  // TODO: even though this API is currently used **only** in codegen to
  // generate real scalar IR values vs scalar tensors, we would like to
  // use it in other cases where `GetIrValueForXXXScalar` is used, as well.
  // In order to do that, we need to untangle the cases where we don't need
  // `expand` and where we don't expect a scalar tensor.
Value GetIrValueForScalarFromCodegen(
const at::Scalar& value,
const BackendDevice& device);
Value GetIrValueForExpandedScalar(
const at::Scalar& value,
const Shape& shape,
const BackendDevice& device);
struct CachedComputation {
explicit CachedComputation(ComputationPtr computation)
: computation(std::move(computation)) {}
ComputationPtr computation;
};
using ComputationCache = Cache<hash_t, CachedComputation, HashReducer>;
ComputationCache* GetComputationCache();
hash_t GetGraphHash(const std::vector<LazyTensorPtr>& tensors);
// Clear the computation cache.
void ClearComputationCache();
// Remove a specific computation cache entry from its hash.
void RemoveFromComputationCache(const hash_t& hash);
protected:
// TODO(alanwaketan): Revisit if all of them need to be accessible to
// derived classes.
struct SyncTensorsConfig {
// Whether we want to force data on the target tensors (hence trimming
// the IR graph above them).
bool force_ltc_data = true;
// Whether when setting the data, the other properties of the tensor
// state should be reset.
bool sync_ltc_data = true;
};
struct SyncTensorCollection {
SyncTensorCollection() : hash(0) {}
SyncTensorsConfig config;
std::vector<size_t> indices;
hash_t hash;
std::vector<ExceptionCleanup> unlocker;
BackendDevice device;
};
struct PostOrderData {
std::vector<const Node*> post_order;
Util::EmissionMap emission_map;
std::vector<BackendDataPtr> parameters_data;
std::vector<size_t> parameter_sequence;
};
// Locking:
// We perform two kinds of operations of tensors, synchronous and
// asynchronous. The ApplyPendingGraph() are synchronous, as we need the
// device data result immediately. Before the synchronous operations can
// start, they need to wait that the pending asynchronous operations have
// completed. Synchronous operations do not hold device locks, since they are
// strictly sequential, dictated by the PyTorch execution order. The
// SyncTensorsGraph() is asynchronous, and returns immediately after having
// scheduled the asynchronous operation. While executing, the asynchronous
// operations will hold locks on all the participating devices (in most common
// cases there will be only one device).
// Since asynchronous operations capture device locks, only one asynchronous
// operation can execute at the same time, on a given device. Tensor
// operations which send data to device do not need to hold any device locks
// while doing so. Only operations which _use_ device data (computations, and
// transfer from server) need to wait for asynchronous operations to complete
// (barrier).
class DeviceLocker {
public:
explicit DeviceLocker(BackendDevice device) : device_(std::move(device)) {}
const BackendDevice& device() const {
return device_;
}
void Lock();
void Unlock(std::exception_ptr exptr);
void Barrier();
private:
void CheckResetException();
BackendDevice device_;
std::mutex mutex_;
std::condition_variable cv_;
bool locked_ = false;
std::exception_ptr exptr_;
};
class DeviceLockerArena {
public:
static DeviceLockerArena* Get();
std::shared_ptr<DeviceLocker> GetLocker(const BackendDevice& device);
void DeviceBarrier(const BackendDevice& device);
// Use a set to impose an order on the device locking sequence (ABBA
// prevention).
std::vector<ExceptionCleanup> LockDevices(
const std::set<BackendDevice>& devices);
private:
ExceptionCleanup LockDevice(const BackendDevice& device);
std::mutex mutex_;
std::map<BackendDevice, std::shared_ptr<DeviceLocker>> lockers_;
};
class DataCacheArena {
public:
static DataCacheArena* Get();
BackendDataPtr GetDeviceData(
const at::Tensor& tensor,
const BackendDevice& device);
BackendDataPtr GetDeviceData(
const at::Scalar& value,
at::ScalarType scalar_type,
const BackendDevice& device);
private:
struct TensorHasher {
size_t operator()(const at::Tensor& tensor) const;
};
struct TensorComparer {
bool operator()(const at::Tensor& tensor1, const at::Tensor& tensor2)
const;
};
explicit DataCacheArena(size_t max_cache_size);
using DataCache =
Cache<at::Tensor, BackendData, TensorHasher, TensorComparer>;
DataCache* GetDataCache(const BackendDevice& device);
size_t max_cache_size_ = 0;
std::mutex mutex_;
std::map<BackendDevice, std::unique_ptr<DataCache>> device_caches_;
};
  // The DeviceContextArena holds per-device live information and statistics,
  // including the lazy tensors which are currently alive in the system. This
  // is used to create computation "barriers" in order to flush pending
  // operations and ensure the same computations are created during the
  // training loops.
// TODO(alanwaketan): Add a registry such that we don't need to make all
// related methods virtual.
class DeviceContextArena {
protected:
struct DeviceContext {
std::mutex lock;
std::map<int64_t, std::weak_ptr<LazyTensor::Data>> tensors_data;
uint64_t seed = 101;
uint64_t running_seed = 101;
Value seed_ir_value;
};
public:
static DeviceContextArena* Get();
virtual ~DeviceContextArena() = default;
void RegisterTensor(std::shared_ptr<LazyTensor::Data> data);
void UnregisterTensor(LazyTensor::Data* data);
std::vector<LazyTensorPtr> GetLiveTensors(const BackendDevice* device);
    // Overriding it allows derived classes to use their own IRs for Value.
virtual Value GetRngSeed(const BackendDevice& device);
uint64_t GetRunningSeed(const BackendDevice& device);
void SetRngSeed(const BackendDevice& device, uint64_t seed);
void MarkStep(const BackendDevice& device);
std::vector<BackendDevice> GetActiveDevices();
protected:
DeviceContext* GetDeviceContext(const BackendDevice& device);
void ForAllDeviceContexts(
const std::function<void(DeviceContext*)>& fn,
const BackendDevice* device);
    // Overriding it allows derived classes to use their own conversions.
virtual Value IrValueFromScalar(
const at::Scalar& value,
at::ScalarType scalar_type,
const BackendDevice& device);
private:
std::vector<DeviceContext*> GetAllDeviceContexts();
std::mutex lock_;
std::map<BackendDevice, DeviceContext*> device_contexts_;
};
struct Async {
Async(
SyncTensorCollection* coll,
std::vector<BackendDataPtr> parameters_data,
std::vector<BackendDataPtr> tensors_data,
ComputationCache::TypePtr cached_computation);
virtual ~Async() = default;
void Wait();
MultiWait mwait;
std::vector<size_t> indices;
std::vector<ExceptionCleanup> unlocker;
std::vector<BackendDataPtr> parameters_data;
BackendDevice device;
ComputationCache::TypePtr cached_computation;
std::vector<BackendDataPtr> tensors_data;
};
void ResetTrimCounter() const;
  // Waits for this SyncTensorCollection's device barrier and acquires the lock.
virtual void TensorCollectionBarrier(SyncTensorCollection* coll);
  // Override this to insert your own profiler.
virtual PostOrderData RunPostOrder(
const std::vector<Value>& ir_values,
SyncTensorCollection* coll);
private:
struct CompilationResult {
BackendDevice device;
size_t emitted_nodes = 0;
ComputationPtr computation;
std::vector<BackendDataPtr> parameters_data;
};
virtual bool ShouldSyncTensor(const LazyTensorPtr& tensor) const;
SyncTensorCollection CollectSyncTensors(
const std::vector<LazyTensorPtr>& tensors,
const SyncTensorsConfig& config);
std::vector<Value> CollectRoots(
const std::vector<LazyTensorPtr>& tensors,
c10::ArrayRef<size_t> indices);
std::vector<BackendDataPtr> SetTensorData(
std::vector<LazyTensorPtr>* tensors,
const SyncTensorsConfig& config,
c10::ArrayRef<size_t> indices,
const std::vector<torch::lazy::BackendDataPtr>& tensor_data_vec);
void ExtractIRAndPrepareTensorData(
std::vector<LazyTensorPtr>* tensors,
const SyncTensorsConfig& config,
c10::ArrayRef<size_t> indices,
std::vector<Value>& ir_values,
std::vector<BackendDataPtr>& tensor_data_vec);
std::shared_ptr<Async> TryRunCachedSync(
std::vector<LazyTensorPtr>* tensors,
SyncTensorCollection* coll,
PostOrderData* po_data,
const std::vector<BackendDataPtr>& tensor_data_vec);
CompilationResult Compile(
const std::vector<LazyTensorPtr>& tensors,
c10::ArrayRef<std::string> devices,
const SyncTensorCollection& coll,
PostOrderData* po_data,
const std::vector<Value>& ir_values);
ComputationCache::TypePtr LookupCachedCompile(const hash_t& hash);
std::shared_ptr<Async> SyncTensorsGraphInternal(
std::vector<LazyTensorPtr>* tensors,
c10::ArrayRef<std::string> devices,
const SyncTensorsConfig& config);
  // Schedules the execution of a sync tensors operation in the background. The
  // asynchronous operation will hold the device locks by capturing the ones
  // present within the coll structure.
std::shared_ptr<Async> ScheduleSyncTensorsGraph(
SyncTensorCollection* coll,
std::vector<BackendDataPtr> parameters_data,
std::vector<BackendDataPtr> tensors_data,
ComputationCache::TypePtr cached_computation);
std::shared_ptr<Async> ScheduleSyncTensorsGraph(
std::vector<LazyTensorPtr>* tensors,
SyncTensorCollection* coll,
std::vector<BackendDataPtr> parameters_data,
ComputationCache::TypePtr cached_computation,
const std::vector<BackendDataPtr>& tensor_data_vec);
std::vector<at::Tensor> GetTensorsFused(std::vector<LazyTensorPtr>* tensors);
std::vector<at::Tensor> FetchTensors(
std::vector<LazyTensorPtr>* tensors,
c10::ArrayRef<BackendDataPtr> tensors_data,
const std::vector<size_t>* indices);
// Gathers the device data for all the input tensors, after an
// asynchronous operation.
std::vector<BackendDataPtr> GatherTensorsData(
const std::vector<LazyTensorPtr>& tensors,
c10::ArrayRef<size_t> indices,
c10::ArrayRef<BackendDataPtr> tensors_data);
};
} // namespace torch::lazy
```
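A hedged sketch of the usual end-of-step interaction with the executor, assuming a backend is registered and `device` is one of its devices; the function name is illustrative.

```cpp
#include <torch/csrc/lazy/core/lazy_graph_executor.h>

void end_of_step(const torch::lazy::BackendDevice& device) {
  auto* executor = torch::lazy::LazyGraphExecutor::Get();
  // Flush pending IR for all live tensors on `device` and wait for completion.
  executor->SyncLiveTensorsGraph(&device, /*devices=*/{}, /*wait=*/true);
  // Mark the step boundary so per-device bookkeeping (e.g. seeds) advances.
  executor->MarkStep(device);
}
```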
|
====================================================================================================================================
SOURCE CODE FILE: metrics.h
LINES: 1
SIZE: 8.30 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\metrics.h
ENCODING: utf-8
```h
/**
* This file is adapted from PyTorch/XLA
* https://github.com/pytorch/xla/blob/master/third_party/xla_client/metrics.h
*/
#pragma once
#include <atomic>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include <c10/macros/Export.h>
namespace torch::lazy {
struct TORCH_API Sample {
Sample() = default;
Sample(int64_t timestamp_ns, double value)
: timestamp_ns(timestamp_ns), value(value) {}
int64_t timestamp_ns = 0;
double value = 0;
};
using MetricReprFn = std::function<std::string(double)>;
// Class used to collect time-stamped numeric samples. The samples are stored in
// a circular buffer whose size can be configured at constructor time.
class TORCH_API MetricData {
public:
  // Creates a new MetricData object with the internal circular buffer storing
  // max_samples samples. The repr_fn argument allows specifying a function
  // which pretty-prints a sample value.
MetricData(MetricReprFn repr_fn, size_t max_samples);
  // Returns the sum of all the sample values posted to this metric.
double Accumulator() const;
size_t TotalSamples() const;
void AddSample(int64_t timestamp_ns, double value);
  // Returns a vector with all the current samples, from the oldest to the
  // newest. If accumulator is not nullptr, it will receive the current value of
  // the metric's accumulator (the sum of all posted values). If total_samples
  // is not nullptr, it will receive the count of the posted values.
std::vector<Sample> Samples(double* accumulator, size_t* total_samples) const;
std::string Repr(double value) const {
return repr_fn_(value);
}
void Reset();
bool IsValid() const {
return TotalSamples() > 0;
}
private:
mutable std::mutex lock_;
MetricReprFn repr_fn_;
size_t count_ = 0;
std::vector<Sample> samples_;
double accumulator_ = 0.0;
};
// Counters are a very lightweight form of metrics which do not need to track
// sample time.
class TORCH_API CounterData {
public:
CounterData() : value_(0) {}
void AddValue(int64_t value) {
value_ += value;
}
int64_t Value() const {
return value_;
}
void Reset() {
value_ = 0;
}
bool IsValid() const {
return value_ > 0;
}
private:
std::atomic<int64_t> value_;
};
class TORCH_API MetricsArena {
public:
static MetricsArena* Get();
void ResetCounters();
void ResetMetrics();
// Registers a new metric in the global arena.
void RegisterMetric(
const std::string& name,
MetricReprFn repr_fn,
size_t max_samples,
std::shared_ptr<MetricData>* data);
void RegisterCounter(
const std::string& name,
std::shared_ptr<CounterData>* data);
void ForEachMetric(
const std::function<void(const std::string&, MetricData*)>& metric_func);
void ForEachCounter(
const std::function<void(const std::string&, CounterData*)>&
counter_func);
std::vector<std::string> GetMetricNames();
MetricData* GetMetric(const std::string& name);
std::vector<std::string> GetCounterNames();
CounterData* GetCounter(const std::string& name);
private:
std::mutex lock_;
std::map<std::string, std::shared_ptr<MetricData>> metrics_;
std::map<std::string, std::shared_ptr<CounterData>> counters_;
};
// Emits the value in a to_string() conversion.
TORCH_API std::string MetricFnValue(double value);
// Emits the value in a humanized bytes representation.
TORCH_API std::string MetricFnBytes(double value);
// Emits the value in a humanized time representation. The value is expressed in
// nanoseconds EPOCH time.
TORCH_API std::string MetricFnTime(double value);
// The typical use of a Metric is one in which it gets created either in a
// global scope context:
// static Metric* metric = new Metric("RpcCount");
// Or within a function scope:
// void MyFunction(...) {
// static Metric* metric = new Metric("RpcCount");
// ...
// metric->AddSample(ts_nanos, some_value);
// }
class TORCH_API Metric {
public:
explicit Metric(
std::string name,
MetricReprFn repr_fn = MetricFnValue,
size_t max_samples = 0);
const std::string& Name() const {
return name_;
}
double Accumulator() const;
void AddSample(int64_t timestamp_ns, double value);
void AddSample(double value);
std::vector<Sample> Samples(double* accumulator, size_t* total_samples) const;
std::string Repr(double value) const;
private:
MetricData* GetData() const;
std::string name_;
MetricReprFn repr_fn_;
size_t max_samples_;
mutable std::shared_ptr<MetricData> data_ptr_;
mutable std::atomic<MetricData*> data_;
};
// A Counter is a lightweight form of metric which tracks an integer value which
// can increase or decrease.
// A typical use is as:
// static Counter* counter = new Counter("MyCounter");
// ...
// counter->AddValue(+1);
class TORCH_API Counter {
public:
explicit Counter(std::string name);
void AddValue(int64_t value) {
GetData()->AddValue(value);
}
int64_t Value() const {
return GetData()->Value();
}
private:
CounterData* GetData() const;
std::string name_;
mutable std::shared_ptr<CounterData> data_ptr_;
mutable std::atomic<CounterData*> data_;
};
#define TORCH_LAZY_COUNTER(name, value) \
do { \
static ::torch::lazy::Counter* __counter = \
new ::torch::lazy::Counter(name); \
__counter->AddValue(value); \
} while (0)
#define TORCH_LAZY_FN_COUNTER(ns) TORCH_LAZY_COUNTER(c10::str(ns, __func__), 1)
#define TORCH_LAZY_VALUE_METRIC(name, value) \
do { \
static ::torch::lazy::Metric* __metric = \
new ::torch::lazy::Metric(name, torch::lazy::MetricFnValue); \
__metric->AddSample(value); \
} while (0)
// Creates a report with the current metrics statistics.
TORCH_API std::string CreateMetricReport();
// Creates a report with the selected metrics statistics.
TORCH_API std::string CreateMetricReport(
const std::vector<std::string>& counter_names,
const std::vector<std::string>& metric_names);
// Returns the currently registered metric names. Note that the list can grow
// since metrics are usually function initialized (they are static function
// variables).
TORCH_API std::vector<std::string> GetMetricNames();
// Retrieves the metric data of a given metric, or nullptr if such metric does
// not exist.
TORCH_API MetricData* GetMetric(const std::string& name);
// Returns the currently registered counter names. Note that the list can grow
// since counters are usually function initialized (they are static function
// variables).
TORCH_API std::vector<std::string> GetCounterNames();
// Retrieves the counter data of a given counter, or nullptr if such counter
// does not exist.
TORCH_API CounterData* GetCounter(const std::string& name);
// Retrieves the current EPOCH time in nanoseconds.
TORCH_API int64_t NowNs();
// Scope-based utility class used to measure the time the code takes within a
// given C++ scope.
class TORCH_API TimedSection {
public:
explicit TimedSection(Metric* metric) : metric_(metric), start_(NowNs()) {}
TimedSection(TimedSection&& other) = delete;
TimedSection(const TimedSection&) = delete;
TimedSection& operator=(const TimedSection&) = delete;
TimedSection& operator=(TimedSection&&) = delete;
~TimedSection() {
int64_t now = NowNs();
metric_->AddSample(now, static_cast<double>(now - start_));
}
double Elapsed() const {
return 1e-9 * static_cast<double>(NowNs() - start_);
}
private:
Metric* metric_;
int64_t start_;
};
#define TORCH_LAZY_TIMED(name) \
static torch::lazy::Metric* timed_metric = \
new torch::lazy::Metric(name, torch::lazy::MetricFnTime); \
torch::lazy::TimedSection timed_section(timed_metric)
#define TORCH_LAZY_FN_COUNTER_TIMED_TRACING(ns) \
TORCH_LAZY_FN_COUNTER(ns); \
TORCH_LAZY_TIMED("LazyTracing")
} // namespace torch::lazy
```
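The comments above already sketch the intended usage pattern; the following is a minimal, hypothetical illustration of it (the function names `MyRpc`/`DumpStats` and the metric names are made up), assuming only that the `torch::lazy` headers are on the include path.
```cpp
#include <torch/csrc/lazy/core/metrics.h>

#include <iostream>

// Hypothetical instrumented function: one counter plus one timing metric,
// both created once as function-static pointers, as the header suggests.
void MyRpc() {
  static torch::lazy::Counter* rpc_counter =
      new torch::lazy::Counter("RpcCount");
  static torch::lazy::Metric* rpc_time =
      new torch::lazy::Metric("RpcTime", torch::lazy::MetricFnTime);
  rpc_counter->AddValue(1);
  // TimedSection posts (now - start) nanoseconds to rpc_time when this scope
  // exits, so everything below is measured.
  torch::lazy::TimedSection timed(rpc_time);
  // ... actual work ...
}

// Renders every registered counter and metric as a human-readable report.
void DumpStats() {
  std::cout << torch::lazy::CreateMetricReport() << std::endl;
}
```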
|
=======================================================================================================================================
SOURCE CODE FILE: multi_wait.h
LINES: 1
SIZE: 1.73 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\multi_wait.h
ENCODING: utf-8
```h
/**
* This file is adapted from PyTorch/XLA
* https://github.com/pytorch/xla/blob/master/third_party/xla_client/multi_wait.h
*/
#pragma once
#include <condition_variable>
#include <exception>
#include <functional>
#include <memory>
#include <mutex>
#include <c10/macros/Export.h>
namespace torch::lazy {
// Support waiting for a number of tasks to complete.
class TORCH_API MultiWait {
public:
explicit MultiWait(size_t count) : count_(count) {}
// Signal the completion of a single task.
void Done();
  // Waits until at least count (passed as constructor value) completions have
  // happened.
void Wait();
// Same as above, but waits up to wait_seconds.
void Wait(double wait_seconds);
// Resets the threshold counter for the MultiWait object. The completed count
// is also reset to zero.
void Reset(size_t count);
  // Creates a completer functor which signals the multi wait object once func
  // has completed. Handles exceptions by signaling the multi wait with the
  // proper status value. This API returns a function which captures a MultiWait
  // reference, so care must be taken such that the reference remains valid for
  // the whole lifetime of the returned function.
std::function<void()> Completer(std::function<void()> func);
  // Similar to the above API, but with explicit capture of the MultiWait shared
  // pointer.
static std::function<void()> Completer(
std::shared_ptr<MultiWait> mwait,
std::function<void()> func);
private:
void Complete(const std::function<void()>& func);
std::mutex mutex_;
std::condition_variable cv_;
size_t count_ = 0;
size_t completed_count_ = 0;
std::exception_ptr exptr_;
};
} // namespace torch::lazy
```
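A brief sketch of how `MultiWait` and `Completer` might be combined; `RunTasks` is a hypothetical helper (not part of the header), and a plain `std::thread` per task stands in for whatever scheduler the caller actually uses.
```cpp
#include <torch/csrc/lazy/core/multi_wait.h>

#include <functional>
#include <memory>
#include <thread>
#include <vector>

// Runs every task on its own thread and blocks until all have completed.
void RunTasks(const std::vector<std::function<void()>>& tasks) {
  auto mwait = std::make_shared<torch::lazy::MultiWait>(tasks.size());
  std::vector<std::thread> workers;
  workers.reserve(tasks.size());
  for (const auto& task : tasks) {
    // The completer signals mwait once task has run (or thrown).
    workers.emplace_back(torch::lazy::MultiWait::Completer(mwait, task));
  }
  mwait->Wait(); // Returns once tasks.size() completions have been signaled.
  for (auto& worker : workers) {
    worker.join();
  }
}
```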
|
==================================================================================================================================================
SOURCE CODE FILE: arithmetic_ir_ops.h
LINES: 1
SIZE: 0.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\ops\arithmetic_ir_ops.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/lazy/core/ir.h>
namespace torch::lazy {
TORCH_API NodePtr operator+(const Value& node1, const Value& node2);
TORCH_API NodePtr operator-(const Value& node1, const Value& node2);
TORCH_API NodePtr operator*(const Value& node1, const Value& node2);
TORCH_API NodePtr operator/(const Value& node1, const Value& node2);
} // namespace torch::lazy
```
|
======================================================================================================================================
SOURCE CODE FILE: utils.h
LINES: 1
SIZE: 1.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\ops\utils.h
ENCODING: utf-8
```h
#pragma once
#include <vector>
#include <torch/csrc/lazy/core/tensor_util.h>
#include <torch/csrc/lazy/core/util.h>
namespace torch::lazy {
TORCH_API bool StrideIsSupported(c10::ArrayRef<int64_t> stride);
TORCH_API std::vector<int64_t> GetArrayStridePermutation(
c10::ArrayRef<int64_t> stride);
TORCH_API Shape MakeDiagonalShape(
const Shape& shape,
int64_t offset,
int64_t dim1,
int64_t dim2);
TORCH_API Shape
MakePermuteShape(const Shape& source_shape, c10::ArrayRef<int64_t> permutation);
TORCH_API Shape MakeSelectShape(
const Shape& shape,
int64_t dim,
int64_t start,
int64_t end,
int64_t stride);
TORCH_API int64_t GetStride(int64_t start, int64_t end, int64_t stride);
TORCH_API std::vector<int64_t> BuildSqueezedDimensions(
c10::ArrayRef<int64_t> dimensions,
int64_t squeeze_dim);
TORCH_API std::vector<int64_t> BuildUnsqueezedDimensions(
c10::ArrayRef<int64_t> dimensions,
int64_t squeeze_dim);
} // namespace torch::lazy
```
|
=============================================================================================================================================
SOURCE CODE FILE: permutation_util.h
LINES: 1
SIZE: 1.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\permutation_util.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>
#include <vector>
namespace torch::lazy {
TORCH_API std::vector<int64_t> InversePermutation(
c10::ArrayRef<int64_t> input_permutation);
TORCH_API bool IsPermutation(c10::ArrayRef<int64_t> permutation);
// Gathers the input using the order specified by the permutation. For each i,
// output[i] = dimensions[permutation[i]]. The given permutation must be the
// same size as the input.
template <typename Container>
std::vector<typename Container::value_type> PermuteDimensions(
c10::ArrayRef<int64_t> permutation,
const Container& dimensions) {
using T = typename Container::value_type;
TORCH_CHECK(
dimensions.size() == permutation.size(),
"Invalid permutation specified. dimensions.size() != permutation.size() (",
dimensions.size(),
" vs. ",
permutation.size(),
")");
TORCH_CHECK(
IsPermutation(permutation),
"Invalid permutation specified. Permutation is not permutation");
std::vector<T> output(dimensions.size());
for (const auto i : c10::irange(permutation.size())) {
output[i] = dimensions[permutation[i]];
}
return output;
}
} // namespace torch::lazy
```
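A small, hypothetical usage sketch of `PermuteDimensions` and `InversePermutation` (the function and variable names are illustrative only), spelling out the `output[i] = dimensions[permutation[i]]` rule from the comment above.
```cpp
#include <torch/csrc/lazy/core/permutation_util.h>

#include <vector>

void PermuteExample() {
  std::vector<int64_t> dims = {2, 3, 5};
  std::vector<int64_t> perm = {2, 0, 1}; // move the last dimension first
  // output[i] = dims[perm[i]]  =>  {5, 2, 3}
  auto permuted = torch::lazy::PermuteDimensions(perm, dims);
  // InversePermutation(perm) == {1, 2, 0}, the permutation that undoes perm.
  auto inverse = torch::lazy::InversePermutation(perm);
  auto restored = torch::lazy::PermuteDimensions(inverse, permuted); // {2, 3, 5}
  (void)restored;
}
```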
|
==================================================================================================================================
SOURCE CODE FILE: shape.h
LINES: 1
SIZE: 2.05 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\shape.h
ENCODING: utf-8
```h
#pragma once
#include <ostream>
#include <vector>
#include <c10/core/Scalar.h>
#include <torch/csrc/jit/passes/symbolic_shape_analysis.h>
#include <torch/csrc/lazy/core/hash.h>
TORCH_DECLARE_bool(ltc_enable_symbolic_shapes);
namespace torch::lazy {
class TORCH_API Shape {
public:
Shape() = default;
Shape(
at::ScalarType scalar_type,
c10::ArrayRef<int64_t> sizes,
std::optional<std::vector<bool>> is_symbolic = std::nullopt);
std::string to_string() const;
c10::ScalarType scalar_type() const {
return scalar_type_;
}
void set_scalar_type(at::ScalarType value) {
scalar_type_ = value;
}
int64_t dim() const {
return static_cast<int64_t>(sizes_.size());
}
c10::ArrayRef<int64_t> sizes() const {
return sizes_;
}
int64_t size(int64_t dim) const {
return sizes_.at(dim);
}
void set_size(int64_t dim, int64_t size) {
sizes_.at(dim) = size;
}
const std::optional<std::vector<bool>>& is_symbolic() const {
return is_symbolic_;
}
// Makes a copy with symbolic dims applied
Shape with_symbolic_dims(
std::optional<std::vector<bool>> symbolic_dims) const;
size_t numel() const;
hash_t hash(bool bakeInSizes) const;
bool operator==(const Shape& other) const;
private:
c10::ScalarType scalar_type_{c10::ScalarType::Undefined};
// Sizes are the upper bound sizes for a tensor, used by XLA.
std::vector<int64_t> sizes_;
  // Stores which dimensions are symbolic.
  // If nullopt, either it hasn't been initialized or the symbolic
  // dimensions are not calculable.
std::optional<std::vector<bool>> is_symbolic_ = std::nullopt;
};
TORCH_API std::ostream& operator<<(std::ostream& out, const Shape& shape);
TORCH_API bool symbolicShapeEnabled();
// Calculates and applies symbolic shapes onto the
// Shape objects passed to result_shapes.
TORCH_API void applySymbolicShapesOnLT(
const char* schema_str,
std::vector<c10::IValue> args,
std::vector<Shape>& result_shapes);
} // namespace torch::lazy
```
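A short, hypothetical sketch of constructing and querying a `Shape`; the printed format shown in the comment is indicative only.
```cpp
#include <torch/csrc/lazy/core/shape.h>

#include <iostream>

void ShapeExample() {
  torch::lazy::Shape shape(c10::ScalarType::Float, {2, 3, 4});
  std::cout << shape.to_string() << "\n"; // e.g. "Float[2,3,4]"
  auto rows = shape.size(0);     // 2
  auto elements = shape.numel(); // 2 * 3 * 4 = 24
  (void)rows;
  (void)elements;
}
```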
|
============================================================================================================================================
SOURCE CODE FILE: shape_inference.h
LINES: 1
SIZE: 15.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\shape_inference.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Tensor.h>
#include <c10/core/ScalarType.h>
#include <c10/core/SymInt.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/core/SymNodeImpl.h>
#include <c10/macros/Export.h>
#include <torch/csrc/lazy/backend/backend_data.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/shape.h>
#include <torch/csrc/lazy/core/tensor.h>
#include <optional>
#include <vector>
namespace torch::lazy {
// Turn clang-format off, as we rely on the whole signature being on one line
// for codegen.
// clang-format off
TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool2d(const at::Tensor & self, at::IntArrayRef output_size);
TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d(const at::Tensor & self, at::IntArrayRef output_size);
TORCH_API std::vector<torch::lazy::Shape> compute_shape__adaptive_avg_pool3d_backward(const at::Tensor & grad_output, const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_abs(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_arange_out(const at::Scalar & start, const at::Scalar & end, const at::Scalar & step, at::Tensor & out);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, ::std::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_bernoulli(const at::Tensor & self, double p, ::std::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_binary_cross_entropy_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_cat(at::TensorList tensors, int64_t dim);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_cholesky(const at::Tensor & self, bool upper);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_clamp_min(const at::Tensor & self, const at::Scalar & min);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_clone(const at::Tensor & self, ::std::optional<at::MemoryFormat> memory_format);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_constant_pad_nd(const at::Tensor & self, at::IntArrayRef pad, const at::Scalar & value);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_convolution_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalIntArrayRef bias_sizes, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, int64_t groups, ::std::array<bool,3> output_mask);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_embedding(const at::Tensor & weight, const at::Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_embedding_dense_backward(const at::Tensor & grad_output, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_expand(const at::Tensor & self, at::IntArrayRef size, bool implicit);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_expand(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_flip(const at::Tensor & self, at::IntArrayRef dims);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_glu_backward(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_grid_sampler_2d(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_grid_sampler_2d_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_index_select(const at::Tensor & self, int64_t dim, const at::Tensor & index);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_inverse(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_isnan(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_log_sigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_log_sigmoid_forward(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_logdet(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_and(const at::Tensor & self, const at::Tensor & other);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_not(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_or(const at::Tensor & self, const at::Tensor & other);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_logical_xor(const at::Tensor & self, const at::Tensor & other);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_masked_fill(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_max(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_mean(const at::Tensor & self, ::std::optional<at::ScalarType> dtype);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_min(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_mv(const at::Tensor & self, const at::Tensor & vec);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_batch_norm(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, bool training, double momentum, double eps);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_dropout(const at::Tensor & input, double p, ::std::optional<bool> train);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_dropout_backward(const at::Tensor & grad_output, const at::Tensor & mask, double scale);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, double eps);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_native_layer_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, at::IntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_new_empty_strided(const at::Tensor & self, at::IntArrayRef size, at::IntArrayRef stride, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index, const at::Tensor & total_weight);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_nll_loss2d_forward(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, int64_t reduction, int64_t ignore_index);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_nonzero(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_normal_functional(const at::Tensor & self, double mean, double std, ::std::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, ::std::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t to, ::std::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_random(const at::Tensor & self, int64_t from, ::std::optional<int64_t> to, ::std::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_relu(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_repeat(const at::Tensor & self, at::IntArrayRef repeats);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_slogdet(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_sort(const at::Tensor & self, int64_t dim, bool descending);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_stack(at::TensorList tensors, int64_t dim);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_std(const at::Tensor & self, bool unbiased);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_std(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_sum(const at::Tensor & self, ::std::optional<at::ScalarType> dtype);
TORCH_API std::vector<torch::lazy::Shape> compute_shape__to_copy(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, bool non_blocking, ::std::optional<at::MemoryFormat> memory_format);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_take(const at::Tensor & self, const at::Tensor & index);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_trace(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_zero(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_narrow_copy_symint(const at::Tensor & self, int64_t dim, int64_t start, c10::SymInt length);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_hardswish(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_hardswish_backward(const at::Tensor & grad_output, const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_selu(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_uniform(const at::Tensor & self, double from, double to, ::std::optional<at::Generator> generator);
// Non-Native ops
TORCH_API std::vector<Shape> compute_shape_scalar(const at::Scalar& value, const at::ScalarType& type);
TORCH_API std::vector<Shape> compute_shape_expand(const Output& input0, const std::vector<int64_t>& size, const bool& is_scalar_expand);
TORCH_API std::vector<Shape> compute_shape_view(const Output& input0, const std::vector<int64_t>& output_sizes);
TORCH_API std::vector<Shape> compute_shape_cast(const Output& input0, const at::ScalarType& dtype, const ::std::optional<at::ScalarType>& stype);
// View Ops
// (Now that functionalization pass is used, we should kill these in a later PR)
TORCH_API std::vector<Shape> compute_shape_as_strided_view_update(const Output& target, const Output& input, const std::vector<int64_t>& size, const std::vector<int64_t>& stride, const int64_t& storage_offset);
TORCH_API std::vector<Shape> compute_shape_as_strided(const Output& input, const std::vector<int64_t>& size, const std::vector<int64_t>& stride, const int64_t& storage_offset);
TORCH_API std::vector<Shape> compute_shape_diagonal_view_update(const Output& target, const Output& input, const int64_t& offset, const int64_t& dim1, const int64_t& dim2);
TORCH_API std::vector<Shape> compute_shape_diagonal(const Output& input, const int64_t& offset, const int64_t& dim1, const int64_t& dim2);
TORCH_API std::vector<Shape> compute_shape_narrow_view_update(const Output& input, const Output& source, const std::vector<int64_t>& base_indices);
TORCH_API std::vector<Shape> compute_shape_narrow(const Output& input, const std::vector<int64_t>& base_indices, const std::vector<int64_t>& sizes);
TORCH_API std::vector<Shape> compute_shape_permute(const Output& input, const std::vector<int64_t>& dims);
TORCH_API std::vector<Shape> compute_shape_resize(const Output& input, const std::vector<int64_t>& size);
TORCH_API std::vector<Shape> compute_shape_select_view_update(const Output& target, const Output& source, const int64_t& dim, const int64_t& start, const int64_t& end, const int64_t& stride);
TORCH_API std::vector<Shape> compute_shape_select(const Output& input, const int64_t& dim, const int64_t& start, const int64_t& end, const int64_t& stride);
TORCH_API std::vector<Shape> compute_shape_squeeze(const Output& input, const int& dim);
TORCH_API std::vector<Shape> compute_shape_unsqueeze(const Output& input, const int& dim);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_select_scatter(const at::Tensor & self, const at::Tensor & src, int64_t dim, int64_t index);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_diagonal_scatter(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_slice_scatter_symint(const at::Tensor & self, const at::Tensor & src, int64_t dim, ::std::optional<c10::SymInt> start, ::std::optional<c10::SymInt> end, c10::SymInt step);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_as_strided_scatter_symint(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, ::std::optional<c10::SymInt> storage_offset);
// clang-format on
} // namespace torch::lazy
```
|
===================================================================================================================================
SOURCE CODE FILE: tensor.h
LINES: 1
SIZE: 9.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\tensor.h
ENCODING: utf-8
```h
#pragma once
#include <c10/core/SymNodeImpl.h>
#include <c10/util/intrusive_ptr.h>
#include <torch/csrc/lazy/backend/backend_data.h>
#include <torch/csrc/lazy/backend/backend_device.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/util.h>
namespace torch::lazy {
class TORCH_API SymNodeImpl : public c10::SymNodeImpl {
public:
SymNodeImpl(NodePtr ptr) : node_(std::move(ptr)) {}
NodePtr node_;
};
class LazyTensor;
using LazyTensorPtr = c10::intrusive_ptr<LazyTensor>;
class TORCH_API LazyTensor : public c10::intrusive_ptr_target {
public:
// This is the core lazy tensor data structure where all the tensor data is
// held. The lazy tensor is nothing more than a shared pointer to a Data
// object.
struct Data {
Data(BackendDataPtr handle, BackendDevice device)
: handle(std::move(handle)),
device(std::move(device)),
unique_id(GetNextTensorId()) {}
Data(Value ir_value, BackendDevice device)
: ir_value(std::move(ir_value)),
device(std::move(device)),
unique_id(GetNextTensorId()) {}
Data(at::Tensor tensor_data, BackendDevice device)
: tensor_data(std::move(tensor_data)),
device(std::move(device)),
unique_id(GetNextTensorId()) {}
// TODO(alanwaketan): Remove this ctor. This is a
// temporary ctor to ease XLA LTC migration. It depends on
// XLA's Functionalization integration.
Data(BackendDevice device)
: device(std::move(device)), unique_id(GetNextTensorId()) {}
Data(Data&& other) = delete;
Data(const Data&) = delete;
Data& operator=(const Data&) = delete;
Data& operator=(Data&&) = delete;
virtual ~Data();
BackendDataPtr handle;
Value ir_value;
std::optional<at::Tensor> tensor_data;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const BackendDevice device;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const int64_t unique_id = 0;
size_t generation = 1;
};
static LazyTensorPtr Create(
const at::Tensor& tensor,
const BackendDevice& device);
static LazyTensorPtr Create(Value ir_value, const BackendDevice& device);
static LazyTensorPtr Create(const BackendDataPtr& handle);
static LazyTensorPtr Create(std::shared_ptr<Data> data);
// The default ctor previously created a null LazyTensor (one with no 'data'
// obj). Creating a null LazyTensor is no longer possible, since the same can
// be achieved by creating a null LazyTensorPtr and it is way too confusing to
// have to check both lazy_tensor_ptr && *lazy_tensor_ptr, so everywhere that
// used to rely on a LazyTensor obj with a null Data can now rely on a null
// LazyTensorPtr instead.
LazyTensor() = delete;
LazyTensor(const LazyTensor&) = default;
LazyTensor(LazyTensor&&) noexcept = default;
LazyTensor& operator=(const LazyTensor&) = default;
LazyTensor& operator=(LazyTensor&&) noexcept = default;
~LazyTensor() override = default;
size_t generation() const {
return data()->generation;
}
// Override it to use your own Shape.
virtual int64_t size(int64_t dim) const;
// Override it to use your own graph executor.
virtual at::Tensor ToTensor(bool detached);
void ShallowCopyTo(const LazyTensorPtr& dest) const;
// Assigns the tensor value to the lazy tensor.
void SetTensor(at::Tensor tensor);
void UpdateFromTensor(const at::Tensor& tensor, bool sync);
void UpdateFromTensorOut(const at::Tensor& tensor);
void UpdateFromTensorOut(const LazyTensorPtr& tensor);
const std::shared_ptr<Data>& data() const;
// Override it to use your own type conversion.
virtual at::ScalarType dtype() const;
MaybeRef<Shape> shape() const;
const BackendDevice& GetDevice() const;
int64_t GetUniqueId() const;
// Fetches the data behind the tensor. If the tensor has a graph defining
// its current value, executes the graph and fetches the data result.
BackendDataPtr GetDataHandle();
// Fetches the current value of the data, which can be missing (nullptr)
  // in case the tensor has a graph defining its current value.
BackendDataPtr CurrentDataHandle() const;
void SetDataHandle(BackendDataPtr handle);
void SetDataHandle(BackendDataPtr handle, bool sync);
// Retrieves the current IR Node, or nullptr in case no active IR Node is
// available.
Value CurrentIrValue() const;
// Retrieves the IR Node representing this LazyTensor. One will be created if
// missing. Note that although this is a const API, it actually changes the
  // internal state of the object.
Value GetIrValue() const;
void SetIrValue(Value ir_value);
void SetInPlaceIrValue(Value ir_value);
std::optional<at::Tensor> CurrentTensorData() const;
std::vector<LazyTensorPtr> MakeOutputTensors(const NodePtr& node) const;
LazyTensorPtr CopyTensorToDevice(const BackendDevice& device);
// Applies the queue of operations in preparation for using the data.
// Override it to use your own graph executor.
virtual void ApplyPendingGraph();
// Override it to set extra information.
virtual void AssignIrValue(Value ir_value) const;
protected:
explicit LazyTensor(std::shared_ptr<Data> data);
void SetTensorData(at::Tensor tensor_data);
// We build a graph accumulating operations, but at a given point we
// need to force a rendering, otherwise the graph can grow without control.
// Think:
// for i in range(0, 100000):
// a = a + b
void TryLimitGraphSize();
// Override it to instantiate your own data.
virtual Value GetIrValueForTensor(
const at::Tensor& tensor,
const BackendDevice& device) const;
Value CreateTensorNode(const BackendDataPtr& data, bool read_only) const;
private:
LazyTensor(const at::Tensor& tensor, const BackendDevice& device);
LazyTensor(Value ir_value, const BackendDevice& device);
explicit LazyTensor(const BackendDataPtr& handle);
static int64_t GetNextTensorId();
std::shared_ptr<Data> data_;
};
// Utils to convert at::Tensor to LazyTensor, and vice versa.
// Section 0: c10::Tensorlist ==> lazy::TensorList
// note: GetTensorList is not totally parallel to GetLtcTensor; a TensorList
// skips the LazyTensor wrappers, assuming that the list of underlying IR
// nodes is actually more useful for downstream computations. TBD.
TORCH_API torch::lazy::Value GetTensorList(at::ITensorListRef tensors);
// Section 1: at::Tensor => LazyTensor.
// Extracts the LazyTensor out of an at::Tensor. Returns a null LazyTensorPtr
// if the tensor is not a lazy tensor.
TORCH_API LazyTensorPtr TryGetLtcTensor(const at::Tensor& tensor);
// Extracts the LazyTensor out of an at::Tensor. Throws an exception
// if the tensor is not a lazy tensor.
TORCH_API LazyTensorPtr GetLtcTensor(const at::Tensor& tensor);
// Same as above, applied to a list of tensors.
TORCH_API std::vector<LazyTensorPtr> GetLtcTensors(
c10::ArrayRef<at::Tensor> tensors);
// If tensor is a lazy tensor type, returns the LazyTensor embedded within it,
// otherwise creates a new lazy tensor type with tensor as data.
TORCH_API LazyTensorPtr GetOrCreateLtcTensor(
const std::optional<at::Tensor>& tensor,
const BackendDevice& device);
TORCH_API LazyTensorPtr GetLtcTensorOrCreateForWrappedNumber(
const at::Tensor& tensor,
const BackendDevice& device);
// Section 2: LazyTensor => at::Tensor.
// Creates an ATen tensor from a LazyTensor.
TORCH_API at::Tensor CreateAtenFromLtcTensor(const LazyTensorPtr& ltc_tensor);
TORCH_API at::Tensor CreateAtenFromLtcTensor(LazyTensor&& ltc_tensor);
// Note [Lazy Tensor Functionalization]
// The functionalization pass is implemented by wrapping all TensorImpl
// objects in C++ with an extra FunctionalTensorWrapper object,
// that knows how to perform functionalization
//
// Certain functions in the aten API serve as entry/exit points for
// functionalization, where we need to perform the wrapping/unwrapping:
// - aten::to.device
// - aten::empty
// Given a non-lazy tensor, this function creates a lazy tensor on the specified
// (lazy) device. The functionalize_output determines whether or not we should
// wrap the output in a "functional wrapper".
//
// How do you know whether to pass true/false for functionalize_output?
//
// Case 1: nonlazy -> lazy
// If you're implementing a function that takes in nonlazy tensors and returns
// lazy tensors, then you should think of that function as an "entrypoint" to
// functionalization, and use functionalize_output=true. Examples include:
// - factory functions (the LTC kernel for at::empty)
// - CPU -> Lazy device conversions (the LTC kernel for at::to_device)
//
// Case 2: lazy -> lazy
// If you're implementing a function that takes in lazy tensors and returns
// lazy tensors, **but** requires creating lazy tensors internally,
// then you can assume that the current function is running inside of some
// outer context where functionalization is already running, which will take
// care of doing the wrapping for you, and use functionalize_output=false.
// Examples include:
// - CPU fallback (takes in lazy tensors, converts to cpu, calls kernel,
//   converts the returns back to lazy tensors).
TORCH_API at::Tensor to_lazy_tensor(
const at::Tensor& self,
const c10::TensorOptions& options,
at::Device device,
bool non_blocking,
bool functionalize_output);
template <size_t... Indices>
auto TupleAtenFromLtcTensorsImpl(
const std::vector<LazyTensorPtr>& tensors,
std::index_sequence<Indices...>) {
return std::make_tuple(CreateAtenFromLtcTensor(tensors[Indices])...);
}
template <size_t N>
auto TupleAtenFromLtcTensors(const std::vector<LazyTensorPtr>& tensors) {
return TupleAtenFromLtcTensorsImpl(tensors, std::make_index_sequence<N>{});
}
} // namespace torch::lazy
```
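A minimal sketch of the Section 1 / Section 2 helpers above, assuming a caller that may receive either a lazy or a non-lazy `at::Tensor`; `RoundTrip` is a hypothetical name, not a library function.
```cpp
#include <torch/csrc/lazy/core/tensor.h>

at::Tensor RoundTrip(
    const at::Tensor& maybe_lazy,
    const torch::lazy::BackendDevice& device) {
  // Section 1: returns the embedded LazyTensor if maybe_lazy is already lazy,
  // otherwise wraps its data into a new LazyTensor on `device`.
  torch::lazy::LazyTensorPtr lazy =
      torch::lazy::GetOrCreateLtcTensor(maybe_lazy, device);
  // ... trace lazy operations against `lazy` here ...
  // Section 2: wrap the lazy tensor back into an at::Tensor for the caller.
  return torch::lazy::CreateAtenFromLtcTensor(lazy);
}
```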
|
========================================================================================================================================
SOURCE CODE FILE: tensor_impl.h
LINES: 1
SIZE: 1.90 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\tensor_impl.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Tensor.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/core/TensorImpl.h>
#include <torch/csrc/lazy/core/tensor.h>
namespace torch::lazy {
// Tensor implementation class used to back an at::Tensor.
// Its scope is just to handle a LazyTensor.
class TORCH_API LTCTensorImpl final : public c10::TensorImpl {
public:
explicit LTCTensorImpl(const LazyTensorPtr& tensor);
explicit LTCTensorImpl(const LazyTensor& tensor);
explicit LTCTensorImpl(LazyTensor&& tensor);
LazyTensorPtr tensor() {
return tensor_;
}
void set_tensor(const LazyTensorPtr& lazy_tensor);
void force_refresh_sizes() {
generation_ = 0;
}
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
const c10::VariableVersion& version_counter,
bool allow_tensor_metadata_change) const override;
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
c10::VariableVersion&& version_counter,
bool allow_tensor_metadata_change) const override;
void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
at::IntArrayRef sizes_custom() const override;
at::IntArrayRef strides_custom() const override;
int64_t numel_custom() const override;
int64_t storage_offset_custom() const override;
int64_t dim_custom() const override;
bool is_contiguous_custom(at::MemoryFormat memory_format) const override;
bool is_strides_like_custom(at::MemoryFormat memory_format) const override;
bool is_non_overlapping_and_dense_custom() const override;
c10::SymIntArrayRef sym_sizes_custom() const override;
c10::SymIntArrayRef sym_strides_custom() const override;
c10::SymInt sym_numel_custom() const override;
private:
void setup_size_properties();
LazyTensorPtr tensor_;
mutable std::optional<std::vector<c10::SymInt>> sym_sizes_;
size_t generation_{0};
};
} // namespace torch::lazy
```
|
========================================================================================================================================
SOURCE CODE FILE: tensor_util.h
LINES: 1
SIZE: 2.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\tensor_util.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/lazy/backend/backend_interface.h>
#include <torch/csrc/lazy/core/shape.h>
#include <ATen/FunctionalTensorWrapper.h>
#include <string>
#include <vector>
namespace torch::lazy {
TORCH_API std::vector<int64_t> ComputeArrayStrides(
c10::ArrayRef<int64_t> sizes);
TORCH_API std::vector<at::Tensor> DataHandlesToTensors(
c10::ArrayRef<BackendDataPtr> data_handles,
at::ScalarType dest_element_type);
// Uploads an ATen tensor's data to the device and fetches the corresponding
// device data handle.
TORCH_API BackendDataPtr
TensorToDataHandle(const at::Tensor& tensor, const BackendDevice& device);
// Retrieves the device data handles by parallel uploading data onto the
// corresponding devices.
TORCH_API std::vector<BackendDataPtr> CreateTensorsData(
const std::vector<at::Tensor>& tensors,
const std::vector<BackendDevice>& devices);
// Makes a deep copy of an ATen tensor.
inline at::Tensor CopyTensor(const at::Tensor& ref) {
return ref.to(ref.options(), /*non_blocking=*/false, /*copy=*/true);
}
// Same as above, with an additional cast.
inline at::Tensor CopyTensor(
const at::Tensor& ref,
at::ScalarType dest_type,
bool copy = true) {
return ref.to(ref.options().dtype(dest_type), /*non_blocking=*/false, copy);
}
template <typename T, typename S>
T OptionalOr(const std::optional<S>& value, T defval) {
return value ? static_cast<T>(*value) : defval;
}
// Unwraps tensor to target dtype if it's a wrapped number.
inline at::Tensor UnwrapNumber(const at::Tensor& tensor, at::ScalarType dtype) {
return tensor.unsafeGetTensorImpl()->is_wrapped_number() ? tensor.to(dtype)
: tensor;
}
template <typename T>
at::Scalar MakeIntScalar(T value) {
return at::Scalar(static_cast<int64_t>(value));
}
// Routing values to device data maximizes the chances for compilation cache
// hits, but it can prevent the compiler from performing optimizations. So
// tensor values which are within a given set are routed to constant scalars
// if this API returns true.
TORCH_API bool IsSpecialScalar(const at::Scalar& value);
// Note: returns a reference instead of a fresh tensor to avoid refcount bumps.
inline const at::Tensor& maybe_unwrap_functional(const at::Tensor& tensor) {
if (at::functionalization::impl::isFunctionalTensor(tensor)) {
return at::functionalization::impl::unsafeGetFunctionalWrapper(tensor)
->value();
} else {
return tensor;
}
}
} // namespace torch::lazy
```
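A hypothetical sketch exercising the small helpers declared above (`TensorUtilExample` and its arguments are illustrative only).
```cpp
#include <torch/csrc/lazy/core/tensor_util.h>

#include <optional>

void TensorUtilExample(const at::Tensor& t, std::optional<double> maybe_scale) {
  // Deep copy, and a deep copy combined with a dtype cast.
  at::Tensor copy = torch::lazy::CopyTensor(t);
  at::Tensor as_float = torch::lazy::CopyTensor(t, at::kFloat);
  // OptionalOr returns the contained value cast to the target type, or the
  // provided default when the optional is empty.
  int64_t scale = torch::lazy::OptionalOr<int64_t>(maybe_scale, 1);
  at::Scalar scale_scalar = torch::lazy::MakeIntScalar(scale);
  (void)copy;
  (void)as_float;
  (void)scale_scalar;
}
```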
|
========================================================================================================================================
SOURCE CODE FILE: thread_pool.h
LINES: 1
SIZE: 0.78 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\thread_pool.h
ENCODING: utf-8
```h
/**
* This file is adapted from PyTorch/XLA
 * https://github.com/pytorch/xla/blob/master/third_party/xla_client/thread_pool.h
*/
#pragma once
#include <functional>
#include <memory>
#include <thread>
#include <c10/macros/Export.h>
namespace torch::lazy {
// NOLINTNEXTLINE(cppcoreguidelines-special-member-functions)
class TORCH_API Completion {
public:
class Data;
explicit Completion(std::shared_ptr<Data> data);
~Completion();
void Wait();
private:
std::shared_ptr<Data> data_;
};
// Schedules a closure which might wait for IO or other events/conditions.
TORCH_API void ScheduleIoClosure(std::function<void()> closure);
TORCH_API Completion
ScheduleIoClosureWithCompletion(std::function<void()> closure);
} // namespace torch::lazy
```
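A short, hypothetical sketch of the two scheduling entry points; the closures shown are placeholders.
```cpp
#include <torch/csrc/lazy/core/thread_pool.h>

void ThreadPoolExample() {
  // Fire-and-forget variant.
  torch::lazy::ScheduleIoClosure([] { /* poll a device, read a file, ... */ });
  // Variant that hands back a Completion object the caller can block on.
  torch::lazy::Completion done =
      torch::lazy::ScheduleIoClosureWithCompletion([] { /* IO-bound work */ });
  done.Wait(); // Blocks until the scheduled closure has finished.
}
```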
|
=================================================================================================================================
SOURCE CODE FILE: trie.h
LINES: 1
SIZE: 2.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\trie.h
ENCODING: utf-8
```h
#pragma once
#include <atomic>
#include <list>
#include <c10/core/ScalarType.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/metrics.h>
namespace torch::lazy {
struct TORCH_API TrieNode {
static size_t GetNextUniqueId() {
static thread_local size_t id_generator = 0;
return id_generator++;
}
size_t unique_id;
size_t hit_counter;
NodePtr ir_node;
std::list<std::shared_ptr<TrieNode>> successors;
TrieNode() : unique_id(GetNextUniqueId()), hit_counter(0), ir_node(nullptr) {}
explicit TrieNode(NodePtr node)
: unique_id(GetNextUniqueId()),
hit_counter(0),
ir_node(std::move(node)) {}
};
class TORCH_API TrieCache {
public:
static TrieCache* Get();
TrieNode* Current() const;
  // Takes an iterator as the input because we want to move the corresponding
  // node in the successor list to achieve an LRU caching effect.
  void SetCurrent(std::list<std::shared_ptr<TrieNode>>::iterator& iter);
  // Used in MarkStep to indicate the end of one tracing.
  void ResetCurrent();
  // Creates a new TrieNode for ir_node and inserts it into the TrieCache.
  void Insert(NodePtr ir_node);
  // Clears all TrieCache nodes.
  // TODO: Because we don't expect the user to explicitly call this function
  // via a Python API, we may need to introduce a threshold on the size of the
  // cache to avoid holding tensors for too long.
void Clear();
void DumpToDotFile(const std::string& file_name);
private:
TrieCache();
std::shared_ptr<TrieNode> root_;
TrieNode* current_;
};
template <typename T, typename... Args>
NodePtr LookupNodeFromTrieCache(Args&&... args) {
auto& successors = TrieCache::Get()->Current()->successors;
for (auto it = successors.begin(); it != successors.end(); it++) {
NodePtr ir_node = (*it)->ir_node;
const T* concrete_node = NodeCast<T>(ir_node.get());
if (concrete_node &&
concrete_node->CanBeReused(std::forward<Args>(args)...)) {
TORCH_LAZY_COUNTER(
"IrNodeReused_" + c10::demangle((typeid(T).name())), 1);
(*it)->hit_counter++;
TrieCache::Get()->SetCurrent(it);
return ir_node;
}
}
return nullptr;
}
} // namespace torch::lazy
```
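The lookup helper above is meant to be paired with `TrieCache::Insert` in a reuse-or-create pattern (the real pairing lives in the IR builder helpers); the sketch below is a hypothetical restatement of that pattern, not the library's implementation.
```cpp
#include <torch/csrc/lazy/core/ir_builder.h>
#include <torch/csrc/lazy/core/trie.h>

#include <utility>

namespace example {

template <typename T, typename... Args>
torch::lazy::NodePtr ReuseOrCreate(Args&&... args) {
  // Ask the trie whether the current tracing position already has an
  // equivalent, reusable node; on a hit the cache also advances its cursor.
  if (auto node = torch::lazy::LookupNodeFromTrieCache<T>(args...)) {
    return node;
  }
  // Otherwise build a fresh node and register it as a successor of the
  // current trie position so later traces can reuse it.
  auto node = torch::lazy::MakeNode<T>(std::forward<Args>(args)...);
  torch::lazy::TrieCache::Get()->Insert(node);
  return node;
}

} // namespace example
```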
|
===================================================================================================================================
SOURCE CODE FILE: unique.h
LINES: 1
SIZE: 1.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\unique.h
ENCODING: utf-8
```h
/**
* Unique in this file is adapted from PyTorch/XLA
* https://github.com/pytorch/xla/blob/master/third_party/xla_client/unique.h
*/
#pragma once
#include <optional>
#include <functional>
#include <set>
namespace torch::lazy {
// Helper class to track a value which can be set zero or more times, while
// enforcing that every value set is equal to the first one recorded.
template <typename T, typename C = std::equal_to<T>>
class Unique {
public:
std::pair<bool, const T&> set(const T& value) {
if (value_) {
TORCH_CHECK(C()(*value_, value), "'", *value_, "' vs '", value);
return std::pair<bool, const T&>(false, *value_);
}
value_ = value;
return std::pair<bool, const T&>(true, *value_);
}
operator bool() const {
return value_.has_value();
}
operator const T&() const {
return *value_;
}
const T& operator*() const {
return *value_;
}
const T* operator->() const {
return value_.operator->();
}
std::set<T> AsSet() const {
std::set<T> vset;
if (value_.has_value()) {
vset.insert(*value_);
}
return vset;
}
private:
std::optional<T> value_;
};
} // namespace torch::lazy
```
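A hypothetical sketch of the intended usage: collapsing the devices of a group of lazy tensors into a single value (the helper name `CommonDevice` is made up, and the snippet assumes `tensors` is non-empty).
```cpp
#include <torch/csrc/lazy/core/tensor.h>
#include <torch/csrc/lazy/core/unique.h>

#include <vector>

torch::lazy::BackendDevice CommonDevice(
    const std::vector<torch::lazy::LazyTensorPtr>& tensors) {
  torch::lazy::Unique<torch::lazy::BackendDevice> unique_device;
  for (const auto& tensor : tensors) {
    // set() TORCH_CHECKs that every device equals the first one recorded.
    unique_device.set(tensor->GetDevice());
  }
  return *unique_device; // Assumes at least one tensor was seen.
}
```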
|
=================================================================================================================================
SOURCE CODE FILE: util.h
LINES: 1
SIZE: 2.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\core\util.h
ENCODING: utf-8
```h
/**
* Most of the utils in this file is adapted from PyTorch/XLA
* https://github.com/pytorch/xla/blob/master/third_party/xla_client/util.h
*/
#pragma once
#include <exception>
#include <functional>
#include <vector>
#include <c10/util/OptionalArrayRef.h>
#include <optional>
namespace torch::lazy {
// Similar to c10::scope_exit but with a status.
// TODO(alanwaketan): Consolidate it with c10::scope_exit.
template <typename T>
class Cleanup {
public:
using StatusType = T;
explicit Cleanup(std::function<void(StatusType&&)>&& func)
: func_(std::move(func)) {}
Cleanup(Cleanup&& ref) noexcept
: func_(std::move(ref.func_)), status_(std::move(ref.status_)) {}
Cleanup(const Cleanup&) = delete;
~Cleanup() {
if (func_ != nullptr) {
func_(std::move(status_));
}
}
Cleanup& operator=(const Cleanup&) = delete;
Cleanup& operator=(Cleanup&& ref) noexcept {
if (this != &ref) {
func_ = std::move(ref.func_);
status_ = std::move(ref.status_);
}
return *this;
}
void Release() {
func_ = nullptr;
}
void SetStatus(StatusType&& status) {
status_ = std::move(status);
}
const StatusType& GetStatus() const {
return status_;
}
private:
std::function<void(StatusType&&)> func_;
StatusType status_;
};
using ExceptionCleanup = Cleanup<std::exception_ptr>;
// Allows APIs which might return either const references or values to not be
// forced to return values in the signature.
// TODO(alanwaketan): This is clever, but is there really no std or c10
// supports? Needs more investigations.
template <typename T>
class MaybeRef {
public:
/* implicit */ MaybeRef(const T& ref) : ref_(ref) {}
/* implicit */ MaybeRef(T&& value)
: storage_(std::move(value)), ref_(*storage_) {}
const T& Get() const {
return ref_;
}
const T& operator*() const {
return Get();
}
operator const T&() const {
return Get();
}
bool IsStored() const {
return storage_.has_value();
}
private:
std::optional<T> storage_;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const T& ref_;
};
template <typename T>
std::vector<T> Iota(size_t size, T init = 0, T incr = 1) {
std::vector<T> result(size);
T value = init;
for (size_t i = 0; i < size; ++i, value += incr) {
result[i] = value;
}
return result;
}
template <typename T, typename S>
std::vector<T> ToVector(const S& input) {
return std::vector<T>(input.begin(), input.end());
}
template <typename T>
std::optional<std::vector<T>> ToOptionalVector(
c10::OptionalArrayRef<T> arrayRef) {
if (arrayRef) {
return arrayRef->vec();
}
return std::nullopt;
}
template <typename T>
std::underlying_type_t<T> GetEnumValue(T value) {
return static_cast<std::underlying_type_t<T>>(value);
}
} // namespace torch::lazy
```
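A hypothetical sketch of `Iota` and `ExceptionCleanup` (the function name and the error handling shown are illustrative only).
```cpp
#include <torch/csrc/lazy/core/util.h>

#include <exception>
#include <iostream>

void UtilExample() {
  // Iota(5, 0, 2) -> {0, 2, 4, 6, 8}
  auto evens = torch::lazy::Iota<int64_t>(5, 0, 2);
  (void)evens;
  // The cleanup functor runs at scope exit with whatever status (an
  // std::exception_ptr here) was recorded via SetStatus().
  torch::lazy::ExceptionCleanup cleanup([](std::exception_ptr eptr) {
    if (eptr) {
      std::cerr << "work failed\n";
    }
  });
  try {
    // ... work that may throw ...
  } catch (...) {
    cleanup.SetStatus(std::current_exception());
  }
}
```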
|
==========================================================================================================================================
SOURCE CODE FILE: python_util.h
LINES: 1
SIZE: 0.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\python\python_util.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/lazy/core/ir_metadata.h>
#include <optional>
#include <vector>
namespace torch::lazy {
std::optional<SourceLocation> TORCH_PYTHON_API GetPythonFrameTop();
std::vector<SourceLocation> TORCH_PYTHON_API GetPythonFrames();
} // namespace torch::lazy
```
|
=========================================================================================================================================
SOURCE CODE FILE: config.h
LINES: 1
SIZE: 0.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\ts_backend\config.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/Flags.h>
// TODO(whc) unclear if this is useful, has only been tested as true
TORCH_DECLARE_bool(torch_lazy_ts_tensor_update_sync);
TORCH_DECLARE_bool(torch_lazy_ts_cuda);
```
|
=============================================================================================================================================
SOURCE CODE FILE: dynamic_ir.h
LINES: 1
SIZE: 2.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\ts_backend\dynamic_ir.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/symbol.h>
#include <memory>
#include <string>
#include <c10/core/ScalarType.h>
#include <c10/util/Flags.h>
#include <torch/csrc/lazy/core/dynamic_ir.h>
#include <torch/csrc/lazy/core/hash.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/ir_metadata.h>
#include <torch/csrc/lazy/ts_backend/ts_node.h>
TORCH_DECLARE_bool(ltc_enable_dynamic_shapes);
namespace torch::lazy {
/**
* The goal of "dynamic" Nodes is to patch a hole in our tracing.
* Previously, if a user called `sizes` on a Tensor, it would leak out
* of our tracing system, as `sizes` returns a torch.Size or an int. To
* prevent this from happening, we introduce DimensionNode, a new type
* of Node that abstracts the operation of getting the dimensions of a
* Tensor.
*
* Consider the following example:
* ```
* numel = x.shape()[0] * x.shape()[1]
* ```
*
* Here, `x.shape()[i]` will be a SizeNode (subclass of DimensionNode),
* and the multiplication of the two SizeNodes will be represented by
* a SizeMul (also a subclass of DimensionNode). Through this, we can
* prevent `numel` from being represented as a Python int and thus
* burned into the Graph.
*/
// Represents the result of calling `size` on a Tensor
class TORCH_API SizeNode : public TsNode, public DimensionNode {
public:
SizeNode(Value input, size_t dim);
int64_t getStaticValue() const override;
bool isSymbolic() const override;
std::string ToString() const override;
size_t dim_ = 0;
torch::lazy::TSOpVector Lower(
std::shared_ptr<torch::jit::GraphFunction> function,
TSLoweringContext* loctx) const override;
};
class TORCH_API SizeAdd : public TsNode, public DimensionNode {
public:
SizeAdd(Value a, Value b);
int64_t getStaticValue() const override;
bool isSymbolic() const override;
std::string ToString() const override;
};
class TORCH_API SizeMul : public TsNode, public DimensionNode {
public:
SizeMul(Value a, Value b);
int64_t getStaticValue() const override;
bool isSymbolic() const override;
std::string ToString() const override;
};
class TORCH_API SizeDiv : public TsNode, public DimensionNode {
public:
SizeDiv(Value a, Value b);
int64_t getStaticValue() const override;
bool isSymbolic() const override;
std::string ToString() const override;
};
} // namespace torch::lazy
```
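A hypothetical sketch of what the docstring above describes, under the assumption that `x` is an existing `LazyTensorPtr`; `SymbolicNumel2d` is an illustrative name, and `MakeNode` is the generic node factory from `ir_builder.h` rather than anything specific to this file.
```cpp
#include <torch/csrc/lazy/core/ir_builder.h>
#include <torch/csrc/lazy/core/tensor.h>
#include <torch/csrc/lazy/ts_backend/dynamic_ir.h>

// Builds numel = size(x, 0) * size(x, 1) as IR nodes instead of burning the
// concrete integers into the trace.
torch::lazy::NodePtr SymbolicNumel2d(const torch::lazy::LazyTensorPtr& x) {
  using namespace torch::lazy;
  Value input = x->GetIrValue();
  // Each SizeNode records "dimension i of input" symbolically.
  NodePtr d0 = MakeNode<SizeNode>(input, /*dim=*/0);
  NodePtr d1 = MakeNode<SizeNode>(input, /*dim=*/1);
  // SizeMul keeps the product symbolic as well.
  return MakeNode<SizeMul>(Value(d0), Value(d1));
}
```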
|
=============================================================================================================================================
SOURCE CODE FILE: ir_builder.h
LINES: 1
SIZE: 2.40 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\ts_backend\ir_builder.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/lazy/core/internal_ops/ltc_ops.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/ir_builder.h>
#include <torch/csrc/lazy/core/shape_inference.h>
#include <torch/csrc/lazy/generated/LazyNonNativeIr.h>
#include <torch/csrc/lazy/ts_backend/dynamic_ir.h>
#include <torch/csrc/lazy/ts_backend/ops/device_data.h>
#include <torch/csrc/lazy/ts_backend/ops/generic.h>
#include <torch/csrc/lazy/ts_backend/ts_node.h>
namespace torch::lazy {
struct TorchScriptIrBuilder : IrBuilder {
NodePtr MakeDeviceData(
const std::shared_ptr<BackendData>& data) const override {
return DeviceData::Create(data);
}
// TODO: Scalar node is not currently used by ts_backend. Enable reusing
// Scalar node later if needed.
NodePtr MakeScalar(const at::Scalar& value, const at::ScalarType& type)
const override {
return MakeNode<Scalar>(value, type);
}
NodePtr MakeExpand(
const Value& input0,
const std::vector<int64_t>& size,
const bool& is_scalar_expand) const override {
return ReuseOrMakeNode<Expand>(input0, size, is_scalar_expand);
}
NodePtr MakeCast(
const Value& input0,
const at::ScalarType& dtype,
const std::optional<at::ScalarType>& stype =
std::nullopt) const override {
return ReuseOrMakeNode<Cast>(input0, dtype, stype);
}
NodePtr MakeTensorList(const OpList& inputs) const override {
return ReuseOrMakeNode<TensorList>(inputs);
}
// Generic needs cleanup
NodePtr MakeGeneric(
const OpKind& op,
const OpList& operands,
const Shape& shape,
const size_t& num_outputs = 1,
const hash_t& hash_seed =
static_cast<uint32_t>(0x5a2d296e9)) const override {
return MakeNode<Generic>(op, operands, shape, num_outputs, hash_seed);
}
// dynamic ir nodes
// TODO: verify if IR node reusing works for Dynamic shape ops
NodePtr MakeSizeNode(const Value& input, size_t dim) const override {
return MakeNode<SizeNode>(input, dim);
}
NodePtr MakeSizeAdd(const Value& a, const Value& b) const override {
return MakeNode<SizeAdd>(a, b);
}
NodePtr MakeSizeMul(const Value& a, const Value& b) const override {
return MakeNode<SizeMul>(a, b);
}
NodePtr MakeSizeDiv(const Value& a, const Value& b) const override {
return MakeNode<SizeDiv>(a, b);
}
};
} // namespace torch::lazy
```
|
==================================================================================================================================================
SOURCE CODE FILE: tensor_aten_ops.h
LINES: 1
SIZE: 0.53 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\ts_backend\tensor_aten_ops.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/lazy/core/tensor.h>
namespace torch::lazy {
//////////////////////////////////////////////////////////////////////////////
// ATen operators follow here, listed in alphabetical order.
//////////////////////////////////////////////////////////////////////////////
void copy_(torch::lazy::LazyTensorPtr& input, torch::lazy::LazyTensorPtr& src);
// Fills the input with the given value.
void fill_(torch::lazy::LazyTensorPtr& input, const at::Scalar& value);
} // namespace torch::lazy
```
|
========================================================================================================================================================
SOURCE CODE FILE: ts_autograd_functions.h
LINES: 1
SIZE: 0.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\ts_backend\ts_autograd_functions.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/custom_function.h>
namespace torch::lazy {
struct MaxPool3dAutogradFunctionTS
: public torch::autograd::Function<MaxPool3dAutogradFunctionTS> {
static at::Tensor forward(
torch::autograd::AutogradContext* ctx,
const at::Tensor& self,
at::IntArrayRef kernel_size,
at::IntArrayRef stride,
at::IntArrayRef padding,
at::IntArrayRef dilation,
bool ceil_mode);
static torch::autograd::variable_list backward(
torch::autograd::AutogradContext* ctx,
torch::autograd::variable_list grad_output);
};
} // namespace torch::lazy
```
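A hypothetical usage sketch for the custom autograd function declared above, going through the standard `torch::autograd::Function<T>::apply` entry point; the pooling parameters are illustrative values, not defaults taken from the source.
```cpp
#include <torch/csrc/lazy/ts_backend/ts_autograd_functions.h>

// Sketch only: run the TS-backend max_pool3d autograd function on a tensor.
at::Tensor max_pool3d_via_ts(const at::Tensor& self) {
  const std::vector<int64_t> kernel_size{2, 2, 2};
  const std::vector<int64_t> stride{2, 2, 2};
  const std::vector<int64_t> padding{0, 0, 0};
  const std::vector<int64_t> dilation{1, 1, 1};
  return torch::lazy::MaxPool3dAutogradFunctionTS::apply(
      self,
      at::IntArrayRef(kernel_size),
      at::IntArrayRef(stride),
      at::IntArrayRef(padding),
      at::IntArrayRef(dilation),
      /*ceil_mode=*/false);
}
```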
|
==================================================================================================================================================
SOURCE CODE FILE: ts_backend_impl.h
LINES: 1
SIZE: 1.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\ts_backend\ts_backend_impl.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/lazy/backend/backend_interface.h>
#include <utility>
namespace torch::lazy {
class TORCH_API TSData : public torch::lazy::BackendData {
public:
TSData(const at::Scalar& scalar, const torch::lazy::BackendDevice& device)
: torch::lazy::BackendData(device, torch::lazy::Shape(scalar.type(), {})),
scalar(scalar) {}
TSData(
at::Tensor data,
const torch::lazy::Shape& shape,
const torch::lazy::BackendDevice& device)
: torch::lazy::BackendData(device, shape), data_(std::move(data)) {}
TSData(
const torch::lazy::Shape& shape,
const torch::lazy::BackendDevice& device)
: torch::lazy::BackendData(device, shape) {}
Handle GetHandle() override {
return reinterpret_cast<int64_t>(this);
}
void Assign(const torch::lazy::BackendData& data) override {
data_ = static_cast<const TSData&>(data).data_;
}
bool HasValue() const override {
return data_.defined();
}
at::Tensor data() {
return data_;
}
std::optional<at::Scalar> scalar;
private:
at::Tensor data_;
};
TORCH_API torch::lazy::BackendImplInterface* GetTSBackendImpl();
TORCH_PYTHON_API void InitTorchScriptBackend();
} // namespace torch::lazy
```
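A hypothetical sketch of wrapping an eager tensor in `TSData` and round-tripping its opaque handle; the default-constructed `BackendDevice` is an assumption for illustration, since real code obtains devices from the backend interface.
```cpp
#include <torch/csrc/lazy/ts_backend/ts_backend_impl.h>

// Sketch only: build a TSData from a tensor and check its handle semantics.
void inspect_ts_data(const at::Tensor& t) {
  torch::lazy::BackendDevice device; // assumed default device for the sketch
  torch::lazy::Shape shape(t.scalar_type(), t.sizes().vec());
  auto data = std::make_shared<torch::lazy::TSData>(t, shape, device);
  TORCH_CHECK(data->HasValue());
  // GetHandle() is just the object's address reinterpreted as an integer.
  auto handle = data->GetHandle();
  TORCH_CHECK(reinterpret_cast<torch::lazy::TSData*>(handle) == data.get());
}
```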
|
====================================================================================================================================================
SOURCE CODE FILE: ts_eager_fallback.h
LINES: 1
SIZE: 0.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\ts_backend\ts_eager_fallback.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/stack.h>
#include <functional>
namespace torch::lazy {
bool force_eager_fallback(c10::Symbol op);
void ltc_eager_fallback(
const c10::OperatorHandle& op,
torch::jit::Stack* stack);
void ts_eager_fallback(
const c10::OperatorHandle& op,
torch::jit::Stack* stack,
c10::DeviceType device_type);
// The TorchScript backend does not register itself with the PyTorch dispatcher
// until it is explicitly initialized. This function should only be called
// by the main TorchScript backend init function.
void register_ts_ltc_eager_fallback();
} // namespace torch::lazy
```
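A small, hypothetical sketch of the query half of this API: checking whether a given op symbol is forced onto the eager fallback path. The op name is illustrative only.
```cpp
#include <torch/csrc/lazy/ts_backend/ts_eager_fallback.h>

// Sketch only: ask whether aten::add has been forced to eager fallback.
bool add_is_forced_to_eager() {
  const auto sym = c10::Symbol::fromQualString("aten::add");
  return torch::lazy::force_eager_fallback(sym);
}
```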
|
======================================================================================================================================================
SOURCE CODE FILE: ts_lowering_context.h
LINES: 1
SIZE: 4.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\ts_backend\ts_lowering_context.h
ENCODING: utf-8
```h
#pragma once
#include <sstream>
#include <torch/csrc/api/include/torch/jit.h>
#include <torch/csrc/jit/runtime/graph_executor.h>
#include <torch/csrc/lazy/backend/lowering_context.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/ts_backend/ts_node_lowering.h>
namespace torch::lazy {
using TSOpVector = std::vector<torch::jit::Value*>;
class TORCH_API TSComputation : public Computation {
public:
TSComputation(const std::shared_ptr<torch::jit::Graph>& graph)
: graph_(graph), graph_executor_(graph, "") {
for (torch::jit::Value* input : graph_->inputs()) {
parameter_names_.push_back(input->debugName());
}
}
int parameters_size() const override {
return static_cast<int>(parameter_names_.size());
}
const std::vector<Shape>& parameter_shapes() const override {
throw std::runtime_error(
"TODO(whc) implement TS computation shapes or change interface");
return parameter_shapes_;
}
const std::vector<std::string>& parameter_names() const override {
return parameter_names_;
}
const Shape& result_shape() const override {
throw std::runtime_error(
"TODO(whc) implement TS computation shapes or change interface");
return result_shape_;
}
const std::string to_string() const override {
std::ostringstream oss;
oss << *graph_;
return oss.str();
}
std::shared_ptr<torch::jit::Graph> graph() const {
return graph_;
}
torch::jit::GraphExecutor& graph_executor() {
return graph_executor_;
}
private:
std::shared_ptr<torch::jit::Graph> graph_;
torch::jit::GraphExecutor graph_executor_;
std::vector<std::string> parameter_names_;
std::vector<Shape> parameter_shapes_;
Shape result_shape_;
};
class TORCH_API TSLoweringContext : public LoweringContext {
public:
TSLoweringContext(const std::string& name, const BackendDevice device);
TSLoweringContext(
const std::string& name,
BackendDevice device,
c10::ArrayRef<const Node*> post_order,
Util::EmissionMap emit_status);
size_t AddResult(const Output& output) override {
return AddResult(GetOutputOp(output));
}
void AddParameter(
const torch::lazy::Output& output,
size_t index,
const Shape& shape,
const std::string& name) override {
TORCH_INTERNAL_ASSERT(false, "not implemented");
}
void Lower(const Node* node);
ComputationPtr Build() override {
for (torch::jit::Value* output : root_tuple_) {
graph_->block()->registerOutput(output);
}
return std::shared_ptr<Computation>(new TSComputation(graph_));
}
// Retrieves the lowered operation for an output. If the requested output is
// not available yet, the graph behind the output's Node is lowered, and the
// corresponding TS operation returned.
torch::jit::Value* GetOutputOp(const Output& output) {
auto it = emitted_outputs_.find(output);
if (it == emitted_outputs_.end()) {
auto post_order = Util::ComputePostOrder(output.node, &emit_status_);
for (auto node : post_order) {
Lower(node);
}
      // At this point the output had better be present; otherwise there is an issue
// with the lowering code.
it = emitted_outputs_.find(output);
TORCH_CHECK(
it != emitted_outputs_.end(),
"No TS operation emitted for output: ",
output.ToString());
}
return it->second;
}
// Assigns the given TS operation to the specified output. As outputs are
// lowered in a post-order fashion, later nodes should always find their
// operands among the emitted outputs.
void AssignOutputOp(const Output& output, torch::jit::Value* op);
// If a parameter associated with data has already been declared, it will be
// returned. Otherwise a new one will be created, associated with the tensor
// held in data.
torch::jit::Value* GetParameter(const BackendDataPtr& data);
std::shared_ptr<torch::jit::Graph> graph() const {
return graph_;
}
private:
struct Parameter {
torch::jit::Value* param{nullptr};
size_t index = 0;
};
size_t AddResult(torch::jit::Value* op) {
root_tuple_.push_back(op);
return root_tuple_.size() - 1;
}
std::shared_ptr<torch::jit::Graph> graph_;
std::shared_ptr<torch::jit::GraphFunction> function_;
std::unordered_map<BackendData::Handle, Parameter> parameters_map_;
std::vector<torch::jit::Value*> root_tuple_;
OutputMap<torch::jit::Value*> emitted_outputs_;
};
} // namespace torch::lazy
```
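A hypothetical sketch of the lowering flow implied by the API above: resolve a lazy `Output` to the `torch::jit::Value` emitted for it (lowering on demand), register it as a graph result, and build the computation. The function and variable names are assumptions, not code from the source.
```cpp
#include <torch/csrc/lazy/ts_backend/ts_lowering_context.h>

// Sketch only: lower one lazy output into a standalone TS computation.
std::shared_ptr<torch::lazy::Computation> lower_single_output(
    const torch::lazy::Output& output,
    const torch::lazy::BackendDevice& device) {
  torch::lazy::TSLoweringContext loctx("LoweringSketch", device);
  // GetOutputOp lowers output.node (and its inputs) if nothing has been
  // emitted for it yet, so AddResult can simply reuse the emitted value.
  torch::jit::Value* value = loctx.GetOutputOp(output);
  (void)value;
  loctx.AddResult(output);
  return loctx.Build();
}
```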
|
==========================================================================================================================================
SOURCE CODE FILE: ts_node.h
LINES: 1
SIZE: 3.37 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\ts_backend\ts_node.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/ArrayRef.h>
#include <torch/csrc/jit/api/function_impl.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/lazy/backend/lowering_context.h>
#include <torch/csrc/lazy/core/ir.h>
#include <torch/csrc/lazy/core/shape.h>
#include <torch/csrc/lazy/ts_backend/ts_lowering_context.h>
namespace torch::lazy {
using TSOpVector = std::vector<torch::jit::Value*>;
class TORCH_API TsNode : public lazy::Node {
public:
TsNode(
OpKind op,
OpList operands,
std::vector<Shape>&& shapes,
size_t num_outputs,
hash_t hash_seed = kHashSeed);
TsNode(
OpKind op,
OpList operands,
const std::function<Shape()>& shape_fn,
size_t num_outputs,
hash_t hash_seed = kHashSeed);
TsNode(
OpKind op,
OpList operands,
size_t num_outputs,
hash_t hash_seed = kHashSeed);
TsNode(
OpKind op,
Shape shape,
size_t num_outputs,
hash_t hash_seed = kHashSeed);
~TsNode() override = default;
hash_t hash() const override;
hash_t shapeHash() const override;
const std::string getPythonStacktrace() const;
// Lower is a backend-specific method since it returns a backend specific
  // type. Hence, it is convenient to define it differently per backend rather
  // than at the Node API level.
virtual TSOpVector Lower(
std::shared_ptr<torch::jit::GraphFunction> function,
TSLoweringContext* loctx) const;
private:
// The hash of the dag WITH size info. Used for shape caching
hash_t shape_hash_;
// The hash of the dag used to look up the compiled graph by a hash
// in this case, we will use the dag hash WITHOUT size info if dynamic shape
// is enabled and use the dag hash WITH size info otherwise.
hash_t dag_hash_;
};
// Note: this OpKind is separate from ltc_ops.h since it would be a circular
// import otherwise. I like leaving TensorList in this file, and I think most of
// the ltc_ops special cases will be deleted anyway.
const OpKind tensor_list_opkind = OpKind::Get("lazy_tensors::tensor_list");
// TensorList represents an at::TensorList which is a vector[Tensor] but is also
// a first-class IValue and can be fed as a single input to a TS program. It is
// much easier to handle TensorLists in Lazy Tensor code if they are represented
// as a single Node so there can be more than one TensorList and more than one
// Tensor side-by-side as operands to an op.
//
// Note: shape is undefined for TensorList. We assert in some places that
// #shapes matches #outputs and this stems from
// the fact that currently all IR nodes represent tensors (there is no
// type system for this IR). Because of this, TensorList is a bit of a
// hack.
//
// TODO(whc) once Shape() API is moved to Node base, also make it virtual, and
// then implement it as NotImplemented for TensorList, also fixing the assertion
// that would fail.
struct TORCH_API TensorList : public TsNode {
static OpKind ClassOpKind() {
return tensor_list_opkind;
}
TensorList() = delete;
TensorList(OpList values);
bool CanBeReused(OpList values) const {
return operands() == std::vector<Output>(values.begin(), values.end());
}
TSOpVector Lower(
std::shared_ptr<torch::jit::GraphFunction> function,
TSLoweringContext* loctx) const override;
};
} // namespace torch::lazy
```
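A hypothetical, hand-written sketch of the reuse check `TensorList` exposes: keep an existing candidate node when it already wraps exactly the same operands, otherwise build a new one with `MakeNode` from the core `ir_builder.h`. This only illustrates what `ReuseOrMakeNode<TensorList>` in the TS builder above is expected to do.
```cpp
#include <torch/csrc/lazy/core/ir_builder.h>
#include <torch/csrc/lazy/ts_backend/ts_node.h>

// Sketch only: reuse `candidate` if it is a TensorList over the same values.
torch::lazy::NodePtr get_or_make_tensor_list(
    const torch::lazy::NodePtr& candidate,
    torch::lazy::OpList values) {
  auto tl = std::dynamic_pointer_cast<torch::lazy::TensorList>(candidate);
  if (tl && tl->CanBeReused(values)) {
    return candidate;
  }
  return torch::lazy::MakeNode<torch::lazy::TensorList>(values);
}
```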
|
===================================================================================================================================================
SOURCE CODE FILE: ts_node_lowering.h
LINES: 1
SIZE: 0.47 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\lazy\ts_backend\ts_node_lowering.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/api/include/torch/jit.h>
#include <torch/csrc/lazy/backend/lowering_context.h>
namespace torch::lazy {
using TSOpVector = std::vector<torch::jit::Value*>;
TORCH_API TSOpVector LowerTSBuiltin(
const std::shared_ptr<torch::jit::GraphFunction>& function,
c10::Symbol sym,
const std::vector<torch::jit::NamedValue>& arguments,
const std::vector<torch::jit::NamedValue>& kwarguments = {});
} // namespace torch::lazy
```
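A hypothetical sketch of how a `TsNode::Lower` override might delegate to `LowerTSBuiltin` for a simple builtin: resolve each lazy operand to its emitted `torch::jit::Value`, wrap them as `NamedValue`s, and hand them to the builtin lowering helper. The relu symbol and the single-operand assumption are illustrative.
```cpp
#include <torch/csrc/lazy/ts_backend/ts_node.h>
#include <torch/csrc/lazy/ts_backend/ts_node_lowering.h>

// Sketch only: lower a single-input node as a call to aten::relu.
torch::lazy::TSOpVector LowerAsRelu(
    const torch::lazy::Node* node,
    const std::shared_ptr<torch::jit::GraphFunction>& function,
    torch::lazy::TSLoweringContext* loctx) {
  std::vector<torch::jit::NamedValue> arguments;
  // Resolve the lazy operand to the torch::jit::Value emitted for it.
  arguments.emplace_back(loctx->GetOutputOp(node->operand(0)));
  return torch::lazy::LowerTSBuiltin(
      function, c10::Symbol::fromQualString("aten::relu"), arguments);
}
```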
|
===================================================================================================================================
SOURCE CODE FILE: back_compat.h
LINES: 1
SIZE: 1.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\onnx\back_compat.h
ENCODING: utf-8
```h
#pragma once
#include <onnx/onnx_pb.h>
namespace torch::onnx {
// The following constants are defined here to avoid breaking Meta's internal
// usage of ONNX which pre-dates ONNX 1.14 and thus does not support FLOAT8:
// cf. https://github.com/pytorch/pytorch/pull/106379#issuecomment-1675189340
// -abock, 2023-08-25
//
// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E4M3FN
constexpr auto TensorProto_DataType_FLOAT8E4M3FN =
static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(17);
// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E4M3FNUZ
constexpr auto TensorProto_DataType_FLOAT8E4M3FNUZ =
static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(18);
// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E5M2
constexpr auto TensorProto_DataType_FLOAT8E5M2 =
static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(19);
// ::ONNX_NAMESPACE::TensorProto_DataType_FLOAT8E5M2FNUZ
constexpr auto TensorProto_DataType_FLOAT8E5M2FNUZ =
static_cast<::ONNX_NAMESPACE::TensorProto_DataType>(20);
} // namespace torch::onnx
```
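A hypothetical sketch of how these constants are meant to be used: tag an ONNX `TensorProto` with a FLOAT8 data type through the back-compat constant rather than the enum member, so the code still compiles against ONNX versions that pre-date FLOAT8.
```cpp
#include <onnx/onnx_pb.h>
#include <torch/csrc/onnx/back_compat.h>

// Sketch only: mark a tensor proto as float8_e4m3fn via the constant above.
void mark_as_float8_e4m3fn(::ONNX_NAMESPACE::TensorProto& tensor) {
  tensor.set_data_type(torch::onnx::TensorProto_DataType_FLOAT8E4M3FN);
}
```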
|
============================================================================================================================
SOURCE CODE FILE: init.h
LINES: 1
SIZE: 0.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\onnx\init.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/utils/pybind.h>
namespace torch::onnx {
void initONNXBindings(PyObject* module);
} // namespace torch::onnx
```
|
============================================================================================================================
SOURCE CODE FILE: onnx.h
LINES: 1
SIZE: 0.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\onnx\onnx.h
ENCODING: utf-8
```h
#pragma once
namespace torch::onnx {
enum class OperatorExportTypes {
ONNX, // Strict ONNX export
ONNX_ATEN, // ONNX With ATen op everywhere
ONNX_ATEN_FALLBACK, // ONNX export with ATen fallback
ONNX_FALLTHROUGH, // Export supported ONNX ops. Pass through unsupported ops.
};
enum class TrainingMode {
EVAL, // Inference mode
PRESERVE, // Preserve model state (eval/training)
TRAINING, // Training mode
};
constexpr auto kOnnxNodeNameAttribute = "onnx_name";
} // namespace torch::onnx
```
|
===============================================================================================================================
SOURCE CODE FILE: api.h
LINES: 1
SIZE: 0.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\api.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/profiler/orchestration/observer.h>
// There are some components which use these symbols. Until we migrate them,
// we have to mirror them in the old autograd namespace.
namespace torch::autograd::profiler {
using torch::profiler::impl::ActivityType;
using torch::profiler::impl::getProfilerConfig;
using torch::profiler::impl::ProfilerConfig;
using torch::profiler::impl::profilerEnabled;
using torch::profiler::impl::ProfilerState;
} // namespace torch::autograd::profiler
```
|
======================================================================================================================================
SOURCE CODE FILE: collection.h
LINES: 1
SIZE: 20.93 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\profiler\collection.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
#include <memory>
#include <mutex>
#include <type_traits>
#include <utility>
#include <variant>
#include <ATen/Context.h>
#include <c10/core/Device.h>
#include <c10/core/TensorImpl.h>
#include <c10/macros/Macros.h>
#include <c10/util/ApproximateClock.h>
#include <c10/util/flat_hash_map.h>
#include <c10/util/strong_type.h>
#include <torch/csrc/profiler/containers.h>
#include <torch/csrc/profiler/data_flow.h>
#include <torch/csrc/profiler/events.h>
#include <torch/csrc/profiler/kineto_shim.h>
#include <torch/csrc/profiler/orchestration/python_tracer.h>
#include <torch/csrc/profiler/perf.h>
#include <torch/csrc/profiler/stubs/base.h>
#include <torch/csrc/profiler/util.h>
#include <torch/csrc/utils/python_stub.h>
namespace torch::profiler::impl {
enum class EventType : uint8_t {
TorchOp = 0,
Backend,
Vulkan,
Allocation,
OutOfMemory,
PyCall,
PyCCall,
Kineto
};
// ============================================================================
// == Value (Tensor, Scalar) summary ==========================================
// ============================================================================
struct TORCH_API RawTensorMetadataBase {
RawTensorMetadataBase() = default;
explicit RawTensorMetadataBase(const at::Tensor& t);
StorageImplData data_;
c10::ScalarType dtype_{c10::ScalarType::Undefined};
c10::Layout layout_{c10::Layout::Strided};
uint32_t size_dim_{0};
};
// Collected during profiling.
struct TORCH_API RawTensorMetadata : RawTensorMetadataBase {
RawTensorMetadata() = default;
RawTensorMetadata(const RawTensorMetadata&) = default;
RawTensorMetadata(RawTensorMetadata&&) noexcept = default;
RawTensorMetadata& operator=(const RawTensorMetadata&) = default;
RawTensorMetadata& operator=(RawTensorMetadata&&) noexcept = default;
~RawTensorMetadata() = default;
explicit RawTensorMetadata(const at::Tensor& t);
  // Wrap `weak_self_` in `std::optional` and split device into components to
  // keep the struct default constructible (which the std::array initializer needs).
std::optional<WeakTensor> weak_self_;
c10::DeviceType device_type_{c10::DeviceType::CPU};
c10::DeviceIndex device_index_{-1};
};
// Used during post processing.
struct TORCH_API TensorMetadata : public RawTensorMetadataBase {
TensorMetadata(
const RawTensorMetadata& r,
std::vector<int64_t> sizes,
std::vector<int64_t> strides);
TensorImplAddress impl() const {
return weak_self_.get();
}
WeakTensor weak_self_;
c10::Device device_;
std::vector<int64_t> sizes_;
std::vector<int64_t> strides_;
// Set during `calculateUniqueTensorIDs`.
std::optional<TensorID> id_;
std::optional<AllocationID> allocation_id_;
};
// Used during post processing.
struct TORCH_API ProfilerStepInfo {
int64_t start_time_ns; // start time of the profiler step
int64_t end_time_ns; // end time of the profiler step
uint64_t out_idx; // index of the profiler step in the profiler "out" var in
// getRecords
ProfilerStepInfo(int64_t start, int64_t end, uint64_t out_idx)
: start_time_ns(start), end_time_ns(end), out_idx(out_idx) {}
};
using op_input_t = std::variant<
TensorMetadata,
std::vector<TensorMetadata>,
c10::IValue,
std::nullopt_t>;
// ============================================================================
// == ExtraFields =============================================================
// ============================================================================
template <EventType>
struct ExtraFields;
struct TorchOpBasicFields {
int64_t sequence_number_{0};
uint64_t forward_tid_{0};
at::RecordScope scope_{};
bool is_async_{false};
uint64_t record_function_id_{0};
int64_t debug_handle_{0};
std::string name_;
std::string overload_name_;
// Set in the exit callback.
uint64_t end_tid_{0};
};
using jit_stack_t = std::vector<std::string>;
using jit_modules_t = std::vector<std::string>;
using extra_args_t = std::unordered_map<std::string, c10::IValue>;
using extra_meta_t = std::unordered_map<std::string, std::string>;
using kwinputs_t = std::unordered_map<std::string, c10::IValue>;
struct FallbackPair {
ProfilerVoidEventStub device_event_start_ = nullptr;
ProfilerVoidEventStub device_event_end_ = nullptr;
};
template <>
struct ExtraFields<EventType::TorchOp> : TorchOpBasicFields {
ExtraFields(
TorchOpBasicFields&& f,
uint64_t correlation_id,
c10::time_t end_time_ns,
std::vector<op_input_t>&& inputs,
std::vector<op_input_t>&& concrete_inputs,
jit_stack_t&& jit_stack,
jit_modules_t&& jit_modules,
extra_args_t&& extra_args,
extra_meta_t&& extra_meta,
kwinputs_t&& kwinputs,
FallbackPair&& device_fallback,
bool allow_tf32_cublas,
std::unique_ptr<perf_counters_t>&& perf_event_counters)
: TorchOpBasicFields(std::move(f)),
correlation_id_{correlation_id},
end_time_ns_{end_time_ns},
inputs_{std::move(inputs)},
concrete_inputs_{std::move(concrete_inputs)},
jit_stack_{std::move(jit_stack)},
jit_modules_{std::move(jit_modules)},
extra_args_{std::move(extra_args)},
extra_meta_{std::move(extra_meta)},
kwinputs_{std::move(kwinputs)},
device_fallback_{std::move(device_fallback)},
allow_tf32_cublas_{allow_tf32_cublas},
perf_event_counters_{std::move(perf_event_counters)} {}
uint64_t correlation_id_;
c10::time_t end_time_ns_;
std::vector<op_input_t> inputs_;
std::vector<op_input_t> concrete_inputs_;
jit_stack_t jit_stack_;
jit_modules_t jit_modules_;
extra_args_t extra_args_;
extra_meta_t extra_meta_;
kwinputs_t kwinputs_;
FallbackPair device_fallback_;
bool allow_tf32_cublas_;
std::unique_ptr<perf_counters_t> perf_event_counters_;
};
template <>
struct ExtraFields<EventType::Backend> {
int64_t start_time_us_;
int64_t end_time_us_;
int64_t debug_handle_;
at::RecordScope scope_;
std::string name_;
std::string backend_;
jit_stack_t jit_stack_;
jit_modules_t jit_modules_;
};
template <>
struct ExtraFields<EventType::Vulkan> {
using raw_event_t = std::pair<c10::approx_time_t, vulkan_id_t>;
std::string name_;
int64_t duration_ns_{0};
  // While building the event tree, we want to report a Vulkan event's duration
  // as 0 so that its end time doesn't exceed that of its parent CPU op.
bool in_tree_building_{false};
};
struct RawAllocation {
c10::approx_time_t start_time_;
void* ptr_;
int64_t alloc_size_;
size_t total_allocated_;
size_t total_reserved_;
c10::DeviceType device_type_;
c10::DeviceIndex device_index_;
};
// For performance.
static_assert(c10::is_pod_v<RawAllocation>, "Non-POD member of RawAllocation.");
template <>
struct ExtraFields<EventType::Allocation> : RawAllocation {
ExtraFields(const RawAllocation& allocation) : RawAllocation(allocation) {}
c10::Device device() const {
return {device_type_, device_index_};
}
std::optional<TensorID> id_;
std::optional<AllocationID> allocation_id_;
};
template <>
struct ExtraFields<EventType::OutOfMemory> {
c10::approx_time_t start_time_;
int64_t alloc_size_;
size_t total_allocated_;
size_t total_reserved_;
c10::DeviceType device_type_;
c10::DeviceIndex device_index_;
};
// For performance.
static_assert(
c10::is_pod_v<ExtraFields<EventType::OutOfMemory>>,
"Non-POD member of ExtraFields<EventType::OutOfMemory>.");
struct PyFrameState {
int line_no_;
at::StringView filename_;
at::StringView funcname_;
};
template <typename T, typename Tag>
using strong_t = strong::
type<T, Tag, strong::regular, strong::convertible_to<T>, strong::hashable>;
using PyModuleSelf = strong_t<PyObject*, struct PyModuleSelf_>;
using PyModuleCls = strong_t<PyObject*, struct PyModuleCls_>;
using PyMethod = strong_t</*PyMethodDef*/ void*, struct PyMethod_>;
using PyOptimizerSelf = strong_t<PyObject*, struct PyOptSelf_>;
using PyOptimizerCls = strong_t<PyObject*, struct PyOptimizer_>;
struct NNModuleInfo {
struct ParameterInfo {
std::string name_;
TensorMetadata metadata_;
std::optional<TensorMetadata> grad_metadata_;
};
PyModuleSelf self_;
PyModuleCls cls_;
at::StringView cls_name_;
std::vector<ParameterInfo> parameters_;
// Indicates that `self_` is the kth instance of `cls_` observed.
size_t id_{std::numeric_limits<size_t>::max()};
};
struct OptimizerInfo {
struct ParameterInfo {
TensorMetadata metadata_;
std::optional<TensorMetadata> grad_metadata_;
std::vector<std::pair<std::string, TensorMetadata>> state_;
};
PyOptimizerSelf self_;
PyOptimizerCls cls_;
at::StringView cls_name_;
std::vector<ParameterInfo> parameters_;
};
struct PyExtraFieldsBase {
PyExtraFieldsBase(
c10::time_t end_time_ns,
size_t python_tid,
PyFrameState caller)
: end_time_ns_{end_time_ns},
python_tid_{python_tid},
caller_{std::move(caller)} {}
c10::time_t end_time_ns_;
size_t python_tid_;
PyFrameState caller_;
// kth python event observed. (Used by TensorBoard)
size_t id_{std::numeric_limits<size_t>::max()};
};
template <>
struct ExtraFields<EventType::PyCall> : public PyExtraFieldsBase {
struct args_t {
PyFrameState frame_state_;
std::optional<NNModuleInfo> module_info_;
std::optional<OptimizerInfo> optimizer_info_;
};
ExtraFields(
c10::time_t end_time_ns,
size_t python_tid,
PyFrameState caller,
args_t args)
: PyExtraFieldsBase(end_time_ns, python_tid, std::move(caller)),
callsite_{std::move(args.frame_state_)},
module_{std::move(args.module_info_)},
optimizer_{std::move(args.optimizer_info_)} {}
PyFrameState callsite_;
std::optional<NNModuleInfo> module_;
std::optional<OptimizerInfo> optimizer_;
};
template <>
struct ExtraFields<EventType::PyCCall> : public PyExtraFieldsBase {
using args_t = at::StringView;
ExtraFields(
c10::time_t end_time_ns,
size_t python_tid,
PyFrameState caller,
args_t args)
: PyExtraFieldsBase(end_time_ns, python_tid, std::move(caller)),
function_name_{std::move(args)} {}
at::StringView function_name_;
};
template <>
struct ExtraFields<EventType::Kineto> {
// Mirrors `libkineto::GenericTraceActivity::Flow`. This information is used
// during post processing to properly embed Kineto events into the broader
// profiler tree structure. End users are not generally expected to use these
// fields directly, but they are available for debugging.
struct Flow {
uint32_t id{0};
uint32_t type{0};
uint32_t start{0};
};
std::string name_;
int64_t duration_ns_{0};
uint64_t correlation_id_{0};
libkineto::ActivityType activity_type_;
Flow flow;
std::weak_ptr<Result> linked_activity_{};
};
struct TORCH_API Result : public std::enable_shared_from_this<Result> {
template <typename... Args>
[[nodiscard]] static std::shared_ptr<Result> create(Args... args) {
return std::shared_ptr<Result>(new Result(std::forward<Args>(args)...));
}
template <typename T>
decltype(auto) visit(T&& visitor) {
return std::visit(std::forward<T>(visitor), extra_fields_);
}
template <typename T>
decltype(auto) visit(T&& visitor) const {
return std::visit(std::forward<T>(visitor), extra_fields_);
}
template <typename T, typename Fn>
void visit_if_base(const Fn& fn) const {
visit([&](const auto& extra_fields) {
using extra_fields_t = typename std::remove_cv_t<
typename std::remove_reference_t<decltype(extra_fields)>>;
if constexpr (std::is_base_of_v<T, extra_fields_t>) {
fn(extra_fields);
}
});
}
EventType tag() const {
return visit([](const auto& i) { return deduceTag(i); });
}
std::string name() const;
std::string overload_name() const;
libkineto::ActivityType kinetoType() const;
uint64_t correlationID() const;
int64_t endTimeNS() const;
uint64_t endTID() const;
c10::DeviceType deviceType() const;
int64_t start_time_ns_;
uint64_t start_tid_;
kineto::DeviceAndResource kineto_info_;
std::variant<
ExtraFields<EventType::TorchOp>,
ExtraFields<EventType::Backend>,
ExtraFields<EventType::Vulkan>,
ExtraFields<EventType::Allocation>,
ExtraFields<EventType::OutOfMemory>,
ExtraFields<EventType::PyCall>,
ExtraFields<EventType::PyCCall>,
ExtraFields<EventType::Kineto>>
extra_fields_;
std::weak_ptr<Result> parent_;
std::vector<std::shared_ptr<Result>> children_;
bool finished_{false};
const torch::profiler::impl::kineto::activity_t* kineto_activity_{nullptr};
private:
template <EventType E>
Result(
int64_t start_time_ns,
uint64_t start_tid,
kineto::DeviceAndResource kineto_info,
ExtraFields<E>&& extra_fields)
: start_time_ns_{start_time_ns},
start_tid_{start_tid},
kineto_info_{kineto_info},
extra_fields_{std::move(extra_fields)} {}
template <EventType E>
static EventType deduceTag(const ExtraFields<E>&) {
return E;
}
};
struct KinetoObserverContext : public at::ObserverContext {
struct Event {
TorchOpBasicFields basic_fields_;
c10::approx_time_t start_time_;
// Set in the exit callback.
c10::approx_time_t end_time_{
std::numeric_limits<c10::approx_time_t>::min()};
bool allow_tf32_cublas_;
std::unique_ptr<perf_counters_t> counters_;
extra_meta_t* extra_nccl_meta_{};
};
explicit KinetoObserverContext(Event* event) : event_{event} {}
Event* event_;
FallbackPair* fallback_{nullptr};
};
constexpr int IO_ENCODER_DEFAULT_BLOCK_SIZE = 1024;
constexpr int SCALAR_LIST_LENGTH_LIMIT = 30;
// InputOutputEncoder
//   Stores each op event's shapes, dtypes, and concrete values into a
// contiguous AppendOnlyList so that we no longer create vectors for shapes
// and dtypes on every op. Those vectors can be created during
// post-processing.
// It splits the data into two categories: input shapes and concrete inputs.
class InputOutputEncoder final {
public:
void push(c10::ArrayRef<const c10::IValue> values);
// Used during post-processing to unpack the encoded data.
// Each method returns a "supplier" lambda which takes no arguments;
// invoking the lambda once will return a list of args that represent
// the inputs for one op.
// The data is split into two streams: "input shapes" and "concrete inputs".
// Note: "auto" only works because these are only used in collection.cpp,
// where they are implemented.
auto getInputShapeGenerator();
auto getConcreteInputGenerator();
bool isSupportedScalarList(const c10::IValue& list_candidate);
void clear();
enum class Tag {
Tensor = 0,
UndefinedTensor,
TensorListBegin, // TODO: generalize to other lists.
ScalarList,
Scalar,
Other,
TERMINATOR
};
enum class IOType { Shapes, ConcreteInputs, None };
private:
void push(const at::Tensor& t);
// Implementation detail for getInputShapeGenerator and
// getConcreteInputGenerator
auto getIValueGenerator(const IOType& io_type);
AppendOnlyList<Tag, IO_ENCODER_DEFAULT_BLOCK_SIZE> tags_;
AppendOnlyList<RawTensorMetadata, IO_ENCODER_DEFAULT_BLOCK_SIZE>
tensor_metadata_;
AppendOnlyList<int64_t, IO_ENCODER_DEFAULT_BLOCK_SIZE> tensor_sizes_strides_;
AppendOnlyList<c10::IValue, IO_ENCODER_DEFAULT_BLOCK_SIZE> ivalues_;
};
using perf_profiler_t = torch::profiler::impl::linux_perf::PerfProfiler;
class TORCH_API ThreadLocalSubqueue {
public:
ThreadLocalSubqueue(const uint64_t tid, ProfilerConfig config);
std::unique_ptr<KinetoObserverContext> begin_op(const at::RecordFunction& fn);
template <class... Args>
void emplace_backend_event(Args&&... args) {
backend_events_.emplace_back(std::forward<Args>(args)...);
}
template <class... Args>
void emplace_vulkan_event(Args&&... args) {
vulkan_events_.emplace_back(std::forward<Args>(args)...);
}
template <class... Args>
void emplace_allocation_event(Args&&... args) {
allocations_.emplace_back(std::forward<Args>(args)...);
}
template <class... Args>
void emplace_ooms_event(Args&&... args) {
ooms_.emplace_back(std::forward<Args>(args)...);
}
template <class... Args>
void emplace_py_call(Args&&... args) {
py_calls_.emplace_back(std::forward<Args>(args)...);
}
uint64_t tid() const {
return tid_;
}
const kineto::DeviceAndResource& kineto_info() const {
return kineto_info_;
}
inline void disable_perf_profiler(perf_counters_t& counters) const {
perf_profiler_->Disable(counters);
}
private:
uint64_t tid_;
ProfilerConfig config_;
kineto::DeviceAndResource kineto_info_;
std::unique_ptr<perf_profiler_t> perf_profiler_;
friend class RecordQueue;
// See `containers.h` for block size benchmarks.
static constexpr size_t BlockSize = 512;
struct TorchOpStorage {
// NB: This is a destructive operation.
void materialize(
std::vector<std::shared_ptr<Result>>& out,
std::vector<ProfilerStepInfo>& step_info,
const std::function<c10::time_t(c10::approx_time_t)>& time_converter,
const uint64_t tid,
const kineto::DeviceAndResource& kineto_info);
template <typename T, size_t ChunkSize>
class EventBlock : public std::array<T, ChunkSize> {
public:
EventBlock();
uint64_t correlation_id(const T* ptr) const;
private:
uint64_t id_start_;
};
using event_t = KinetoObserverContext::Event;
class OpList : public AppendOnlyList<event_t, BlockSize, EventBlock> {
public:
template <class... Args>
std::pair<event_t*, uint64_t> emplace_back(Args&&... args);
static uint64_t correlationID(const OpList::Iterator& e);
} op_events_;
// report_input_shapes
InputOutputEncoder inputs_outputs_;
// with_stack (JIT)
AppendOnlyList<jit_stack_t, BlockSize> jit_stack_;
// with_modules
AppendOnlyList<jit_modules_t, BlockSize> jit_modules_;
// with_flops
AppendOnlyList<extra_args_t, BlockSize> extra_args_;
// report extra metadata, i.e. collective communication meta
AppendOnlyList<extra_meta_t, BlockSize> extra_meta_;
// report kwinputs
AppendOnlyList<kwinputs_t, BlockSize> kwinputs_;
// ProfilerState::KINETO_GPU_FALLBACK or
// ProfilerState::KINETO_PRIVATEUSE1_FALLBACK
AppendOnlyList<FallbackPair, BlockSize> device_fallback_;
} torch_ops_;
// reportBackendEventToActiveKinetoProfiler
AppendOnlyList<ExtraFields<EventType::Backend>, BlockSize> backend_events_;
// _reportVulkanEventToProfiler
AppendOnlyList<ExtraFields<EventType::Vulkan>::raw_event_t, BlockSize>
vulkan_events_;
// reportMemoryUsage
AppendOnlyList<RawAllocation, BlockSize> allocations_;
// reportOOMs
AppendOnlyList<ExtraFields<EventType::OutOfMemory>, BlockSize> ooms_;
// with_stack (Python)
AppendOnlyList<
std::pair<python_tracer::TraceKey, c10::approx_time_t>,
BlockSize>
py_calls_;
};
class TORCH_API RecordQueue {
public:
RecordQueue(ProfilerConfig config, std::set<ActivityType> activities);
bool tracePython() const;
ThreadLocalSubqueue* getSubqueue();
void stop();
void restart();
// NB: This is a destructive operation.
std::pair<
std::vector<std::shared_ptr<Result>>,
std::unique_ptr<torch::profiler::impl::kineto::ActivityTraceWrapper>>
getRecords(
std::function<c10::time_t(c10::approx_time_t)> time_converter,
uint64_t start_time_ns,
uint64_t end_time_ns);
private:
uint32_t id_;
ProfilerConfig config_;
std::set<ActivityType> activities_;
ska::flat_hash_map<uint64_t, std::unique_ptr<ThreadLocalSubqueue>>
sub_queues_;
std::mutex sub_queue_mutex_;
std::unique_ptr<python_tracer::PythonTracerBase> python_tracer_;
};
TORCH_API bool get_record_concrete_inputs_enabled();
TORCH_API void set_record_concrete_inputs_enabled_fn(std::function<bool()>);
TORCH_API void set_record_concrete_inputs_enabled_val(bool);
TORCH_API bool get_fwd_bwd_enabled();
TORCH_API void set_fwd_bwd_enabled_fn(std::function<bool()>);
TORCH_API void set_fwd_bwd_enabled_val(bool);
TORCH_API bool get_cuda_sync_enabled();
TORCH_API void set_cuda_sync_enabled_fn(std::function<bool()>);
TORCH_API void set_cuda_sync_enabled_val(bool);
// Comms related RecordFunctions will record information about tensor storage
// locations.
TORCH_API bool get_record_tensor_addrs_enabled();
TORCH_API void set_record_tensor_addrs_enabled_fn(std::function<bool()>);
TORCH_API void set_record_tensor_addrs_enabled_val(bool);
} // namespace torch::profiler::impl
```
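Because `Result` keeps its per-event payload in a `std::variant` of `ExtraFields` specializations, a generic lambda passed to `visit()` can branch on the concrete event type. A hypothetical sketch, shown for the TorchOp case only:
```cpp
#include <string>
#include <type_traits>
#include <torch/csrc/profiler/collection.h>

// Sketch only: return the op name for TorchOp events, empty otherwise.
std::string torch_op_name_or_empty(
    const torch::profiler::impl::Result& result) {
  using torch::profiler::impl::EventType;
  using torch::profiler::impl::ExtraFields;
  return result.visit([](const auto& e) -> std::string {
    if constexpr (std::is_same_v<
                      std::decay_t<decltype(e)>,
                      ExtraFields<EventType::TorchOp>>) {
      return e.name_;
    } else {
      return std::string();
    }
  });
}
```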
|