==================================================================================================================================
SOURCE CODE FILE: irparser.h
LINES: 1
SIZE: 1.11 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\irparser.h
ENCODING: utf-8
```h
#pragma once
#include <optional>
#include <string>
#include <unordered_map>
#include <torch/csrc/Export.h>
namespace torch::jit {
struct Graph;
struct Value;
// \brief Parse IR from \p STR constructing the corresponding IR in \p GRAPH.
// If parse_tensor_constants is true, will construct empty tensors for Tensor
// constants with random or uninitialized contents; otherwise will throw.
TORCH_API void parseIR(
const std::string& str,
torch::jit::Graph* graph,
bool parse_tensor_constants = false);
/** \brief Parse IR from \p STR constructing the corresponding IR in \p GRAPH.
 *
 * \p VMAP is filled with string-to-Value pairs that allow indexing Values in
 * the newly created graph by their name in the original IR string.
 * If parse_tensor_constants is true, will construct empty tensors for Tensor
 * constants with random or uninitialized contents; otherwise will throw.
*/
TORCH_API void parseIR(
const std::string& str,
torch::jit::Graph* graph,
std::unordered_map<std::string, Value*>& vmap,
bool parse_tensor_constants = false);
} // namespace torch::jit
```
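A minimal sketch of how `parseIR` might be driven, assuming the textual IR syntax used throughout the JIT test suite (the `graph(...)`/`%value` notation); the function and value names here are illustrative only:

```cpp
#include <memory>
#include <unordered_map>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>

// Build a two-input add graph from text and look up a value by its IR name.
std::shared_ptr<torch::jit::Graph> build_add_graph() {
  auto graph = std::make_shared<torch::jit::Graph>();
  std::unordered_map<std::string, torch::jit::Value*> vmap;
  torch::jit::parseIR(R"IR(
    graph(%a : Tensor, %b : Tensor):
      %one : int = prim::Constant[value=1]()
      %c : Tensor = aten::add(%a, %b, %one)
      return (%c))IR",
      graph.get(),
      vmap);
  // vmap["c"] now refers to the Value produced by the aten::add node.
  return graph;
}
```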
=====================================================================================================================================
SOURCE CODE FILE: named_value.h
LINES: 1
SIZE: 2.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\named_value.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/frontend/source_range.h>
#include <torch/csrc/jit/ir/constants.h>
#include <torch/csrc/utils/variadic.h>
namespace torch::jit {
struct Value;
/**
* A value with optional extra name and location information. Used during
* schema matching to provide extra error information and resolve kwargs.
*/
struct NamedValue {
NamedValue(const SourceRange& loc, const std::string& name, Value* value)
: loc_(loc), name_(name), value_(value) {}
NamedValue(const SourceRange& loc, Value* value) : loc_(loc), value_(value) {}
/* implicit */ NamedValue(Value* value) : value_(value) {}
NamedValue(const std::string& name, Value* value)
: name_(name), value_(value) {}
/* implicit */ NamedValue(IValue value) : ivalue_(std::move(value)) {}
NamedValue(const std::string& name, IValue value)
: name_(name), ivalue_(std::move(value)) {}
template <
typename T,
typename = std::enable_if_t<
(!std::is_same_v<std::decay_t<T>, NamedValue> &&
!std::is_same_v<std::decay_t<T>, Value*> &&
!std::is_same_v<std::decay_t<T>, IValue>)>>
// NOLINTNEXTLINE(bugprone-forwarding-reference-overload)
NamedValue(T&& t) : NamedValue(IValue(std::forward<T>(t))) {}
template <
typename T,
typename = std::enable_if_t<
(!std::is_same_v<std::decay_t<T>, Value*> &&
!std::is_same_v<std::decay_t<T>, IValue>)>>
NamedValue(const std::string& name, T&& t)
: NamedValue(name, IValue(std::forward<T>(t))) {}
SourceRange locOr(const SourceRange& backup_location) const {
if (!loc_)
return backup_location;
return loc();
}
// note: this will insert a constant node into the graph at the current
// insert point if this NamedValue is actually a constant
Value* value(Graph& g) const {
if (!value_)
return insertConstant(
g, ivalue_); // use insertConstant to remove need to include ir.h here
return value_;
}
const std::string& name() const {
AT_ASSERT(name_);
return *name_;
}
const SourceRange& loc() const {
AT_ASSERT(loc_);
return *loc_;
}
at::TypePtr type() const;
private:
std::optional<SourceRange> loc_;
std::optional<std::string> name_;
Value* value_{nullptr};
// only valid if value_ == nullptr;
IValue ivalue_;
};
} // namespace torch::jit
```
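A hedged sketch of where NamedValue typically shows up: it assumes the Graph::insert overload declared in ir.h, which takes positional arguments and kwargs as NamedValues, so plain constants pass through the implicit IValue constructor. The helper name is made up.

```cpp
#include <torch/csrc/jit/ir/ir.h>

// Hypothetical helper: emit x + alpha * y with alpha passed as a kwarg.
// The int literal 2 is wrapped into a NamedValue via its IValue constructor,
// and schema matching resolves it against aten::add's `alpha` argument.
torch::jit::Value* insert_scaled_add(
    torch::jit::Graph& g,
    torch::jit::Value* x,
    torch::jit::Value* y) {
  return g.insert(
      c10::aten::add, {x, y}, {torch::jit::NamedValue("alpha", 2)});
}
```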
======================================================================================================================================
SOURCE CODE FILE: node_hashing.h
LINES: 1
SIZE: 0.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\node_hashing.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
struct TORCH_API HashNode {
size_t operator()(const Node* k) const;
};
struct TORCH_API EqualNode {
bool operator()(const Node* lhs, const Node* rhs) const;
};
} // namespace torch::jit
```
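These functors exist so Node pointers can key hash containers by structural equality (the bookkeeping CSE-style passes rely on). A small sketch, using only what the header declares:

```cpp
#include <unordered_set>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/node_hashing.h>

// A set keyed by structural node equality rather than pointer identity.
using SeenNodes = std::unordered_set<torch::jit::Node*,
                                     torch::jit::HashNode,
                                     torch::jit::EqualNode>;

// Returns true if a structurally identical node was already recorded.
bool record_or_report_duplicate(SeenNodes& seen, torch::jit::Node* n) {
  // insert() returns {iterator, false} when an equivalent node is present.
  return !seen.insert(n).second;
}
```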
===============================================================================================================================
SOURCE CODE FILE: scope.h
LINES: 1
SIZE: 7.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\scope.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/jit_type.h>
#include <ATen/core/symbol.h>
#include <c10/util/intrusive_ptr.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/frontend/source_range.h>
#include <optional>
#include <unordered_map>
namespace torch::jit {
struct ModuleInstanceInfo;
constexpr size_t kModuleInstanceInfo = 2;
namespace utils {
std::string get_module_info(const ModuleInstanceInfo& module_instance_info);
} // namespace utils
// Scope is a node of a trie that represents the tree of nested scopes.
// Individual scopes are pushed and popped from Graph, which holds a
// pointer to the current scope. Each Node in Graph holds a pointer
// to the scope that was current when the node was created.
// The trie never needs to shrink, it only grows until it is disposed
// of when Graph is deallocated. Hence, pointers to scopes held by nodes
// will always be valid as long as Graph is alive.
struct Scope;
using ScopePtr = c10::intrusive_ptr<Scope>;
using c10::Symbol;
struct TORCH_API Scope : public c10::intrusive_ptr_target {
private:
ScopePtr parent_;
Symbol name_;
ScopePtr intrusive_from_this();
public:
Scope();
Scope(ScopePtr parent, Symbol name);
ScopePtr push(Symbol name);
ScopePtr parent();
bool isRoot() const;
bool isBlank() const;
ScopePtr getRoot();
size_t getDepth();
Symbol name() const;
std::string namesFromRoot(const std::string& separator = "/") const;
};
struct Function;
struct InlinedCallStack;
/**
* ModuleInstanceInfo is a structure to include the module type and instance
 * name. It also provides public methods to get the pointer to the module type
 * and instance name.
* instance name.
*
* This structure is mainly used as a private member in InlinedCallStack, such
* that one can follow the callstack to find the relevant module hierarchy.
*/
struct ModuleInstanceInfo {
private:
c10::ClassTypePtr module_type_{nullptr};
std::string instance_name_;
public:
ModuleInstanceInfo() = default;
ModuleInstanceInfo(c10::ClassTypePtr module_type, std::string instance_name);
c10::ClassTypePtr class_type() {
return module_type_;
}
c10::ClassTypePtr class_type() const {
return module_type_;
}
std::string instance_name() const {
return instance_name_;
}
bool operator==(const ModuleInstanceInfo& rhs) const {
return (class_type() == rhs.class_type()) &&
(instance_name() == rhs.instance_name());
}
};
/**
* InlinedCallStack is an element in a list representing callstack of functions
* that have been inlined.
*
* Each such element holds info about the current callsite (Function and
* SourceRange) and a pointer to the next element in the list. The last element
* in the list represents the innermost function that was inlined.
*
* For instance, if a node has a callstack
* [foo, source_range1] -> [bar, source_range2]
* it means that this node was originally from function 'bar' that was called
* at 'source_range2' in function 'foo' that was called in the current function
* at 'source_range1'.
*
* If a node did not come from any inlined function, its callstack will be
* empty.
*
 * The callstack lists only grow; we never remove elements from them, which
 * allows us to reuse the same elements in different lists. For instance, if we
* inline function 'bar' to 'foo' and then inline 'foo' to two functions 'ham'
* and 'baz', the callstacks would look like:
*
* [baz, source_range3] --
* \
* --> [foo, source_range1] -> [bar, source_range2]
* /
* [ham, source_range4] --
*/
using InlinedCallStackPtr = c10::intrusive_ptr<InlinedCallStack>;
using InlinedCallStackEntry =
std::tuple<Function*, SourceRange, std::optional<ModuleInstanceInfo>>;
struct TORCH_API InlinedCallStack : public c10::intrusive_ptr_target {
private:
std::optional<InlinedCallStackPtr> callee_;
Function* fn_;
// Reason for fn_name_ even though we have fn_:
// a serialized callstack is used in circumstances where an InlinedCallStack
// cannot be constructed at runtime, e.g. the mobile runtime or delegated
// backends. Since in those cases we do not have a Function*, we store the
// function name. fn_name_ does not give access to the same information that
// Function* does; however, in the mobile/delegated-backend runtime we use
// InlinedCallStack for the exception stack, and for that purpose fn_name_
// suffices.
const std::string fn_name_;
SourceRange source_range_;
InlinedCallStackPtr intrusive_from_this();
std::optional<ModuleInstanceInfo> module_instance_info_;
public:
// Constructor for a leaf callstack node.
InlinedCallStack(Function* fn, SourceRange source_range);
// Constructor for a leaf callstack node.
InlinedCallStack(
Function* fn,
SourceRange source_range,
std::optional<ModuleInstanceInfo> module_instance_info);
// Constructor for a leaf callstack node.
InlinedCallStack(
Function* fn,
SourceRange source_range,
std::optional<ModuleInstanceInfo> module_instance_info,
std::string& function_name);
// Constructor for an inner callstack node.
InlinedCallStack(
InlinedCallStackPtr callee,
Function* fn,
SourceRange source_range);
InlinedCallStack(
InlinedCallStackPtr callee,
Function* fn,
SourceRange source_range,
std::optional<ModuleInstanceInfo> module_instance_info);
InlinedCallStack(
InlinedCallStackPtr callee,
Function* fn,
SourceRange source_range,
std::optional<ModuleInstanceInfo> module_instance_info,
std::string& function_name);
// Return next element in the callstack list.
std::optional<InlinedCallStackPtr> callee() const;
// Return module instance associated with the current element.
std::optional<ModuleInstanceInfo> module_instance() const;
// Returns the source range of the node
SourceRange source_range() const;
Function* function() const;
const std::string& function_name() const;
// Return callstack as a vector of [Function, SourceRange] pairs.
std::vector<InlinedCallStackEntry> vec();
void setCallee(std::optional<InlinedCallStackPtr>);
bool operator==(const InlinedCallStack& rhs) const {
// No need to compare fn_, since source_range equivalence check
// should suffice.
return (module_instance().has_value() ==
rhs.module_instance().has_value()) &&
(module_instance().has_value() &&
module_instance().value() == rhs.module_instance().value()) &&
callee() == rhs.callee() && source_range() == rhs.source_range();
}
bool operator!=(const InlinedCallStack& rhs) const {
return !(*this == rhs);
}
};
// {source range, node name, InlinedCallStack}
// We store the node name because the same debug info will be used for
// profiling as well, so we need to know op names too.
using DebugInfoTuple =
std::tuple<SourceRange, std::string, InlinedCallStackPtr>;
constexpr size_t kDebugInfoTupleSourceRangeIndex{0};
constexpr size_t kDebugInfoTupleNodeNameIndex{1};
constexpr size_t kDebugInfoTupleInlinedCSIndex{2};
} // namespace torch::jit
```
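A small sketch of how the callstack list might be walked, using only vec() and the ModuleInstanceInfo accessors declared above (the formatting itself is illustrative):

```cpp
#include <sstream>
#include <string>
#include <torch/csrc/jit/ir/scope.h>

// Render an inlined callstack as "instance_name -> instance_name -> ...",
// falling back to a placeholder when no module instance info is recorded.
std::string format_callstack(const torch::jit::InlinedCallStackPtr& cs) {
  std::ostringstream out;
  for (const auto& entry : cs->vec()) {
    const auto& module_info = std::get<2>(entry);
    out << (module_info ? module_info->instance_name()
                        : std::string("<no module info>"))
        << " -> ";
  }
  return out.str();
}
```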
==========================================================================================================================================
SOURCE CODE FILE: subgraph_matcher.h
LINES: 1
SIZE: 3.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\subgraph_matcher.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
#include <unordered_map>
#include <vector>
namespace torch::jit {
/**
* \brief A structure describing a match of a pattern in a graph.
*
* The structure contains an anchor node, from which the match was found, and
 * match-maps for nodes and values. A match-map specifies the correspondence
 * between nodes in the pattern graph (match-map keys) and nodes in the actual
* graph (match-map values). We keep such maps for both nodes and values.
*/
struct Match {
Node* anchor;
std::unordered_map<const Node*, Node*> nodes_map;
std::unordered_map<const Value*, Value*> values_map;
};
/**
* \brief Find all matches of a \p PATTERN in a \p GRAPH.
*
* The function returns a vector of match-descriptors (see description of
* `struct Match`).
*
* Matching rules:
* - Pattern graph must contain a single block.
* - Matched subgraphs do not span across different blocks.
* - No uses outside the match are allowed, except for Param and Return nodes.
* Basically, we're matching hammocks, not arbitrary subgraphs.
* - The pattern graph must return only one value (i.e. it must have a single
* node leading to return).
* - Nodes that are not used in computation of the return value in the pattern
* graph are ignored during matching (IOW, we're essentially performing DCE on
* the pattern).
 * - Pattern graph nodes cannot alias. TODO: this check is not implemented yet.
 * - Aliasing nodes in the graph cannot constitute a match (i.e. across all
 * found matches, no nodes in the subgraph alias with each other). TODO: check
* not implemented yet.
* - The matcher will not mutate either the pattern graph or the matched graph.
* The matched graph is taken as non-const so that Match may contain non-const
* pointers. This enables clients of this API to use Match to drive mutations.
*
* Note [Multi-output Patterns]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Subgraph matcher provides limited support for multi-output patterns. With a
* single output pattern, a single scan through the graph is sufficient to
* find all the matches: given a starting node (an "anchor"), we can
* deterministically check whether a pattern matches a subgraph corresponding to
* this anchor node. For a general case of multi-output patterns, we would have
* N anchors, which would result in M^N comparisons (M is the size of the
* graph). Clearly this is computationally prohibitive.
*
* To overcome this, we impose some constraints on the multi-output patterns
* that we accept. We require that checking whether the pattern matches a
* subgraph would still be fully determined by a single node in the graph. To
* achieve this, we designate the first output in the pattern as the "main"
* output and assume that we can traverse up from this node to match the
* entire pattern.
*
 * Corollary 1: the order of outputs in the pattern matters!
* Corollary 2: patterns cannot contain any nodes not participating in the main
* output computation.
*/
std::vector<Match> TORCH_API
findPatternMatches(const Graph& pattern, Graph& graph);
} // namespace torch::jit
```
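A hedged sketch of how findPatternMatches is typically combined with parseIR: the pattern below is illustrative, and the untyped IR inputs rely on the parser's defaults as used in the fusion passes:

```cpp
#include <memory>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/ir/subgraph_matcher.h>

// Count add -> relu chains in `graph`. Each Match maps pattern nodes/values
// onto the corresponding nodes/values of the scanned graph.
size_t count_add_relu(torch::jit::Graph& graph) {
  auto pattern = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(R"IR(
    graph(%a, %b, %alpha):
      %sum = aten::add(%a, %b, %alpha)
      %out = aten::relu(%sum)
      return (%out))IR",
      pattern.get());
  return torch::jit::findPatternMatches(*pattern, graph).size();
}
```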
======================================================================================================================================
SOURCE CODE FILE: type_hashing.h
LINES: 1
SIZE: 0.44 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\ir\type_hashing.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/jit_type.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
struct TORCH_API HashType {
size_t operator()(const TypePtr& type) const;
size_t operator()(const c10::ConstTypePtr& type) const;
};
struct EqualType {
bool operator()(const TypePtr& a, const TypePtr& b) const;
bool operator()(const c10::ConstTypePtr& a, const c10::ConstTypePtr& b) const;
};
} // namespace torch::jit
```
==============================================================================================================================
SOURCE CODE FILE: jit_log.h
LINES: 5
SIZE: 4.78 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\jit_log.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <memory>
#include <ostream>
#include <string>
#include <unordered_map>
// `TorchScript` offers a simple logging facility that can be enabled by setting an
// environment variable `PYTORCH_JIT_LOG_LEVEL`.
// Logging is enabled on a per file basis. To enable logging in
// `dead_code_elimination.cpp`, `PYTORCH_JIT_LOG_LEVEL` should be
// set to `dead_code_elimination.cpp` or, simply, to `dead_code_elimination`
// (i.e. `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination`).
// Multiple files can be logged by separating each file name with a colon `:` as
// in the following example,
// `PYTORCH_JIT_LOG_LEVEL=dead_code_elimination:guard_elimination`
// There are 3 logging levels available for your use ordered by the detail level
// from lowest to highest.
// * `GRAPH_DUMP` should be used for printing entire graphs after optimization
// passes
// * `GRAPH_UPDATE` should be used for reporting graph transformations (i.e.
// node deletion, constant folding, etc)
// * `GRAPH_DEBUG` should be used for providing information useful for debugging
// the internals of a particular optimization pass or analysis
// The default logging level is `GRAPH_DUMP` meaning that only `GRAPH_DUMP`
// statements will be enabled when one specifies a file(s) in
// `PYTORCH_JIT_LOG_LEVEL`.
// `GRAPH_UPDATE` can be enabled by prefixing a file name with a `>`, as in
// `>alias_analysis`.
// `GRAPH_DEBUG` can be enabled by prefixing a file name with `>>`, as in
// `>>alias_analysis`.
// `>>>` is also valid and **currently** is equivalent to `GRAPH_DEBUG` as there
// is no logging level that is higher than `GRAPH_DEBUG`.
namespace torch::jit {
struct Node;
struct Graph;
enum class JitLoggingLevels {
GRAPH_DUMP = 0,
GRAPH_UPDATE,
GRAPH_DEBUG,
};
TORCH_API std::string get_jit_logging_levels();
TORCH_API void set_jit_logging_levels(std::string level);
TORCH_API void set_jit_logging_output_stream(std::ostream& out_stream);
TORCH_API std::ostream& get_jit_logging_output_stream();
TORCH_API std::string getHeader(const Node* node);
TORCH_API std::string log_function(const std::shared_ptr<Graph>& graph);
TORCH_API ::torch::jit::JitLoggingLevels jit_log_level();
// Prefix every line in a multiline string \p IN_STR with \p PREFIX.
TORCH_API std::string jit_log_prefix(
const std::string& prefix,
const std::string& in_str);
TORCH_API std::string jit_log_prefix(
::torch::jit::JitLoggingLevels level,
const char* fn,
int l,
const std::string& in_str);
TORCH_API bool is_enabled(
const char* cfname,
::torch::jit::JitLoggingLevels level);
TORCH_API std::ostream& operator<<(
std::ostream& out,
::torch::jit::JitLoggingLevels level);
#define JIT_LOG(level, ...) \
if (is_enabled(__FILE__, level)) { \
::torch::jit::get_jit_logging_output_stream() \
<< ::torch::jit::jit_log_prefix( \
level, __FILE__, __LINE__, ::c10::str(__VA_ARGS__)); \
}
// tries to reconstruct original python source
#define SOURCE_DUMP(MSG, G) \
JIT_LOG( \
::torch::jit::JitLoggingLevels::GRAPH_DUMP, \
MSG, \
"\n", \
::torch::jit::log_function(G));
// use GRAPH_DUMP for dumping graphs after optimization passes
#define GRAPH_DUMP(MSG, G) \
JIT_LOG( \
::torch::jit::JitLoggingLevels::GRAPH_DUMP, MSG, "\n", (G)->toString());
// use GRAPH_UPDATE for reporting graph transformations (i.e. node deletion,
// constant folding, CSE)
#define GRAPH_UPDATE(...) \
JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_UPDATE, __VA_ARGS__);
// use GRAPH_DEBUG to provide information useful for debugging a particular opt
// pass
#define GRAPH_DEBUG(...) \
JIT_LOG(::torch::jit::JitLoggingLevels::GRAPH_DEBUG, __VA_ARGS__);
// use GRAPH_EXPORT to export a graph so that the IR can be loaded by a script
#define GRAPH_EXPORT(MSG, G) \
JIT_LOG( \
::torch::jit::JitLoggingLevels::GRAPH_DEBUG, \
MSG, \
"\n<GRAPH_EXPORT>\n", \
(G)->toString(), \
"</GRAPH_EXPORT>");
#define GRAPH_DUMP_ENABLED \
(is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DUMP))
#define GRAPH_UPDATE_ENABLED \
(is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_UPDATE))
#define GRAPH_DEBUG_ENABLED \
(is_enabled(__FILE__, ::torch::jit::JitLoggingLevels::GRAPH_DEBUG))
} // namespace torch::jit
```
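A sketch of how a pass usually uses these macros; the pass name is made up. With `PYTORCH_JIT_LOG_LEVEL=my_pass` only the GRAPH_DUMP lines print, `>my_pass` adds GRAPH_UPDATE, and `>>my_pass` adds GRAPH_DEBUG:

```cpp
#include <memory>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/jit_log.h>

namespace torch::jit {

// Hypothetical pass skeleton showing the intended level for each macro.
// (Passes live in torch::jit, which also lets JIT_LOG find is_enabled().)
void MyPass(const std::shared_ptr<Graph>& graph) {
  GRAPH_DUMP("Graph before MyPass: ", graph);
  for (Node* n : graph->nodes()) {
    GRAPH_DEBUG("Considering node ", getHeader(n));
    // After rewriting a node, report it at the UPDATE level:
    // GRAPH_UPDATE("Rewrote ", getHeader(n));
  }
  GRAPH_DUMP("Graph after MyPass: ", graph);
}

} // namespace torch::jit
```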
====================================================================================================================================
SOURCE CODE FILE: jit_opt_limit.h
LINES: 1
SIZE: 1.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\jit_opt_limit.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <string>
#include <unordered_map>
// `TorchScript` offers a simple optimization limit checker
// that can be configured through environment variable `PYTORCH_JIT_OPT_LIMIT`.
// The purpose is to limit how many optimizations you can make per pass.
// This is useful for debugging passes.
// The opt limit checker is enabled on a per-file basis (hence per pass). For
// example, in `constant_propagation.cpp`, `PYTORCH_JIT_OPT_LIMIT` should be set
// to `constant_propagation=<opt_limit>`, where <opt_limit> is the number of
// optimizations you want to make for the pass (i.e.
// `PYTORCH_JIT_OPT_LIMIT="constant_propagation=<opt_limit>"`).
// Multiple files can be configured by separating each file name with a colon
// `:` as in the following example,
// `PYTORCH_JIT_OPT_LIMIT="constant_propagation=<opt_limit>:dead_code_elimination=<opt_limit>"`
// You can invoke the opt limiter by calling JIT_OPT_ALLOWED. It returns true if
// we haven't reached the optimization limit yet. Otherwise, it returns
// false. Typical usage:
// if (!JIT_OPT_ALLOWED) {
// GRAPH_DUMP(...); //supplied from jit_log
// return;
// }
namespace torch::jit {
TORCH_API bool opt_limit(const char* pass_name);
#define JIT_OPT_ALLOWED opt_limit(__FILE__)
} // namespace torch::jit
```
==================================================================================================================================
SOURCE CODE FILE: code.h
LINES: 1
SIZE: 1.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\code.h
ENCODING: utf-8
```h
#pragma once
#include <vector>
#include <ATen/core/ivalue.h>
#include <ATen/core/operator_name.h>
#include <torch/csrc/jit/runtime/instruction.h>
namespace torch::jit::mobile {
using Stack = std::vector<c10::IValue>;
using DebugHandle = int64_t;
class Function;
struct Code {
std::vector<Instruction> instructions_;
std::vector<DebugHandle> debug_handles_;
std::vector<c10::OperatorName> op_names_;
std::vector<int> operator_input_sizes_;
std::vector<std::function<void(Stack&)>> operators_;
std::vector<c10::IValue> constants_;
std::vector<c10::TypePtr> types_;
// TODO After we actually export CALL instructions we can remove this.
// We may need a two-stage importing scheme, where we first construct all
// function objects, and then append referenced function pointers. This could
// be done in parseMethods().
std::vector<mobile::Function*> functions_;
size_t register_size_ = 0; // Aggregated output size.
// initialized means operators_ array is filled with operators
bool initialized = false;
};
} // namespace torch::jit::mobile
```
========================================================================================================================================
SOURCE CODE FILE: debug_info.h
LINES: 1
SIZE: 2.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\debug_info.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/flat_hash_map.h>
#include <caffe2/serialize/inline_container.h>
#include <torch/csrc/jit/api/compilation_unit.h>
#include <torch/csrc/jit/ir/scope.h>
#include <torch/csrc/jit/serialization/source_range_serialization.h>
namespace torch::jit {
/*
* MobileDebugTable:
 * Deserializes debug_pkl and callstack_map records from a PT model's zip
 * archive and stores them in a map of debug handles to DebugInfoPair. Debug
 * handles are unique per model and runtime; whether in the lite interpreter or
 * a delegated backend, a BackendRuntimeException should be raised using debug
 * handles. The getSourceDebugString method is responsible for translating
 * debug handles into the corresponding debug information.
 * This debug information includes the stack trace of model-level source code
 * and the module hierarchy where the exception occurred.
*/
class MobileDebugTable {
public:
MobileDebugTable() = default;
MobileDebugTable(
std::unique_ptr<caffe2::serialize::PyTorchStreamReader>& reader,
const std::shared_ptr<CompilationUnit>& cu);
template <typename It>
MobileDebugTable(It begin, It end) : callstack_ptr_map_(begin, end) {}
std::string getSourceDebugString(
const int64_t debug_handle,
const std::string& top_module_type_name = "ModuleTypeUnknown") const;
std::string getSourceDebugString(
const std::vector<int64_t>& debug_handles,
const std::string& top_module_type_name = "ModuleTypeUnknown") const;
std::string getModuleHierarchyInfo(
const int64_t debug_handle,
const std::string& top_module_type_name = "ModuleTypeUnknown") const;
std::string getModuleHierarchyInfo(
const std::vector<int64_t>& debug_handles,
const std::string& top_module_type_name = "ModuleTypeUnknown") const;
const ska::flat_hash_map<int64_t, DebugInfoTuple>& getCallStackPtrMap()
const {
return callstack_ptr_map_;
}
private:
std::pair<std::string, std::string> getSourceDebugModuleHierarchyInfo(
const std::vector<int64_t>& debug_handles,
const std::string& top_module_type_name = "ModuleTypeUnknown") const;
ska::flat_hash_map<int64_t, DebugInfoTuple> callstack_ptr_map_;
};
} // namespace torch::jit
```
=========================================================================================================================================
SOURCE CODE FILE: file_format.h
LINES: 3
SIZE: 6.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\file_format.h
ENCODING: utf-8
```h
#pragma once
#include <array>
#include <cerrno>
#include <cstddef>
#include <cstring>
#include <fstream>
#include <istream>
#include <memory>
#include <c10/core/CPUAllocator.h>
#include <c10/core/impl/alloc_cpu.h>
#include <caffe2/serialize/read_adapter_interface.h>
#if defined(HAVE_MMAP)
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif
/**
* @file
*
* Helpers for identifying file formats when reading serialized data.
*
* Note that these functions are declared inline because they will typically
* only be called from one or two locations per binary.
*/
namespace torch::jit {
/**
* The format of a file or data stream.
*/
enum class FileFormat {
UnknownFileFormat = 0,
FlatbufferFileFormat,
ZipFileFormat,
};
/// The size of the buffer to pass to #getFileFormat(), in bytes.
constexpr size_t kFileFormatHeaderSize = 8;
constexpr size_t kMaxAlignment = 16;
/**
 * Returns the likely file format based on the magic header bytes in @p data,
* which should contain the first bytes of a file or data stream.
*/
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static inline FileFormat getFileFormat(const char* data) {
// The size of magic strings to look for in the buffer.
static constexpr size_t kMagicSize = 4;
// Bytes 4..7 of a Flatbuffer-encoded file produced by
// `flatbuffer_serializer.h`. (The first four bytes contain an offset to the
// actual Flatbuffer data.)
static constexpr std::array<char, kMagicSize> kFlatbufferMagicString = {
'P', 'T', 'M', 'F'};
static constexpr size_t kFlatbufferMagicOffset = 4;
// The first four bytes of a ZIP file.
static constexpr std::array<char, kMagicSize> kZipMagicString = {
'P', 'K', '\x03', '\x04'};
// Note that we check for Flatbuffer magic first. Since the first four bytes
// of flatbuffer data contain an offset to the root struct, it's theoretically
// possible to construct a file whose offset looks like the ZIP magic. On the
// other hand, bytes 4-7 of ZIP files are constrained to a small set of values
// that do not typically cross into the printable ASCII range, so a ZIP file
// should never have a header that looks like a Flatbuffer file.
if (std::memcmp(
data + kFlatbufferMagicOffset,
kFlatbufferMagicString.data(),
kMagicSize) == 0) {
// Magic header for a binary file containing a Flatbuffer-serialized mobile
// Module.
return FileFormat::FlatbufferFileFormat;
} else if (std::memcmp(data, kZipMagicString.data(), kMagicSize) == 0) {
// Magic header for a zip file, which we use to store pickled sub-files.
return FileFormat::ZipFileFormat;
}
return FileFormat::UnknownFileFormat;
}
/**
* Returns the likely file format based on the magic header bytes of @p data.
* If the stream position changes while inspecting the data, this function will
* restore the stream position to its original offset before returning.
*/
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static inline FileFormat getFileFormat(std::istream& data) {
FileFormat format = FileFormat::UnknownFileFormat;
std::streampos orig_pos = data.tellg();
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
std::array<char, kFileFormatHeaderSize> header;
data.read(header.data(), header.size());
if (data.good()) {
format = getFileFormat(header.data());
}
data.seekg(orig_pos, data.beg);
return format;
}
/**
* Returns the likely file format based on the magic header bytes of the file
* named @p filename.
*/
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static inline FileFormat getFileFormat(const std::string& filename) {
std::ifstream data(filename, std::ifstream::binary);
return getFileFormat(data);
}
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static void file_not_found_error() {
std::stringstream message;
message << "Error while opening file: ";
if (errno == ENOENT) {
message << "no such file or directory" << '\n';
} else {
message << "error no is: " << errno << '\n';
}
TORCH_CHECK(false, message.str());
}
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static inline std::tuple<std::shared_ptr<char>, size_t> get_file_content(
const char* filename) {
#if defined(HAVE_MMAP)
int fd = open(filename, O_RDONLY);
if (fd < 0) {
// failed to open file, chances are it's no such file or directory.
file_not_found_error();
}
struct stat statbuf {};
fstat(fd, &statbuf);
size_t size = statbuf.st_size;
void* ptr = mmap(nullptr, statbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
close(fd);
auto deleter = [statbuf](char* ptr) { munmap(ptr, statbuf.st_size); };
std::shared_ptr<char> data(reinterpret_cast<char*>(ptr), deleter);
#else
FILE* f = fopen(filename, "rb");
if (f == nullptr) {
file_not_found_error();
}
fseek(f, 0, SEEK_END);
size_t size = ftell(f);
fseek(f, 0, SEEK_SET);
// make sure buffer size is multiple of alignment
size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
std::shared_ptr<char> data(
static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
fread(data.get(), size, 1, f);
fclose(f);
#endif
return std::make_tuple(data, size);
}
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static inline std::tuple<std::shared_ptr<char>, size_t> get_stream_content(
std::istream& in) {
// get size of the stream and reset to orig
std::streampos orig_pos = in.tellg();
in.seekg(orig_pos, std::ios::end);
const long size = in.tellg();
in.seekg(orig_pos, in.beg);
// read stream
// NOLINT make sure buffer size is multiple of alignment
size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment;
std::shared_ptr<char> data(
static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
in.read(data.get(), size);
// reset stream to original position
in.seekg(orig_pos, in.beg);
return std::make_tuple(data, size);
}
// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration)
static inline std::tuple<std::shared_ptr<char>, size_t> get_rai_content(
caffe2::serialize::ReadAdapterInterface* rai) {
size_t buffer_size = (rai->size() / kMaxAlignment + 1) * kMaxAlignment;
std::shared_ptr<char> data(
static_cast<char*>(c10::alloc_cpu(buffer_size)), c10::free_cpu);
rai->read(
0, data.get(), rai->size(), "Loading ReadAdapterInterface to bytes");
return std::make_tuple(data, buffer_size);
}
} // namespace torch::jit
```
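A hedged sketch of how the detection helpers might be used to pick a loader; it assumes the loader entry points declared in flatbuffer_loader.h and import.h, which appear later in this dump:

```cpp
#include <string>
#include <torch/csrc/jit/mobile/file_format.h>
#include <torch/csrc/jit/mobile/flatbuffer_loader.h>
#include <torch/csrc/jit/mobile/import.h>
#include <torch/csrc/jit/mobile/module.h>

// Dispatch on the magic bytes: flatbuffer modules and zip (pickle) archives
// go through different entry points.
torch::jit::mobile::Module load_any_mobile_module(const std::string& path) {
  switch (torch::jit::getFileFormat(path)) {
    case torch::jit::FileFormat::FlatbufferFileFormat:
      return torch::jit::load_mobile_module_from_file(path);
    case torch::jit::FileFormat::ZipFileFormat:
      return torch::jit::_load_for_mobile(path);
    default:
      TORCH_CHECK(false, "Unrecognized mobile module format: ", path);
  }
}
```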
===============================================================================================================================================
SOURCE CODE FILE: flatbuffer_loader.h
LINES: 1
SIZE: 4.98 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\flatbuffer_loader.h
ENCODING: utf-8
```h
#pragma once
#include <istream>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include <ATen/core/ivalue.h>
#include <c10/core/Device.h>
#include <c10/macros/Macros.h>
#include <torch/csrc/jit/mobile/module.h>
#include <optional>
/**
* Defines the public API for loading flatbuffer-serialized mobile modules.
* Note that this header must not include or depend on flatbuffer-defined
* types, to avoid leaking those details to PyTorch clients.
*/
namespace torch::jit {
/// All non-copied data pointers provided to `parse_and_initialize_*` functions
/// must be aligned to this boundary. Since the Module will point directly into
/// the data, this alignment is necessary to ensure that certain types/structs
/// are properly aligned.
constexpr size_t kFlatbufferDataAlignmentBytes = 16;
/// Maps file names to file contents.
using ExtraFilesMap = std::unordered_map<std::string, std::string>;
// At a high level, to produce a Module from a file on disk, we need to go
// through the following steps:
// 1. Read: Read the file from disk -> memory
// 2. Deserialize: Parse the bytes to produce some in memory manipulable
// structure
// 3. Module initialization: Produce mobile::Module out of the structure
// produced in 2.
// In this context, the structure described in step 2 is the flatbuffer-defined
// type mobile::serialization::Module. However, this step/type is not visible in
// the public API.
// Parse a mobile::Module from raw bytes.
//
// This function does steps 2+3 described above.
//
// Does not take ownership of `data`; if you want it to take ownership, see the
// shared_ptr overload of this function.
//
// If should_copy_tensor_memory is true, then the returned module will NOT have
// references to `data`, so `data` can be freed immediately.
//
// If should_copy_tensor_memory is false, then the returned module will have
// tensors that point into `data`; the caller will need to make sure that `data`
// outlives the returned Module. Also, `data` must be aligned to
// kFlatbufferDataAlignmentBytes.
TORCH_API mobile::Module parse_and_initialize_mobile_module(
void* data,
size_t size, // of `data`, in bytes.
std::optional<at::Device> device = std::nullopt,
ExtraFilesMap* extra_files = nullptr,
bool should_copy_tensor_memory = false);
// Parse a mobile::Module from raw bytes.
//
// This function does steps 2+3 described above.
//
// The returned Module holds a reference to `data`, which must be aligned to
// kFlatbufferDataAlignmentBytes.
//
// If you do not want the Module to hold a reference to `data`, see the raw
// pointer overload of this function.
TORCH_API mobile::Module parse_and_initialize_mobile_module(
std::shared_ptr<char> data,
size_t size, // of `data`, in bytes.
std::optional<at::Device> device = std::nullopt,
ExtraFilesMap* extra_files = nullptr);
// Parse a mobile::Module from raw bytes, also returning JIT-related metadata.
//
// This is the same as parse_and_initialize_mobile_module() except that it also
// extracts JIT source files and constants. Can be used to construct a
// jit::Module.
TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit(
void* data,
size_t size, // of `data`, in bytes.
ExtraFilesMap& jit_sources,
std::vector<IValue>& jit_constants,
std::optional<at::Device> device = std::nullopt,
ExtraFilesMap* extra_files = nullptr);
// Load a mobile::Module from a filepath.
//
// This function does steps 1+2+3 described above.
//
// We need to have this as a convenience because the Python API will need to
// wrap this. C++ clients should use one of the versions of
// parse_and_initialize_mobile_module() so they can manage the raw data more
// directly.
TORCH_API mobile::Module load_mobile_module_from_file(
const std::string& filename,
std::optional<at::Device> device = std::nullopt,
ExtraFilesMap* extra_files = nullptr);
TORCH_API uint64_t get_bytecode_version(std::istream& in);
TORCH_API uint64_t get_bytecode_version(const std::string& filename);
TORCH_API uint64_t get_bytecode_version_from_bytes(char* flatbuffer_content);
TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer(
char* flatbuffer_content);
// The methods below are less efficient because they need to read the stream in
// its entirety into a buffer.
TORCH_API mobile::Module load_mobile_module_from_stream_with_copy(
std::istream& in,
std::optional<at::Device> device = std::nullopt,
ExtraFilesMap* extra_files = nullptr);
TORCH_API mobile::Module parse_flatbuffer_no_object(
std::shared_ptr<char> data,
size_t size,
std::optional<at::Device> device);
TORCH_API mobile::Module parse_and_initialize_mobile_module(
void* data,
size_t,
std::optional<at::Device>,
ExtraFilesMap* extra_files,
bool should_copy_tensor_memory);
// no op, TODO(qihan) delete
TORCH_API bool register_flatbuffer_loader();
} // namespace torch::jit
```
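A minimal sketch tying this to file_format.h above: read the whole file into a buffer with get_file_content and hand the shared_ptr overload ownership of it (the buffer alignment is assumed to satisfy kFlatbufferDataAlignmentBytes, which those helpers provide in practice):

```cpp
#include <string>
#include <utility>
#include <torch/csrc/jit/mobile/file_format.h>
#include <torch/csrc/jit/mobile/flatbuffer_loader.h>

// Steps 1 (read) and 2+3 (deserialize + initialize) done explicitly.
torch::jit::mobile::Module load_flatbuffer_module(const std::string& path) {
  auto [data, size] = torch::jit::get_file_content(path.c_str());
  // The shared_ptr overload keeps `data` alive as long as the Module needs it,
  // so tensors may point directly into the buffer.
  return torch::jit::parse_and_initialize_mobile_module(std::move(data), size);
}
```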
===================================================================================================================================
SOURCE CODE FILE: frame.h
LINES: 1
SIZE: 0.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\frame.h
ENCODING: utf-8
```h
#pragma once
#include <cstddef>
#include <torch/csrc/jit/mobile/code.h>
#include <optional>
namespace torch::jit::mobile {
class Frame {
public:
explicit Frame(const Code& code) : code_(code) {}
const Code& getCode() const {
return code_;
}
void step() {
pc_++;
}
void jump(size_t n) {
pc_ += n;
}
size_t getPC() const {
return pc_;
}
const Instruction& getInstruction() const {
return code_.instructions_.at(pc_);
}
std::optional<int64_t> getDebugHandle() const {
return getDebugHandle(pc_);
}
std::optional<int64_t> getDebugHandle(size_t pc) const {
if (pc >= code_.debug_handles_.size()) {
return {};
}
return code_.debug_handles_[pc];
}
private:
const Code& code_;
size_t pc_{0};
};
} // namespace torch::jit::mobile
```
======================================================================================================================================
SOURCE CODE FILE: function.h
LINES: 1
SIZE: 2.91 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\function.h
ENCODING: utf-8
```h
#pragma once
#include <vector>
#include <ATen/core/function.h>
#include <ATen/core/function_schema.h>
#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/mobile/code.h>
namespace torch::jit {
enum OpCode : uint8_t;
struct Instruction;
struct OperatorString;
namespace mobile {
class TORCH_API Function : public torch::jit::Function {
public:
explicit Function(c10::QualifiedName name);
Function(
c10::QualifiedName name,
Code code,
std::optional<c10::FunctionSchema> schema);
void run(Stack& stack) override;
at::IValue operator()(Stack& stack);
void ensure_defined() override {}
size_t num_inputs() const override;
const c10::QualifiedName& qualname() const override;
bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) override;
// NOTE: the APIs below are dangerous: if you call append_instruction with
// dbg_handle and then call it without, the dbg_handles will become
// misaligned. Therefore only use ONE variant at a time.
void append_instruction(OpCode op, int64_t X, int64_t N, int64_t dbg_handle);
void append_instruction(OpCode op, int64_t X, int64_t N);
void append_operator(
const std::string& name,
const std::string& overload_name,
const std::optional<int>& num_specified_args);
void append_constant(const c10::IValue& constant);
void append_type(const c10::TypePtr& type);
void append_function(mobile::Function& func);
void set_register_size(size_t size);
int64_t get_debug_handle(size_t pc) const;
const Code& get_code() const;
Code& get_code();
torch::jit::Function& setSchema(c10::FunctionSchema schema) override;
bool hasSchema() const;
const c10::FunctionSchema& getSchema() const override;
// Returns the debug handles corresponding to where the execution
// is halted due to an exception.
// If no corresponding debug handle is found then -1 is returned.
const std::vector<int64_t>& getExceptionDebugHandles() const;
static Function& registerFunc(
const std::string& qualified_name,
const std::vector<Instruction>& instructions,
const std::vector<c10::IValue>& constants,
const std::vector<c10::TypePtr>& types,
const size_t register_size);
// If not initialized, initialize by loading operators.
// Returns true if all ops loaded; returns false if some op is not found
// in the current runtime. In that case, the ops that were not found are
// filled into unsupported_op_names.
bool initialize_operators(bool should_check_operators);
private:
c10::QualifiedName name_;
Code code_;
std::optional<c10::FunctionSchema> schema_; // (byte-code version 4+)
};
std::optional<std::function<void(Stack&)>> makeOperatorFunction(
const c10::OperatorName& opname,
std::optional<int> num_specified_args);
TORCH_API std::string operator_str(const c10::OperatorName& opname);
} // namespace mobile
} // namespace torch::jit
```
====================================================================================================================================
SOURCE CODE FILE: import.h
LINES: 1
SIZE: 3.86 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\import.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/mobile/parse_operators.h>
#include <istream>
#include <memory>
#include <caffe2/serialize/file_adapter.h>
namespace torch::jit {
using caffe2::serialize::ReadAdapterInterface;
using ExtraFilesMap = std::unordered_map<std::string, std::string>;
constexpr const char* kArchiveNameBytecode = "bytecode";
constexpr const char* kArchiveNameConstants = "constants";
constexpr const char* kArchiveNameVersion = "version";
// The family of methods below load a serialized Mobile Module
// into a mobile::Module object.
TORCH_API mobile::Module _load_for_mobile(
std::istream& in,
std::optional<at::Device> device,
ExtraFilesMap& extra_file,
uint64_t module_load_options = kDefaultMobileLoadOptions);
TORCH_API mobile::Module _load_for_mobile(
const std::string& filename,
std::optional<at::Device> device,
ExtraFilesMap& extra_files);
TORCH_API mobile::Module _load_for_mobile(
std::unique_ptr<ReadAdapterInterface> rai,
std::optional<c10::Device> device,
ExtraFilesMap& extra_files,
uint64_t module_load_options = kDefaultMobileLoadOptions);
TORCH_API mobile::Module _load_for_mobile(
const std::string& filename,
std::optional<at::Device> device,
ExtraFilesMap& extra_files,
uint64_t module_load_options);
TORCH_API mobile::Module _load_for_mobile(
std::istream& in,
std::optional<at::Device> device = std::nullopt);
TORCH_API mobile::Module _load_for_mobile(
const std::string& filename,
std::optional<at::Device> device = std::nullopt);
TORCH_API mobile::Module _load_for_mobile(
std::unique_ptr<ReadAdapterInterface> rai,
std::optional<c10::Device> device = std::nullopt);
/**
* Load only the contents of the "extra/" files whose names are
* passed in the map (extra_files). Populate the corresponding values
* with the contents of those files. Do not attempt to load the entire
* model, and stop once the extra files have been extracted.
*
* This API is needed to be able to load GPU models on linux CPU
* machines and extract only the extra files so that we can inspect
* the metadata that was added to the .ptl archive when it was
* generated.
*
*/
void _load_extra_only_for_mobile(
const std::string& filename,
std::optional<at::Device> device,
ExtraFilesMap& extra_files);
// Currently used by both mobile/import.cpp and model_compatibility.cpp.
// Should be removed after model_compatibility.cpp starts using the simplified
// version of type_resolver and obj_loader.
at::TypePtr resolveTypeNameMobile(
const c10::QualifiedName& qn,
const std::shared_ptr<CompilationUnit>& compilation_unit);
c10::StrongTypePtr typeResolverMobile(
const c10::QualifiedName& qn,
const std::shared_ptr<CompilationUnit>& compilation_unit);
c10::intrusive_ptr<c10::ivalue::Object> objLoaderMobile(
const at::StrongTypePtr& type,
const at::IValue& input,
mobile::CompilationUnit& mobile_compilation_unit);
// Given a reader, which has access to a model file,
// return true if there exist tensors in the `bytecode` archive.
bool isTensorInBytecodeArchive(
caffe2::serialize::PyTorchStreamReader& stream_reader);
namespace mobile {
/**
* Given a torch::jit::mobile::Module, return a set of operator names
* (with overload name) that are used by any method in this mobile
 * Module. This method runs through the bytecode for all methods
* in the specified model (module), and extracts all the root
* operator names. Root operators are operators that are called
* directly by the model (as opposed to non-root operators, which
* may be called transitively by the root operators).
*
*/
TORCH_API std::set<std::string> _export_operator_list(
torch::jit::mobile::Module& module);
} // namespace mobile
} // namespace torch::jit
```
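A sketch of the common loading path, assuming a .ptl archive on disk; the "metadata.json" key is only an example of an extra file one might request:

```cpp
#include <optional>
#include <string>
#include <torch/csrc/jit/mobile/import.h>

// Load on CPU and pull one named file out of the archive's extra/ directory.
torch::jit::mobile::Module load_with_extra_files(const std::string& path) {
  torch::jit::ExtraFilesMap extra_files{{"metadata.json", ""}};
  auto module = torch::jit::_load_for_mobile(
      path, /*device=*/std::nullopt, extra_files);
  // If present, extra_files["metadata.json"] now holds that file's contents.
  return module;
}
```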
=========================================================================================================================================
SOURCE CODE FILE: import_data.h
LINES: 1
SIZE: 1.01 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\import_data.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/TensorBase.h>
#include <c10/core/Device.h>
#include <torch/csrc/jit/mobile/module.h>
#include <optional>
#include <istream>
#include <map>
#include <string>
namespace torch::jit {
/**
* Loads named parameters from the serialized data in @p in.
*
* Calls #TORCH_CHECK() if the data format is not recognized.
*/
TORCH_API std::map<std::string, at::Tensor> _load_parameters(
std::istream& in,
std::optional<at::Device> device = std::nullopt);
/**
* Loads named parameters from the serialized data in @p filename.
*
* Calls #TORCH_CHECK() if the data format is not recognized.
*/
TORCH_API std::map<std::string, at::Tensor> _load_parameters(
const std::string& filename,
std::optional<at::Device> device = std::nullopt);
// NOTE: Please prefer using _load_parameters over using the function below.
TORCH_API std::map<std::string, at::Tensor> mobile_module_to_parameter_map(
const mobile::Module& module);
} // namespace torch::jit
```
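A small usage sketch: the returned map is an ordinary name-to-tensor map, so standard iteration applies.

```cpp
#include <cstdint>
#include <string>
#include <torch/csrc/jit/mobile/import_data.h>

// Sum the element counts of all serialized parameters in a file.
int64_t count_parameter_elements(const std::string& path) {
  auto params = torch::jit::_load_parameters(path);
  int64_t total = 0;
  for (const auto& named_tensor : params) {
    total += named_tensor.second.numel();
  }
  return total;
}
```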
==================================================================================================================================================
SOURCE CODE FILE: import_export_common.h
LINES: 1
SIZE: 0.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\import_export_common.h
ENCODING: utf-8
```h
#pragma once
/**
* @file
* Declarations shared between import_data.cpp and export_data.cpp
*/
namespace torch::jit::mobile::internal {
/**
* The name of the mobile::Module attribute which contains saved parameters, as
* a Dict of names to Tensors. Only used for Flatbuffer serialization.
*/
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
constexpr char kSavedParametersAttributeName[] = "data";
} // namespace torch::jit::mobile::internal
```
=========================================================================================================================================
SOURCE CODE FILE: interpreter.h
LINES: 1
SIZE: 0.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\interpreter.h
ENCODING: utf-8
```h
#pragma once
#include <vector>
#include <torch/csrc/jit/mobile/code.h>
#include <torch/csrc/jit/mobile/frame.h>
namespace torch::jit::mobile {
struct InterpreterState {
TORCH_API explicit InterpreterState(const Code& code);
TORCH_API bool run(Stack& stack);
private:
void enterFrame(const Code&);
void leaveFrame();
void saveExceptionDebugHandles();
void callFunction(torch::jit::Function& f, Stack& stack);
c10::IValue& reg(size_t reg);
std::vector<c10::IValue> registers_;
std::vector<Frame> frames_;
};
const std::vector<DebugHandle>& getInterpretersExceptionDebugHandles();
} // namespace torch::jit::mobile
```
====================================================================================================================================
SOURCE CODE FILE: method.h
LINES: 1
SIZE: 0.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\method.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/mobile/function.h>
namespace torch::jit::mobile {
class Module;
struct TORCH_API Method {
Method(const Module* owner, Function* function);
void run(Stack& stack) const;
void run(Stack&& stack) const {
run(stack);
}
c10::IValue operator()(std::vector<c10::IValue> stack) const;
const std::string& name() const {
return function_->name();
}
int64_t get_debug_handle(size_t pc) const {
return function_->get_debug_handle(pc);
}
Function& function() const {
return *function_;
}
private:
// Methods are uniquely owned by a single module.
// This raw pointer allows referencing the module
const Module* owner_;
// Underlying unbound function
Function* function_;
};
} // namespace torch::jit::mobile
```
====================================================================================================================================
SOURCE CODE FILE: module.h
LINES: 1
SIZE: 5.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\module.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/jit_type.h>
#include <torch/csrc/jit/mobile/debug_info.h>
#include <torch/csrc/jit/mobile/function.h>
#include <torch/csrc/jit/mobile/method.h>
#include <torch/csrc/jit/mobile/quantization.h>
#include <utility>
namespace torch::jit::mobile {
using Stack = std::vector<c10::IValue>;
// A CompilationUnit object is the one that gets executed by the lite
// interpreter.
//
// A CompilationUnit object contains a list of Method Objects. These are methods
// that appear in the original PyTorch Model. These methods correspond to Python
// member functions of the Model class.
//
// Methods in turn contain a Function, and a back-pointer to the Module that
// owns this Method instance.
//
// A Function contains a Code Object (code_) which is defined in interpreter.h
//
// A Code object contains the following:
//
// std::vector<Instruction> instructions_;
// std::vector<c10::OperatorName> op_names_;
// std::vector<std::function<void(Stack&)>> operators_;
// std::vector<c10::IValue> constants_;
// std::vector<c10::TypePtr> types_;
// size_t register_size_; // Aggregated output size.
//
class CompilationUnit {
public:
void register_function(std::unique_ptr<Function> fn);
std::vector<std::unique_ptr<Function>>& methods() {
return methods_;
}
const std::vector<std::unique_ptr<Function>>& methods() const {
return methods_;
}
Function* find_function(const c10::QualifiedName& qn);
const Function* find_function(const c10::QualifiedName& qn) const;
void unsafeRemoveFunction(const int64_t index) {
methods_.erase(methods_.begin() + index);
}
private:
std::vector<std::unique_ptr<Function>> methods_;
};
// A Torch Mobile Module is a representation of the model (already trained, in
// the case of inference). A Mobile Module contains
//
// 1. data (object_)
// 2. metadata (optional) about the model (metadata_ from the metadata.pkl
// file added after training)
// 3. Compilation Unit (cu_)
//
class TORCH_API Module {
public:
Module(
c10::intrusive_ptr<c10::ivalue::Object> object,
std::shared_ptr<CompilationUnit> cu)
: object_(std::move(object)), cu_(std::move(cu)) {}
Module() = default;
Method get_method(const std::string& method_name) const;
template <typename... Types>
c10::IValue run_method(const std::string& method_name, Types&&... args) {
return get_method(method_name)({IValue(std::forward<Types>(args))...});
}
c10::IValue forward(std::vector<c10::IValue> inputs) {
return get_method("forward")(std::move(inputs));
}
std::optional<Method> find_method(const std::string& basename) const;
const std::string name() const {
return object_->name();
}
const std::vector<at::IValue>& slots() const {
return object_->slots();
}
const c10::intrusive_ptr<c10::ivalue::Object> _ivalue() const {
return object_;
}
const std::vector<at::Tensor> parameters() const;
const std::map<std::string, at::Tensor> named_parameters() const;
std::string get_forward_method_debug_info(int64_t debug_handle) const;
std::string getModuleHierarchy(const int64_t debug_handle) const;
std::string getCallStack(const int64_t debug_handle) const;
/// Enables "training" mode.
void train(bool on = true);
/// Calls train(false) to enable "eval" mode.
void eval() {
train(/*on=*/false);
}
/// True if the module is in training mode.
bool is_training() const;
const std::unordered_map<std::string, std::string> getMetadata() const {
return metadata_;
}
void setMetadata(
const std::unordered_map<std::string, std::string>& metadata) {
metadata_ = metadata;
}
const std::vector<Method> get_methods() const;
c10::IValue attr(const std::string& name, c10::IValue or_else) const {
if (auto r = object_->type()->findAttributeSlot(name)) {
return object_->getSlot(*r);
}
if (auto r = object_->type()->findConstantSlot(name)) {
return object_->type()->getConstant(*r);
}
return or_else;
}
void setDebugTable(MobileDebugTable&& debug_table) {
debug_table_ = std::move(debug_table);
}
const MobileDebugTable& getDebugTable() const {
return debug_table_;
}
void setHasDebugHandles(bool has_debug_handles) {
has_debug_handles_ = has_debug_handles;
}
bool hasDebugHandles() const {
return has_debug_handles_;
}
const CompilationUnit& compilation_unit() const {
return *cu_;
}
void set_delete_memory(std::shared_ptr<char> delete_mem) {
mem_to_delete_ = std::move(delete_mem);
}
void set_min_operator_version(int64_t version) {
min_operator_version_ = version;
}
int64_t min_operator_version() const {
return min_operator_version_;
}
void set_bytecode_version(int64_t version) {
bytecode_version_ = version;
}
int64_t bytecode_version() const {
return bytecode_version_;
}
private:
friend class quantization::PTQQuanizationHelper;
bool compareMethodSchemas(
const std::string& name_1,
const std::string& name_2);
void unsafeRemoveMethod(const std::string& basename);
void unsafeCopyMethod(
const std::string& new_method_name,
const Function& to_be_copied);
c10::intrusive_ptr<c10::ivalue::Object> object_;
std::unordered_map<std::string, std::string> metadata_;
std::shared_ptr<CompilationUnit> cu_;
MobileDebugTable debug_table_;
bool has_debug_handles_ = false;
int64_t min_operator_version_ = 4;
int64_t bytecode_version_ = 4;
// Extra handle for the module to delete when itself is deleted
std::shared_ptr<char> mem_to_delete_;
};
struct TORCH_API ModuleInfo {
uint64_t bytecode_version;
uint64_t operator_version;
std::unordered_map<std::string, int> opname_to_num_args;
std::unordered_set<std::string> function_names;
std::unordered_set<std::string> type_names;
};
TORCH_API ModuleInfo get_module_info(const mobile::Module& module);
} // namespace torch::jit::mobile
```
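A sketch of driving an already-loaded mobile::Module, e.g. one returned by _load_for_mobile above; the attribute name is illustrative, and attr() simply falls back to the default when it is absent:

```cpp
#include <utility>
#include <vector>
#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/mobile/module.h>

// Run forward() once and peek at an attribute with a default fallback.
c10::IValue run_forward_once(
    torch::jit::mobile::Module& module,
    c10::IValue input) {
  // attr() returns the stored slot/constant if found, otherwise `or_else`.
  c10::IValue training = module.attr("training", /*or_else=*/false);
  (void)training;
  std::vector<c10::IValue> inputs;
  inputs.push_back(std::move(input));
  return module.forward(std::move(inputs));
}
```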
======================================================================================================================================
SOURCE CODE FILE: observer.h
LINES: 1
SIZE: 3.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\observer.h
ENCODING: utf-8
```h
#pragma once
#include <c10/util/ThreadLocalDebugInfo.h>
#include <string>
#include <unordered_map>
#include <vector>
namespace torch {
class MobileDebugInfo : public c10::DebugInfoBase {
public:
const std::string& getModelName() {
return model_name_;
}
void setModelName(const std::string& model_name) {
model_name_ = model_name;
}
const std::string& getMethodName() {
return method_name_;
}
void setMethodName(const std::string& method_name) {
method_name_ = method_name;
}
size_t getOpIdx() {
return op_idx_;
}
void setOpIdx(size_t op_idx) {
op_idx_ = op_idx;
}
private:
std::string model_name_;
std::string method_name_;
// TODO: Kimish
// If we launch a thread, such as for at::launch or interpreter continuation,
// and the caching allocator is enabled in the base thread, then, in order to
// propagate this information (that the caching allocator is enabled) across
// thread boundaries, we can use the mechanism provided by ThreadLocalDebugInfo.
// Once the thread local MobileDebugInfo is accessible in the launched
// thread, it can be accessed in that thread and that thread can set
// its own thread local CachingAllocatorInfo.
// However, we cannot expect every launched thread to extract and set
// its own thread local copy of CachingAllocatorInfo.
// But this can be done in lite interpreter, where in the run method
// it can do info =
// c10::ThreadLocalDebugInfo::get(c10::DebugInfoKind::MOBILE_RUNTIME_INFO))
// .get_caching_allocator_info();
// GetThreadLocalCachingAllocatorInfo() = info;
// Other option is to have MobileDebugInfo itself be the place where thread
// local copy of CachingAllocatorInfo is stored. Then
// DefaultMobileCPUAllocator inspects this to decide if to use
// CachingAllocator. However, current lite interpreter does not support FORK,
// thus from the run method of lite interpreter we are not really gonna launch
// another instance of lite interpreter in a different thread. So for now not
// getting bothered about passing CachingAllocatorInfo across thread
// boundaries. c10::CachingAllocatorInfo caching_allocator_info;
size_t op_idx_ = 0;
};
class MobileModuleObserver {
public:
virtual ~MobileModuleObserver() = default;
virtual void onEnterRunMethod(const int32_t) {}
virtual void onExitRunMethod(
const std::unordered_map<std::string, std::string>&,
const std::string&,
const int32_t) {}
virtual void onFailRunMethod(
const std::unordered_map<std::string, std::string>&,
const std::string&,
const int32_t,
const char*) {}
virtual void onEnterLoadModel(const int32_t) {}
virtual void onExitLoadModel(
const int32_t,
const std::unordered_map<std::string, std::string>&) {
} // key: filename, value: file content
virtual void onFailLoadModel(const int32_t, const char*) {}
virtual void onFailLoadModel(
const int32_t,
const char*,
const std::unordered_map<std::string, std::string>&) {}
virtual std::vector<std::string> getDefaultExtraFiles() = 0;
virtual std::unordered_map<std::string, std::string> processMetadataFromExtra(
const std::unordered_map<std::string, std::string>&) = 0;
};
class MobileObserverConfig {
public:
void setModuleObserver(std::unique_ptr<MobileModuleObserver> reporter) {
module_observer_ = std::move(reporter);
}
MobileModuleObserver* getModuleObserver() {
return module_observer_.get();
}
private:
std::unique_ptr<MobileModuleObserver> module_observer_;
};
MobileObserverConfig& observerConfig();
} // namespace torch
```
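A minimal sketch (not part of the header above) of how these hooks might be wired up: subclass MobileModuleObserver, implement the two pure-virtual methods, and install the observer through observerConfig(). The class name and logging behavior are illustrative assumptions.

```cpp
#include <torch/csrc/jit/mobile/observer.h>

#include <iostream>
#include <memory>

// Hypothetical observer that logs when a method starts running.
class LoggingModuleObserver : public torch::MobileModuleObserver {
 public:
  void onEnterRunMethod(const int32_t instance_key) override {
    std::cout << "entering run method, instance " << instance_key << '\n';
  }
  // The two pure-virtual hooks must be overridden even if they do nothing.
  std::vector<std::string> getDefaultExtraFiles() override {
    return {};
  }
  std::unordered_map<std::string, std::string> processMetadataFromExtra(
      const std::unordered_map<std::string, std::string>& extra) override {
    return extra; // pass metadata through unchanged
  }
};

void installObserver() {
  torch::observerConfig().setModuleObserver(
      std::make_unique<LoggingModuleObserver>());
}
```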
|
============================================================================================================================================
SOURCE CODE FILE: parse_bytecode.h
LINES: 1
SIZE: 0.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\parse_bytecode.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/mobile/function.h>
namespace torch::jit::mobile {
using c10::IValue;
TORCH_API void parseInstructions(
const std::string& function_name,
c10::ivalue::TupleElements&& ins_list,
c10::ivalue::TupleElements& debug_handles_m_tuple,
mobile::Function* function);
TORCH_API void parseConstants(
const c10::ivalue::TupleElements& consts_list,
mobile::Function* function);
TORCH_API void parseTypes(
const c10::ivalue::TupleElements& types_list,
mobile::Function* function);
TORCH_API void parseRegisterSize(size_t rsize, mobile::Function* function);
TORCH_API void applyUpgrader(
mobile::Function* function,
uint64_t operator_version);
} // namespace torch::jit::mobile
```
|
=============================================================================================================================================
SOURCE CODE FILE: parse_operators.h
LINES: 1
SIZE: 0.72 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\parse_operators.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/mobile/function.h>
namespace torch::jit {
using c10::IValue;
enum MobileModuleLoadOptions {
OPERATOR_CHECK = 1,
// PARSE_ALL_EXTRA_FILE_MAPS is used to gate for ExtraFileMaps to pull all
// files automatically without explicit entries mapping. Refer to PR for a
// detail: https://github.com/pytorch/pytorch/pull/99747
PARSE_ALL_EXTRA_FILE_MAPS = 2,
};
const uint64_t kDefaultMobileLoadOptions =
MobileModuleLoadOptions::OPERATOR_CHECK;
namespace mobile {
TORCH_API void parseOperators(
c10::ivalue::TupleElements&& ops_list,
const uint64_t& module_load_options,
mobile::Function* function);
} // namespace mobile
} // namespace torch::jit
```
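The load options above form a bitmask. A small sketch of composing them; the helper name is made up for illustration.

```cpp
#include <torch/csrc/jit/mobile/parse_operators.h>

// Hypothetical helper: start from the default (OPERATOR_CHECK) and optionally
// enable pulling all extra files automatically.
uint64_t makeLoadOptions(bool parse_all_extra_file_maps) {
  uint64_t options = torch::jit::kDefaultMobileLoadOptions;
  if (parse_all_extra_file_maps) {
    options |= torch::jit::MobileModuleLoadOptions::PARSE_ALL_EXTRA_FILE_MAPS;
  }
  return options;
}
```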
|
================================================================================================================================================
SOURCE CODE FILE: prim_ops_registery.h
LINES: 1
SIZE: 0.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\prim_ops_registery.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/ivalue.h>
#include <functional>
#include <vector>
namespace torch::jit::mobile {
using Stack = std::vector<c10::IValue>;
void registerPrimOpsFunction(
const std::string& name,
const std::function<void(Stack&)>& fn);
bool hasPrimOpsFn(const std::string& name);
std::function<void(Stack&)>& getPrimOpsFn(const std::string& name);
class prim_op_fn_register {
public:
prim_op_fn_register(
const std::string& name,
const std::function<void(Stack&)>& fn) {
registerPrimOpsFunction(name, fn);
}
};
} // namespace torch::jit::mobile
```
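A sketch of registering a stack-based primitive op at static initialization time via prim_op_fn_register; the op name "example::noop" and its body are assumptions for illustration.

```cpp
#include <torch/csrc/jit/mobile/prim_ops_registery.h>

namespace {

// Hypothetical prim op that leaves the interpreter stack untouched.
void exampleNoop(torch::jit::mobile::Stack& stack) {
  (void)stack;
}

// The constructor forwards to registerPrimOpsFunction, so the op becomes
// visible to hasPrimOpsFn/getPrimOpsFn once static initialization has run.
const torch::jit::mobile::prim_op_fn_register kRegisterExampleNoop(
    "example::noop", exampleNoop);

} // namespace
```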
|
===========================================================================================================================================
SOURCE CODE FILE: profiler_edge.h
LINES: 1
SIZE: 4.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\profiler_edge.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/autograd/profiler_kineto.h>
#include <torch/csrc/jit/mobile/module.h>
namespace torch::jit::mobile {
// If we don't have Kineto available then the edge profiler does not
// work since it relies on Kineto
#ifdef USE_KINETO
class TORCH_API KinetoEdgeCPUProfiler {
public:
// This profiler only profiles KINETO events
// No GPU_FALLBACK or NVTX
/*
* @param m is the instance of mobile Module which is being profiled.
* Note that this implies that KinetoEdgeCPUProfiler can be used
 * to profile a specific Module (see usage below), unlike ProfilerKineto
* which can profile pytorch runtime in arbitrary scope.
* @param fname is the name of the file to which chrome trace is written.
* @param report_input_shapes: whether to record shapes of op's inputs.
* @param with_stack: whether to record model's python stacktrace for the op.
* @param with_flops: whether to report flops corresponding to the op.
* @param with_modules: whether to report original python module
* hierarchy to which the op belongs.
* @param events
* @param adjust_vulkan_timestamps: whether to adjust vulkan timestamps from
* query pool to align with cpu event times
*
* Usage pattern for this profiler must be as follows:
*
* {
* KinetoEdgeCPUProfiler(m, filename, args);
* m.forward(...);
* }
*
* The reason being that KinetoEdgeCPUProfiler has a dependency on Module
* and thus it must not outlive it.
*
 * Thus, KinetoEdgeCPUProfiler should be used as an RAII guard to do profiling
 * within a certain scope. In that scope, the captured reference to
 * Module will outlive KinetoEdgeCPUProfiler. This is guaranteed because
 * KinetoEdgeCPUProfiler must be constructed later than the Module, on the stack.
*
* An example of the anti-pattern and wrong usage is:
*
 * auto profiler = std::make_shared<KinetoEdgeCPUProfiler>(m, filename, args);
* m.forward(...);
*
 * This is wrong because the KinetoEdgeCPUProfiler object would then be
 * constructed on the heap, with its lifetime managed manually or via smart
 * pointers.
*/
KinetoEdgeCPUProfiler(
const torch::jit::mobile::Module& m,
const std::string& fname,
const bool report_input_shapes = false,
const bool profile_memory = false,
const bool with_stack = false,
const bool with_flops = false,
const bool with_modules = false,
std::vector<std::string> events = {},
const bool adjust_vulkan_timestamps = false);
const std::unique_ptr<torch::autograd::profiler::ProfilerResult>&
disableProfiler();
const std::unique_ptr<torch::autograd::profiler::ProfilerResult>&
getProfilerResult();
void recordBackendEvent(
const int64_t start_time_us,
const int64_t end_time_us,
const int64_t debug_handle,
const std::string& event_name,
const std::string& backend_name);
void recordBackendMemoryEvent(
void* ptr,
int64_t alloc_size,
size_t total_allocated,
size_t total_reserved,
c10::Device device);
~KinetoEdgeCPUProfiler();
private:
/*
* We store a reference to Module to make such dependency explicit, since
* a Module reference is already stored in a functor.
*/
const mobile::Module& m_;
std::string trace_file_name_;
std::unique_ptr<torch::autograd::profiler::ProfilerResult> profiler_result_;
};
TORCH_API KinetoEdgeCPUProfiler* getCurrentEdgeProfiler();
#define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \
start_time_us, end_time_us, debug_handle, event_name, backend_name) \
if (mobile::getCurrentEdgeProfiler()) { \
mobile::getCurrentEdgeProfiler()->recordBackendEvent( \
start_time_us, end_time_us, debug_handle, event_name, backend_name); \
}
#define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \
ptr, alloc_size, total_allocated, total_reserved, device) \
if (mobile::getCurrentEdgeProfiler()) { \
mobile::getCurrentEdgeProfiler()->recordBackendMemoryEvent( \
ptr, alloc_size, total_allocated, total_reserved, device); \
}
#else
#define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \
start_time_us, end_time_us, debug_handle, event_name, backend_name)
#define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \
ptr, alloc_size, total_allocated, total_reserved, device)
#endif
} // namespace torch::jit::mobile
```
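A minimal sketch of the RAII usage pattern described in the comment above; the trace path and the surrounding helper are illustrative assumptions, and the snippet only applies when Kineto is available.

```cpp
#ifdef USE_KINETO
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/mobile/profiler_edge.h>

void profileOneForward(
    torch::jit::mobile::Module& m,
    std::vector<c10::IValue> inputs) {
  {
    // Constructed on the stack, after the Module, so it cannot outlive it.
    torch::jit::mobile::KinetoEdgeCPUProfiler profiler(
        m, "/tmp/edge_trace.json", /*report_input_shapes=*/true);
    m.forward(std::move(inputs));
  } // the chrome trace is finalized when the profiler goes out of scope
}
#endif
```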
|
===============================================================================================================================================
SOURCE CODE FILE: promoted_prim_ops.h
LINES: 1
SIZE: 1.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\promoted_prim_ops.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/mobile/prim_ops_registery.h>
#include <torch/csrc/jit/mobile/register_ops_common_utils.h>
namespace torch::jit {
void tupleIndex(Stack& stack);
void raiseException(Stack& stack);
void is(Stack& stack);
void unInitialized(Stack& stack);
void isNot(Stack& stack);
void aten_format(Stack& stack);
void size(Stack& stack);
void sym_size(Stack& stack);
void sym_size_int(Stack& stack);
void sym_stride_int(Stack& stack);
void sym_numel(Stack& stack);
void sym_storage_offset(Stack& stack);
void sym_stride(Stack& stack);
void device(Stack& stack);
void device_with_index(Stack& stack);
void dtype(Stack& stack);
void layout(Stack& stack);
void toPrimDType(Stack& stack);
void dim(Stack& stack);
void _not(Stack& stack);
void boolTensor(Stack& stack);
void toList(Stack& stack);
void numToTensorScalar(Stack& stack);
void isCuda(Stack& stack);
void numToTensorBool(Stack& stack);
void dictIndex(Stack& stack);
void raiseExceptionWithMessage(Stack& stack);
} // namespace torch::jit
```
|
==========================================================================================================================================
SOURCE CODE FILE: quantization.h
LINES: 1
SIZE: 1.24 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\quantization.h
ENCODING: utf-8
```h
#pragma once
#include <c10/macros/Export.h>
#include <string>
namespace torch::jit::mobile {
class Module;
namespace quantization {
/*
* Device side PTQ API.
* Once the model has been prepared for quantization on server side, such model
* is sent to device. On device side the model is further trained. At the end of
* the training, before the model is readied for inference, we need to quantize
* the model.
* Usage of this API is as follows.
* PTQQuanizationHelper ptq_helper;
* ptq_helper.quantize_dynamic(m, "forward");
* Args:
 * m: Captured by reference, an instance of mobile::Module. This module will be
 *    mutated in place to replace its <method_name> method with the quantized
 *    equivalent.
 * method_name: Name of the method to be quantized. AOT preparation for
 *    quantization must also have been done for this method.
 * Returns: In-place mutated `m`, whose size should be smaller due to weight
 *    quantization and whose <method_name> method should use quantized ops.
*/
class TORCH_API PTQQuanizationHelper {
public:
PTQQuanizationHelper() = default;
void quantize_dynamic(
torch::jit::mobile::Module& m,
const std::string& method_name);
};
} // namespace quantization
} // namespace torch::jit::mobile
```
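A sketch following the documented usage above; it assumes the module was AOT-prepared for dynamic quantization of its "forward" method.

```cpp
#include <torch/csrc/jit/mobile/module.h>
#include <torch/csrc/jit/mobile/quantization.h>

void quantizeForwardOnDevice(torch::jit::mobile::Module& m) {
  torch::jit::mobile::quantization::PTQQuanizationHelper ptq_helper;
  // Mutates `m` in place, replacing "forward" with its quantized equivalent.
  ptq_helper.quantize_dynamic(m, "forward");
}
```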
|
=======================================================================================================================================================
SOURCE CODE FILE: register_ops_common_utils.h
LINES: 1
SIZE: 1.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\register_ops_common_utils.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Context.h>
#include <ATen/NativeFunctions.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/stack.h>
#include <torch/csrc/jit/runtime/jit_exception.h>
#include <torch/csrc/jit/runtime/vararg_functions.h>
namespace torch::jit {
inline void noop(Stack& n) {}
int64_t normalizeIndex(int64_t idx, int64_t list_size);
// reference function THPVariable_to in python_variable_methods.cpp
[[maybe_unused]] static at::Tensor to_dispatch(
at::Tensor self,
std::optional<at::Device> device,
std::optional<at::ScalarType> scalarType,
bool non_blocking,
bool copy) {
if (device && device->is_cuda()) {
at::globalContext().lazyInitDevice(c10::DeviceType::CUDA);
}
if (!device && !scalarType && !copy) {
return self;
} else if (!device) {
return self.to(*scalarType, non_blocking, copy);
} else if (!scalarType) {
return self.to(*device, non_blocking, copy);
} else {
return self.to(*device, *scalarType, non_blocking, copy);
}
}
// Convert the tensor pointed to by \p data to a nested list. \p dim is the
// number of dimensions in the tensor and \p cur_dim is the dimension being
// processed by the current invocation. \p ty is the expected output IR type of
// the operation. \p is the scalar type of \p data. \p sizes and \p strides are
// the sizes and strides of the tensor operand and \p element_size is the size
// in bytes of one tensor element.
IValue tensorToListRecursive(
char* data,
int64_t cur_dim,
int64_t num_tensor_dims,
at::TypePtr ty,
at::ScalarType scalar_ty,
at::IntArrayRef sizes,
at::IntArrayRef strides,
size_t element_size);
} // namespace torch::jit
```
|
=========================================================================================================================================
SOURCE CODE FILE: type_parser.h
LINES: 1
SIZE: 1.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\type_parser.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/dynamic_type.h>
#include <ATen/core/jit_type.h>
#include <unordered_set>
namespace c10 {
class TORCH_API TypeParser {
public:
explicit TypeParser(std::string pythonStr);
explicit TypeParser(std::vector<std::string>& pythonStrs);
TypePtr parse();
std::vector<TypePtr> parseList();
static const std::unordered_set<std::string>& getNonSimpleType();
static const std::unordered_set<std::string>& getCustomType();
std::unordered_set<std::string> getContainedTypes();
private:
TypePtr parseNamedTuple(const std::string& qualified_name);
TypePtr parseCustomType();
TypePtr parseTorchbindClassType();
TypePtr parseNonSimple(const std::string& token);
void expect(const char* s);
void expectChar(char c);
template <typename T>
TypePtr parseSingleElementType();
void lex();
std::string next();
std::string_view nextView();
void advance();
[[nodiscard]] std::string_view cur() const;
std::string pythonStr_;
size_t start_;
std::string_view next_token_;
// Used for parsing string list
std::vector<std::string> pythonStrs_;
std::unordered_map<std::string, c10::TypePtr> str_type_ptr_map_;
// Store all contained types when parsing a string
std::unordered_set<std::string> contained_types_;
};
TORCH_API TypePtr parseType(const std::string& pythonStr);
TORCH_API std::vector<TypePtr> parseType(std::vector<std::string>& pythonStr);
} // namespace c10
```
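A sketch of parsing type annotation strings with the free functions above; the example strings assume the Python-style annotation format used by the lite interpreter.

```cpp
#include <torch/csrc/jit/mobile/type_parser.h>

#include <string>
#include <vector>

void parseTypeExamples() {
  // Single annotation string -> TypePtr.
  c10::TypePtr list_of_int = c10::parseType("List[int]");

  // Several annotation strings parsed together.
  std::vector<std::string> annotations = {"int", "Optional[Tensor]"};
  std::vector<c10::TypePtr> types = c10::parseType(annotations);

  (void)list_of_int;
  (void)types;
}
```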
|
=============================================================================================================================================
SOURCE CODE FILE: upgrader_mobile.h
LINES: 1
SIZE: 0.92 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\mobile\upgrader_mobile.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/core/ivalue_inl.h>
#include <torch/csrc/jit/mobile/code.h>
#include <torch/csrc/jit/mobile/function.h>
#include <torch/csrc/jit/serialization/import_export_functions.h>
#include <string>
#include <unordered_map>
#include <vector>
namespace torch::jit {
struct Instruction;
struct Upgrader {
int min_version;
int max_version;
std::string upgrader_name;
int index;
};
// From operator_versions.yaml
TORCH_API const std::unordered_map<std::string, std::vector<Upgrader>>
getOperatorVersionMapForMobile();
struct OperatorString {
const std::string name;
const std::string overload_name;
const std::optional<int> num_specified_args;
};
struct ByteCodeFunctionWithOperator {
mobile::Function& function;
std::vector<OperatorString> operators;
};
TORCH_API const std::vector<ByteCodeFunctionWithOperator>&
getUpgraderBytecodeList();
} // namespace torch::jit
```
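A sketch of querying the operator-version map declared above; the operator-name format passed in is an assumption for illustration.

```cpp
#include <torch/csrc/jit/mobile/upgrader_mobile.h>

#include <iostream>
#include <string>

// Hypothetical lookup: print the upgraders that cover old versions of an op.
void printUpgradersFor(const std::string& op_name) {
  const auto& version_map = torch::jit::getOperatorVersionMapForMobile();
  auto it = version_map.find(op_name);
  if (it == version_map.end()) {
    return; // no upgraders registered for this operator
  }
  for (const auto& upgrader : it->second) {
    std::cout << upgrader.upgrader_name << " applies to versions ["
              << upgrader.min_version << ", " << upgrader.max_version << "]\n";
  }
}
```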
|
==============================================================================================================================================
SOURCE CODE FILE: add_if_then_else.h
LINES: 1
SIZE: 0.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\add_if_then_else.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API bool AddIfThenElseOp(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
============================================================================================================================================
SOURCE CODE FILE: annotate_warns.h
LINES: 1
SIZE: 0.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\annotate_warns.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void AnnotateWarns(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
======================================================================================================================================
SOURCE CODE FILE: autocast.h
LINES: 1
SIZE: 0.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\autocast.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void Autocast(const std::shared_ptr<Graph>& graph);
TORCH_API bool setAutocastMode(bool value);
TORCH_API bool autocastEnabled();
} // namespace torch::jit
```
|
===========================================================================================================================================
SOURCE CODE FILE: bailout_graph.h
LINES: 1
SIZE: 1.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\bailout_graph.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/jit_type.h>
#include <ATen/core/stack.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
#include <list>
#include <vector>
namespace torch::jit {
// Replaces prim::Guard nodes with prim::BailOut nodes and
// computes sets of inputs needed to resume execution at
// bailout points
TORCH_API void InsertBailOuts(std::shared_ptr<Graph> graph);
// Builds a bailout graph into `target` (which is an empty graph)
// for a given bailout point `bailout_index`
// from the original graph `orig` (the original unoptimized graph)
// BailOut graphs allow Interpreter to resume
// execution of the (un/de)optimized graph (i.e.
// a graph that doesn't rely on any assumptions derived from
// profiling information) from a given BailOut point
// should any of the assumptions fail for an actual input.
TORCH_API std::shared_ptr<Graph> BuildBailOutGraphFrom(
int64_t bailout_index,
const std::shared_ptr<Graph>& orig,
const std::shared_ptr<Graph>& target);
} // namespace torch::jit
```
|
======================================================================================================================================
SOURCE CODE FILE: batch_mm.h
LINES: 1
SIZE: 0.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\batch_mm.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void BatchMM(std::shared_ptr<Graph>& graph);
}
```
|
==========================================================================================================================================
SOURCE CODE FILE: canonicalize.h
LINES: 1
SIZE: 0.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\canonicalize.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API std::shared_ptr<Graph> Canonicalize(
const std::shared_ptr<Graph>& graph,
bool keep_unique_names = true);
TORCH_API void CanonicalizeOutputs(std::shared_ptr<Graph>& graph);
TORCH_API std::optional<const Use> firstOrLastUse(Value* v, bool find_first);
TORCH_API bool isBeforeOrAfter(
const Use& a,
const Use& b,
bool checking_before);
} // namespace torch::jit
```
|
==========================================================================================================================================================
SOURCE CODE FILE: canonicalize_graph_fuser_ops.h
LINES: 1
SIZE: 0.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\canonicalize_graph_fuser_ops.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void CanonicalizeOps(const std::shared_ptr<Graph>& graph);
}
```
|
=================================================================================================================================================
SOURCE CODE FILE: check_strict_fusion.h
LINES: 1
SIZE: 0.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\check_strict_fusion.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void CheckStrictFusion(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
=============================================================================================================================================
SOURCE CODE FILE: clear_profiling.h
LINES: 1
SIZE: 0.47 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\clear_profiling.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/jit_type.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void unprofileGraphInputs(const std::shared_ptr<Graph>& graph);
TORCH_API void unprofileBlock(Block* start_block);
// Unprofiles all the node outputs in a block.
TORCH_API void ClearProfilingInformation(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
=================================================================================================================================================
SOURCE CODE FILE: clear_undefinedness.h
LINES: 1
SIZE: 0.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\clear_undefinedness.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/jit_type.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Undefinedness makes argument matching fail for regular tensor operations
// if 1+ arguments are undefined or possibly undefined tensors.
// Technically, undefined tensors are **not** tensors as the regular tensor
// operations do not know how to handle them.
// However, in practice, there are guards and conversion operators that
// **always** gate regular operations if undefined tensors may be present
// Eventually, we would love to move to the world where we use optionals
// in lieu of undefined tensors.
// When this happens, this pass will be removed
TORCH_API void ClearUndefinedness(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
==============================================================================================================================================================
SOURCE CODE FILE: common_subexpression_elimination.h
LINES: 1
SIZE: 0.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\common_subexpression_elimination.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API bool EliminateCommonSubexpression(
const std::shared_ptr<Graph>& graph);
}
```
|
========================================================================================================================================
SOURCE CODE FILE: concat_opt.h
LINES: 1
SIZE: 0.53 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\concat_opt.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Eliminates common inputs among `aten::cat` ops.
TORCH_API bool EliminateConcatCommonInputs(const std::shared_ptr<Graph>& graph);
// Expands `aten::cat` ops into `aten::copy` ops and eliminates redundancies
// in the buffers used for concatenation if possible.
TORCH_API void ExpandConcatAndEliminateRedundancy(
const std::shared_ptr<Graph>& graph);
TORCH_API bool CombineConcats(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
==============================================================================================================================================
SOURCE CODE FILE: constant_pooling.h
LINES: 1
SIZE: 0.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\constant_pooling.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void ConstantPooling(const std::shared_ptr<Graph>& graph);
}
```
|
==================================================================================================================================================
SOURCE CODE FILE: constant_propagation.h
LINES: 1
SIZE: 1.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\constant_propagation.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Runs constant propagation on all objects unless ignore_custom_classes is
// specified as true, in which case user defined classes are skipped. This is
// useful to prevent early fusion of packing operations, which end up lowering
// away information about their constructors (e.g. packed::linear_clamp_prepack
// and prepacked::conv2d_clamp_prepack)
// Returns True if the pass made a change to the graph
TORCH_API bool ConstantPropagation(
std::shared_ptr<Graph>& graph,
bool ignore_custom_classes = false);
// runs constant propagation only on ops that have non-aliasing inputs & outputs
// Returns True if the pass made a change to the graph
TORCH_API bool ConstantPropagationImmutableTypes(std::shared_ptr<Graph>& graph);
// Runs the node if its inputs are constants. Callers of this function must
// make their own determination if constant prop is appropriate - for example
// non-deterministic ops or ops with side effects. If ignore_custom_classes is
// specified, nodes that output user defined classes are not run.
TORCH_API std::optional<Stack> runNodeIfInputsAreConstant(
const Node* node,
bool ignore_custom_classes = false,
AliasDb* db = nullptr);
} // namespace torch::jit
```
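A sketch of running the pass on a small graph parsed from IR text; the graph is an illustrative assumption.

```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/passes/constant_propagation.h>

void foldConstantsExample() {
  auto graph = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(
      R"IR(
        graph():
          %a : int = prim::Constant[value=2]()
          %b : int = prim::Constant[value=3]()
          %c : int = aten::add(%a, %b)
          return (%c)
      )IR",
      graph.get());
  // Constant-folds the integer add when the pass can resolve the operator;
  // returns true if the graph was changed.
  bool changed = torch::jit::ConstantPropagation(graph);
  (void)changed;
  graph->dump(); // prints the resulting graph to stdout
}
```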
|
=======================================================================================================================================================
SOURCE CODE FILE: create_autodiff_subgraphs.h
LINES: 1
SIZE: 0.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\create_autodiff_subgraphs.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
#include <cstddef>
namespace torch::jit {
// insert GraphExecutor nodes that group together
// subgraphs that are differentiable by the jit's autodiff passes
// threshold - minimum number of nodes that will appear in a block
// returns all differentiable blocks that have been found
TORCH_API std::vector<Node*> CreateAutodiffSubgraphs(
const std::shared_ptr<Graph>& graph,
size_t threshold = 2);
} // namespace torch::jit
```
|
======================================================================================================================================================
SOURCE CODE FILE: create_functional_graphs.h
LINES: 1
SIZE: 0.29 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\create_functional_graphs.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void CreateFunctionalGraphs(const std::shared_ptr<Graph>& graph);
TORCH_API void InlineFunctionalGraphs(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
===================================================================================================================================================
SOURCE CODE FILE: dead_code_elimination.h
LINES: 1
SIZE: 1.56 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\dead_code_elimination.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// If given a top-level graph, DCE will perform alias analysis that allows
// for "smarter" dead code elimination (we will eliminate mutable ops if we can
// prove the mutated values are not used). Otherwise, we will not allow DCE to
// eliminate mutable ops.
//
// So, prefer to use the graph version if you can.
enum class DCESideEffectPolicy : uint8_t {
// default behavior: dead code elimination will check if a node has side
// effects
// and not delete it if it does.
DONT_DELETE_NODES_WITH_SIDE_EFFECTS,
// with this flag, dead code elimination will not check if a node has side
// effects and treat nodes with side effects like any other node,
// i.e. delete them if their outputs aren't used anywhere.
ALLOW_DELETING_NODES_WITH_SIDE_EFFECTS
};
TORCH_API void EliminateDeadCode(
const std::shared_ptr<Graph>& graph,
DCESideEffectPolicy sideEffectPolicy =
DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
TORCH_API void EliminateDeadCode(
Block* block,
bool recurse = true,
DCESideEffectPolicy sideEffectPolicy =
DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
// Invoke the user-provided callback on all live values before deleting anything
TORCH_API void EliminateDeadCode(
Block* block,
std::function<void(const std::unordered_set<const Value*>&)> cb,
DCESideEffectPolicy sideEffectPolicy =
DCESideEffectPolicy::DONT_DELETE_NODES_WITH_SIDE_EFFECTS);
} // namespace torch::jit
```
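A sketch of running DCE with the default side-effect policy on a graph parsed from IR text; the graph is an illustrative assumption.

```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>

void runDceExample() {
  auto graph = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(
      R"IR(
        graph(%x : Tensor):
          %unused : Tensor = aten::relu(%x)
          return (%x)
      )IR",
      graph.get());
  // With the default policy, side-effecting nodes are kept; the unused relu
  // has no side effects, so it is removed.
  torch::jit::EliminateDeadCode(graph);
  graph->dump(); // prints the cleaned-up graph to stdout
}
```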
|
===========================================================================================================================================
SOURCE CODE FILE: decompose_ops.h
LINES: 1
SIZE: 0.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\decompose_ops.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void DecomposeOps(std::shared_ptr<Graph>& graph);
}
```
|
==================================================================================================================================================
SOURCE CODE FILE: device_type_analysis.h
LINES: 1
SIZE: 0.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\device_type_analysis.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
struct Graph;
// Propagates Device type info throughout the given graph.
TORCH_API bool DeviceTypePropagation(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
============================================================================================================================================
SOURCE CODE FILE: dtype_analysis.h
LINES: 1
SIZE: 0.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\dtype_analysis.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
#include <memory>
namespace torch::jit {
struct Graph;
// Propagates tensor properties (e.g., dtype, device, is_contiguous, layout)
// across all tensor objects. Currently, we only support dtype
// propagation
TORCH_API bool DtypePropagation(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
==============================================================================================================================================
SOURCE CODE FILE: eliminate_no_ops.h
LINES: 1
SIZE: 0.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\eliminate_no_ops.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Remove ops that do nothing on the forward pass (like aten::detach).
// This pass is invoked as a part of freeze_module.
// This function also takes a set of custom ops to eliminate. All ops in this
// set must take their output as their first input, i.e. x = f(x, ...)
TORCH_API bool EliminateNoOps(
std::shared_ptr<Graph>& graph,
std::unordered_set<c10::Symbol> custom_ops = {});
} // namespace torch::jit
```
|
================================================================================================================================================
SOURCE CODE FILE: erase_number_types.h
LINES: 1
SIZE: 0.79 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\erase_number_types.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Erase NumberType information. This is necessary for and only used in
// exporting to ONNX. This pass ensures that no remaining Values have
// NumberType types, replacing them with tensors.
// The following things are done to erase NumberType info:
// - NumberType outputs are changed to DynamicType.
// - prim::Constant nodes which are numbers get changed into 0-dim tensors of
// the corresponding type
// - prim::TensorToNum, aten::Float, aten::Int and prim::NumToTensor nodes
// are erased.
//
// The pass assumes that DCE will be called sometime after.
TORCH_API void EraseNumberTypes(const std::shared_ptr<Graph>& graph);
TORCH_API void EraseNumberTypesOnBlock(Block* block);
} // namespace torch::jit
```
|
======================================================================================================================================================
SOURCE CODE FILE: fixup_trace_scope_blocks.h
LINES: 1
SIZE: 1.65 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\fixup_trace_scope_blocks.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Directly after tracing, we have an ill-formed graph with blocks inserted.
// Example:
//
// graph(%self : ClassType<Module>,
// %input.1 : Float(3, 4)):
// %1 : ClassType<Module> = prim::GetAttr[name="relu1"](%self)
// %2 : ClassType<Module> = prim::GetAttr[name="relu2"](%self)
// %3 : ClassType<Module> = prim::GetAttr[name="rrr"](%2)
// = prim::TracedModuleForward[scope="__module.relu1"]()
// block0():
// %input : Float(3, 4) = aten::relu(%input.1),
// -> ()
// = prim::TracedModuleForward[scope="__module.relu2"](),
// block0():
// = prim::TracedModuleForward[scope="__module.relu2.rrr"](),
// block0():
// %6 : Float(3, 4) = aten::relu(%input),
// -> ()
// -> ()
// return (%6)
//
// In this pass, we:
// 1) Lift Value defs to as high of a scope as needed to ensure that
// they dominate all their uses. For example, `input` in the above
// graph needs to be lifted to the top-level block so that its use
// in the second `relu` operator is dominated.
// 2) Lambda lift the blocks. This ensures that all values used within
// each scope have their defs captured.
// 3) Convert the scope blocks into methods on their respective Modules,
// and convert TracedModuleForward nodes to CallMethod nodes into those
// methods.
//
// Then, we'll have a well-formed graph with proper method calls.
TORCH_API void FixupTraceScopeBlocks(
std::shared_ptr<Graph>& graph,
Module* self);
} // namespace torch::jit
```
|
==========================================================================================================================================
SOURCE CODE FILE: fold_conv_bn.h
LINES: 1
SIZE: 0.98 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\fold_conv_bn.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
namespace torch::jit {
/** \brief Fold Conv2d-BatchNorm2d into Conv2d in all methods of this
 * module and all its submodules; forward is included by default.
*
* The weight and bias of the Conv2d are correspondingly updated. Should only be
* used on modules in eval mode.
*/
TORCH_API Module FoldConvBatchNorm(const Module& module);
struct TORCH_API ConvBNParameters {
at::Tensor conv_w;
at::Tensor conv_b;
at::Tensor bn_rm;
at::Tensor bn_rv;
double bn_eps = 0.0;
at::Tensor bn_w;
at::Tensor bn_b;
};
/**
* Given the current weight and bias tensors of a Conv module and parameters
* of the BatchNorm module we're folding with, compute the updated values
* for the weight and bias.
*
* The function is basically copied from torch/nn/utils/fusion.py
*/
TORCH_API std::tuple<at::Tensor, at::Tensor> computeUpdatedConvWeightAndBias(
const ConvBNParameters& p);
} // namespace torch::jit
```
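A sketch of calling the folding helper above directly; the tensor shapes and values are assumptions for illustration only.

```cpp
#include <ATen/ATen.h>
#include <torch/csrc/jit/passes/fold_conv_bn.h>

void foldConvBnExample() {
  torch::jit::ConvBNParameters p;
  p.conv_w = at::randn({8, 3, 3, 3}); // out_channels x in_channels x kH x kW
  p.conv_b = at::zeros({8});
  p.bn_rm = at::zeros({8}); // running mean
  p.bn_rv = at::ones({8});  // running variance
  p.bn_eps = 1e-5;
  p.bn_w = at::ones({8});   // gamma
  p.bn_b = at::zeros({8});  // beta
  auto [new_w, new_b] = torch::jit::computeUpdatedConvWeightAndBias(p);
  (void)new_w;
  (void)new_b;
}
```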
|
============================================================================================================================================
SOURCE CODE FILE: fold_linear_bn.h
LINES: 1
SIZE: 0.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\fold_linear_bn.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
namespace torch::jit {
struct TORCH_API LinearBNParameters {
at::Tensor linear_w;
at::Tensor linear_b;
at::Tensor bn_rm;
at::Tensor bn_rv;
double bn_eps = 0.0;
at::Tensor bn_w;
at::Tensor bn_b;
};
/**
* Given the current weight and bias tensors of a Linear module and parameters
* of the BatchNorm module we're folding with, compute the updated values
* for the weight and bias.
*
* The function is basically copied from torch/nn/utils/fusion.py
*/
TORCH_API std::tuple<at::Tensor, at::Tensor> computeUpdatedLinearWeightAndBias(
const LinearBNParameters& p);
} // namespace torch::jit
```
|
===========================================================================================================================================
SOURCE CODE FILE: freeze_module.h
LINES: 1
SIZE: 1.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\freeze_module.h
ENCODING: utf-8
```h
/** \brief This file defines freezing Torchscript module API.
*
* This API has python-binding and can be invoked directly or as a part of
* general optimization pipeline.
*/
#pragma once
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
/** \brief Freeze Module, i.e., Assume all attributes are constants.
*
* Freezing module is a functionality that allows the JIT to internalize
* immutable attributes. Combined with inlining, the module is aggressively
* optimized and significant overhead is optimized away. The freezeModule API
* produces a cloned frozen module.
*/
namespace torch::jit {
TORCH_API Module freeze_module(
const Module& module,
std::vector<std::string> preservedAttrs = std::vector<std::string>(),
bool freezeInterfaces = true,
bool preserveParameters = false);
// Clone-free version of freeze_module. This modifies the module inplace.
// Use this version to avoid extra memory usage incurred by cloning the module.
TORCH_API void freeze_module_inplace(
Module* module,
std::vector<std::string> preservedAttrs = std::vector<std::string>(),
bool freezeInterfaces = true,
bool preserveParameters = false);
} // namespace torch::jit
```
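A sketch of freezing a scripted module for inference; the preserved attribute name "version" is an assumption for illustration.

```cpp
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/passes/freeze_module.h>

// Hypothetical helper: freeze a scripted module, keeping one attribute mutable.
torch::jit::Module freezeForInference(torch::jit::Module module) {
  module.eval(); // freezing should only be applied to modules in eval mode
  return torch::jit::freeze_module(module, /*preservedAttrs=*/{"version"});
}
```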
|
==================================================================================================================================================
SOURCE CODE FILE: frozen_concat_linear.h
LINES: 1
SIZE: 0.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\frozen_concat_linear.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Concats multiple linear ops with the same Tensor input
// into a single linear op.
TORCH_API bool FrozenConcatLinear(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
=========================================================================================================================================================
SOURCE CODE FILE: frozen_conv_add_relu_fusion.h
LINES: 1
SIZE: 0.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\frozen_conv_add_relu_fusion.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API extern std::function<void(std::shared_ptr<Graph>&)>&
getFuseFrozenConvAddReluImpl();
TORCH_API void FuseFrozenConvAddRelu(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
=================================================================================================================================================
SOURCE CODE FILE: frozen_conv_folding.h
LINES: 1
SIZE: 0.85 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\frozen_conv_folding.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Fuses Convolution -> Batchnorm into a single Convolution by
// folding batchnorm weights into conv weights.
// This pass only works on Frozen Graphs; otherwise it is a No-Op.
TORCH_API bool FoldFrozenConvBatchnorm(std::shared_ptr<Graph>& graph);
// Fuses Convolution -> Add/Sub into a single Convolution by
// folding add constant tensor into conv weights.
// This pass only works on Frozen Graphs; otherwise it is a No-Op.
TORCH_API bool FoldFrozenConvAddOrSub(std::shared_ptr<Graph>& graph);
// Fuses Convolution -> Mul/Div into a single Convolution by
// folding the constant tensor into conv weights.
// This pass only works on Frozen Graphs; otherwise it is a No-Op.
TORCH_API bool FoldFrozenConvMulOrDiv(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
========================================================================================================================================================
SOURCE CODE FILE: frozen_graph_optimizations.h
LINES: 1
SIZE: 0.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\frozen_graph_optimizations.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
/** \brief Runs a set of Optimizations that Optimize Frozen Graphs
*
* Currently this set of optimizations is:
* - FoldFrozenConvBatchnorm
* - FoldFrozenConvAddOrSub
* - FoldFrozenConvMulOrDiv
* - FoldFrozenLinearBatchnorm
*/
namespace torch::jit {
TORCH_API void OptimizeFrozenGraph(
std::shared_ptr<Graph>& graph,
bool optimize_numerics = true);
} // namespace torch::jit
```
|
===================================================================================================================================================
SOURCE CODE FILE: frozen_linear_folding.h
LINES: 1
SIZE: 0.35 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\frozen_linear_folding.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Fuses Linear -> BatchNormNd into a single Linear by
// folding batchnorm weights into linear weights.
// This pass only works on Frozen Graphs; otherwise it is a No-Op.
TORCH_API bool FoldFrozenLinearBatchnorm(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
=====================================================================================================================================================
SOURCE CODE FILE: frozen_linear_transpose.h
LINES: 1
SIZE: 0.27 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\frozen_linear_transpose.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Transposes the weight matrix for frozen linear modules
// and converts the op into a matmul.
TORCH_API bool FrozenLinearTranspose(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
==================================================================================================================================================
SOURCE CODE FILE: frozen_ops_to_mkldnn.h
LINES: 1
SIZE: 0.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\frozen_ops_to_mkldnn.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Converts operators & their parameters to mkldnn if it is profitable
// Currently encompassing Conv2d and Conv3d, and Linear
// Op must be in float32 and mkldnn must be built
// This pass only works on frozen graph
TORCH_API void ConvertFrozenOpsToMKLDNN(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
=========================================================================================================================================
SOURCE CODE FILE: fuse_linear.h
LINES: 1
SIZE: 0.75 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\fuse_linear.h
ENCODING: utf-8
```h
/** \brief Fusing linear patterns as single at::linear for easier pattern
* matching in later passes
*/
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
/** \brief Match the at::linear pattern and fuse it into a single at::linear
 * This pass fuses the addmm or matmul + add generated by the JIT back into linear.
 * This pass can be deleted once the JIT can emit aten::linear itself.
*/
TORCH_API void FuseLinear(std::shared_ptr<Graph>& graph);
/** Swap functional linear CallFunctions to aten::linear
*/
TORCH_API void SwapFunctionalLinear(std::shared_ptr<Graph>& graph);
/** Swap all functional linear CallFunctions in module
*/
TORCH_API void SwapFunctionalLinear(Module& module);
} // namespace torch::jit
```
|
=======================================================================================================================================
SOURCE CODE FILE: fuse_relu.h
LINES: 1
SIZE: 0.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\fuse_relu.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void FuseAddRelu(script::Module& module);
TORCH_API void FuseAddRelu(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
=========================================================================================================================================
SOURCE CODE FILE: graph_fuser.h
LINES: 1
SIZE: 1.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\graph_fuser.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API bool canFuseOnCPULegacy();
TORCH_API void overrideCanFuseOnCPULegacy(bool value);
// NB: Be sure to run DCE before fusion, because dead instructions
// can prevent fusion opportunities from being exploited.
// On Windows will noop, NYI
TORCH_API void FuseGraph(
std::shared_ptr<Graph>& graph,
bool strict_fuser_check = false);
// \brief Custom fusion pass using a node-level callback to
// determine the inclusion of nodes in a subgraph.
//
// This helper omits aliased inputs and fusion across control flow
// boundaries.
//
// \arg graph The graph to be modified in-place
// \arg is_fusable A callback run on each fusable node in the graph.
// \arg kind The label given to the resultant fused subgraph
// \arg arg_limit The maximum number of args the resultant fused subgraph
// should have. Note: This will likely develop into a general
// post condition on the fused subgraph.
TORCH_API void CustomFuseGraph(
std::shared_ptr<Graph>& graph,
const std::function<bool(Node*)>& is_fusable,
Symbol kind,
size_t arg_limit = std::numeric_limits<size_t>::max());
} // namespace torch::jit
```
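A sketch of the custom fusion callback described above; the subgraph symbol "my::FusionGroup" and the relu-only predicate are assumptions for illustration.

```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/graph_fuser.h>

// Hypothetical custom fusion: group relu nodes under a made-up symbol.
void fuseRelusExample(std::shared_ptr<torch::jit::Graph>& graph) {
  torch::jit::CustomFuseGraph(
      graph,
      [](torch::jit::Node* n) {
        return n->kind() == c10::Symbol::fromQualString("aten::relu");
      },
      c10::Symbol::fromQualString("my::FusionGroup"));
}
```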
|
==================================================================================================================================================
SOURCE CODE FILE: graph_rewrite_helper.h
LINES: 1
SIZE: 1.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\graph_rewrite_helper.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/ir/subgraph_matcher.h>
#include <torch/csrc/jit/passes/subgraph_rewrite.h>
namespace torch::jit::graph_rewrite_helper {
std::string getFuncName(Value* func_value);
Value* getValue(
const std::string& name,
const std::unordered_map<const Value*, Value*>& match_vmap,
const std::unordered_map<std::string, Value*>& vmap);
std::optional<IValue> getIValue(
const std::string& name,
const std::unordered_map<const Value*, Value*>& match_vmap,
const std::unordered_map<std::string, Value*>& vmap);
TORCH_API void replaceConvolutionWithAtenConv(std::shared_ptr<Graph>& graph);
bool isClampFusable(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
// This struct contains a compiled IR pattern slated for use in the
// findPatternMatches function. The struct encapsulates the common
// information from parseIR that is used in conjunction with the
// pattern matching facility. A const instance of this struct can
// also be stored away to cache the compiled IR pattern and reduce
// runtime cost
struct PatternInfo {
std::string pattern_string;
std::unique_ptr<Graph> pattern_graph;
std::unordered_map<std::string, Value*> vmap;
std::vector<MatchFilter> filters;
static PatternInfo parse_from_str(
std::string pattern_string,
const std::vector<MatchFilter>& filters = {}) {
PatternInfo rv{
std::move(pattern_string),
std::make_unique<Graph>(),
decltype(vmap){},
filters};
parseIR(rv.pattern_string, rv.pattern_graph.get(), rv.vmap);
return rv;
}
};
} // namespace torch::jit::graph_rewrite_helper
```
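A sketch of caching a compiled IR pattern with PatternInfo, as the comment above suggests; the pattern itself is an illustrative assumption.

```cpp
#include <torch/csrc/jit/passes/graph_rewrite_helper.h>

// Compile the pattern once and reuse it across calls.
const torch::jit::graph_rewrite_helper::PatternInfo& reluPattern() {
  static const auto pattern =
      torch::jit::graph_rewrite_helper::PatternInfo::parse_from_str(R"IR(
        graph(%x):
          %y = aten::relu(%x)
          return (%y))IR");
  return pattern;
}
```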
|
===============================================================================================================================================
SOURCE CODE FILE: guard_elimination.h
LINES: 1
SIZE: 0.36 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\guard_elimination.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/jit_type.h>
#include <ATen/core/stack.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
#include <list>
#include <vector>
namespace torch::jit {
TORCH_API void EliminateRedundantGuards(std::shared_ptr<Graph> graph);
} // namespace torch::jit
```
|
======================================================================================================================================================
SOURCE CODE FILE: hoist_conv_packed_params.h
LINES: 1
SIZE: 0.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\hoist_conv_packed_params.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
void HoistConvPackedParams(script::Module& m);
} // namespace torch::jit
```
|
=======================================================================================================================================================
SOURCE CODE FILE: inline_autodiff_subgraphs.h
LINES: 1
SIZE: 0.26 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\inline_autodiff_subgraphs.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API bool canRunWithAutograd(Node* node);
TORCH_API void InlineAutodiffSubgraphs(
std::shared_ptr<Graph>& graph,
size_t threshold = 5);
} // namespace torch::jit
```
|
==============================================================================================================================================
SOURCE CODE FILE: inline_fork_wait.h
LINES: 1
SIZE: 0.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\inline_fork_wait.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Inline Fork and Wait calls. This is used, for example, in ONNX export, where
// we do not support the explicit parallelism structures and would rather
// just have a flat graph. This inlines the forked section in the fork()
// callsite and replaces uses of the result of wait() calls with the values
// produced from the (now-inlined) forked section.
TORCH_API void InlineForkWait(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
====================================================================================================================================================
SOURCE CODE FILE: inline_forked_closures.h
LINES: 1
SIZE: 0.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\inline_forked_closures.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void inlineForkedClosures(std::shared_ptr<Graph>& to_clean);
} // namespace torch::jit
```
|
=====================================================================================================================================
SOURCE CODE FILE: inliner.h
LINES: 1
SIZE: 0.24 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\inliner.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Inline function and method calls.
TORCH_API void Inline(Graph& graph);
TORCH_API GraphFunction* tryToGraphFunction(Node* n);
} // namespace torch::jit
```
|
===========================================================================================================================================
SOURCE CODE FILE: inplace_check.h
LINES: 1
SIZE: 0.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\inplace_check.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void CheckInplace(std::shared_ptr<Graph>& graph);
}
```
|
===========================================================================================================================================
SOURCE CODE FILE: insert_guards.h
LINES: 1
SIZE: 0.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\insert_guards.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/jit_type.h>
#include <ATen/core/stack.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
#include <list>
#include <vector>
namespace torch::jit {
TORCH_API void InsertGuards(std::shared_ptr<Graph> graph);
TORCH_API void RemoveProfilingNodes(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
======================================================================================================================================================
SOURCE CODE FILE: integer_value_refinement.h
LINES: 1
SIZE: 0.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\integer_value_refinement.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// return true if graph is modified
TORCH_API bool RefineIntegerValues(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
===========================================================================================================================================
SOURCE CODE FILE: lift_closures.h
LINES: 1
SIZE: 0.20 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\lift_closures.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void liftClosures(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
======================================================================================================================================
SOURCE CODE FILE: liveness.h
LINES: 1
SIZE: 0.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\liveness.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>
#include <ATen/core/jit_type.h>
#include <ATen/core/stack.h>
#include <c10/util/sparse_bitset.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/ir/ir.h>
#include <list>
#include <unordered_map>
#include <vector>
namespace torch::jit {
using SparseBitVector = ::c10::SparseBitVector<256>;
// BuildLivenessSets computes "bailout" liveness which is equivalent to
// "{LIVE_IN} or {GEN}" or "{LIVE_OUT} - {KILL}"
TORCH_API std::unordered_map<Node*, std::vector<Value*>> BuildLivenessSets(
std::shared_ptr<Graph> graph);
} // namespace torch::jit
```
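
A small sketch (graph text is illustrative): build a graph with `parseIR`, compute the liveness sets, and iterate over them.
```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/passes/liveness.h>
#include <iostream>

using namespace torch::jit;

void livenessExample() {
  auto graph = std::make_shared<Graph>();
  parseIR(R"IR(
    graph(%x : Tensor):
      %y : Tensor = aten::relu(%x)
      %z : Tensor = aten::neg(%y)
      return (%z)
  )IR", graph.get());
  auto liveness = BuildLivenessSets(graph);
  for (const auto& entry : liveness) {
    // entry.first is a Node*, entry.second the Values considered live for it
    std::cout << entry.second.size() << " live values\n";
  }
}
```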
|
============================================================================================================================================
SOURCE CODE FILE: loop_unrolling.h
LINES: 1
SIZE: 0.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\loop_unrolling.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// return true if graph is modified
TORCH_API bool UnrollLoops(std::shared_ptr<Graph>& graph);
// Only unrolls constant loops. Will unroll them regardless of loop block size
TORCH_API bool UnrollConstantLoops(std::shared_ptr<Graph>& graph);
TORCH_API Node* PeelLoop(Node* n, size_t times);
// return true if graph is modified
TORCH_API bool PeelProfilingLoops(const std::shared_ptr<Graph>& graph);
struct TORCH_API LoopsPeeler {
LoopsPeeler(std::function<bool(Node* n)> callback, size_t num_iterations = 1)
: callback_(std::move(callback)), num_iterations_(num_iterations) {}
bool run(const std::shared_ptr<Graph>& graph);
private:
void collectLoop(Node* n);
void collectLoops(Block* block);
void peelLoops();
std::function<bool(Node* n)> callback_ = nullptr;
Node* in_loop_ = nullptr;
std::list<Node*> loops_to_peel_;
size_t num_iterations_ = 1;
};
} // namespace torch::jit
```
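
A hedged sketch of the `LoopsPeeler` API (it assumes `graph` already contains `prim::Loop` nodes, e.g. from a scripted function): the callback selects which loops to peel and `num_iterations` controls how many iterations are peeled off the front.
```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/loop_unrolling.h>

using namespace torch::jit;

// Peel one iteration off every loop in the graph; returns true if anything changed.
bool peelAllLoops(const std::shared_ptr<Graph>& graph) {
  LoopsPeeler peeler(
      [](Node* n) { return n->kind() == prim::Loop; },  // which loops to peel
      /*num_iterations=*/1);
  return peeler.run(graph);
}
```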
|
===========================================================================================================================================
SOURCE CODE FILE: lower_grad_of.h
LINES: 1
SIZE: 0.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\lower_grad_of.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// This pass removes 'grad_of' nodes, replacing them with conditionals of
// the form:
// if any_defined(inputs):
// outputs = <original_computation>
// else:
// outputs = undefineds
TORCH_API void LowerGradOf(Graph& g);
} // namespace torch::jit
```
|
=========================================================================================================================================
SOURCE CODE FILE: lower_graph.h
LINES: 1
SIZE: 0.73 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\lower_graph.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
// Given the graph of a method whose first argument is %self, lower it to a
// graph where all attribute accesses are replaced with explicit inputs to the
// graph (rather than results of prim::GetAttr executed on %self).
//
// Returns a tuple (graph, parameters) where the last module.parameters.size()
// inputs to the graph are the trainable parameters used in this method. The
// remaining inputs are the true inputs to the function.
TORCH_API std::pair<std::shared_ptr<Graph>, std::vector<IValue>> LowerGraph(
Graph& graph,
const ModulePtr& self);
} // namespace torch::jit
```
|
==========================================================================================================================================
SOURCE CODE FILE: lower_tuples.h
LINES: 1
SIZE: 0.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\lower_tuples.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// removes tuples where TupleConstruct and TupleUnpack are matched
// but leaves tuples in place across if statements, loops, and as inputs/outputs
TORCH_API void LowerSimpleTuples(const std::shared_ptr<Graph>& graph);
// removes _all_ tuples and raises an error if some cannot be removed
// this is used by ONNX to ensure there are no tuples before conversion,
// but will not work on graphs whose inputs contain tuples.
TORCH_API void LowerAllTuples(const std::shared_ptr<Graph>& graph);
TORCH_API void LowerSimpleTuples(Block* block);
} // namespace torch::jit
```
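
A small sketch (the IR text is illustrative and uses the matched TupleConstruct/TupleUnpack pattern described above): after `LowerSimpleTuples` the construct/unpack pair should be removed.
```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/passes/lower_tuples.h>

using namespace torch::jit;

void lowerTuplesExample() {
  auto graph = std::make_shared<Graph>();
  parseIR(R"IR(
    graph(%a : Tensor, %b : Tensor):
      %t : (Tensor, Tensor) = prim::TupleConstruct(%a, %b)
      %x : Tensor, %y : Tensor = prim::TupleUnpack(%t)
      return (%x, %y)
  )IR", graph.get());
  LowerSimpleTuples(graph);  // matched construct/unpack pairs are removed
  graph->dump();             // the tuple should no longer appear
}
```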
|
===========================================================================================================================================
SOURCE CODE FILE: metal_rewrite.h
LINES: 1
SIZE: 0.58 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\metal_rewrite.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
#include <string>
#include <vector>
namespace torch::jit {
TORCH_API void metalInsertPrePackedOps(std::shared_ptr<Graph>& graph);
TORCH_API void metalInsertPrePackedOps(script::Module& module);
TORCH_API void metalFusePrePackedConvWithClamp(script::Module& module);
TORCH_API void metalFoldPrePackingOps(script::Module& module);
TORCH_API script::Module metalOptimizeForMobile(
const script::Module& module,
const std::vector<std::string>& preserved_methods);
} // namespace torch::jit
```
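
A hedged sketch of the mobile-optimization entry point (the path "model.pt" is a placeholder, and a Metal-enabled build is assumed): load a scripted module and run the full Metal rewrite pipeline while preserving `forward`.
```cpp
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/passes/metal_rewrite.h>
#include <torch/csrc/jit/serialization/import.h>

using namespace torch::jit;

script::Module optimizeForMetal(const std::string& path) {
  script::Module m = load(path);  // "path" points at an already-scripted module
  return metalOptimizeForMobile(m, /*preserved_methods=*/{"forward"});
}
```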
|
============================================================================================================================================
SOURCE CODE FILE: mkldnn_rewrite.h
LINES: 1
SIZE: 0.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\mkldnn_rewrite.h
ENCODING: utf-8
```h
#pragma once
#include <ATen/Config.h>
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/subgraph_rewrite.h>
#if AT_MKLDNN_ENABLED()
#include <ideep/tensor.hpp>
#endif // AT_MKLDNN_ENABLED()
namespace torch::jit {
#if AT_MKLDNN_ENABLED()
namespace mkldnn {
const static std::map<std::string, std::vector<torch::jit::MatchFilter>>
fusion_rewrite_map = {
{"none", {}},
{"relu", {}},
};
} // namespace mkldnn
#endif // AT_MKLDNN_ENABLED()
void FuseConvWithEltwise(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
===================================================================================================================================================
SOURCE CODE FILE: mobile_optimizer_type.h
LINES: 1
SIZE: 0.24 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\mobile_optimizer_type.h
ENCODING: utf-8
```h
#pragma once
#include <cstdint>
enum class MobileOptimizerType : int8_t {
CONV_BN_FUSION,
INSERT_FOLD_PREPACK_OPS,
REMOVE_DROPOUT,
FUSE_ADD_RELU,
HOIST_CONV_PACKED_PARAMS,
CONV_1D_TO_2D,
VULKAN_AUTOMATIC_GPU_TRANSFER,
};
```
|
===========================================================================================================================================
SOURCE CODE FILE: normalize_ops.h
LINES: 1
SIZE: 0.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\normalize_ops.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// This pass converts aten ops to a normalized form. It is
// run immediately after IR generation in both the tracer and compiler,
// so downstream consumers of the IR do not need to handle ops in their
// pre-normalized form.
// Currently only handles normalization of op aliases.
TORCH_API void NormalizeOps(const std::shared_ptr<Graph>& graph);
const std::unordered_map<Symbol, Symbol>& getOperatorAliasMap();
} // namespace torch::jit
```
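
A small sketch of the alias-normalization behavior (it assumes `aten::absolute` is registered as an alias of `aten::abs` in the map returned by `getOperatorAliasMap`): after the pass the graph should contain the canonical op name.
```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/passes/normalize_ops.h>

using namespace torch::jit;

void normalizeExample() {
  auto graph = std::make_shared<Graph>();
  parseIR(R"IR(
    graph(%x : Tensor):
      %y : Tensor = aten::absolute(%x)
      return (%y)
  )IR", graph.get());
  NormalizeOps(graph);
  graph->dump();  // expected to show the canonical alias (e.g. aten::abs)
}
```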
|
================================================================================================================================================
SOURCE CODE FILE: onednn_graph_fuser.h
LINES: 1
SIZE: 1.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\onednn_graph_fuser.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/pass_manager.h>
#include <ATen/Config.h>
namespace torch::jit {
namespace fuser::onednn {
static std::atomic<bool> onednn_enabled{true};
static std::atomic<bool>& getLlgaEnabled() {
return onednn_enabled;
}
TORCH_API void fuseGraph(std::shared_ptr<Graph>& g);
} // namespace fuser::onednn
struct C10_EXPORT RegisterLlgaFuseGraph
: public PassManager<RegisterLlgaFuseGraph> {
static bool setEnabled(bool enabled) {
TORCH_CHECK(
AT_MKLDNN_ENABLED(),
"Running oneDNN Graph fuser is only supported with MKLDNN builds.");
bool oldState = fuser::onednn::getLlgaEnabled();
fuser::onednn::getLlgaEnabled() = enabled;
if (enabled) {
registerPass(fuser::onednn::fuseGraph);
} else {
clearPass();
}
return oldState;
}
static bool isEnabled() {
return fuser::onednn::getLlgaEnabled();
}
// override PassManager::registerPass to register pre-pass
static bool registerPass(GraphPass p) {
if (!isRegistered()) {
passID(registerPrePass(std::move(p)), true);
isRegistered(true);
return false;
}
return true;
}
// override PassManager::clearPass to clear pre-pass
static void clearPass() {
if (isRegistered()) {
clearPrePass(passID());
isRegistered(true);
}
}
};
} // namespace torch::jit
```
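
A usage sketch (only meaningful on an MKLDNN-enabled build, since `setEnabled` TORCH_CHECKs for it): turn the LLGA/oneDNN Graph fuser on for subsequent executions and restore the previous state afterwards.
```cpp
#include <torch/csrc/jit/passes/onednn_graph_fuser.h>

using namespace torch::jit;

void enableOneDnnFuser() {
  // Registers fuser::onednn::fuseGraph as a pre-pass and remembers the old state.
  bool was_enabled = RegisterLlgaFuseGraph::setEnabled(true);
  // ... run scripted models here ...
  RegisterLlgaFuseGraph::setEnabled(was_enabled);  // restore previous setting
}
```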
|
==================================================================================================================================
SOURCE CODE FILE: onnx.h
LINES: 1
SIZE: 0.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\onnx.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/onnx/onnx.h>
#include <torch/csrc/utils/pybind.h>
namespace torch::jit {
TORCH_API std::shared_ptr<Graph> ToONNX(
std::shared_ptr<Graph>& state,
::torch::onnx::OperatorExportTypes operator_export_type);
TORCH_API py::dict BlockToONNX(
Block* old_block,
Block* new_block,
::torch::onnx::OperatorExportTypes operator_export_type,
py::dict& env,
py::set& values_in_env,
bool is_sub_block = false);
TORCH_API void NodeToONNX(
Node* old_node,
Block* new_block,
::torch::onnx::OperatorExportTypes operator_export_type,
py::dict& env,
py::set& values_in_env);
TORCH_API void RemovePrintOps(std::shared_ptr<Graph>& graph);
TORCH_API void PreprocessCaffe2Ops(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
==========================================================================================================================================
SOURCE CODE FILE: pass_manager.h
LINES: 1
SIZE: 4.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\pass_manager.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
/* `getCustomPrePasses()` returns a vector of passes that will be executed
* after differentiation but before any fusion. This is the de-facto location
* for compiler backends to insert passes.
*
* `getCustomPostPasses()` returns a vector of passes that will be
* executed after differentiation and after fusion (if any). This is the
* location for fusion cleanup passes if they are needed.
*
* Static registration of a pass can be done by creating a global
* `Register{Pre,Post}Pass r(Pass)` variable in a compilation unit.
*
* pass_manager.h uses a Meyer's singleton to store a vector of `Pass`es, which
* modify the IR graph in place.
*/
namespace torch::jit {
// A pass modifies a Graph in place.
using GraphPass = std::function<void(std::shared_ptr<Graph>&)>;
// Since Passes are std::functions, we associate a unique ID with each pass so
// that, if we want to deregister a pass, we have something to reference it by.
using GraphPassNameType = unsigned int;
// Graph pass entries have a name associated with them
using GraphPassEntry = std::pair<GraphPass, GraphPassNameType>;
// Return currently registered passes. Passes are stored in a static vector
TORCH_API std::vector<std::pair<GraphPass, GraphPassNameType>>&
getCustomPostPasses();
TORCH_API std::vector<std::pair<GraphPass, GraphPassNameType>>&
getCustomPrePasses();
TORCH_API GraphPassNameType registerPostPass(GraphPass p);
TORCH_API GraphPassNameType registerPrePass(GraphPass p);
// Look up pass by name passed in, remove it from registered passes
TORCH_API void clearPostPass(GraphPassNameType p);
TORCH_API void clearPrePass(GraphPassNameType p);
// Remove all passes
TORCH_API void clearAllPostPasses();
TORCH_API void clearAllPrePasses();
// LEGACY CALL
struct TORCH_API RegisterPostPass {
RegisterPostPass(GraphPass p);
};
using RegisterPass = RegisterPostPass;
/*
* PassManager is a wrapper on the register/clear PostPass functions above. It
 * will register the pass provided to "registerPass" and will hold on to the
 * associated ID so that clearPass can later be called to deregister that same
 * pass.
*
* PassManager is templated because we want static variables based on a
* particular GraphPass. When deriving from PassManager, you should send as the
* template parameter your derived class as you would for the curiously
* recurring template pattern. This template parameter isn't actually used and
* is simply done to prevent static members from being shared across derived
* types.
*/
template <typename DerivedType>
struct C10_EXPORT PassManager {
private:
  // We want this class to be abstract because it is only meant to be used
  // through a derived type (the CRTP parameter), never instantiated directly.
virtual void abstract() = 0;
protected:
/*
* isRegistered() will return if a pass has been registered
* isRegistered(true) will change the value of the internal static bool
*
   * This function keeps an internal static bool to track that state, so that
   * classes derived from PassManager don't have to worry about initializing
   * the static member themselves.
*/
static bool isRegistered(bool flip_bit = false) {
static bool val = false;
if (flip_bit)
val = !val;
return val;
}
/*
   * passID() will return the ID of the registered pass
   * passID(pass_id, true) will set the ID of the pass
   * Similarly to isRegistered, we use an internal static variable to hold the
   * ID.
*/
static GraphPassNameType passID(
GraphPassNameType PassID = 0,
bool set = false) {
static GraphPassNameType pass_id = 0;
if (set)
pass_id = PassID;
return pass_id;
}
public:
  // registerPass(pass) will register the pass provided and set the
  // passID/isRegistered state appropriately; it returns a bool value
  // indicating whether a pass had already been registered previously.
static bool registerPass(GraphPass p) {
if (!isRegistered()) {
// If we don't already have a registered pass, register pass
// hold on to its name, change isRegistered to true
passID(registerPostPass(std::move(p)), true);
isRegistered(true);
return false;
}
return true;
}
// Calls ClearPostPass(passID())
static void clearPass() {
// If the pass is registered, clear it and change isRegistered to false.
if (isRegistered()) {
clearPostPass(passID());
isRegistered(true);
}
}
// clang-tidy requires virtual destructor;
virtual ~PassManager() = default;
};
} // namespace torch::jit
```
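
Two registration sketches (the dead-code-elimination pass header is assumed to be available; any `GraphPass` works): a static `RegisterPostPass` object for one-time registration, and the dynamic register/clear pair when the pass needs to be removed again.
```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/dead_code_elimination.h>
#include <torch/csrc/jit/passes/pass_manager.h>

using namespace torch::jit;

// Static registration: runs after differentiation and fusion on every graph.
static RegisterPostPass register_dce_pass([](std::shared_ptr<Graph>& g) {
  EliminateDeadCode(g);
});

// Dynamic registration keeps the returned ID so the pass can be removed later.
void registerAndClear() {
  GraphPassNameType id =
      registerPostPass([](std::shared_ptr<Graph>&) { /* custom rewrite */ });
  clearPostPass(id);
}
```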
|
======================================================================================================================================
SOURCE CODE FILE: peephole.h
LINES: 1
SIZE: 0.49 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\peephole.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// return true if graph is modified
TORCH_API bool PeepholeOptimize(
const std::shared_ptr<Graph>& graph,
bool disable_shape_peepholes = false);
// return true if graph is modified
TORCH_API bool PeepholeOptimize(
Block* block,
bool disable_shape_peepholes = false);
// return true if graph is modified
TORCH_API bool FuseAddMM(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
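
A small sketch: run the peephole optimizer on a parsed graph and check the returned flag. On this trivial graph nothing may change; the return value reports whether any rewrite fired.
```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/passes/peephole.h>

using namespace torch::jit;

bool peepholeExample() {
  auto graph = std::make_shared<Graph>();
  parseIR(R"IR(
    graph(%x : Tensor):
      %y : Tensor = aten::expand_as(%x, %x)
      return (%y)
  )IR", graph.get());
  return PeepholeOptimize(graph);  // true if the graph was modified
}
```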
|
======================================================================================================================================================
SOURCE CODE FILE: peephole_alias_sensitive.h
LINES: 1
SIZE: 0.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\peephole_alias_sensitive.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Peephole-optimizes alias-sensitive patterns
// Currently this is invoked as part of PeepholeOptimize
// return true if graph is modified
// Optimizes on TensorType if shape_peepholes is true
TORCH_API bool PeepholeOptimizeAliasSensitive(
const std::shared_ptr<Graph>& graph,
bool shape_peepholes);
} // namespace torch::jit
```
|
==================================================================================================================================================
SOURCE CODE FILE: peephole_dict_idioms.h
LINES: 1
SIZE: 0.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\peephole_dict_idioms.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Peephole Optimizes Dict Ops such as len() and __getitem__
// 1. getitem optimizations
// Given a function like this:
// def foo():
// d = {0 : 1}
// x = d[0]
// return x
// This pass produces (after dead code elimination):
//    def foo():
// return 1
//
// This optimization can only happen if the dict is not modified
// and the dict has constant, non-overlapping keys.
//
// 2. len optimizations
// Given a function like this:
// def foo():
// d = {0 : 1}
// return len(d)
// This pass produces (after dead code elimination):
// def foo():
// return 1
//
// This has the same requirements as the getitem optimizations.
//
// Currently this is invoked as part of PeepholeOptimize
// return true if graph is modified.
TORCH_API bool PeepholeOptimizeDictIdioms(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
==================================================================================================================================================
SOURCE CODE FILE: peephole_list_idioms.h
LINES: 1
SIZE: 2.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\peephole_list_idioms.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// Peephole Optimizes List ops such as len(li) and li[1].
// 1. Construct/Unpack optimizations
// Given a function like this:
// def foo(a, b):
// li = [a, b]
// x, y = li
// return x, y
// This pass produces (after dead code elimination):
// def foo(a, b):
// return a, b
//
// This is only applied to lists that are not modified.
//
// 2. getitem optimizations
// Given a function like this:
// def foo(a, b):
// li = [a, b]
// x = li[0]
// return x
// This pass produces (after dead code elimination):
// def foo(a, b):
// return a
//
// This optimization can only happen if the list is not modified.
//
// 3. len optimizations
// Given a function like this:
// def foo():
// li = [1, 2]
// return len(li)
// This pass produces (after dead code elimination):
// def foo():
// return 2
//
// This has the same requirements as the getitem optimizations.
//
// 4. ListConstruct + ListConstruct
// Given a function like this:
// def foo():
// return [1, 2] + [3, 4]
// This pass produces (after dead code elimination):
// def foo():
// return [1, 2, 3, 4]
//
// This is only applied to lists that are not modified.
//
// 5. Slice
// Given a function like this:
// def foo():
// return [1, 2, 3, 4, 5][0:2]
//    This pass produces (after dead code elimination):
// def foo():
// return [1, 2]
//
// Currently this is invoked as part of PeepholeOptimize
// return true if graph is modified.
// If `refine_list_len` is true, the pass will attempt to refine the length of
// lists through len comparisons and assertions. This does not generally
// optimize PyTorch programs, so it is not enabled by default in
// PeepholeOptimize.
TORCH_API bool PeepholeOptimizeListIdioms(
const std::shared_ptr<Graph>& graph,
bool refine_list_len = false);
} // namespace torch::jit
```
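
A sketch of case 1 above (construct/unpack): the list is built and immediately unpacked without modification, so the pass can forward `%a`/`%b` directly; a later dead-code-elimination run would then drop the list entirely.
```cpp
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/passes/peephole_list_idioms.h>

using namespace torch::jit;

bool listIdiomsExample() {
  auto graph = std::make_shared<Graph>();
  parseIR(R"IR(
    graph(%a : Tensor, %b : Tensor):
      %li : Tensor[] = prim::ListConstruct(%a, %b)
      %x : Tensor, %y : Tensor = prim::ListUnpack(%li)
      return (%x, %y)
  )IR", graph.get());
  return PeepholeOptimizeListIdioms(graph);  // true if the unpack was rewritten
}
```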
|
=================================================================================================================================================
SOURCE CODE FILE: peephole_non_tensor.h
LINES: 1
SIZE: 0.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\peephole_non_tensor.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
// return true if graph is modified
// Optimizes general graph patterns that
// are not covered in peephole.cpp and peephole_list_idioms.cpp
TORCH_API bool PeepholeOptimizeNonTensor(const std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
=============================================================================================================================================
SOURCE CODE FILE: prepack_folding.h
LINES: 1
SIZE: 0.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\prepack_folding.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
using PrePackingOpsFilterFn = std::function<bool(Node*)>;
void PrePackingOpsFolder(
script::Module& m,
const PrePackingOpsFilterFn& is_foldable_op,
const std::string& attr_prefix);
} // namespace torch::jit
```
|
============================================================================================================================================================
SOURCE CODE FILE: dedup_module_uses.h
LINES: 1
SIZE: 0.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\quantization\dedup_module_uses.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
namespace torch::jit {
/** Recursively deduplicate multiple uses of the same module by
 * creating an instance clone for each use of the module. The type stays
 * the same as before and all the attributes are copied; we then change
 * each use of the original module in the Graph into a use of the
 * corresponding cloned module.
*
* This is done to ensure that modules can survive destructive passes
* without changing model behavior. For example, here:
*
* x = self.conv1(x)
* x = self.relu(x)
* x = self.conv2(x)
* x = self.relu(x)
*
* self.relu needs to be deduplicated for potential future destructive passes
* to work properly.
*/
TORCH_API void DedupModuleUses(Module& module);
} // namespace torch::jit
```
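
A hedged sketch (the path "model.pt" is a placeholder for an already-scripted module): load the module and deduplicate shared submodule uses before running destructive passes such as quantization rewrites.
```cpp
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/passes/quantization/dedup_module_uses.h>
#include <torch/csrc/jit/serialization/import.h>

using namespace torch::jit;

void dedupExample() {
  Module m = load("model.pt");  // placeholder path to a scripted module
  DedupModuleUses(m);           // e.g. a single self.relu used twice now has two clones
}
```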
|
===================================================================================================================================================
SOURCE CODE FILE: finalize.h
LINES: 1
SIZE: 2.30 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\quantization\finalize.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/passes/quantization/quantization_type.h>
namespace torch::jit {
/** \brief Backend specific pass to fuse dequantize - op - quantize calls
* as quantized_op calls.
*
 * Right now this fusion targets the fbgemm backend and only works for the
 * quantized conv op; we'll extend it to more ops and more backends in the
 * future.
*
* Currently supported fusion:
* q(conv2d(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_conv2d(prepack(to_nhwc(a)),
* prepack(to_nhwc(w)),
* prepack(to_nhwc(b))))
*
* q(linear(dq(a), dq(w), dq(b))) --> to_nchw(fbgemm_linear(prepack(to_nhwc(a)),
* prepack(to_nhwc(w)),
* prepack(to_nhwc(b))))
*
 * \param graph the graph to which we want to apply the fusion
*/
TORCH_API void QuantFusion(
std::shared_ptr<Graph>& graph,
QuantType quant_type = QuantType::STATIC);
/** \brief Insert prepack and unpack function in graph
 * We want to add pack/unpack functions for the quantized weight because later
 * we want to fold the packed weight into an attribute of the module, in order
 * to reduce the cost of packing the weight on the fly in quantized models.
*
 * Each quantized op has its corresponding prepack/unpack function,
* right now, we only need to do prepack/unpack for quantized::linear
* and quantized::conv2d.
*/
TORCH_API void InsertPrepackUnpack(std::shared_ptr<Graph>& graph);
/** \brief Insert pack and unpack function in all graphs
* of module
*
* Go through graphs of all the methods of all child modules
* and call InsertPrepackUnpack on the graph.
*/
TORCH_API void InsertPrepackUnpack(Module& module);
TORCH_API script::Module Finalize(
script::Module& module,
QuantType quant_type = QuantType::STATIC,
const std::vector<std::string>& preserved_attrs =
std::vector<std::string>());
TORCH_API void FoldQuantizedPrepackingOps(Module& module);
TORCH_API Module FinalizeOnDevicePTQ(
Module& module,
QuantType quant_type,
const std::string& method_name);
} // namespace torch::jit
```
|
========================================================================================================================================================
SOURCE CODE FILE: fusion_passes.h
LINES: 1
SIZE: 0.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\quantization\fusion_passes.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/ir/ir.h>
namespace torch::jit {
TORCH_API void FuseQuantizedAddRelu(std::shared_ptr<Graph>& graph);
} // namespace torch::jit
```
|
=================================================================================================================================================
SOURCE CODE FILE: helper.h
LINES: 1
SIZE: 7.49 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\quantization\helper.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/subgraph_matcher.h>
#include <torch/csrc/jit/passes/graph_rewrite_helper.h>
#include <torch/csrc/jit/passes/quantization/quantization_type.h>
#include <functional>
#include <regex>
namespace torch::jit {
using graph_rewrite_helper::getFuncName;
// Vector of a module and the name of its method
using ModuleMethodVector = std::vector<std::pair<Module, std::string>>;
// Map of quantization parameter name and value
// for example _scale, _zero_point,
// _scalar_type and _axis(for per channel quantization)
using QParamVector = std::vector<std::pair<std::string, IValue>>;
// =========== helper functions for Value =========
// Check if a value is weight, since we need to use weight observer
// for weight
TORCH_API bool isWeight(Value* v);
// Check if a value is bias for conv and linear, which we do not
// quantize
TORCH_API bool isBiasOfConvOrLinear(Value* v);
TORCH_API bool isEmbeddingBagNonInput(Value* v);
// Get the use as scalar input of clamp ops for the input value
std::optional<Use> getClampScalarInputUse(Value* v);
// For a given value `v`, get the list of values whose observed/quantized
// status needs to be checked; if they are all observed/quantized, we can say
// that `v` is also observed/quantized, since we can derive the quantization
// parameters for `v` from that list of values.
TORCH_API std::vector<Value*> getPassThroughInputs(Value* v);
// Clones the method named orig_method_name into a new method named new_method_name
TORCH_API void cloneMethod(
Module& module,
const std::string& orig_method_name,
const std::string& new_method_name);
// Check if a value in the graph is a Scalar value
TORCH_API bool isScalar(Value* v);
// Check if value is the input of the graph
TORCH_API bool hitGraphInput(Value* value);
// Converts a mangled name, such as
// __torch__.torch.ao.nn.quantized.modules.conv.___torch_mangle_7.Conv2d
// into an unmangled name, such as
// __torch__.torch.ao.nn.quantized.modules.conv.Conv2d
TORCH_API std::string removeTorchMangle(const std::string& orig_name);
// Return the module name that corresponds to the value.
TORCH_API std::optional<std::string> getModuleName(Value* value);
// =========== helper functions for Node =========
TORCH_API bool isSingleInputGeneralShapeAtenFunction(Node* n);
TORCH_API bool isSingleInputGeneralValueAtenFunction(Node* n);
TORCH_API bool isSingleInputGeneralCallFunction(Node* n);
TORCH_API bool isSingleInputGeneralAtenFunction(Node* n);
TORCH_API bool isClamp(Node* n);
// Check if the node will produce the same result regardless of whether
// the input tensor is quantized or not, example: aten::size
TORCH_API bool isTensorInfoNode(Node* n);
// Check if this is the propagate op that has a single input, e.g. aten::cat
TORCH_API bool isPropagateQuantSingleInputOp(Node* n);
// Check if this is the propagate op that has two inputs, e.g. aten::add
TORCH_API bool isPropagateQuantBinaryOp(Node* n);
// Check if this is the node that we'll quantize or not quantize depending on
// whether the input of the node is quantized, example: aten::cat
TORCH_API bool isPropagateQuantOp(Node* n);
// Check if the node is a binary op like aten::add and aten::mul and
// if the input 1 is a scalar, these ops will be quantized to
// quantized::{op}_scalar
TORCH_API bool isBinaryOpWithScalarInput(Node* n);
TORCH_API std::optional<std::tuple<c10::QScheme, QParamVector>> getFixedQParams(
Node* n);
// We don't want to analyze the graph for some `builtin` CallFunctions
// like `linear` because we want to preserve the op boundary
TORCH_API bool userDefinedCallFunction(Node* n);
// Check if the node has scalar input
TORCH_API bool hasScalarInput(Node* n);
// Check if a node is quantizable
TORCH_API bool nodeQuantizable(
Node* n,
QuantType quant_type = QuantType::STATIC);
// Nodes which only require quantization of weight value, eg. embedding_bag
bool isWeightOnlyStaticQuantOp(Node* n);
// Check if a use of the value is quantizable; this depends on
// both the use node and the offset
TORCH_API bool useQuantizable(const Use& use, QuantType quant_type);
// Given a CallFunction node, extract the graph of the called function
TORCH_API std::shared_ptr<Graph> getCallFunctionGraph(Node* n);
// Check if `use` is a CallFunction of name `func_name` and if value
// `v` is the nth argument (if provided) of the function
bool matchCallFuncToUse(
const Use& use,
const std::string& func_name,
std::optional<int> nth_arg);
// Check if `use` is an aten function of name `func_name` and if value
// `v` is the nth argument (if provided) of the function
bool matchAtenFuncToUse(
const Use& use,
const std::string& func_name,
std::optional<int> nth_arg);
// =========== helper functions for Block =========
// checks if a block will always raise an Exception
TORCH_API bool alwaysRaisesException(Block* block);
// =========== helper functions for Module ==========
// TODO: remove
TORCH_API std::vector<std::string> getModuleAccessPath(
Value* instance,
Value* self);
// TODO: remove
TORCH_API Module
findChildModule(const Module& module, const std::vector<std::string>& path);
// Given a CallMethod node, get the module instance corresponding
// to the instance Value
// TODO: refactor all current uses of this function to the Opt one
TORCH_API Module getInvokedModule(Module& module, Node* n, Value* self);
// Given a CallMethod node, get the module instance corresponding
// to the instance Value if the instance is a module, otherwise return
// std::nullopt
std::optional<Module> getInvokedModuleOpt(
const Module& module,
Node* n,
Value* self);
// ==================== filter functions for matches ==============
// filter to check Value `vname` is a constant of int value `value`
bool is_int_constant(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap,
const std::string& vname,
int value);
// filter to check if the %alpha argument of aten::add is constant 1
bool aten_add_alpha_is_one(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
// filter to check if the functional in CallFunction is relu
bool is_functional_relu(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
// filter to check if the module is torch.nn.ReLU
bool is_relu_module(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
bool is_linear_module(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
// TODO: add a macro to declare the filters
bool is_conv1d_module(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
bool is_conv2d_module(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
bool is_conv3d_module(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
bool is_conv_transpose1d_module(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
bool is_conv_transpose2d_module(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
bool is_batchnorm2d_module(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
bool is_batchnorm3d_module(
const Match& match,
const std::unordered_map<std::string, Value*>& vmap);
} // namespace torch::jit
```
|
===========================================================================================================================================================
SOURCE CODE FILE: insert_observers.h
LINES: 1
SIZE: 2.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\include\torch\csrc\jit\passes\quantization\insert_observers.h
ENCODING: utf-8
```h
#pragma once
#include <torch/csrc/jit/api/module.h>
#include <torch/csrc/jit/passes/quantization/quantization_type.h>
namespace std {
template <>
struct hash<torch::jit::Module> {
inline size_t operator()(const torch::jit::Module& arg) const {
return std::hash<c10::intrusive_ptr<c10::ivalue::Object>>()(arg._ivalue());
}
};
} // namespace std
namespace torch::jit {
using QConfig = std::tuple<Module, Module>;
using QConfigDict = std::unordered_map<std::string, std::optional<QConfig>>;
/** \brief Insert observer module and observer function call for
 * the Tensors that need to be observed.
*
 * For each Tensor that needs to be observed in the method, insert an observer
 * module into the input module and add forward calls of the observer to the
 * specified method.
*
* \param module the input module
* \param method_name the method we want to insert observers for
* \param qconfig_dict the qconfig dictionary that specifies how
* each module is going to be quantized
* \param inplace whether we want to do inplace modification to the input module
* or clone the module
 * \param quant_type whether static or dynamic quantization is being applied
*/
TORCH_API Module InsertObservers(
Module& module,
const std::string& method_name,
const QConfigDict& qconfig_dict,
bool inplace,
QuantType quant_type = QuantType::STATIC);
/** \brief Insert observer module and observer method for
 * the Tensors that need to be observed.
*
 * For each Tensor that needs to be observed in the method, insert an observer
 * module into the input module and add observe_<method-name> methods to the
 * module. Each such method is a clone of method_name with forward calls of the
 * observer added.
*
* \param module the input module
* \param method_name the method we want to insert observers for
* \param qconfig_dict the qconfig dictionary that specifies how
* each module is going to be quantized
* \param inplace whether we want to do inplace modification to the input module
* or clone the module
 * \param quant_type whether static or dynamic quantization is being applied
*/
TORCH_API Module InsertObserversForOnDevicePTQ(
Module& module,
const std::string& method_name,
const QConfigDict& qconfig_dict,
bool inplace,
QuantType quant_type = QuantType::STATIC);
} // namespace torch::jit
```
|