diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/_trace_wrapped_higher_order_op.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/_trace_wrapped_higher_order_op.py new file mode 100644 index 0000000000000000000000000000000000000000..6e22cafcc6dd5efeac0d05a81c8a9612270bd1d3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/_trace_wrapped_higher_order_op.py @@ -0,0 +1,120 @@ +import torch +from torch._C import DispatchKey +from torch._higher_order_ops.utils import autograd_not_implemented + +from torch._ops import HigherOrderOperator +from torch._subclasses import FakeTensorMode +from torch.fx.experimental._backward_state import BackwardState + +from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode, track_tensor_tree +from torch.utils._python_dispatch import _get_current_dispatch_mode +from torch.utils._pytree import tree_map_only + + +__all__ = ["trace_wrapped"] + + +# trace_wrapped(*args, fn) is equivalent to fn(*args), but with a twist: +# if you make_fx trace through this call, we will not actually trace into fn; instead, +# we will directly insert it as a call_function to fn in the graph. +# (Unlike make_fx, Dynamo WILL inline into fn.) +# You can think of this as a one-off allow_in_graph equivalent for proxy tensor tracing. +# +# Because proxy tensor tracing does not actually run the function, there are +# requirements on the behavior of fn. We are still figuring it out, but here is the current state: +# +# 1) fn SHOULD only take a single argument, which must be a tensor +# 2) fn MUST return a new tensor with the same metadata as the original tensor +# (e.g., zeros_like(input) is a permissible implementation of fn). +# This is verified via an extra assert that is inserted into the traced graph. +# 3) fn MAY have side effects, but it MAY NOT perform metadata mutation on other tensors +# participating in proxy tensor tracing (it MAY mutate other tensors, it MAY mutate Python state) +# These requirements stem from the requirement that we need to continue performing proxy tensor tracing, +# which assumes accurate fake tensor metadata, without actually running fn. +# In the future, we may allow for a "meta" function associated with fn to allow for more interesting input-output patterns. +# +# Note that tensors / Python state are allowed to be mutated. +# This relaxed constraint is not always sound, but it is sound for backward tracing with fake +# tensors as it takes place in AOTAutograd, as the backward pass is guaranteed not to depend on concrete +# tensor values (via fake tensor) or Python state (because the autograd engine doesn't depend on Python). +# +# The intended use case for this function is to allow AOTAutograd to defer complex +# backward hooks to compiled autograd. AOTAutograd performs a make_fx trace which preserves +# the function call as is in the graph, and only when we Dynamo through the backward graph in +# compiled autograd do we inline into the function.
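+#
+# As a rough usage sketch only (not code from this file): `my_hook` and `grad`
+# below are hypothetical names, and the hook body is purely illustrative.
+# Deferring a backward hook in this style might look like:
+#
+#     def my_hook(grad):
+#         # arbitrary Python is fine here, but the return value must be a new
+#         # tensor with the same metadata as `grad` (see requirement 2 above)
+#         return grad * 2
+#
+#     out = trace_wrapped(grad, fn=my_hook)
+#
+# Under a make_fx trace this records a single call_function node targeting
+# my_hook rather than tracing into it; only when Dynamo processes the backward
+# graph in compiled autograd does my_hook actually get inlined.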
+ + +def trace_wrapped(*args, **kwargs): + with torch.no_grad(): + return _trace_wrapped_op(*args, **kwargs) + + +# TODO(jansel): need to ensure this does not get DCEed +_trace_wrapped_op = HigherOrderOperator("trace_wrapped") + + +def _assert_meta(grad, size, stride, dtype): + assert grad.size() == size, "size mismatch" + assert grad.stride() == stride, "stride mismatch" + assert grad.dtype == dtype, "dtype mismatch" + return grad + + +@_trace_wrapped_op.py_impl(ProxyTorchDispatchMode) +def inner_trace(mode, *args, bw_state=None, **kwargs): + def self_invoke(*args, **dyn_kwargs): + with torch.no_grad(): + return _trace_wrapped_op(*args, **dyn_kwargs, **kwargs) + + def unwrap_proxies(x): + if isinstance(x, torch.Tensor): + return mode.tracer.unwrap_proxy(x) + if isinstance(x, (list, tuple)): + return type(x)(map(unwrap_proxies, x)) + if x is None: + return None + raise AssertionError(f"unhandled type: {type(x)}") + + proxy_kwargs = {} + if bw_state is not None: + assert isinstance(bw_state, BackwardState) and bw_state.proxy is not None + proxy_kwargs["bw_state"] = bw_state.proxy + out_proxy = mode.tracer.create_proxy( + "call_function", + self_invoke, + unwrap_proxies(args), + proxy_kwargs, + name="trace_wrapped", + ) + + if args[0] is None: + grad = args[1] # module backward hooks + else: + grad = args[0] # other backward hooks + grad = tree_map_only(torch.Tensor, torch.empty_like, grad) + track_tensor_tree(grad, out_proxy, constant=None, tracer=mode.tracer) + return grad + + +@_trace_wrapped_op.py_impl(FakeTensorMode) +def inner_fake(*args, **kwargs): + raise RuntimeError("This op should never be invoked here") + + +@_trace_wrapped_op.py_impl(DispatchKey.CompositeExplicitAutograd) +def _trace_wrapped_op_dense(*args, fn, **kwargs): + mode = _get_current_dispatch_mode() + assert mode is None, "Mode should never be enabled for CPU/CUDA key" + return fn(*args, **kwargs) + + +_trace_wrapped_op.py_impl(DispatchKey.Autograd)( + autograd_not_implemented(_trace_wrapped_op, deferred_error=True) +) + + +@_trace_wrapped_op.py_functionalize_impl +def _trace_wrapped_functionalized(ctx, *args, **kwargs): + unwrapped_args = ctx.unwrap_tensors(args) + with ctx.redispatch_to_next(): + return ctx.wrap_tensors(_trace_wrapped_op(*unwrapped_args, **kwargs)) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py new file mode 100644 index 0000000000000000000000000000000000000000..63edf253062fece499cd276570cef07c4c58e5b7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/bytecode_transformation.py @@ -0,0 +1,1114 @@ +import copy +import dataclasses +import dis +import itertools +import sys +import types +from typing import Any, Callable, cast, Dict, Iterator, List, Optional, Tuple + +from .bytecode_analysis import ( + get_indexof, + propagate_line_nums, + remove_extra_line_nums, + stacksize_analysis, +) + + +@dataclasses.dataclass +class InstructionExnTabEntry: + start: "Instruction" + end: "Instruction" + target: "Instruction" + depth: int + lasti: bool + + def __repr__(self) -> str: + return ( + f"InstructionExnTabEntry(start={self.start.short_inst_repr()}, " + f"end={self.end.short_inst_repr()}, " + f"target={self.target.short_inst_repr()}, " + f"depth={self.depth}, lasti={self.lasti})" + ) + + def __eq__(self, o) -> bool: + return ( + self.start is o.start + and self.end is o.end + and self.target is o.target + and self.depth == o.depth + and self.lasti == 
o.lasti + ) + + +@dataclasses.dataclass +class Instruction: + """A mutable version of dis.Instruction""" + + opcode: int + opname: str + arg: Optional[int] + argval: Any + offset: Optional[int] = None + starts_line: Optional[int] = None + is_jump_target: bool = False + positions: Optional["dis.Positions"] = None + # extra fields to make modification easier: + target: Optional["Instruction"] = None + exn_tab_entry: Optional[InstructionExnTabEntry] = None + + def __hash__(self) -> int: + return id(self) + + def __eq__(self, other) -> bool: + return id(self) == id(other) + + def short_inst_repr(self) -> str: + return f"Instruction(opname={self.opname}, offset={self.offset})" + + +def convert_instruction(i: dis.Instruction) -> Instruction: + return Instruction( + i.opcode, + i.opname, + i.arg, + i.argval, + i.offset, + i.starts_line, + i.is_jump_target, + getattr(i, "positions", None), + ) + + +class _NotProvided: + def __repr__(self) -> str: + return "_NotProvided" + + +def create_instruction( + name, *, arg=None, argval=_NotProvided, target=None +) -> Instruction: + """ + At most one of `arg`, `argval`, and `target` can be not None/_NotProvided. + This is to prevent ambiguity, e.g. does + create_instruction("LOAD_CONST", 5) + mean load the constant at co_consts[5], or load the constant 5? + + If `arg` is not provided, it will be computed during assembly from + `argval` or `target`. + + Do not use for LOAD_GLOBAL - use create_load_global instead. + """ + assert name != "LOAD_GLOBAL" + cnt = (arg is not None) + (argval is not _NotProvided) + (target is not None) + if cnt > 1: + raise RuntimeError( + "only one of arg, argval, and target can be not None/_NotProvided" + ) + if arg is not None and not isinstance(arg, int): + raise RuntimeError("instruction arg must be int or None") + return Instruction( + opcode=dis.opmap[name], opname=name, arg=arg, argval=argval, target=target + ) + + +# Python 3.11 remaps +def create_jump_absolute(target) -> Instruction: + inst = "JUMP_FORWARD" if sys.version_info >= (3, 11) else "JUMP_ABSOLUTE" + return create_instruction(inst, target=target) + + +def create_load_global(name, push_null) -> Instruction: + """ + `name` is the name of the global to be loaded. + `push_null` specifies whether or not a NULL should be pushed to the stack + before the global (Python 3.11+ only). + + Python 3.11 changed the LOAD_GLOBAL instruction in that the first bit of + the instruction arg specifies whether a NULL should be pushed to the stack + before the global. The remaining bits of the instruction arg contain the + name index. See `create_call_function` for why this NULL is needed. + + The instruction's `arg` is actually computed when assembling the bytecode. + For Python 3.11, push_null information is propagated through the arg. + + NOTE: we don't use create_instruction since LOAD_GLOBAL is the only instruction + where both arg and argval need to be specified. + """ + return Instruction( + opcode=dis.opmap["LOAD_GLOBAL"], + opname="LOAD_GLOBAL", + arg=push_null, + argval=name, + ) + + +def create_dup_top() -> Instruction: + if sys.version_info >= (3, 11): + return create_instruction("COPY", arg=1) + return create_instruction("DUP_TOP") + + +def create_rot_n(n) -> List[Instruction]: + """ + Returns a "simple" sequence of instructions that rotates TOS to the n-th + position in the stack. For Python < 3.11, returns a single ROT_* + instruction. If no such instruction exists, an error is raised and the + caller is expected to generate an equivalent sequence of instructions. 
+ For Python >= 3.11, any rotation can be expressed as a simple sequence of + swaps. + """ + if n <= 1: + # don't rotate + return [] + + if sys.version_info >= (3, 11): + # rotate can be expressed as a sequence of swap operations + # e.g. rotate 3 is equivalent to swap 3, swap 2 + return [create_instruction("SWAP", arg=i) for i in range(n, 1, -1)] + + # ensure desired rotate function exists + if sys.version_info < (3, 8) and n >= 4: + raise AttributeError(f"rotate {n} not supported for Python < 3.8") + if sys.version_info < (3, 10) and n >= 5: + raise AttributeError(f"rotate {n} not supported for Python < 3.10") + + if n <= 4: + return [create_instruction("ROT_" + ["TWO", "THREE", "FOUR"][n - 2])] + return [create_instruction("ROT_N", arg=n)] + + +def create_call_function(nargs, push_null) -> List[Instruction]: + """ + Creates a sequence of instructions that makes a function call. + + `push_null` is used in Python 3.11+ only. It is used in codegen when + a function call is intended to be made with the NULL + fn convention, + and we know that the NULL has not been pushed yet. We will push a + NULL and rotate it to the correct position immediately before making + the function call. + push_null should default to True unless you know you are calling a function + that you codegen'd with a null already pushed, for example + (assume `math` is available in the global scope), + + create_load_global("math", True) # pushes a null + create_instruction("LOAD_ATTR", argval="sqrt") + create_instruction("LOAD_CONST", argval=25) + create_call_function(1, False) + """ + if sys.version_info >= (3, 11): + output = [] + if push_null: + output.append(create_instruction("PUSH_NULL")) + output.extend(create_rot_n(nargs + 2)) + output.append(create_instruction("PRECALL", arg=nargs)) + output.append(create_instruction("CALL", arg=nargs)) + return output + return [create_instruction("CALL_FUNCTION", arg=nargs)] + + +def create_call_method(nargs) -> List[Instruction]: + if sys.version_info >= (3, 11): + return [ + create_instruction("PRECALL", arg=nargs), + create_instruction("CALL", arg=nargs), + ] + return [create_instruction("CALL_METHOD", arg=nargs)] + + +def lnotab_writer( + lineno: int, byteno: int = 0 +) -> Tuple[List[int], Callable[[int, int], None]]: + """ + Used to create typing.CodeType.co_lnotab + See https://github.com/python/cpython/blob/main/Objects/lnotab_notes.txt + This is the internal format of the line number table if Python < 3.10 + """ + assert sys.version_info < (3, 10) + lnotab: List[int] = [] + + def update(lineno_new, byteno_new): + nonlocal byteno, lineno + while byteno_new != byteno or lineno_new != lineno: + byte_offset = max(0, min(byteno_new - byteno, 255)) + line_offset = max(-128, min(lineno_new - lineno, 127)) + assert byte_offset != 0 or line_offset != 0 + byteno += byte_offset + lineno += line_offset + lnotab.extend((byte_offset, line_offset & 0xFF)) + + return lnotab, update + + +def linetable_310_writer(first_lineno): + """ + Used to create typing.CodeType.co_linetable + See https://github.com/python/cpython/blob/main/Objects/lnotab_notes.txt + This is the internal format of the line number table for Python 3.10 + """ + assert sys.version_info >= (3, 10) and sys.version_info < (3, 11) + linetable: List[int] = [] + lineno = first_lineno + lineno_delta = 0 + byteno = 0 + + def _update(byteno_delta, lineno_delta): + while byteno_delta != 0 or lineno_delta != 0: + byte_offset = max(0, min(byteno_delta, 254)) + line_offset = max(-127, min(lineno_delta, 127)) + assert byte_offset != 0 
or line_offset != 0 + byteno_delta -= byte_offset + lineno_delta -= line_offset + linetable.extend((byte_offset, line_offset & 0xFF)) + + def update(lineno_new, byteno_new): + nonlocal lineno, lineno_delta, byteno + byteno_delta = byteno_new - byteno + byteno = byteno_new + _update(byteno_delta, lineno_delta) + lineno_delta = lineno_new - lineno + lineno = lineno_new + + def end(total_bytes): + _update(total_bytes - byteno, lineno_delta) + + return linetable, update, end + + +def encode_varint(n: int) -> List[int]: + """ + 6-bit chunk encoding of an unsigned integer + See https://github.com/python/cpython/blob/3.11/Objects/locations.md + """ + assert n >= 0 + b = [n & 63] + n >>= 6 + while n > 0: + b[-1] |= 64 + b.append(n & 63) + n >>= 6 + return b + + +def linetable_311_writer(first_lineno: int): + """ + Used to create typing.CodeType.co_linetable + See https://github.com/python/cpython/blob/3.11/Objects/locations.md + This is the internal format of the line number table for Python 3.11 + """ + assert sys.version_info >= (3, 11) + linetable = [] + lineno = first_lineno + + def update(positions: "dis.Positions", inst_size): + nonlocal lineno + lineno_new = positions.lineno if positions else None + + def _update(delta, size): + assert 0 < size <= 8 + # first byte - use 13 (no column info) if positions is + # malformed, otherwise use 14 (long form) + other_varints: Tuple[int, ...] = () + if ( + positions + and positions.lineno is not None + and positions.end_lineno is not None + and positions.col_offset is not None + and positions.end_col_offset is not None + ): + linetable.append(0b1_1110_000 + size - 1) + # for whatever reason, column offset needs `+ 1` + # https://github.com/python/cpython/blob/1931c2a438c50e6250725c84dff94fc760b9b951/Python/compile.c#L7603 + other_varints = ( + positions.end_lineno - positions.lineno, + positions.col_offset + 1, + positions.end_col_offset + 1, + ) + else: + linetable.append(0b1_1101_000 + size - 1) + # encode signed int + if delta < 0: + delta = ((-delta) << 1) | 1 + else: + delta <<= 1 + # encode unsigned int + linetable.extend(encode_varint(delta)) + for n in other_varints: + linetable.extend(encode_varint(n)) + + if lineno_new is None: + lineno_delta = 0 + else: + lineno_delta = lineno_new - lineno + lineno = lineno_new + while inst_size > 8: + _update(lineno_delta, 8) + inst_size -= 8 + _update(lineno_delta, inst_size) + + return linetable, update + + +@dataclasses.dataclass +class ExceptionTableEntry: + start: int + end: int + target: int + depth: int + lasti: bool + + +def encode_exception_table_varint(n: int) -> List[int]: + """ + Similar to `encode_varint`, but the 6-bit chunks are ordered in reverse. + """ + assert n >= 0 + b = [n & 63] + n >>= 6 + while n > 0: + b.append(n & 63) + n >>= 6 + b.reverse() + for i in range(len(b) - 1): + b[i] |= 64 + return b + + +def decode_exception_table_varint(bytes_iter: Iterator[int]) -> int: + """ + Inverse of `encode_exception_table_varint`. + """ + b = next(bytes_iter) + val = b & 63 + while b & 64: + val <<= 6 + b = next(bytes_iter) + val |= b & 63 + return val + + +def check_exception_table(tab: List[ExceptionTableEntry]) -> None: + """ + Verifies that a list of ExceptionTableEntries will make a well-formed + jump table: entries are non-empty, sorted, and do not overlap.
+ """ + for i in range(len(tab) - 1): + assert ( + tab[i].start <= tab[i].end + and tab[i].end < tab[i + 1].start + and tab[i + 1].start <= tab[i + 1].end + ) + + +def parse_exception_table(exntab: bytes) -> List[ExceptionTableEntry]: + """ + Parse the exception table according to + https://github.com/python/cpython/blob/3.11/Objects/exception_handling_notes.txt + """ + exntab_iter = iter(exntab) + tab = [] + try: + while True: + start = decode_exception_table_varint(exntab_iter) * 2 + length = decode_exception_table_varint(exntab_iter) * 2 + end = start + length - 2 + target = decode_exception_table_varint(exntab_iter) * 2 + dl = decode_exception_table_varint(exntab_iter) + depth = dl >> 1 + lasti = bool(dl & 1) + tab.append(ExceptionTableEntry(start, end, target, depth, lasti)) + except StopIteration: + check_exception_table(tab) + return tab + + +def assemble_exception_table(tab: List[ExceptionTableEntry]) -> bytes: + """ + Inverse of parse_exception_table - encodes list of exception + table entries into bytes. + """ + b = [] + for entry in tab: + first_entry = encode_exception_table_varint(entry.start // 2) + first_entry[0] |= 1 << 7 + b.extend(first_entry) + length = entry.end - entry.start + 2 + b.extend(encode_exception_table_varint(length // 2)) + b.extend(encode_exception_table_varint(entry.target // 2)) + dl = (entry.depth << 1) + entry.lasti + b.extend(encode_exception_table_varint(dl)) + return bytes(b) + + +def assemble(instructions: List[Instruction], firstlineno: int) -> Tuple[bytes, bytes]: + """Do the opposite of dis.get_instructions()""" + code: List[int] = [] + if sys.version_info >= (3, 11): + lnotab, update_lineno = linetable_311_writer(firstlineno) + num_ext = 0 + for i, inst in enumerate(instructions): + if inst.opname == "EXTENDED_ARG": + inst_size = 1 + num_ext += 1 + # copy positions from the actual instruction + for j in (1, 2, 3): + if instructions[i + j].opname != "EXTENDED_ARG": + inst.positions = instructions[i + j].positions + break + else: + inst_size = instruction_size(inst) // 2 + num_ext + num_ext = 0 + update_lineno(inst.positions, inst_size) + num_ext = 0 + arg = inst.arg or 0 + code.extend((inst.opcode, arg & 0xFF)) + for _ in range(instruction_size(inst) // 2 - 1): + code.extend((0, 0)) + else: + if sys.version_info < (3, 10): + lnotab, update_lineno = lnotab_writer(firstlineno) + else: + lnotab, update_lineno, end = linetable_310_writer(firstlineno) + + for inst in instructions: + if inst.starts_line is not None: + update_lineno(inst.starts_line, len(code)) + arg = inst.arg or 0 + code.extend((inst.opcode, arg & 0xFF)) + + if sys.version_info >= (3, 10): + end(len(code)) + + return bytes(code), bytes(lnotab) + + +def _get_instruction_by_offset(offset_to_inst: Dict[int, Instruction], offset: int): + """ + Get the instruction located at a given offset, accounting for EXTENDED_ARGs + """ + for n in (0, 2, 4, 6): + if offset_to_inst[offset + n].opcode != dis.EXTENDED_ARG: + return offset_to_inst[offset + n] + return None + + +def virtualize_jumps(instructions) -> None: + """Replace jump targets with pointers to make editing easier""" + jump_targets = {inst.offset: inst for inst in instructions} + + for inst in instructions: + if inst.opcode in dis.hasjabs or inst.opcode in dis.hasjrel: + inst.target = _get_instruction_by_offset(jump_targets, inst.argval) + + +_REL_JUMPS = set(dis.hasjrel) + + +def flip_jump_direction(instruction: Instruction) -> None: + if sys.version_info < (3, 11): + raise RuntimeError("Cannot flip jump direction in Python < 3.11") + 
if "FORWARD" in instruction.opname: + instruction.opname = instruction.opname.replace("FORWARD", "BACKWARD") + elif "BACKWARD" in instruction.opname: + instruction.opname = instruction.opname.replace("BACKWARD", "FORWARD") + else: + raise AttributeError("Instruction is not a forward or backward jump") + instruction.opcode = dis.opmap[instruction.opname] + assert instruction.opcode in _REL_JUMPS + + +def _get_instruction_front(instructions: List[Instruction], idx: int): + """ + i.e. get the first EXTENDED_ARG instruction (if any) when targeting + instructions[idx] with a jump. + """ + target = instructions[idx] + for offset in (1, 2, 3): + if idx >= offset and instructions[idx - offset].opcode == dis.EXTENDED_ARG: + target = instructions[idx - offset] + else: + break + return target + + +def devirtualize_jumps(instructions): + """Fill in args for virtualized jump target after instructions may have moved""" + indexof = get_indexof(instructions) + jumps = set(dis.hasjabs).union(set(dis.hasjrel)) + + for inst in instructions: + if inst.opcode in jumps: + target = _get_instruction_front(instructions, indexof[inst.target]) + if inst.opcode in dis.hasjabs: + if sys.version_info < (3, 10): + inst.arg = target.offset + elif sys.version_info < (3, 11): + # `arg` is expected to be bytecode offset, whereas `offset` is byte offset. + # Divide since bytecode is 2 bytes large. + inst.arg = int(target.offset / 2) + else: + raise RuntimeError("Python 3.11+ should not have absolute jumps") + else: # relative jump + # byte offset between target and next instruction + inst.arg = int(target.offset - inst.offset - instruction_size(inst)) + if inst.arg < 0: + if sys.version_info < (3, 11): + raise RuntimeError("Got negative jump offset for Python < 3.11") + inst.arg = -inst.arg + # forward jumps become backward + if "FORWARD" in inst.opname: + flip_jump_direction(inst) + elif inst.arg > 0: + # backward jumps become forward + if sys.version_info >= (3, 11) and "BACKWARD" in inst.opname: + flip_jump_direction(inst) + if sys.version_info >= (3, 10): + # see bytecode size comment in the absolute jump case above + inst.arg //= 2 + inst.argval = target.offset + inst.argrepr = f"to {target.offset}" + + +def virtualize_exception_table(exn_tab_bytes: bytes, instructions: List[Instruction]): + """Replace exception table entries with pointers to make editing easier""" + exn_tab = parse_exception_table(exn_tab_bytes) + offset_to_inst = {cast(int, inst.offset): inst for inst in instructions} + offsets = sorted(offset_to_inst.keys()) + end_offset_idx = 0 + exn_tab_iter = iter(exn_tab) + try: + + def step(): + nonlocal end_offset_idx + entry = next(exn_tab_iter) + # find rightmost offset <= entry.end, since entry.end may not be + # an actual instruction, e.g. if the end instruction is LOAD_GLOBAL, + # which takes more than 2 bytes, then entry.end points to the end + # of the LOAD_GLOBAL instruction, not the beginning. 
+ while ( + end_offset_idx < len(offsets) and offsets[end_offset_idx] <= entry.end + ): + end_offset_idx += 1 + assert end_offset_idx > 0 + end_offset = offsets[end_offset_idx - 1] + inst_entry = InstructionExnTabEntry( + _get_instruction_by_offset(offset_to_inst, entry.start), + _get_instruction_by_offset(offset_to_inst, end_offset), + _get_instruction_by_offset(offset_to_inst, entry.target), + entry.depth, + entry.lasti, + ) + return entry, inst_entry + + entry, inst_entry = step() + for inst in instructions: + while inst.offset > entry.end: + entry, inst_entry = step() + if inst.offset >= entry.start: + inst.exn_tab_entry = copy.copy(inst_entry) + except StopIteration: + pass + + +def compute_exception_table( + instructions: List[Instruction], +) -> List[ExceptionTableEntry]: + """Compute exception table in list format from instructions with exn_tab_entries""" + exn_dict: Dict[Tuple[int, int], Tuple[int, int, bool]] = {} + indexof = get_indexof(instructions) + + for inst in instructions: + if inst.exn_tab_entry: + # account for prefixed EXTENDED_ARGS + start = _get_instruction_front( + instructions, indexof[inst.exn_tab_entry.start] + ).offset + # point to the last 2 bytes of the end instruction + end = ( + cast(int, inst.exn_tab_entry.end.offset) + + instruction_size(inst.exn_tab_entry.end) + - 2 + ) + target = _get_instruction_front( + instructions, indexof[inst.exn_tab_entry.target] + ).offset + key = (start, end) + val = (target, inst.exn_tab_entry.depth, inst.exn_tab_entry.lasti) + if key in exn_dict: + assert exn_dict[key] == val + exn_dict[key] = val + + # Dynamo may construct nested exception table entries for convenience, + # but Python expects exception table entries to not overlap. + # NOTE: below, "keys" refer to old instruction entries' starts and ends, + # and "entries" refer to the generated exception table entries. + + # Sort keys by increasing start, then decreasing end + keys_sorted = sorted(exn_dict.keys(), key=lambda t: (t[0], -t[1])) + # smallest byte that the next exception table entry can start at + nexti = 0 + # stack of current nested keys + key_stack: List[Tuple[int, int]] = [] + exn_tab: List[ExceptionTableEntry] = [] + + def pop(): + """ + Pop the key_stack and append an exception table entry if possible. + """ + nonlocal nexti + if key_stack: + key = key_stack.pop() + if nexti <= key[1]: + exn_tab.append( + ExceptionTableEntry(max(key[0], nexti), key[1], *exn_dict[key]) + ) + nexti = key[1] + 2 + + for key in keys_sorted: + # pop keys that are no longer nested over the current key + while key_stack and key_stack[-1][1] < key[0]: + pop() + if key_stack: + # create an entry covering to the current key, if possible + assert key_stack[-1][0] <= key[0] <= key[1] <= key_stack[-1][1] + left = max(nexti, key_stack[-1][0]) + if left < key[0]: + exn_tab.append( + ExceptionTableEntry(left, key[0] - 2, *exn_dict[key_stack[-1]]) + ) + nexti = key[0] + key_stack.append(key) + while key_stack: + pop() + check_exception_table(exn_tab) + return exn_tab + + +def check_inst_exn_tab_entries_nested( + tab: List[InstructionExnTabEntry], indexof +) -> None: + """ + Checks `tab` is a properly sorted list of nested InstructionExnTabEntry's, + i.e. no entries partially overlap. + "Properly sorted" means entries are sorted by increasing starts, then + decreasing ends. 
+ """ + entry_stack: List[Tuple[int, int]] = [] + for entry in tab: + key = (indexof[entry.start], indexof[entry.end]) + while entry_stack and entry_stack[-1][1] < key[0]: + entry_stack.pop() + if entry_stack: + assert entry_stack[-1][0] <= key[0] <= key[1] <= entry_stack[-1][1] + entry_stack.append(key) + + +def propagate_inst_exn_table_entries(instructions: List[Instruction]) -> None: + """ + Copies exception table entries to all instructions in an entry's range. + Supports nested exception table entries. + """ + indexof = get_indexof(instructions) + entries: Dict[Tuple[int, int], InstructionExnTabEntry] = {} + for inst in instructions: + if inst.exn_tab_entry: + key = ( + indexof[inst.exn_tab_entry.start], + indexof[inst.exn_tab_entry.end], + ) + if key in entries: + assert inst.exn_tab_entry == entries[key] + entries[key] = inst.exn_tab_entry + sorted_entries = [ + entries[key] for key in sorted(entries.keys(), key=lambda t: (t[0], -t[1])) + ] + check_inst_exn_tab_entries_nested(sorted_entries, indexof) + # Propagation of nested entries works since nested entries come later + # in sorted order. + for entry in sorted_entries: + for i in range(indexof[entry.start], indexof[entry.end] + 1): + instructions[i].exn_tab_entry = copy.copy(entry) + + +def check_inst_exn_tab_entries_valid(instructions: List[Instruction]): + """ + Checks that exn_tab_entries of instructions are valid. + An entry's start, end, and target must be in instructions. + Instructions with an exn_tab_entry are located within + the entry's start and end instructions. + Instructions do not share exn_tab_entries. + + Implicitly checks for no duplicate instructions. + """ + indexof = get_indexof(instructions) + exn_tab_entry_set = set() + for i, inst in enumerate(instructions): + if inst.exn_tab_entry: + assert sys.version_info >= (3, 11) + assert id(inst.exn_tab_entry) not in exn_tab_entry_set + exn_tab_entry_set.add(id(inst.exn_tab_entry)) + entry = inst.exn_tab_entry + assert entry.start in indexof + assert entry.end in indexof + assert entry.target in indexof + assert indexof[entry.start] <= i <= indexof[entry.end] + + +def strip_extended_args(instructions: List[Instruction]) -> None: + instructions[:] = [i for i in instructions if i.opcode != dis.EXTENDED_ARG] + + +def remove_load_call_method(instructions: List[Instruction]) -> List[Instruction]: + """LOAD_METHOD puts a NULL on the stack which causes issues, so remove it""" + rewrites = {"LOAD_METHOD": "LOAD_ATTR", "CALL_METHOD": "CALL_FUNCTION"} + for inst in instructions: + if inst.opname in rewrites: + inst.opname = rewrites[inst.opname] + inst.opcode = dis.opmap[inst.opname] + return instructions + + +def remove_jump_if_none(instructions: List[Instruction]) -> None: + new_insts = [] + for inst in instructions: + new_insts.append(inst) + if "_NONE" in inst.opname: + is_op = create_instruction("IS_OP", arg=int("NOT" in inst.opname)) + is_op.argval = is_op.arg + jump_op = create_instruction( + "POP_JUMP_FORWARD_IF_TRUE" + if "FORWARD" in inst.opname + else "POP_JUMP_BACKWARD_IF_TRUE", + target=inst.target, + ) + # modify inst in-place to preserve jump target + inst.opcode = dis.opmap["LOAD_CONST"] + inst.opname = "LOAD_CONST" + inst.arg = None + inst.argval = None + new_insts.extend([is_op, jump_op]) + instructions[:] = new_insts + + +def explicit_super(code: types.CodeType, instructions: List[Instruction]) -> None: + """convert super() with no args into explicit arg form""" + cell_and_free = (code.co_cellvars or tuple()) + (code.co_freevars or tuple()) + if not 
len(code.co_varnames): + # A function with no argument cannot contain a valid "super()" call + return + output = [] + for idx, inst in enumerate(instructions): + output.append(inst) + if inst.opname == "LOAD_GLOBAL" and inst.argval == "super": + nexti = instructions[idx + 1] + if nexti.opname in ("CALL_FUNCTION", "PRECALL") and nexti.arg == 0: + assert "__class__" in cell_and_free + output.append(create_instruction("LOAD_DEREF", argval="__class__")) + first_var = code.co_varnames[0] + if first_var in cell_and_free: + output.append(create_instruction("LOAD_DEREF", argval=first_var)) + else: + output.append(create_instruction("LOAD_FAST", argval=first_var)) + nexti.arg = 2 + nexti.argval = 2 + if nexti.opname == "PRECALL": + # also update the following CALL instruction + call_inst = instructions[idx + 2] + call_inst.arg = 2 + call_inst.argval = 2 + + instructions[:] = output + + +def fix_extended_args(instructions: List[Instruction]) -> int: + """Fill in correct argvals for EXTENDED_ARG ops""" + output: List[Instruction] = [] + + def maybe_pop_n(n): + for _ in range(n): + if output and output[-1].opcode == dis.EXTENDED_ARG: + output.pop() + + for inst in instructions: + if inst.opcode == dis.EXTENDED_ARG: + # Leave this instruction alone for now so we never shrink code + inst.arg = 0 + elif inst.arg and inst.arg > 0xFFFFFF: + maybe_pop_n(3) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 24)) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 16)) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8)) + elif inst.arg and inst.arg > 0xFFFF: + maybe_pop_n(2) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 16)) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8)) + elif inst.arg and inst.arg > 0xFF: + maybe_pop_n(1) + output.append(create_instruction("EXTENDED_ARG", arg=inst.arg >> 8)) + output.append(inst) + + added = len(output) - len(instructions) + assert added >= 0 + instructions[:] = output + return added + + +# from https://github.com/python/cpython/blob/v3.11.1/Include/internal/pycore_opcode.h#L41 +# TODO use the actual object instead, can interface from eval_frame.c +_PYOPCODE_CACHES = { + "BINARY_SUBSCR": 4, + "STORE_SUBSCR": 1, + "UNPACK_SEQUENCE": 1, + "STORE_ATTR": 4, + "LOAD_ATTR": 4, + "COMPARE_OP": 2, + "LOAD_GLOBAL": 5, + "BINARY_OP": 1, + "LOAD_METHOD": 10, + "PRECALL": 1, + "CALL": 4, +} + + +def instruction_size(inst) -> int: + if sys.version_info >= (3, 11): + return 2 * (_PYOPCODE_CACHES.get(dis.opname[inst.opcode], 0) + 1) + return 2 + + +def check_offsets(instructions) -> None: + offset = 0 + for inst in instructions: + assert inst.offset == offset + offset += instruction_size(inst) + + +def update_offsets(instructions) -> None: + offset = 0 + for inst in instructions: + inst.offset = offset + offset += instruction_size(inst) + + +def debug_bytes(*args) -> str: + index = range(max(map(len, args))) + result = [] + for arg in ( + [index] + list(args) + [[int(a != b) for a, b in zip(args[-1], args[-2])]] + ): + result.append(" ".join(f"{x:03}" for x in arg)) + + return "bytes mismatch\n" + "\n".join(result) + + +def debug_checks(code): + """Make sure our assembler produces same bytes as we start with""" + dode = transform_code_object(code, lambda x, y: None, safe=True) + assert code.co_code == dode.co_code, debug_bytes(code.co_code, dode.co_code) + assert code.co_lnotab == dode.co_lnotab, debug_bytes(code.co_lnotab, dode.co_lnotab) + + +HAS_LOCAL = set(dis.haslocal) +HAS_NAME = 
set(dis.hasname) +HAS_FREE = set(dis.hasfree) +HAS_CONST = set(dis.hasconst) + + +def get_const_index(code_options, val) -> int: + for i, v in enumerate(code_options["co_consts"]): + # NOTE: stronger comparison is required, since we have + # examples where two values compare equal but have + # different semantic meaning in some cases, e.g. + # 0.0 == -0.0 but have different effects in torch.copysign. + if val is v: + return i + code_options["co_consts"] += (val,) + return len(code_options["co_consts"]) - 1 + + +def fix_vars(instructions: List[Instruction], code_options, varname_from_oparg=None): + # compute instruction arg from argval if arg is not provided + names = {name: idx for idx, name in enumerate(code_options["co_names"])} + if sys.version_info < (3, 11): + assert varname_from_oparg is None + varnames = {name: idx for idx, name in enumerate(code_options["co_varnames"])} + freenames = { + name: idx + for idx, name in enumerate( + code_options["co_cellvars"] + code_options["co_freevars"] + ) + } + else: + assert callable(varname_from_oparg) + allnames = {} + for idx in itertools.count(): + try: + name = varname_from_oparg(idx) + allnames[name] = idx + except IndexError: + break + varnames = {name: allnames[name] for name in code_options["co_varnames"]} + freenames = { + name: allnames[name] + for name in code_options["co_cellvars"] + code_options["co_freevars"] + } + for i in range(len(instructions)): + + def should_compute_arg(): + # argval is prioritized over arg + return instructions[i].argval is not _NotProvided + + if instructions[i].opname == "LOAD_GLOBAL": + # 3.11 LOAD_GLOBAL requires both arg and argval - see create_load_global + assert instructions[i].arg is not None + assert instructions[i].argval is not _NotProvided + if sys.version_info >= (3, 11): + instructions[i].arg = (names[instructions[i].argval] << 1) + ( + cast(int, instructions[i].arg) % 2 + ) + else: + instructions[i].arg = names[instructions[i].argval] + elif instructions[i].opcode in HAS_LOCAL: + if should_compute_arg(): + instructions[i].arg = varnames[instructions[i].argval] + elif instructions[i].opcode in HAS_NAME: + if should_compute_arg(): + instructions[i].arg = names[instructions[i].argval] + elif instructions[i].opcode in HAS_FREE: + if should_compute_arg(): + instructions[i].arg = freenames[instructions[i].argval] + elif instructions[i].opcode in HAS_CONST: + # NOTE: only update argval if arg is not provided. This assumes + # that any additions to co_consts are appended. + if instructions[i].arg is None: + # cannot use a dictionary since consts may not be hashable + idx = get_const_index(code_options, instructions[i].argval) + assert idx >= 0 + instructions[i].arg = idx + + +def get_code_keys() -> List[str]: + # Python 3.11 changes to code keys are not fully documented. + # See https://github.com/python/cpython/blob/3.11/Objects/clinic/codeobject.c.h#L24 + # for new format. 
+ keys = ["co_argcount"] + keys.append("co_posonlyargcount") + keys.extend( + [ + "co_kwonlyargcount", + "co_nlocals", + "co_stacksize", + "co_flags", + "co_code", + "co_consts", + "co_names", + "co_varnames", + "co_filename", + "co_name", + ] + ) + if sys.version_info >= (3, 11): + keys.append("co_qualname") + keys.append("co_firstlineno") + if sys.version_info >= (3, 10): + keys.append("co_linetable") + else: + keys.append("co_lnotab") + if sys.version_info >= (3, 11): + # not documented, but introduced in https://github.com/python/cpython/issues/84403 + keys.append("co_exceptiontable") + keys.extend( + [ + "co_freevars", + "co_cellvars", + ] + ) + return keys + + +def transform_code_object(code, transformations, safe=False) -> types.CodeType: + keys = get_code_keys() + code_options = {k: getattr(code, k) for k in keys} + assert len(code_options["co_varnames"]) == code_options["co_nlocals"] + + instructions = cleaned_instructions(code, safe) + propagate_line_nums(instructions) + + transformations(instructions, code_options) + return clean_and_assemble_instructions(instructions, keys, code_options)[1] + + +def clean_and_assemble_instructions( + instructions: List[Instruction], keys: List[str], code_options: Dict[str, Any] +) -> Tuple[List[Instruction], types.CodeType]: + # also implicitly checks for no duplicate instructions + check_inst_exn_tab_entries_valid(instructions) + + code_options["co_nlocals"] = len(code_options["co_varnames"]) + varname_from_oparg = None + if sys.version_info >= (3, 11): + # temporary code object with updated names + tmp_code = types.CodeType(*[code_options[k] for k in keys]) + varname_from_oparg = tmp_code._varname_from_oparg # type: ignore[attr-defined] + fix_vars(instructions, code_options, varname_from_oparg=varname_from_oparg) + + dirty = True + while dirty: + update_offsets(instructions) + devirtualize_jumps(instructions) + # this pass might change offsets, if so we need to try again + dirty = bool(fix_extended_args(instructions)) + + remove_extra_line_nums(instructions) + bytecode, lnotab = assemble(instructions, code_options["co_firstlineno"]) + if sys.version_info < (3, 10): + code_options["co_lnotab"] = lnotab + else: + code_options["co_linetable"] = lnotab + + code_options["co_code"] = bytecode + code_options["co_stacksize"] = stacksize_analysis(instructions) + assert set(keys) - {"co_posonlyargcount"} == set(code_options.keys()) - { + "co_posonlyargcount" + } + if sys.version_info >= (3, 11): + code_options["co_exceptiontable"] = assemble_exception_table( + compute_exception_table(instructions) + ) + return instructions, types.CodeType(*[code_options[k] for k in keys]) + + +def populate_kw_names_argval(instructions, consts): + for inst in instructions: + if inst.opname == "KW_NAMES": + inst.argval = consts[inst.arg] + + +def cleaned_instructions(code, safe=False) -> List[Instruction]: + instructions = list(map(convert_instruction, dis.get_instructions(code))) + check_offsets(instructions) + if sys.version_info >= (3, 11): + populate_kw_names_argval(instructions, code.co_consts) + virtualize_exception_table(code.co_exceptiontable, instructions) + virtualize_jumps(instructions) + strip_extended_args(instructions) + if not safe: + if sys.version_info < (3, 11): + remove_load_call_method(instructions) + else: + remove_jump_if_none(instructions) + update_offsets(instructions) + devirtualize_jumps(instructions) + explicit_super(code, instructions) + return instructions + + +_unique_id_counter = itertools.count() + + +def unique_id(name) -> str: + return 
f"{name}_{next(_unique_id_counter)}" + + +def is_generator(code: types.CodeType) -> bool: + co_generator = 0x20 + return (code.co_flags & co_generator) > 0 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/cache_size.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/cache_size.py new file mode 100644 index 0000000000000000000000000000000000000000..340f227a9956f7e5621afe1fbe41109da9221bc9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/cache_size.py @@ -0,0 +1,172 @@ +import logging +import types +import weakref +from dataclasses import dataclass +from typing import Tuple + +from . import config + +log = logging.getLogger(__name__) +""" +[Note on cache size limit] + +Background - TorchDynamo cache is a linked list. Each cache entry is a +(check_fn, out_code, next pointer). These are stored on the f_code's co_extra +scratch space. When a frame is invoked, we walk this linked list and run +check_fn in each cache_entry to decide if the frame needs recompilation. If none +of the check_fn's returns True, we recompile and add a new entry. To ensure we +don't end up recompiling infinitely, we put limits on the cache size. + +There are two limits +1) cache_size_limit +2) accumulated_cache_size_limit + + +Earlier, we had only one limit - the maximum number of entries in one cache line +(which is now represented by (2) above). So, why do we need two limits? Let's try +to understand that. + +In general, we want our cache limit value to be a small number (e.g. 8 or even +lower). This ensures that frames that cause too many recompilations fall back to +eager quickly. However, there is another problem that prevents us from lowering +the value of cache_size_limit. This is due to ID_MATCH'd guards. Today, we put +ID_MATCH guards on nn modules if there is a graph break. This means we will have +many recompilations for the same code object because the ID_MATCH guard fails +for different instances of the nn module. This is a common pattern in how models +are authored. Therefore, this requires us to keep the cache_size_limit high. + +We resolve this by introducing these two limits. The first limit (1) limits the +number of cache entries that have an ID_MATCH'd guard for an nn module instance. +And the second limit (2) becomes a safeguard mechanism that caps the maximum number of compilations +for a code object. One important question is - what is the limit for the code +object that does not have any ID_MATCH guard? For such code objects, we choose +(1) as the cache size limit. + +Let's take an example to understand how these limits help. Suppose we have 16 +instances of an nn module and we ID_MATCH on the self object. Further, suppose +the inputs to these functions have varying batch sizes, leading to one +extra recompilation per instance. In total, there will be 32 recompilations, and therefore 32 cache +entries on the forward code object. In the older case, when we had only 1 limit, +our cache size limit had to be >= 32 to capture all these recompilations. Now, +suppose there is a separate function in the same program which is very dynamic +and unsuitable for compilation. Such a function will need to undergo 32 +compilations to burst the cache and fall back to eager. These 32 recompilations +are too many, and we want to fall back for these compilation-unfriendly functions +sooner. + +In the new scenario, we can have (1) cache_size_limit = 2, (2) +accumulated_cache_size_limit = 32.
This means that each ID_MATCH'd object can +have a maximum of two cache entries, and the maximum number of cache entries +(irrespective of ID_MATCH obj) is 32. This covers the case of the forward code +object, which has 32 recompilations. For the other function, the one unsuitable +for compilation, our limit is 2. So, we will burst the cache in just 2 +recompilations. In this manner, these 2 limits help us resolve the tension +mentioned earlier. +""" + + +@dataclass +class CacheSizeRelevantForFrame: + """ + We track the number of cache entries that have the same id_match objects as the + given frame. + + TODO(janimesh) - Consider adding a map from tuple_of_match_ids to count - + https://github.com/pytorch/pytorch/pull/107496#discussion_r1304564682 - this + could be useful for debugging as well. + """ + + # Total number of CacheEntry objects in the Dynamo linked list + num_cache_entries: int = 0 + + # Number of CacheEntry objects having the same ID_MATCH'd objects as the given frame. + num_cache_entries_with_same_id_matched_objs: int = 0 + + def will_compilation_exceed(self, limit: int) -> bool: + # Checks if a compilation will exceed the given limit (that's why >=). + return ( + self.will_compilation_exceed_accumulated_limit() + or self.will_compilation_exceed_specific_limit(limit) + ) + + def will_compilation_exceed_accumulated_limit(self) -> bool: + return self.num_cache_entries >= config.accumulated_cache_size_limit + + def will_compilation_exceed_specific_limit(self, limit: int) -> bool: + return self.num_cache_entries_with_same_id_matched_objs >= limit + + +def _get_weakref_from_f_locals(frame: types.FrameType, local_name: str): + obj = frame.f_locals.get(local_name, None) + weak_id = None + try: + weak_id = weakref.ref(obj) + except TypeError: + pass # cannot weakref bool object + return weak_id + + +def _has_same_id_matched_objs(frame: types.FrameType, cache_entry) -> bool: + """ + Checks if the ID_MATCH'd objects saved on cache_entry are the same as the ones + in frame.f_locals. + """ + if not cache_entry: + return False + + for ( + local_name, + weakref_from_cache_entry, + ) in cache_entry.check_fn.id_matched_objs.items(): + if weakref_from_cache_entry() is not None: + weakref_from_frame = _get_weakref_from_f_locals(frame, local_name) + if weakref_from_frame != weakref_from_cache_entry: + return False + + # Also covers the case where no ID_MATCH objects are saved in frame.f_locals + return True + + +def compute_cache_size( + frame: types.FrameType, cache_entry +) -> CacheSizeRelevantForFrame: + # Walk the linked list to calculate the cache size + num_cache_entries = 0 + num_cache_entries_with_same_id_matched_objs = 0 + + while cache_entry: + num_cache_entries += 1 + # Track the number of cache entries having the same ID_MATCH'd objects as + # that of frame.f_locals. This will be used later to compare against the + # cache_size_limit. + if _has_same_id_matched_objs(frame, cache_entry): + num_cache_entries_with_same_id_matched_objs += 1 + cache_entry = cache_entry.next + + return CacheSizeRelevantForFrame( + num_cache_entries, num_cache_entries_with_same_id_matched_objs + ) + + +def is_recompilation(cache_size: CacheSizeRelevantForFrame) -> bool: + """ + If the frame (earlier parsed by compute_cache_size) has more than 1 cache + entry with same ID_MATCH'd objects, then it's a recompilation.
+ """ + # Note that you can have multiple entries in the cache but still not a + # recompile, e.g., you can have 64 nn module instances, each one having an + # ID_MATCH guard, and each one having just 1 cache entry in the cache. In + # this case, we can have 64 entries in the cache, but no recompilation + # because there is only one entry for each id_matched_obj. + return cache_size.will_compilation_exceed(1) + + +def exceeds_cache_size_limit(cache_size: CacheSizeRelevantForFrame) -> Tuple[bool, str]: + """ + Checks if we are exceeding the cache size limit. + """ + if cache_size.will_compilation_exceed_accumulated_limit(): + return True, "accumulated_cache_size_limit" + if cache_size.will_compilation_exceed_specific_limit(config.cache_size_limit): + return True, "cache_size_limit" + return False, "" diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/code_context.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/code_context.py new file mode 100644 index 0000000000000000000000000000000000000000..0fe19016ca137b462b288279b92774109961fd72 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/code_context.py @@ -0,0 +1,29 @@ +import types + +from .utils import ExactWeakKeyDictionary + + +class CodeContextDict: + def __init__(self): + self.code_context = ExactWeakKeyDictionary() + + def has_context(self, code: types.CodeType): + return code in self.code_context + + def get_context(self, code: types.CodeType): + ctx = self.code_context.get(code) + if ctx is None: + ctx = {} + self.code_context[code] = ctx + return ctx + + def pop_context(self, code: types.CodeType): + ctx = self.get_context(code) + self.code_context._remove_id(id(code)) + return ctx + + def clear(self): + self.code_context.clear() + + +code_context = CodeContextDict() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/codegen.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/codegen.py new file mode 100644 index 0000000000000000000000000000000000000000..7d283789c22c020052b24dfa536b2ffcbdcc2e4e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/codegen.py @@ -0,0 +1,398 @@ +import collections +import dataclasses +import re +import sys +import types +from typing import Counter, Dict, List, Optional + +import torch.nn +from . 
import utils + +from .bytecode_transformation import ( + create_call_function, + create_dup_top, + create_instruction, + create_load_global, + create_rot_n, + Instruction, +) +from .exc import unimplemented +from .source import AttrSource, Source +from .utils import is_safe_constant, rot_n_helper +from .variables.base import VariableTracker +from .variables.nn_module import NNModuleVariable +from .variables.tensor import ( + NumpyNdarrayVariable, + SymNodeVariable, + TensorVariable, + UnspecializedPythonVariable, +) +from .variables.torch_function import TensorWithTFOverrideVariable + + +@dataclasses.dataclass +class GraphOutputEntry: + index: int + variable: VariableTracker + + +class PyCodegen: + """ + Helper class used for constructing Python bytecode + """ + + def __init__( + self, + tx=None, + root: Optional[torch.nn.Module] = None, + graph_output_var: Optional[str] = None, + tempvars=None, + ): + self.root = root + self.top_of_stack: Optional[VariableTracker] = None + self.uses: Counter[VariableTracker] = collections.Counter() + self.graph_outputs: Dict[int, GraphOutputEntry] = {} + self._output: List[Instruction] = [] + self.tempvars = tempvars or {} + self.tx = tx + self.graph_output_var = graph_output_var + self.code_options = self.tx.output.code_options + self.cell_and_freevars = self.tx.cell_and_freevars + self.new_var = self.tx.output.new_var + self.mutable_side_effects_from_source = False + self.value_from_source: bool = True + + def restore_stack(self, stack_values, *, value_from_source=True): + prior = self.mutable_side_effects_from_source + self.mutable_side_effects_from_source = True + prev = self.value_from_source + self.value_from_source &= value_from_source + try: + self.foreach(stack_values) + finally: + self.mutable_side_effects_from_source = prior + self.value_from_source = prev + + def graph_output_vars(self): + return [x.variable for x in self.graph_outputs.values()] + + def call_reconstruct(self, value): + res = value.reconstruct(self) + assert res is None, f"reconstruct!=None {value}" + + def __call__(self, value, allow_cache=True): + """Generate code such that top-of-stack (TOS) is set to value""" + if isinstance(value, Source): + self.call_reconstruct(value) + self.clear_tos() + return + + assert isinstance(value, VariableTracker) + output = self._output + graph_outputs = self.graph_outputs + + if self.top_of_stack is value and allow_cache: + output.append(create_dup_top()) + return + + if self.mutable_side_effects_from_source: + # this is needed to get aliasing relationships right + # value.mutable_local.source will get mutated to hold `value` + # mutable_side_effects_from_source=False is used to codegen the mutation + # mutable_side_effects_from_source=True is used to codegen a reference + from .side_effects import MutableSideEffects + + if isinstance(value.mutable_local, MutableSideEffects): + self(value.mutable_local.source) + return + + if allow_cache: + if value.mutable_local and value.mutable_local in self.tempvars: + output.append(self.create_load(self.tempvars[value.mutable_local])) + self.top_of_stack = value + return + if self.tempvars.get(value) is not None: + output.append(self.create_load(self.tempvars[value])) + self.top_of_stack = value + return + + if value.source is not None and allow_cache and self.value_from_source: + self.call_reconstruct(value.source) + elif value.is_python_constant() and is_safe_constant( + value.as_python_constant() + ): + output.append(self.create_load_const(value.as_python_constant())) + elif isinstance(value,
TensorWithTFOverrideVariable): + graph_outputs_key = self.add_graph_output(value) + + self.load_import_from(utils.__name__, "to_subclass") + self.load_graph_output(graph_outputs[graph_outputs_key].index) + output.append( + self.create_load_global( + value.global_mangled_class_name(self.tx), False, add=True + ) + ) + output.extend(create_call_function(2, True)) + elif isinstance( + value, + ( + TensorVariable, + SymNodeVariable, + UnspecializedPythonVariable, + NumpyNdarrayVariable, + ), + ): + graph_outputs_key = self.add_graph_output(value) + + if isinstance(value, NumpyNdarrayVariable): + self.load_import_from(utils.__name__, "to_numpy_helper") + + self.load_graph_output(graph_outputs[graph_outputs_key].index) + + if isinstance(value, NumpyNdarrayVariable): + output.extend(create_call_function(1, True)) + elif isinstance(value, UnspecializedPythonVariable) and value.need_unwrap: + output.extend( + [self.create_load_attr("item")] + create_call_function(0, True) + ) + elif isinstance(value, NNModuleVariable): + parts = value.module_key.split(".") + if parts[0] in self.code_options["co_varnames"]: + output.append(self.create_load(parts[0])) + parts = parts[1:] + else: + assert self.root is not None + output.append(self.create_load_output(self.root)) + for part in parts: + output.append(self.create_load_attr(part)) + else: + self.uses[value] += 1 + try: + self.call_reconstruct(value) + except NotImplementedError: + unimplemented(f"reconstruct: {value}") + if allow_cache and value in self.tempvars: + self._output.append(create_dup_top()) + self.add_cache(value) + + self.top_of_stack = value + + def add_graph_output(self, value): + graph_outputs_key = id(value.as_proxy()) + if graph_outputs_key not in self.graph_outputs: + self.graph_outputs[graph_outputs_key] = GraphOutputEntry( + len(self.graph_outputs), value + ) + return graph_outputs_key + + def load_graph_output(self, index): + output = self._output + output.append(self.create_load(self.graph_output_var)) + output.append(self._create_load_const(index)) + output.append(create_instruction("BINARY_SUBSCR")) + + def add_cache(self, value): + var = self.new_var() + self.tempvars[value] = var + if value.mutable_local: + self.tempvars[value.mutable_local] = var + self._output.append(self.create_store(var)) + + def foreach(self, items): + for i in items: + self(i) + + def setup_globally_cached(self, name, value, push_null): + """Store value in a new global""" + name = re.sub(r"[^a-zA-Z0-9_]+", "_", name) + f_globals = self.tx.f_globals + if name in f_globals: + assert id(f_globals[name]) == id(value) + else: + f_globals[name] = value + return [self.create_load_global(name, push_null, add=True)] + + def clear_tos(self): + self.top_of_stack = None + + def append_output(self, inst): + assert isinstance(inst, Instruction) + self._output.append(inst) + self.clear_tos() + + def extend_output(self, insts): + assert all(isinstance(x, Instruction) for x in insts) + self._output.extend(insts) + self.clear_tos() + + def get_instructions(self) -> List[Instruction]: + return self._output + + def create_load(self, name) -> Instruction: + if name in self.cell_and_freevars(): + return create_instruction("LOAD_DEREF", argval=name) + assert name in self.code_options["co_varnames"], f"{name} missing" + return create_instruction("LOAD_FAST", argval=name) + + def create_load_closure(self, name) -> Instruction: + assert name in self.cell_and_freevars() + return create_instruction("LOAD_CLOSURE", argval=name) + + def create_store(self, name) -> Instruction: + if name 
in self.cell_and_freevars(): + return create_instruction("STORE_DEREF", argval=name) + assert name in self.code_options["co_varnames"] + return create_instruction("STORE_FAST", argval=name) + + def create_load_global(self, name, push_null, add=False) -> Instruction: + if add: + self.tx.output.update_co_names(name) + assert name in self.code_options["co_names"], f"{name} not in co_names" + return create_load_global(name, push_null) + + def create_load_const(self, value) -> Instruction: + assert is_safe_constant(value), f"unsafe constant {value}" + return self._create_load_const(value) + + def _create_load_const(self, value) -> Instruction: + return create_instruction("LOAD_CONST", argval=value) + + create_load_output = _create_load_const + + def create_load_method(self, name): + self.tx.output.update_co_names(name) + return create_instruction("LOAD_METHOD", argval=name) + + def create_load_attr(self, name) -> Instruction: + if name not in self.code_options["co_names"]: + self.code_options["co_names"] += (name,) + return create_instruction("LOAD_ATTR", argval=name) + + def load_attr(self, name): + self.append_output(self.create_load_attr(name)) + + def create_load_attrs(self, names): + return [self.create_load_attr(name) for name in names.split(".")] + + def create_store_attr(self, name) -> Instruction: + if name not in self.code_options["co_names"]: + self.code_options["co_names"] += (name,) + return create_instruction("STORE_ATTR", argval=name) + + def store_attr(self, name): + self.append_output(self.create_store_attr(name)) + + def load_function_name(self, fn_name, push_null, num_on_stack=0): + """Load the global fn_name on the stack num_on_stack down""" + output = [] + if push_null and sys.version_info >= (3, 11): + output.extend( + [create_instruction("PUSH_NULL"), *self.rot_n(num_on_stack + 1)] + ) + output.extend( + [ + self.create_load_global(fn_name, False, add=True), + *self.rot_n(num_on_stack + 1), + ] + ) + return output + + def rot_n(self, n): + try: + return create_rot_n(n) + except AttributeError: + # desired rotate bytecode doesn't exist, generate equivalent bytecode + return [ + create_instruction("BUILD_TUPLE", arg=n), + self._create_load_const(rot_n_helper(n)), + *create_rot_n(2), + create_instruction("CALL_FUNCTION_EX", arg=0), + create_instruction("UNPACK_SEQUENCE", arg=n), + ] + + def pop_null(self): + # POP_TOP doesn't work for null, so we pop nulls by pushing in a + # nop function, calling it (which consumes the null), and popping the result. 
+ assert sys.version_info >= (3, 11) + return [ + self._create_load_const(lambda: None), + *create_call_function(0, False), + create_instruction("POP_TOP"), + ] + + def call_function(self, nargs: int, push_null: bool): + self.extend_output(create_call_function(nargs, push_null=push_null)) + + def dup_top(self): + self.append_output(create_dup_top()) + + def store(self, varname): + self.append_output(self.create_store(varname)) + + def make_function_with_closure( + self, fn_name: str, code: types.CodeType, push_null: bool, num_on_stack=0 + ): + freevars = code.co_freevars + assert freevars + output = self._output + if sys.version_info >= (3, 11) and push_null: + output.append(create_instruction("PUSH_NULL")) + output.extend(self.rot_n(num_on_stack + 1)) + for var in freevars: + assert var in self.cell_and_freevars() + output.append(create_instruction("LOAD_CLOSURE", argval=var)) + output.append(create_instruction("BUILD_TUPLE", arg=len(freevars))) + output.append(self.create_load_const(code)) + if sys.version_info < (3, 11): + output.append(self.create_load_const(fn_name)) + output.append(create_instruction("MAKE_FUNCTION", arg=0x08)) + output.extend(self.rot_n(num_on_stack + 1)) + self.clear_tos() + + def create_load_python_module(self, mod, push_null) -> Instruction: + """ + Generate a LOAD_GLOBAL instruction to fetch a given python module. + """ + output = self.tx.output + global_scope = output.global_scope + name = re.sub(r"^.*[.]", "", mod.__name__) + if global_scope.get(name, None) is mod: + return self.create_load_global(name, push_null, add=True) + prefix = f"___module_{name}" + global_name = self.tx.output.install_global_by_id(prefix, mod) + return self.create_load_global(global_name, push_null, add=True) + + def make_call_generated_code(self, fn_name: str) -> None: + """Call the generated code function stored in fn_name""" + self.extend_output(self.load_function_name(fn_name, True)) + + graphargs = self.tx.output.graphargs + for arg in graphargs: + if arg.is_unspecialized: + self.extend_output( + [ + self.create_load_python_module(torch, True), + self.create_load_attr("as_tensor"), + ] + ) + self.call_reconstruct(arg) + self.extend_output(create_call_function(1, False)) + else: + self.call_reconstruct(arg) + + self.extend_output(create_call_function(len(graphargs), False)) + + def load_import_from(self, module_name, object_name) -> None: + self(AttrSource(self.tx.import_source(module_name), object_name)) + + def create_call_function_kw(self, nargs, kw_names, push_null) -> List[Instruction]: + if sys.version_info >= (3, 11): + output = create_call_function(nargs, push_null) + assert output[-2].opname == "PRECALL" + kw_names_inst = create_instruction("KW_NAMES", argval=kw_names) + output.insert(-2, kw_names_inst) + return output + return [ + self.create_load_const(kw_names), + create_instruction("CALL_FUNCTION_KW", arg=nargs), + ] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/compiled_autograd.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/compiled_autograd.py new file mode 100644 index 0000000000000000000000000000000000000000..0fe43fcaaf37b41dbb97adc2ef196c6614a0a252 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/compiled_autograd.py @@ -0,0 +1,280 @@ +import contextlib +import functools +from typing import List, Optional + +import torch +from torch._dynamo.external_utils import call_backward, call_hook +from torch._dynamo.source import GetItemSource, LocalSource +from torch._dynamo.utils import counters, 
lazy_format_graph_code +from torch._logging import getArtifactLogger, trace_structured +from torch._prims_common import clone_preserve_strides +from torch._subclasses import FakeTensorMode +from torch.fx import GraphModule +from torch.fx.experimental._backward_state import BackwardState +from torch.fx.experimental.proxy_tensor import ( + decompose, + disable_autocast_cache, + disable_proxy_modes_tracing, + fetch_object_proxy, + ProxyTorchDispatchMode, + PythonKeyTracer, + track_tensor_tree, +) +from torch.fx.experimental.symbolic_shapes import DimDynamic, ShapeEnv +from torch.fx.proxy import Proxy + +compiled_autograd_log = getArtifactLogger(__name__, "compiled_autograd") + + +def maybe_clone(x): + if x is not None: + return clone_preserve_strides(x) + return x + + +class AutogradCompilerInstance: + def __init__(self, compiler_fn) -> None: + self.compiler_fn = compiler_fn + self.stack = contextlib.ExitStack() + self.close = self.stack.close + self.shape_env = ShapeEnv() + self.fake_tensor_mode = FakeTensorMode( + allow_fallback_kernels=True, + allow_non_fake_inputs=True, + shape_env=self.shape_env, + ) + self.fx_tracer = PythonKeyTracer() + self.proxy_mode = ProxyTorchDispatchMode(self.fx_tracer, "symbolic") + self.hooks_proxy: Optional[Proxy] = None + + def wrap_fake(self, x, source): + assert isinstance(x, torch.Tensor) + return self.fake_tensor_mode.from_tensor(x, source=source) + + @staticmethod + def source(name, idx) -> GetItemSource: + return GetItemSource(LocalSource(name), idx) + + def begin_capture(self, inputs: List[torch.Tensor], sizes: List[int]): + counters["compiled_autograd"]["captures"] += 1 + self.fx_tracer.root = torch.nn.Module() + self.fx_tracer.graph = torch.fx.Graph(tracer_cls=PythonKeyTracer) + self.fx_tracer.tensor_attrs = {} + args_proxy = self.fx_tracer.create_proxy("placeholder", "inputs", (), {}) + sizes_proxy = self.fx_tracer.create_proxy("placeholder", "sizes", (), {}) + self.hooks_proxy = self.fx_tracer.create_proxy("placeholder", "hooks", (), {}) + + # tensor inputs to fake tensors + inputs = [ + self.wrap_fake(x, self.source("inputs", idx)) + for idx, x in enumerate(inputs) + ] + proxies = [args_proxy[i] for i in range(len(inputs))] + self.bind_tensors_to_proxies(inputs, proxies) + + # size inputs to symints + sizes = [ + self.shape_env.create_unspecified_symint_and_symbol( + val, + self.source("sizes", idx), + DimDynamic.DYNAMIC, + ) + for idx, val in enumerate(sizes) + ] + self.bind_tensors_to_proxies(sizes, sizes_proxy) + + # TODO(jansel): are all these modes needed? 
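+        # Roughly: the contexts below make the autograd engine run under fake tensors
+        # and proxy tracing, so each op is recorded into self.fx_tracer's graph rather
+        # than executed for real; decompose({}) installs an (empty) decomposition table
+        # and disable_autocast_cache() keeps autocast caching from interfering with tracing.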
+ self.stack.enter_context(decompose({})) + self.stack.enter_context(self.fake_tensor_mode) + self.stack.enter_context(self.proxy_mode.sym_mode) + self.stack.enter_context(self.proxy_mode) + self.stack.enter_context(disable_autocast_cache()) + return inputs, sizes + + def proxy_call_backward( + self, + inputs, + output_metadatas, + saved_tensors, + backward_idx: int, + ): + assert self.hooks_proxy is not None + backward_fn = self.hooks_proxy[backward_idx] # type: ignore[index] + proxies = self.fx_tracer.create_proxy( + kind="call_function", + target=call_backward, + args=( + backward_fn, + self.to_proxy(saved_tensors), + *self.to_proxy(inputs), + ), + kwargs={}, + ) + + with disable_proxy_modes_tracing(): + # create fake Tensors + grad_ins: List[Optional[torch.Tensor]] = [] + for output_metadata in output_metadatas: + if output_metadata is None: + grad_ins.append(None) + continue + + layout, device, dtype, size = output_metadata + grad_ins.append( + torch.empty(size=size, dtype=dtype, layout=layout, device=device) + ) + self.bind_tensors_to_proxies(grad_ins, proxies) + return tuple(grad_ins) + + def proxy_call_hook(self, hook, *args): + return self.fx_tracer.create_proxy( + "call_function", + call_hook, + ( + hook, + *[self.to_proxy(x) for x in args], + ), + {}, + ) + + def tensor_pre_hook(self, inputs, hook_id, i: int): + assert self.hooks_proxy is not None + hook = self.hooks_proxy[hook_id] # type: ignore[index] + proxy = self.proxy_call_hook( + hook, + inputs[i], + ) + with disable_proxy_modes_tracing(): + inputs[i] = maybe_clone(inputs[i]) + self.bind_tensors_to_proxies([inputs[i]], [proxy]) + return inputs + + def pre_hook(self, inputs, hook_id): + assert self.hooks_proxy is not None + hook = self.hooks_proxy[hook_id] # type: ignore[index] + proxies = self.proxy_call_hook( + hook, + inputs, + ) + with disable_proxy_modes_tracing(): + inputs = [maybe_clone(x) for x in inputs] + self.bind_tensors_to_proxies(inputs, proxies) + return inputs + + def post_hook(self, outputs, inputs, hook_id): + assert self.hooks_proxy is not None + hook = self.hooks_proxy[hook_id] # type: ignore[index] + proxies = self.proxy_call_hook( + hook, + outputs, + inputs, + ) + with disable_proxy_modes_tracing(): + outputs = [maybe_clone(x) for x in outputs] + self.bind_tensors_to_proxies(outputs, proxies) + return outputs + + def post_acc_grad_hook(self, input, hook_id): + assert isinstance(input, torch.Tensor) + assert self.hooks_proxy is not None + hook = self.hooks_proxy[hook_id] # type: ignore[index] + proxies = self.proxy_call_hook( + hook, + input, + ) + with disable_proxy_modes_tracing(): + input = [maybe_clone(input)] + self.bind_tensors_to_proxies(input, proxies) + return input + + def end_capture(self, outputs): + self.stack.close() + self.fx_tracer.create_node( + "output", + "output", + (self.fx_tracer.create_arg(self.to_proxy(outputs)),), + {}, + ) + graph = GraphModule( + self.fx_tracer.root, self.fx_tracer.graph, "CompiledAutograd" + ) + compiled_autograd_log.info( + "%s", lazy_format_graph_code("Compiled autograd graph", graph) + ) + trace_structured( + "compiled_autograd_graph", + payload_fn=lambda: graph.print_readable(print_output=False), + ) + return self.compiler_fn(graph) + + def to_proxy(self, t): + if t is None: + return None + if isinstance(t, list): + return [self.to_proxy(x) for x in t] + if isinstance(t, tuple): + return tuple(self.to_proxy(x) for x in t) + assert isinstance(t, (torch.Tensor, torch.SymInt)) + return fetch_object_proxy(self.fx_tracer)(t).proxy + + def 
bind_tensors_to_proxies(self, tensors, proxies):
+        if isinstance(proxies, torch.fx.Proxy):
+            proxies = [proxies[i] for i in range(len(tensors))]
+        assert len(tensors) == len(proxies)
+        track_tensor_tree(tensors, proxies, constant=None, tracer=self.fx_tracer)
+
+    def bind_backward_state(self, index: int):
+        assert self.hooks_proxy is not None
+        proxy = self.hooks_proxy[index]  # type: ignore[index]
+        bw_state = BackwardState()
+        track_tensor_tree(bw_state, proxy, constant=None, tracer=self.fx_tracer)
+        return bw_state
+
+
+compiled_autograd_enabled = False
+
+# We may have code like:
+#   with enable(compiler_fn):
+#     ...
+#     with disable():
+#       ...
+#     ...
+# The disable() call just wants to disable compiled autograd temporarily.
+# But overall the feature is enabled.
+#
+# The code covered by the disable context manager has no way to know if
+# compiled autograd is enabled overall. Use another variable
+# compiled_autograd_enabled_count to indicate how many times compiled
+# autograd has been enabled in the call stack for this purpose.
+compiled_autograd_enabled_count = 0
+
+
+@contextlib.contextmanager
+def enable(compiler_fn):
+    prior = torch._C._dynamo.compiled_autograd.set_autograd_compiler(
+        functools.partial(AutogradCompilerInstance, compiler_fn)
+    )
+    global compiled_autograd_enabled, compiled_autograd_enabled_count
+    compiled_autograd_enabled = True
+    compiled_autograd_enabled_count += 1
+    try:
+        with torch.autograd.set_multithreading_enabled(False):
+            yield
+    finally:
+        compiled_autograd_enabled_count -= 1
+        if not prior:
+            compiled_autograd_enabled = False
+        torch._C._dynamo.compiled_autograd.set_autograd_compiler(prior)
+
+
+@contextlib.contextmanager
+def disable():
+    prior = torch._C._dynamo.compiled_autograd.set_autograd_compiler(None)
+    global compiled_autograd_enabled
+    compiled_autograd_enabled = False
+    try:
+        yield
+    finally:
+        if prior:
+            compiled_autograd_enabled = True
+        torch._C._dynamo.compiled_autograd.set_autograd_compiler(prior)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/comptime.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/comptime.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b876258bd485e0f3a2ef4a33ce9ce07d0ec8b3b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/comptime.py
@@ -0,0 +1,373 @@
+# This file establishes the public comptime interface to Dynamo.
+# This allows Dynamo users to execute arbitrary Python code while
+# Dynamo is symbolically evaluating their original programs.
+#
+# The goal of the public API is to give users rope, without actually
+# leaking private implementation details of Dynamo.
+
+import builtins
+import dis
+import traceback
+from typing import Optional, Union
+
+import torch
+from torch.fx.experimental.symbolic_shapes import free_symbols
+
+from .exc import unimplemented
+from .variables.constant import ConstantVariable
+from .variables.tensor import SymNodeVariable
+
+
+class ComptimeVar:
+    """
+    A ComptimeVar represents a Python value, at some particular point
+    in time, in the Python code we are symbolically evaluating with
+    torchdynamo. This must be distinguished from a runtime value, as
+    at compile-time there are some properties of the variable we
+    do not know (for example, if the ComptimeVar represents a Tensor,
+    we only know metadata about the tensor; we do NOT know what the
+    actual data in the Tensor is.)
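+
+    A rough usage sketch (the function and local name below are illustrative)::
+
+        import torch
+        from torch._dynamo.comptime import comptime
+
+        @torch.compile(backend="eager")
+        def f(x):
+            # runs at compile time and prints the (possibly symbolic) size of `x`;
+            # outside of compilation, comptime(...) is a no-op
+            comptime(lambda ctx: print(ctx.get_local("x").size(0)))
+            return x * 2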
+ """ + + def __init__(self, v): + self.__variable = v + + def as_proxy(self): + """ + Returns an fx.Proxy (or tuple/list of fx.Proxy) representing + this variable in the FX graph we are assembling to pass + to the user compiler. + + This method only works for variables we actually track in + the FX graph, aka Tensors (and ints, if you are compiling + with dynamic shapes). In particular, if you have a list + or tuple of tensors, you will get a list/tuple of proxies + (not a single proxy representing the entire list/tuple). + """ + return self.__variable.as_proxy() + + def is_proxy(self): + """ + Returns True if as_proxy() would succeed. + """ + return self.__variable.is_proxy() + + def as_fake(self): + """ + Returns a "fake" value (either a FakeTensor or a SymInt) + representing the variable in question. This only works + for variables that denote Tensor or int. You can use + this to query metadata; e.g., v.as_fake().size(0) will + tell you the compile-time known size of the tensor. + + WARNING: Do NOT mutate the returned tensor. + """ + return self.__variable.as_proxy().node.meta["example_value"] + + def size(self, dim: Optional[int] = None) -> Union[int, torch.SymInt]: + """ + Returns the size of the tensor (if dim is None) or the size + at the dimension dim. The returned size may be a SymInt. + """ + return self.as_fake().size(dim) + + def python_type(self): + """ + Returns what type(v) would have returned for the variable + at compile time. + """ + return self.__variable.python_type() + + def as_python_constant(self): + """ + Returns the Python value this variable would have, but only if it is + completely known at compile-time (e.g., it is constant). + + WARNING: Do NOT mutate the returned constant. The returned constant + may or may not correspond to the actual value this variable may take + on at runtime; for example, if the variable in question is a constant + list, we may return a copy of that list. + """ + return self.__variable.as_python_constant() + + def is_python_constant(self): + """ + Returns True if as_python_constant would succeed. + """ + return self.__variable.is_python_constant() + + def is_dynamic(self): + if isinstance(self.__variable, SymNodeVariable): + fs = free_symbols(self.__variable.sym_num) + return bool(fs) + return False + + def force_static(self): + """ + Forces that a value is static, inducing a guard on its specific value + """ + if isinstance(self.__variable, SymNodeVariable): + self.__variable.evaluate_expr() + elif isinstance(self.__variable, ConstantVariable): + # TODO: Maybe complain if this isn't a int/bool/float variable + pass + else: + raise AssertionError( + f"cannot force {self.__variable} ({type(self.__variable)}) static" + ) + + def _i_will_not_complain_if_bc_breaks_VariableTracker(self): + """ + Returns the internal data structure VariableTracker that Dynamo uses + to represent variables at compile time. There are no BC guarantees on + this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if you rely on + it. + """ + return self.__variable + + def __repr__(self): + # TODO: The default repr is pretty bad, do better + return repr(self.__variable) + + # TODO: API for adding a custom guard + + +class ComptimeContext: + """ + This context class provides access to a public API for Dynamo's internals. 
+ If there is something here you would find useful that is missing, please + file a feature request at https://github.com/pytorch/pytorch/ + """ + + def __init__(self, tx): + self.__tx = tx + + def get_local(self, name: str, *, stacklevel=0) -> ComptimeVar: + """ + Retrieve the compile-time known information about a local. + """ + tx = self.__get_tx(stacklevel) + return ComptimeVar(tx.symbolic_locals[name]) + + def graph_break(self, msg="ComptimeContext.graph_break"): + """ + Manually trigger a graph break + """ + unimplemented(msg) + + def graph(self): + """ + Retrieve the partially constructed FX graph that would be + passed to the user compiler after compilation. + """ + return self.__tx.output.graph + + def assert_static(self, val): + """ + Asserts that the int is static (and not dynamic, per dynamic shapes) + """ + assert ( + not val.is_dynamic() + ), "expected static but got dynamic (run with TORCH_LOGS=dynamic for more info)" + + def print_graph(self, *, verbose=True, file=None): + """ + Print the partially constructed FX graph that would be passed + to the user compiler after compilation. + """ + print( + self.__tx.output.graph.python_code("self", verbose=verbose).src, file=file + ) + + def parent(self): + return ComptimeContext(self.__tx.parent) + + def __get_tx(self, stacklevel): + tx = self.__tx + for _ in range(stacklevel): + tx = tx.parent + return tx + + def print_disas(self, *, file=None, stacklevel=0): + """ + Print the current series of opcodes being executed (not including + parent frames), including where you are in the particular opcode + stream. + """ + tx = self.__get_tx(stacklevel) + print( + dis.Bytecode( + tx.f_code, + current_offset=tx.instructions[tx.instruction_pointer].offset, + ).dis(), + file=file, + ) + + def print_value_stack(self, *, file=None, stacklevel=0): + """ + Print the current Python value stack. Note that this is NOT the same + as the traceback; use print_bt() to print that. Note that at + stacklevel=0, this will typically be empty, as comptime cannot + currently be used in an expression context where there would be + intermediates on the stack. If you would find this useful, please + file a bug at https://github.com/pytorch/pytorch/ + + NB: Stack grows downwards in our print + """ + # TODO: improve printing + tx = self.__get_tx(stacklevel) + for s in tx.stack: + print(f"- {s}", file=file) + + def print_locals(self, *, file=None, stacklevel=0): + """ + Print all of the locals available in the current context. + By default this view is very limited; you can get more information + about any individual local using get_local(). + """ + # TODO: improve by improving the VariableTracker printing + tx = self.__get_tx(stacklevel) + for k, v in tx.symbolic_locals.items(): + print(f"{k} = {v}", file=file) + + def print_bt(self, *, file=None, stacklevel=0): + """ + Print the user code backtrace, starting at the beginning of the + frame Dynamo started evaluating. Note that this MAY NOT go all + the way to the torch.compile invocation, as we may have done + a graph break and are compiling an intermediate frame as the + starting point. 
If you think the other behavior would be better, + file a bug at https://github.com/pytorch/pytorch/ + """ + stack = [] + tx = self.__get_tx(stacklevel) + while tx is not None: + stack.append(tx.frame_summary()) + tx = getattr(tx, "parent", None) + print( + "".join(traceback.StackSummary.from_list(reversed(stack)).format()), + file=file, + ) + + def print_guards(self, *, file=None): + """ + Print the currently installed guards for the Dynamo context. + This does NOT include guards associated with variables that + may or may not be installed in the future if those variables + are used. + """ + # TODO: improve print format, current guard format is extremely + # verbose + print( + "\n".join(f"{repr(guard)}" for guard in sorted(self.__tx.output.guards)), + file=file, + ) + + def _i_will_not_complain_if_bc_breaks_InstructionTranslator(self): + """ + Returns the internal data structure InstructionTranslator that Dynamo + uses to track state of symbolic evaluation. There are no BC + guarantees on this API and WE RESERVE THE RIGHT TO BREAK YOUR CODE if + you rely on it. + """ + return self.__tx + + +class _Comptime: + @staticmethod + def __call__(fn): + """fn gets called at compile time in TorchDynamo, does nothing otherwise""" + return + + # Convenience wrappers that are more compact to use + + @staticmethod + def graph_break(): + comptime(lambda ctx: ctx.graph_break()) + + @staticmethod + def print_graph(): + comptime(lambda ctx: ctx.print_graph()) + + @staticmethod + def print_disas(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_disas( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + @staticmethod + def print_value_stack(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_value_stack( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + # This is a more useful variant of print_value_stack that can be used + # in an expression context; e.g., x + print_value_stack_and_return(y + z), + # you will see x on the stack prior to the addition operation + @staticmethod + def print_value_stack_and_return(e, *, stacklevel=0): + comptime( + lambda ctx: ctx.print_value_stack( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + return e + + @staticmethod + def print_locals(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_locals( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + @staticmethod + def print_bt(*, stacklevel=0): + comptime( + lambda ctx: ctx.print_bt( + stacklevel=ctx.get_local("stacklevel").as_python_constant() + 1 + ) + ) + + @staticmethod + def print_guards(): + comptime(lambda ctx: ctx.print_guards()) + + @staticmethod + def assert_static(val): + comptime(lambda ctx: ctx.assert_static(ctx.get_local("val"))) + + @staticmethod + def force_static(val): + comptime(lambda ctx: ctx.get_local("val").force_static()) + + @staticmethod + def breakpoint(): + """ + Like pdb breakpoint(), but drop into pdb whenever this line + of code is compiled by dynamo. 
Use it by putting
+        this in your model code::
+
+            from torch._dynamo.comptime import comptime
+            comptime.breakpoint()
+
+        And then, inside pdb, you can access 'ctx' to query things
+        about the compilation context::
+
+            (Pdb) !ctx.print_bt()
+            (Pdb) !ctx.print_locals()
+            (Pdb) p ctx.get_local("attention").as_fake()
+        """
+
+        def inner(inner_ctx):
+            ctx = inner_ctx.parent()
+            builtins.breakpoint()
+
+        comptime(inner)
+
+
+comptime = _Comptime()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/config.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..a41094fc36b5447b0c6ac7decb17ca202384f3aa
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/config.py
@@ -0,0 +1,423 @@
+import getpass
+import inspect
+import os
+import re
+import sys
+import tempfile
+from os.path import abspath, dirname
+from typing import Any, Callable, Dict, Optional, Set, Type, TYPE_CHECKING, Union
+
+import torch
+
+# to configure logging for dynamo, aot, and inductor
+# use the following API in the torch._logging module
+# torch._logging.set_logs(dynamo=<level>, aot=<level>, inductor=<level>)
+# or use the environment variable TORCH_LOGS="dynamo,aot,inductor" (use a prefix + to indicate higher verbosity)
+# see this design doc for more detailed info
+# Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit#
+# the name of a file to write the logs to
+# [@compile_ignored: debug]
+log_file_name: Optional[str] = None
+
+# [@compile_ignored: debug] Verbose will print full stack traces on warnings and errors
+verbose = os.environ.get("TORCHDYNAMO_VERBOSE", "0") == "1"
+
+# [@compile_ignored: runtime_behaviour] verify the correctness of optimized backend
+verify_correctness = False
+
+# need this many ops to create an FX graph
+minimum_call_count = 1
+
+# turn on/off DCE pass
+dead_code_elimination = True
+
+# disable (for a function) when cache reaches this size
+
+# controls the maximum number of cache entries with a guard on same ID_MATCH'd
+# object. It also controls the maximum size of cache entries if they don't have
+# any ID_MATCH'd guards.
+# [@compile_ignored: runtime_behaviour]
+cache_size_limit = 8
+
+# [@compile_ignored: runtime_behaviour] controls the maximum number of entries for a code object.
+accumulated_cache_size_limit = 64
+
+# whether or not to specialize on int inputs. This only has an effect with
+# dynamic_shapes; when dynamic_shapes is False, we ALWAYS specialize on int
+# inputs. Note that assume_static_by_default will also cause ints to get
+# specialized, so this is mostly useful for export, where we want inputs
+# to be dynamic, but accesses to ints should NOT get promoted into inputs.
+specialize_int = False
+
+# legacy config, does nothing now!
+dynamic_shapes = True
+
+use_lazy_graph_module = (
+    os.environ.get("TORCH_COMPILE_USE_LAZY_GRAPH_MODULE", "1") == "1"
+)
+
+# This is a temporary flag, which changes the behavior of dynamic_shapes=True.
+# When assume_static_by_default is True, we only allocate symbols for shapes marked dynamic via mark_dynamic.
+# NOTE - this flag can be removed once we can run dynamic_shapes=False w/ the mark_dynamic API
+# see [Note - on the state of mark_dynamic]
+assume_static_by_default = True
+
+# This flag changes how dynamic_shapes=True works, and is meant to be used in conjunction
+# with assume_static_by_default=True.
+# With this flag enabled, we always compile a frame as fully static for the first time, and, if we fail
+# any guards due to wobbles in shape, we recompile with *all* the wobbled shapes as being marked dynamic.
+automatic_dynamic_shapes = True
+
+# This flag changes how the shapes of parameters are treated.
+# If this flag is set to True, then the shapes of torch.nn.Parameter as well as of torch.Tensor are attempted to be dynamic
+# If this flag is set to False, then the shapes of torch.nn.Parameter are assumed to be static,
+# while the shapes of torch.Tensor are assumed to be dynamic.
+force_parameter_static_shapes = True
+
+# This flag ensures that the shapes of an nn module are always assumed to be static
+# If the flag is set to True, then the shapes of an nn.module are assumed to be static
+# If the flag is set to False, then the shapes of an nn.module can be dynamic
+force_nn_module_property_static_shapes = True
+
+# Typically, if you mark_dynamic a dimension, we will error if the dimension
+# actually ended up getting specialized. This knob changes the behavior so
+# that we don't error at all. This is helpful for our CI where I'm using a
+# heuristic to mark batch dimensions as dynamic and the heuristic may get it
+# wrong.
+allow_ignore_mark_dynamic = False
+
+# Set this to False to assume nn.Modules() contents are immutable (similar assumption as freezing)
+guard_nn_modules = False
+
+# Uses CPython internal dictionary tags to detect mutation. There is some
+# overlap between guard_nn_modules_using_dict_tags and guard_nn_modules flag.
+# guard_nn_modules unspecializes the nn module instance and adds guards for each
+# relevant member of the nn modules. On the other hand,
+# guard_nn_modules_using_dict_tags specializes on each nn module instance but
+# uses low overhead dict version matching to detect mutations, obviating the
+# need to guard on members of the nn modules. With
+# guard_nn_modules_using_dict_tags, guard_nn_modules is not really required
+# but kept around for debugging and discussing unspecializing nn module
+# variables.
+# TODO(janimesh, voz): Remove both of these flags (or at least guard_nn_modules)
+# once we have reached stability for the guard_nn_modules_using_dict_tags.
+guard_nn_modules_using_dict_tags = True
+
+# This feature doesn't really work. We offer this flag for experimental
+# purposes / if you want to help us build out support.
+#
+# torchdynamo has very limited support for tensor subclasses that implement
+# __torch_function__. Our current support is limited to tensor subclasses
+# that DO NOT store metadata on the tensor (in general, dynamo does not
+# support Python code that stores extra attributes on tensors at present).
+# If your tensor subclass purely changes function call behavior via
+# __torch_function__, you can allow torchdynamo to trace into it by
+# adding it to traceable_tensor_subclasses. We don't do any safety checks,
+# so it is up to you to ensure that your subclass is well behaved. See also
+# https://github.com/pytorch/torchdynamo/issues/1948
+#
+# We do NOT currently support __torch_dispatch__. The implementation is
+# currently buggy, the main show stopper for nontrivial use is
+# https://github.com/pytorch/torchdynamo/issues/1952
+traceable_tensor_subclasses: Set[Type[Any]] = set()
+
+# Suppress errors in torch._dynamo.optimize, instead forcing a fallback to eager.
+# This is a good way to get your model to work one way or another, but you may
+# lose optimization opportunities this way.
Devs, if your benchmark model is failing +# this way, you should figure out why instead of suppressing it. +suppress_errors = bool(os.environ.get("TORCHDYNAMO_SUPPRESS_ERRORS", False)) + +# Record and write an execution record of the current frame to a file +# if an exception is encountered +# @compile_ignored[debug] +replay_record_enabled = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1" + +# Rewrite assert statement in python with torch._assert +rewrite_assert_with_torch_assert = True + +# Disable dynamo +disable = os.environ.get("TORCH_COMPILE_DISABLE", False) + +# [@compile_ignored: runtime_behaviour] Get a cprofile trace of Dynamo +cprofile = os.environ.get("TORCH_COMPILE_CPROFILE", False) + +# legacy config, does nothing now! +skipfiles_inline_module_allowlist: Dict[Any, Any] = {} + +# If a string representing a PyTorch module is in this ignorelist, +# the `allowed_functions.is_allowed` function will not consider it +# when creating a list of PyTorch functions that will appear in +# FX IR. +allowed_functions_module_string_ignorelist = { + "torch.distributions", + "torch.testing", + "torch._refs", + "torch._prims", + "torch._decomp", +} + +# Debug Flag to try minifier at different stages. Possible values are {None, "aot", "dynamo"} +# None - Minifier is switched off +# dynamo - Runs minifier on the TorchDynamo produced graphs, if compilation fails +# aot - Runs minifier on the Aot Autograd produced graphs, if compilation fails +# [@compile_ignored: debug] +repro_after = os.environ.get("TORCHDYNAMO_REPRO_AFTER", None) + +# Compiler compilation debug info +# 1: Dumps the original graph out to repro.py if compilation fails +# 2: Dumps a minifier_launcher.py if compilation fails. +# 3: Always dumps a minifier_launcher.py. Good for segfaults. +# 4: Dumps a minifier_launcher.py if the accuracy fails. +# [@compile_ignored: debug] +repro_level = int(os.environ.get("TORCHDYNAMO_REPRO_LEVEL", 2)) + +# By default, we try to detect accuracy failure by running both forward +# and backward of a torchdynamo produced graph (if you are using repro_after +# 'dynamo'). This setting forces us to only test the forward graph and +# not the backward graph. This can be helpful if you're trying to debug +# an inference only problem, but the minifier seems to be choking on the +# backwards step +# TODO: Detect this situation automatically so the user doesn't need +# to manually configure this +# [@compile_ignored: debug] +repro_forward_only = os.environ.get("TORCHDYNAMO_REPRO_FORWARD_ONLY") == "1" + +# The tolerance we should use when testing if a compiled graph +# has diverged so that we should treat it as an accuracy failure +# [@compile_ignored: debug] +repro_tolerance = 1e-3 + +# If True, when testing if two models are the same, we will test them against +# a third fp64 reference and only report a problem if the RMSE relative to the +# fp64 is greater. However, this will use more memory; you may disable this +# if memory usage is too high. +# [@compile_ignored: runtime_behaviour] +same_two_models_use_fp64 = True + +# Not all backends support scalars. Some calls on torch.Tensor (like .item()) return a scalar type. +# When this flag is set to False, we introduce a graph break instead of capturing. +# This requires dynamic_shapes to be True. +capture_scalar_outputs = False + +# Not all backends support operators that have dynamic output shape (e.g., +# nonzero, unique). When this flag is set to False, we introduce a graph +# break instead of capturing. This requires dynamic_shapes to be True. 
+# If you set this to True, you probably also want capture_scalar_outputs
+# (these are separated for historical reasons).
+capture_dynamic_output_shape_ops = False
+
+# By default, dynamo will treat all ints as backed SymInts, which means (1) it
+# will wait to see the int change over multiple runs before generalizing and
+# (2) it will still always 0/1 specialize an int. When true, this knob
+# forces dynamo to treat _length_per_key and _offset_per_key on
+# KeyedJaggedTensor from torchrec as size-like unbacked SymInts, so that
+# they (1) generalize immediately and (2) unsoundly never compare equal to
+# 0/1. This is not on by default as AOTAutograd/Inductor cannot currently
+# compile this code; however, this can be useful for export.
+force_unspec_int_unbacked_size_like_on_torchrec_kjt = False
+
+# Should almost always be true in prod. This relaxes the requirement that cond's true_fn and
+# false_fn produce code with identical guards.
+enforce_cond_guards_match = True
+
+# Specify how to optimize a compiled DDP module. The flag accepts a boolean
+# value or a string. There are 4 modes.
+# 1. "ddp_optimizer" (or True): with "ddp_optimizer", Dynamo will automatically
+# split model graph into pieces to match DDP bucket sizes to allow DDP
+# comm/compute overlap.
+# 2. "python_reducer" (experimental): this optimization requires the usage
+# of compiled_autograd. With "python_reducer", DDP will disable the C++ reducer
+# and use the Python reducer to allow compiled_autograd to trace the
+# communication and allow comm/compute overlap without graph-breaks.
+# 3. "python_reducer_without_compiled_forward" (experimental): this mode is
+# similar to "python_reducer". One should only use this optimization mode
+# when compiled_autograd is used but the DDP module is not compiled.
+# 4. "no_optimization" (or False): Dynamo won't split the model graph, nor
+# will Python reducer be used. With this mode, there will be no graph-breaks
+# and the original DDP C++ reducer will be used. There will be no comm/compute
+# overlap. This mode CANNOT be used with compiled_autograd.
+# Note that to avoid breaking the existing usage, mode 1 and mode 4 can be
+# specified with a boolean value. True is using ddp_optimizer and False is
+# no optimization.
+optimize_ddp: Union[bool, str] = True
+
+_ddp_optimization_mode = [
+    "ddp_optimizer",
+    "python_reducer",  # experimental mode
+    "python_reducer_without_compiled_forward",  # experimental mode
+    "no_optimization",
+]
+
+
+def _get_optimize_ddp_mode():
+    m = sys.modules[__name__]
+    if isinstance(m.optimize_ddp, bool):
+        if m.optimize_ddp:
+            mode = "ddp_optimizer"
+        else:
+            mode = "no_optimization"
+    elif isinstance(m.optimize_ddp, str):
+        mode = m.optimize_ddp
+    else:
+        raise ValueError(f"Invalid type, {type(optimize_ddp)=}")
+
+    assert mode in m._ddp_optimization_mode, f"Invalid mode {mode=}"
+    return mode
+
+
+# If True, delays DDPOptimizer submodule compilation to 1st run of the model,
+# so that real tensor strides are used in all submodules
+# (instead of using FakeTensor strides which can differ from real tensor strides and causes errors in some cases).
+# This feature is not hardened yet and it's known to cause issues to some models, so False by default.
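+#
+# Like the other knobs in this module, optimize_ddp above (and optimize_ddp_lazy_compile
+# below) can be set at runtime before compiling, e.g. (illustrative):
+#   torch._dynamo.config.optimize_ddp = "python_reducer"
+# or patched temporarily via torch._dynamo.config.patch(optimize_ddp="python_reducer").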
+optimize_ddp_lazy_compile = False + +# Whether to skip guarding on FSDP-managed modules +skip_fsdp_guards = True + +# Make dynamo skip guarding on hooks on nn modules +# Note: unsafe: if your model actually has hooks and you remove them, or doesn't and you add them, +# dynamo will not notice and will execute whichever version you first compiled. +skip_nnmodule_hook_guards = True + +# If True, raises exception if TorchDynamo is called with a context manager +raise_on_ctx_manager_usage = True + +# If True, raise when aot autograd is unsafe to use +raise_on_unsafe_aot_autograd = False + +# If true, error if you torch.jit.trace over a dynamo-optimized function. +# If false, silently suppress dynamo +error_on_nested_jit_trace = True + +# If true, error with a better message if we symbolically trace over a +# dynamo-optimized function. If false, silently suppress dynamo. +error_on_nested_fx_trace = True + +# Disables graph breaking on rnn. YMMV with backends. +allow_rnn = False + +# If true, error if we try to compile a function that has +# been seen before. +# [@compile_ignored: runtime_behaviour] +error_on_recompile = False + +# [@compile_ignored: debug] Whether to report any guard failures (deprecated: does not do anything) +report_guard_failures = True + +# [@compile_ignored: debug] root folder of the project +base_dir = dirname(dirname(dirname(abspath(__file__)))) + +# Trace through NumPy or graphbreak +trace_numpy = True + +# Trace through torch.distributed code +trace_distributed = False + +# Default NumPy dtypes when tracing with torch.compile +# We default to 64bits. For efficiency, one may want to change these to float32 +numpy_default_float = "float64" +numpy_default_complex = "complex128" +numpy_default_int = "int64" + +# use numpy's PRNG if True, pytorch otherwise +use_numpy_random_stream = False + + +def is_fbcode(): + return not hasattr(torch.version, "git_version") + + +def default_debug_dir_root(): + # [@compile_ignored: debug] + DEBUG_DIR_VAR_NAME = "TORCH_COMPILE_DEBUG_DIR" + if DEBUG_DIR_VAR_NAME in os.environ: + return os.path.join(os.environ[DEBUG_DIR_VAR_NAME], "torch_compile_debug") + elif is_fbcode(): + return os.path.join( + tempfile.gettempdir(), getpass.getuser(), "torch_compile_debug" + ) + else: + return os.path.join(os.getcwd(), "torch_compile_debug") + + +# [@compile_ignored: debug] +debug_dir_root = default_debug_dir_root() + +# [@compile_ignored: debug] +_save_config_ignore = { + "repro_after", + "repro_level", + # workaround: "cannot pickle PyCapsule" + "constant_functions", + # workaround: "cannot pickle module" + "skipfiles_inline_module_allowlist", +} + +# for backend="cudagraphs", mutations on input be sent to the cudagraph backend +# or replayed in aot_autograd epilogue. default is False because mutation on inputs +# can prevent cudagraphing. +cudagraph_backend_keep_input_mutation = False + +# When True, only ops that have the torch.Tag.pt2_compliant tag +# will be allowed into the graph; all other ops will be disallowed +# and will fall back to eager-mode PyTorch. Useful to ensure +# correctness of custom ops. +only_allow_pt2_compliant_ops = False + +capture_autograd_function = True + +# enable/disable dynamo tracing for `torch.func` transforms +capture_func_transforms = False + +# enable/disable user-defined triton kernel optimizations +optimize_user_defined_triton_kernels = True + +# If to log Dynamo compilation metrics into log files (for OSS) and Scuba tables (for fbcode). 
+log_compilation_metrics = True + +# A set of logging functions which will be reordered to the end of graph breaks, +# allowing dynamo to construct larget graph. Note that there are some +# limitations to this, such as how it does not correctly print objects that were +# mutated after the print statement. +reorderable_logging_functions: Set[Callable[[Any], None]] = set() + +# simulates what would happen if we didn't have support for BUILD_SET opcode, +# used for testing +inject_BUILD_SET_unimplemented_TESTING_ONLY = False + +_autograd_backward_strict_mode_banned_ops = [ + "stride", + "requires_grad", + "storage_offset", + "layout", + "data", +] + +_autograd_backward_strict_mode_banned_ops.extend( + [name for name, _ in inspect.getmembers(torch.Tensor) if re.match(r"^is_.*", name)] +) + +# Enables caching of dispatches to fake tensors. +fake_tensor_cache_enabled = ( + os.environ.get("TORCH_FAKE_TENSOR_DISPATCH_CACHE", "1") == "1" +) + +# Enables cross checking between the fake tensor cache and dispatch. +fake_tensor_cache_crosscheck_enabled = ( + os.environ.get("TORCH_FAKE_TENSOR_DISPATCH_CACHE_CROSSCHECK", "0") == "1" +) + +# support `context_fn` in torch.utils.checkpoint.checkpoint API under torch.compile(). +# WARNING: this is an experimental flag and is subject to change. +_experimental_support_context_fn_in_torch_utils_checkpoint = False + +if TYPE_CHECKING: + from torch.utils._config_typing import * # noqa: F401, F403 + + def _make_closure_patcher(**changes): + ... + + +from torch.utils._config_module import install_config_module + +install_config_module(sys.modules[__name__]) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..15456e30858112dcf1e23577c15bf96755628d88 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/convert_frame.py @@ -0,0 +1,924 @@ +import collections +import dis +import functools +import itertools +import logging +import os +import random +import sys +import threading +import time +import traceback +import types +import typing +import weakref +from typing import Any, Callable, Dict, List, Optional, Set + +from torch.fx._lazy_graph_module import ( # type: ignore[attr-defined] + _use_lazy_graph_module, +) + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + +import torch +import torch._logging +from torch._guards import compile_context, CompileContext, CompileId, tracing +from torch._logging import structured +from torch._utils_internal import signpost_event +from torch.fx.experimental.symbolic_shapes import ( + ConstraintViolationError, + GuardOnDataDependentSymNode, +) +from torch.fx.graph_module import _forward_from_src as original_forward_from_src +from torch.nn.parallel.distributed import DistributedDataParallel +from torch.utils._python_dispatch import _disable_current_modes +from torch.utils._traceback import format_traceback_short + +from . 
import config, exc, trace_rules +from .backends.registry import CompilerFn +from .bytecode_analysis import remove_dead_code, remove_pointless_jumps +from .bytecode_transformation import ( + check_inst_exn_tab_entries_valid, + Instruction, + is_generator, + propagate_inst_exn_table_entries, + transform_code_object, +) +from .cache_size import ( + CacheSizeRelevantForFrame, + compute_cache_size, + exceeds_cache_size_limit, + is_recompilation, +) +from .eval_frame import always_optimize_code_objects, skip_code, TorchPatcher +from .exc import ( + augment_exc_message, + BackendCompilerFailed, + format_error_msg, + InternalTorchDynamoError, + TorchRuntimeError, + UncapturedHigherOrderOpError, + unimplemented, + Unsupported, +) +from .guards import ( + CheckFunctionManager, + get_and_maybe_log_recompilation_reason, + GuardedCode, +) +from .hooks import Hooks +from .output_graph import OutputGraph +from .replay_record import ExecutionRecord +from .symbolic_convert import InstructionTranslator, SpeculationLog +from .trace_rules import is_numpy +from .types import BytecodeHook +from .utils import ( + CleanupManager, + CompilationMetrics, + counters, + dynamo_timed, + format_bytecode, + frame_phase_timing, + gen_record_file_name, + increment_frame, + is_namedtuple, + istype, + LazyString, + maybe_cprofile, + orig_code_map, + record_compilation_metrics, + reset_graph_break_dup_checker, + setup_compile_debug, + troubleshooting_url, + write_record_to_file, +) + +log = logging.getLogger(__name__) +bytecode_log = torch._logging.getArtifactLogger(__name__, "bytecode") +GlobalStateGuard = torch._C._dynamo.guards.GlobalStateGuard + +compile_lock = threading.RLock() + + +class Tracker: + def __init__(self): + self.seen = [] + self.seen_ids = set() + + def add(self, strong_obj): + idx = id(strong_obj) + if idx not in self.seen_ids: + obj = weakref.ref(strong_obj, lambda _: self.seen_ids.remove(idx)) + self.seen.append(obj) + self.seen_ids.add(idx) + + def __contains__(self, item): + return id(item) in self.seen_ids + + def clear(self): + self.seen.clear() + self.seen_ids.clear() + + +input_codes = Tracker() +output_codes = Tracker() + +initial_global_state: Optional[GlobalStateGuard] = None + + +@functools.wraps(original_forward_from_src) +def fx_forward_from_src_skip_result(*args, **kwargs): + # we monkey patch FX to prevent infinite loop of trying to convert + # our generated code + result: types.FunctionType = original_forward_from_src(*args, **kwargs) + skip_code(result.__code__) + return result + + +def preserve_global_state(fn): + """ + Context manager to: + 1) Save/restore torch.is_grad_enabled() state + 2) Save/restore python random state + 3) Save/restore torch random state + 4) Monkey patch torch.fx.graph_module._forward_from_src + """ + + @functools.wraps(fn) + def _fn(*args, **kwargs): + guards = GlobalStateGuard() + prior_grad_mode = torch.is_grad_enabled() + prior_inference_mode = torch.is_inference_mode_enabled() + prior_deterministic = torch.are_deterministic_algorithms_enabled() + prior_warn_only = torch.is_deterministic_algorithms_warn_only_enabled() + py_rng_state = random.getstate() + torch_rng_state = torch.random.get_rng_state() + if torch.cuda.is_available(): + cuda_rng_state = torch.cuda.get_rng_state() + prior_fwd_from_src = torch.fx.graph_module._forward_from_src + torch.fx.graph_module._forward_from_src = fx_forward_from_src_skip_result + cleanup = setup_compile_debug() + try: + return fn(*args, **kwargs) + finally: + cleanup.close() + torch._C._set_grad_enabled(prior_grad_mode) + 
torch.torch.autograd.grad_mode._enter_inference_mode(prior_inference_mode) + torch.use_deterministic_algorithms( + prior_deterministic, warn_only=prior_warn_only + ) + random.setstate(py_rng_state) + torch.random.set_rng_state(torch_rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined] + torch.fx.graph_module._forward_from_src = prior_fwd_from_src + assert ( + guards.check() + ), "Global state changed while dynamo tracing, please report a bug" + + _fn._torchdynamo_orig_callable = fn # type: ignore[attr-defined] + return _fn + + +@TorchPatcher.suppress_torch_distributed_warnings +def has_tensor_in_frame(frame): + """Check if the frame has torch.* related bits""" + # Check if the function was decorated using torch._dynamo.optimize + if frame.f_code in always_optimize_code_objects: + return True + + # Check if there is global import of torch.* + for co_name in frame.f_code.co_names: + if co_name in frame.f_globals: + obj = frame.f_globals[co_name] + if isinstance(obj, types.ModuleType) and ( + obj.__name__.startswith("torch.") or obj is torch + ): + return True + # ... or a global import of numpy.* + if np and config.trace_numpy and (obj is np or is_numpy(obj)): + return True + + seen_ids: Dict[int, bool] = dict() + + def has_tensor(obj): + """Recursively check if the obj has a tensor""" + obj_id = id(obj) + if obj_id in seen_ids: + return seen_ids[obj_id] + seen_ids[obj_id] = False + + if isinstance(obj, (torch.Tensor, torch.nn.Module)) or ( + istype(obj, type) and issubclass(obj, torch.nn.Module) + ): + seen_ids[obj_id] = True + return seen_ids[obj_id] + elif ( + config.trace_numpy + and np + and (istype(obj, np.ndarray) or isinstance(obj, np.generic)) + ): + seen_ids[obj_id] = True + return seen_ids[obj_id] + elif istype(obj, (list, tuple)): + seen_ids[obj_id] = any(has_tensor(v) for v in obj) + return seen_ids[obj_id] + elif istype(obj, dict): + # Some packages like pytest can be updated during runtime. 
So, make a + # copy of values to avoid issues like "RuntimeError: dictionary + # changed size during iteration" + values = list(obj.values()) + seen_ids[obj_id] = any(has_tensor(v) for v in values) + return seen_ids[obj_id] + elif istype(obj, (str, int, float, type(None), bool)): + seen_ids[obj_id] = False + return seen_ids[obj_id] + elif is_namedtuple(obj) and hasattr(obj, "_fields"): + seen_ids[obj_id] = any(has_tensor(getattr(obj, v)) for v in obj._fields) + return seen_ids[obj_id] + else: + # if config.debug: + # print( + # f"Assuming that object of type {type(obj)} does not have a tensor" + # ) + return False + + # Check if the passed arguments are of type Tensor + for value in frame.f_locals.values(): + if has_tensor(value): + return True + + log.debug( + "skipping because no torch.* %s \ + %s %s", + frame.f_code.co_name, + frame.f_code.co_filename, + frame.f_code.co_firstlineno, + ) + + return False + + +def exception_handler(e, code, frame=None, export=False): + record_filename = None + if hasattr(e, "exec_record"): + record_filename = gen_record_file_name(e, code) + write_record_to_file(record_filename, e.exec_record) + e.record_filename = record_filename + + augment_exc_message(e, export=export) + + +FRAME_COUNTER = 0 +FRAME_COMPILE_COUNTER: typing.Counter[int] = collections.Counter() + + +def convert_frame_assert( + compiler_fn: CompilerFn, + one_graph: bool = True, + export: bool = False, + export_constraints=None, +): + """Fully convert a frame into an FX graph""" + reset_graph_break_dup_checker() + + def _convert_frame_assert( + frame: types.FrameType, cache_entry, hooks: Hooks, frame_state, *, skip: int = 0 + ): + increment_frame() + + code = frame.f_code + + cache_size = compute_cache_size(frame, cache_entry) + recompile_reasons = None + if is_recompilation(cache_size): + recompile_reasons = get_and_maybe_log_recompilation_reason( + cache_entry, frame + ) + + input_codes.add(code) + if code in output_codes: + return None + if ( + os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION") + and os.environ.get("TORCHDYNAMO_DEBUG_FUNCTION") != code.co_name + ): + return None + if code.co_name == "" and code.co_filename.endswith( + ( + "transformers/file_utils.py", + "transformers/utils/generic.py", + "diffusers/utils/outputs.py", + ) + ): + # not needed, but cleans up torchbench error stats + return None + if code.co_name == "__setattr__": + # setattr could be tricky to handle generally, + # but also not likely useful to compile- skip the whole frame + return None + if code.co_name == "__init__" and code.co_filename.startswith( + os.path.dirname(torch.optim.__file__) + ): + # optimizer support is still incomplete see + # test_state_dict in test/dynamo/test_optimizers.py + return None + + # Check if the frame is generated by an exec builtin call + # TODO - Running exec generated frame seems propagates f_globals to the + # next frames. + if code.co_name == "" and code.co_filename == "": + return None + + if ( + code.co_name == "" + and code.co_filename == "" + and not bool(frame.f_builtins) + ): + # namedtuple subclass constructor. Empty builtins cause issue with + # len keyword in LIST_LEN guard. + return None + + if is_generator(code): + unimplemented("generator") + exceeded, limit_type = exceeds_cache_size_limit(cache_size) + if exceeded: + + def format_func_info(code): + return f"'{code.co_name}' ({code.co_filename}:{code.co_firstlineno})" + + def format_guard_failures(): + assert recompile_reasons, "TODO(whc) any other recompile reasons?" 
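+                # report only the most recent recompilation reason here; the full list can
+                # be surfaced by the user via TORCH_LOGS="recompiles" (see the warning below)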
+ return recompile_reasons[-1] + + log.warning( + "torch._dynamo hit config.%s (%s)\n" + " function: %s\n" + " last reason: %s\n" + 'To log all recompilation reasons, use TORCH_LOGS="recompiles".\n' + "To diagnose recompilation issues, see %s.", + limit_type, + getattr(config, limit_type), + format_func_info(code), + format_guard_failures(), + troubleshooting_url, + ) + unimplemented(f"{limit_type} reached") + + if not has_tensor_in_frame(frame): + return None + + global initial_global_state + initial_global_state = GlobalStateGuard() + + global FRAME_COUNTER + if "_id" not in frame_state: + frame_state["_id"] = FRAME_COUNTER + FRAME_COUNTER += 1 + frame_id = frame_state["_id"] + + frame_compile_id = FRAME_COMPILE_COUNTER[frame_id] + FRAME_COMPILE_COUNTER[frame_id] += 1 + + compile_id = CompileId(frame_id, frame_compile_id) + + signpost_event( + "dynamo", + "_convert_frame_assert._compile", + { + "co_name": code.co_name, + "co_filename": code.co_filename, + "co_firstlineno": code.co_firstlineno, + "cache_size": cache_size.num_cache_entries_with_same_id_matched_objs, + "accumulated_cache_size": cache_size.num_cache_entries, + }, + ) + + return _compile( + frame.f_code, + frame.f_globals, + frame.f_locals, + frame.f_builtins, + compiler_fn, + one_graph, + export, + export_constraints, + hooks, + cache_size, + frame, + frame_state=frame_state, + compile_id=compile_id, + skip=skip + 1, + ) + + _convert_frame_assert._torchdynamo_orig_callable = compiler_fn # type: ignore[attr-defined] + + def _clone_with_backend(backend): + return convert_frame_assert(backend, one_graph, export, export_constraints) + + _convert_frame_assert._clone_with_backend = _clone_with_backend # type: ignore[attr-defined] + return _convert_frame_assert + + +from collections import OrderedDict + +from torch.utils.hooks import RemovableHandle + +# we have to use `OrderedDict` to make `RemovableHandle` work. +_bytecode_hooks: Dict[int, BytecodeHook] = OrderedDict() + + +def register_bytecode_hook(hook: BytecodeHook) -> RemovableHandle: + """Register hooks for bytecode generated by Dynamo. The hook can do some + logging, as well as return a new code object to be used. Please refer + to `BytecodeHook` for the hook signature. 
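+
+    A minimal sketch of a hook (the name `my_hook` is illustrative)::
+
+        def my_hook(code, out_code):
+            # `code` is the original code object and `out_code` is the transformed one;
+            # return None to keep `out_code` as-is, or return a replacement code object
+            print(f"dynamo compiled {code.co_name}")
+            return None
+
+        handle = register_bytecode_hook(my_hook)
+        ...
+        handle.remove()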
+ """ + handle = RemovableHandle(_bytecode_hooks) + _bytecode_hooks[handle.id] = hook + return handle + + +@_use_lazy_graph_module(config.use_lazy_graph_module) +@maybe_cprofile +def _compile( + code: types.CodeType, + globals: Dict[str, object], + locals: Dict[str, object], + builtins: Dict[str, object], + compiler_fn: CompilerFn, + one_graph: bool, + export: bool, + export_constraints, + hooks: Hooks, + cache_size: CacheSizeRelevantForFrame, + frame: Optional[types.FrameType] = None, + frame_state=None, + compile_id=None, + *, + skip: int = 0, +) -> Optional[GuardedCode]: + from torch.fx.experimental.validator import ( + bisect, + BisectValidationException, + translation_validation_enabled, + ValidationException, + ) + + output: Optional[OutputGraph] = None + tracer: Optional[InstructionTranslator] = None + # This is shared across restarts + mutated_closure_cell_contents: Set[str] = set() + speculation_log = SpeculationLog() + torch._dynamo.callback_handler.run_start_callbacks() + + @preserve_global_state + def transform(instructions, code_options): + nonlocal output + nonlocal tracer + speculation_log.restart() + tracer = InstructionTranslator( + instructions, + code, + locals, + globals, + builtins, + code_options, + compiler_fn, + one_graph, + export, + export_constraints, + mutated_closure_cell_contents, + frame_state=frame_state, + speculation_log=speculation_log, + ) + + try: + with tracing(tracer.output.tracing_context), tracer.set_current_tx(): + tracer.run() + except exc.UnspecializeRestartAnalysis: + speculation_log.clear() + raise + except (exc.SpeculationRestartAnalysis, exc.SkipFrame): + raise + except Exception: + if translation_validation_enabled(): + bisect(tracer.output.shape_env) + raise + finally: + tracer.output.call_cleanup_hooks() + + output = tracer.output + assert output is not None + assert output.output_instructions + instructions[:] = output.output_instructions + code_options.update(output.code_options) + + if config.dead_code_elimination: + propagate_inst_exn_table_entries(instructions) + check_inst_exn_tab_entries_valid(instructions) + instructions[:] = remove_pointless_jumps(remove_dead_code(instructions)) + + @dynamo_timed(phase_name="entire_frame_compile") + def compile_inner( + code: types.CodeType, + one_graph: bool, + hooks: Hooks, + transform: Callable[[List[Instruction], Dict[str, Any]], Any], + ) -> Optional[GuardedCode]: + nonlocal output + for attempt in itertools.count(): + CompileContext.get().attempt = attempt + try: + out_code = transform_code_object(code, transform) + break + except exc.RestartAnalysis as e: + log.info( + "Restarting analysis due to %s", + LazyString(format_traceback_short, e.__traceback__), + ) + if attempt > 100: + unimplemented("100+ RestartAnalysis() calls") + except exc.SkipFrame as e: + log.debug( + "Skipping frame %s %s \ + %s %s", + e, + code.co_name, + code.co_filename, + code.co_firstlineno, + ) + if one_graph: + log.debug("No graph captured with one_graph=True") + return None + + def log_bytecode(prefix, name, filename, line_no, code): + if bytecode_log.isEnabledFor(logging.DEBUG): + bytecode_log.debug( + format_bytecode(prefix, name, filename, line_no, code) + ) + + log_bytecode( + "ORIGINAL BYTECODE", + code.co_name, + code.co_filename, + code.co_firstlineno, + code, + ) + log_bytecode( + "MODIFIED BYTECODE", + code.co_name, + code.co_filename, + code.co_firstlineno, + out_code, # type: ignore[possibly-undefined] + ) + + for hook in _bytecode_hooks.values(): + hook_output = hook(code, out_code) + if hook_output is 
not None: + out_code = hook_output + + orig_code_map[out_code] = code + output_codes.add(out_code) + + assert output is not None + + # Tests for new code objects. + # The rationale for these tests can be found in torch/csrc/dynamo/eval_frame.c + # Only test once the code object is created. + # They are not tested during runtime. + + def count_args(code): + import inspect + + return ( + code.co_argcount + + code.co_kwonlyargcount + + bool(code.co_flags & inspect.CO_VARARGS) + + bool(code.co_flags & inspect.CO_VARKEYWORDS) + ) + + total_argcount_old = count_args(code) + total_argcount_new = count_args(out_code) + msg = "arg mismatch: " + msg += f"old code object has args {code.co_varnames[:total_argcount_old]}, " + msg += f"new code object has args {out_code.co_varnames[:total_argcount_new]}" + assert ( + code.co_varnames[:total_argcount_old] + == out_code.co_varnames[:total_argcount_new] + ), msg + + msg = "free var mismatch: " + msg += f"old code object has free var {code.co_freevars}, " + msg += f"new code object has free var {out_code.co_freevars}" + assert code.co_freevars == out_code.co_freevars, msg + + msg = "cell var mismatch: " + msg += f"old code object has cell var {code.co_cellvars}, " + msg += f"new code object has cell var {out_code.co_cellvars}" + assert code.co_cellvars == out_code.co_cellvars, msg + + # Skipping Dynamo on a frame without any extracted graph. + # This does not affect eager functionality. But this is necessary + # for export for cases where Dynamo-reconstructed bytecode can create + # new function frames, confusing export in thinking that there + # are extra graphs now. + + if output.export and output.is_empty_graph(): + return None + + assert output.guards is not None + CleanupManager.instance[out_code] = output.cleanups + check_fn = CheckFunctionManager( + output, + hooks.guard_fail_fn if hooks else None, + ) + + guarded_code = GuardedCode(out_code, check_fn.check_fn) + + if not output.is_empty_graph() and hooks.guard_export_fn is not None: + # We should not run the guard_export_fn when Dynamo does not + # generate any graph. This can happen in export when TorchDynamo + # generated bytecode has some reconstruction logic for mutated + # variables which can trigger TorchDynamo on the children frames but + # they are benign and do not generate any new graphs. 
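# Illustrative sketch of the count_args() consistency check above: the helper
# (reproduced here as _count_args, a hypothetical standalone copy) tallies
# positional args, keyword-only args, and the *args/**kwargs slots flagged on
# a code object.
import inspect

def _count_args(code):
    return (
        code.co_argcount
        + code.co_kwonlyargcount
        + bool(code.co_flags & inspect.CO_VARARGS)
        + bool(code.co_flags & inspect.CO_VARKEYWORDS)
    )

def _example(a, b, *args, c=1, **kwargs):
    pass

assert _count_args(_example.__code__) == 5  # a, b, *args, c, **kwargs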
+ hooks.guard_export_fn(output.guards) + + return guarded_code + + with compile_context(CompileContext(compile_id)): + log.debug( + "torchdynamo start compiling %s %s:%s, stack (elided %s frames):\n%s", + code.co_name, + code.co_filename, + code.co_firstlineno, + skip + 2, + # -2: omit current frame, omit contextlib decorator + "".join(traceback.format_list(traceback.extract_stack()[: -2 - skip])), + ) + # -4: -2 as above, plus trace_structured frames + torch._logging.trace_structured( + "dynamo_start", + lambda: { + "stack": structured.from_traceback( + traceback.extract_stack()[: -4 - skip] + ) + }, + ) + start_time = time.time() + fail_type: Optional[str] = None + fail_reason: Optional[str] = None + fail_user_frame_filename: Optional[str] = None + fail_user_frame_lineno: Optional[int] = None + try: + guarded_code = compile_inner(code, one_graph, hooks, transform) + return guarded_code + except ( + Unsupported, + TorchRuntimeError, + BackendCompilerFailed, + AssertionError, + ConstraintViolationError, + GuardOnDataDependentSymNode, + ValidationException, + UncapturedHigherOrderOpError, + BisectValidationException, + ) as e: + fail_type = str(type(e)) + fail_reason = str(e) + exception_handler(e, code, frame, export=export) + if e.innermost_user_frame_summary is not None: # type: ignore[union-attr] + fail_user_frame_filename = e.innermost_user_frame_summary.filename # type: ignore[union-attr] + fail_user_frame_lineno = e.innermost_user_frame_summary.lineno # type: ignore[union-attr] + raise + except Exception as e: + fail_type = str(type(e)) + fail_reason = str(e) + exception_handler(e, code, frame, export=export) + if e.innermost_user_frame_summary is not None: # type: ignore[attr-defined] + fail_user_frame_filename = e.innermost_user_frame_summary.filename # type: ignore[attr-defined] + fail_user_frame_lineno = e.innermost_user_frame_summary.lineno # type: ignore[attr-defined] + raise InternalTorchDynamoError(str(e)).with_traceback( + e.__traceback__ + ) from None + finally: + if tracer: + tracer.output.local_scope = {} + + from .utils import curr_frame + + frame_key = str(curr_frame) + if ( + fail_reason is None + and output is not None + and frame_key in frame_phase_timing + ): + guard_count = len(output.guards) + shape_env_guard_count = len(output.shape_env.guards) + graph_op_count = output.count_calls() + graph_node_count = len(output.graph.nodes) + graph_input_count = len(output.placeholders) + entire_frame_compile_time = frame_phase_timing[frame_key].get( + "entire_frame_compile", None + ) + backend_compile_time = frame_phase_timing[frame_key].get( + "backend_compile", None + ) + inductor_compile_time = frame_phase_timing[frame_key].get( + "inductor_compile", None + ) + code_gen_time = frame_phase_timing[frame_key].get("code_gen", None) + non_compliant_ops = {op.__qualname__ for op in output.non_compliant_ops} + compliant_custom_ops = { + op.__qualname__ for op in output.compliant_custom_ops + } + else: + guard_count = None + shape_env_guard_count = None + graph_op_count = None + graph_node_count = None + graph_input_count = None + entire_frame_compile_time = None + backend_compile_time = None + inductor_compile_time = None + code_gen_time = None + non_compliant_ops = set({}) + compliant_custom_ops = set({}) + metrics = CompilationMetrics( + frame_key, + code.co_name, + code.co_filename, + code.co_firstlineno, + cache_size.num_cache_entries_with_same_id_matched_objs, + cache_size.num_cache_entries, + guard_count, + shape_env_guard_count, + graph_op_count, + graph_node_count, + 
graph_input_count, + start_time, + entire_frame_compile_time, + backend_compile_time, + inductor_compile_time, + code_gen_time, + fail_type, + fail_reason, + fail_user_frame_filename, + fail_user_frame_lineno, + non_compliant_ops, + compliant_custom_ops, + ) + record_compilation_metrics(metrics) + torch._dynamo.callback_handler.run_end_callbacks() + + +def convert_frame(compiler_fn: CompilerFn, hooks: Hooks): + """Try to convert a frame into an FX graph, if error leave frame unmodified""" + inner_convert = convert_frame_assert(compiler_fn, one_graph=False) + + def _convert_frame( + frame: types.FrameType, cache_entry, hooks: Hooks, frame_state, skip: int = 0 + ): + counters["frames"]["total"] += 1 + try: + result = inner_convert( + frame, cache_entry, hooks, frame_state, skip=skip + 1 + ) + counters["frames"]["ok"] += 1 + return result + except Exception as e: + # These two exception types are "soft" failure, in the sense that + # we know this is due to something we didn't implement all the + # way, scare the user less about it. That being said, if you + # are trying to understand why a graph break happened, it's still + # important to have this information, so offer it. + # + # NB: NotImplementedError used to be on this list, but actually + # it is impossible for it to reach here, as it is converted into + # InternalTorchDynamoError. This behavior seemed reasonable + # to me (ezyang, Aug 2023) so I kept it, but maybe at some point + # someone wanted these to also get suppressed. If so, you'll + # need to make these exceptions not get wrapped + + # We intentionally don't want to suppress error here. + if isinstance(e, UncapturedHigherOrderOpError): + raise + + soft_fail = isinstance(e, Unsupported) + if not config.suppress_errors and not soft_fail: + raise + + # Suppress the error. NB: It's very important to do the + # suppression logging HERE, where the actual suppression + # happens. Previously it was somewhere else and so it was + # possible to accidentally not log at all. 
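# Illustrative sketch: the "suppress the error" fallback described above is
# gated by torch._dynamo.config.suppress_errors. With it enabled, non-soft
# compile errors are logged and the frame simply runs eagerly instead of
# raising; the toy function below is hypothetical.
import torch
import torch._dynamo

torch._dynamo.config.suppress_errors = True

@torch.compile
def _toy(x):
    return x + 1

_toy(torch.ones(3))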
+ record_filename = getattr(e, "record_filename", None) + code = frame.f_code + error_msg = format_error_msg(e, code, record_filename, frame) + + if soft_fail: + log.info(error_msg, exc_info=True) + else: + log.warning(error_msg, exc_info=True) + return None + + _convert_frame._torchdynamo_orig_callable = compiler_fn # type: ignore[attr-defined] + _convert_frame._clone_with_backend = lambda backend: convert_frame(backend, hooks) # type: ignore[attr-defined] + return _convert_frame + + +# TODO mlazos: add support for same args, or record them +def replay(filename): + from .backends.debugging import eager + + original_replay_val = config.replay_record_enabled + config.replay_record_enabled = False + with open(filename, "rb") as in_file: + record = ExecutionRecord.load(in_file) + record.globals = dict(itertools.chain(record.globals.items(), globals().items())) + + try: + _compile( + record.code, + record.globals, + record.locals, + record.builtins, + compiler_fn=eager, + one_graph=False, + export=False, + export_constraints=None, + hooks=Hooks(), + cache_size=CacheSizeRelevantForFrame(0, 0), + frame=None, + frame_state={}, + ) + finally: + config.replay_record_enabled = original_replay_val + + +def first_real_inst_idx(code): + if sys.version_info < (3, 11): + return 0 + for inst in dis.get_instructions(code): + if inst.opname == "RESUME": + return inst.offset // 2 + raise RuntimeError("RESUME instruction not found in code") + + +def catch_errors_wrapper(callback, hooks: Hooks): + @functools.wraps(callback) + def catch_errors(frame, cache_entry, frame_state): + assert frame_state is not None + + is_skipfile = trace_rules.check(frame.f_code) + if ( + # TODO: the first condition is not covered by any test + frame.f_lasti >= first_real_inst_idx(frame.f_code) + or is_skipfile + or config.disable + ): + if log.isEnabledFor(logging.DEBUG): + skip_reason = ( + "traced frame already" + if frame.f_lasti >= first_real_inst_idx(frame.f_code) + else "in skipfiles" + if trace_rules.check(frame.f_code) + else "dynamo tracing is disabled" + ) + if not is_skipfile or config.verbose: + log.debug( + "skipping: %s (reason: %s, file: %s)", + frame.f_code.co_name, + skip_reason, + frame.f_code.co_filename, + ) + return None + if frame.f_code.co_filename == "" and frame.f_code.co_name == "__new__": + # nametuple constructor + return None + if config._get_optimize_ddp_mode() == "ddp_optimizer": + ddp_module = DistributedDataParallel._get_active_ddp_module() + if ddp_module: + with compile_lock: + from torch._dynamo.backends.distributed import DDPOptimizer + + ddp_optimizer = DDPOptimizer( + bucket_bytes_cap=ddp_module.bucket_bytes_cap, + backend_compile_fn=callback._torchdynamo_orig_callable, + ) + assert hasattr( + callback, "_clone_with_backend" + ), "DDPOptimizer only supports callback fns that know how to clone themselves." 
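# Illustrative sketch: what first_real_inst_idx() above computes on
# Python 3.11+, where compiled function bodies carry a RESUME instruction near
# the start; frames whose f_lasti is already at or past it are skipped as
# "traced frame already". _resume_index is a hypothetical standalone copy.
import dis
import sys

def _resume_index(code):
    for inst in dis.get_instructions(code):
        if inst.opname == "RESUME":
            return inst.offset // 2
    raise RuntimeError("RESUME instruction not found in code")

if sys.version_info >= (3, 11):
    print(_resume_index((lambda x: x + 1).__code__))  # typically 0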
+ hijacked_callback = callback._clone_with_backend( + ddp_optimizer.compile_fn, + ) + return hijacked_callback(frame, cache_entry, hooks, frame_state) + + with compile_lock, _disable_current_modes(): + # skip=1: skip this frame + return callback(frame, cache_entry, hooks, frame_state, skip=1) + + catch_errors._torchdynamo_orig_callable = callback # type: ignore[attr-defined] + return catch_errors diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py new file mode 100644 index 0000000000000000000000000000000000000000..1289bdcdffe47bce04e3aa0a663b5d8d5c443b10 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/current_scope_id.py @@ -0,0 +1,23 @@ +import contextlib +import threading + +# Global variable to identify which SubgraphTracer we are in. +# It is sometimes difficult to find an InstructionTranslator to use. +_current_scope_id = threading.local() + + +def current_scope_id(): + global _current_scope_id + if not hasattr(_current_scope_id, "value"): + _current_scope_id.value = 1 + return _current_scope_id.value + + +@contextlib.contextmanager +def enter_new_scope(): + global _current_scope_id + try: + _current_scope_id.value = current_scope_id() + 1 + yield + finally: + _current_scope_id.value = current_scope_id() - 1 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/device_interface.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/device_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..4857b963881f784eb72115cdbb6560282796c4b4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/device_interface.py @@ -0,0 +1,199 @@ +import inspect +from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Type, Union + +import torch +from torch._streambase import _EventBase, _StreamBase + +get_cuda_stream: Optional[Callable[[int], int]] +if torch.cuda._is_compiled(): + from torch._C import _cuda_getCurrentRawStream as get_cuda_stream +else: + get_cuda_stream = None + +_device_t = Union[torch.device, str, int, None] + +# Recording the device properties in the main process but used in worker process. +caching_worker_device_properties: Dict[str, Any] = {} +caching_worker_current_devices: Dict[str, int] = {} + + +class DeviceInterfaceMeta(type): + def __new__(metacls, *args, **kwargs): + class_member = args[2] + if "Event" in class_member: + assert inspect.isclass(class_member["Event"]) and issubclass( + class_member["Event"], _EventBase + ), "DeviceInterface member Event should be inherit from _EventBase" + if "Stream" in class_member: + assert inspect.isclass(class_member["Stream"]) and issubclass( + class_member["Stream"], _StreamBase + ), "DeviceInterface member Stream should be inherit from _StreamBase" + return super().__new__(metacls, *args, **kwargs) + + +class DeviceInterface(metaclass=DeviceInterfaceMeta): + """ + This is a simple device runtime interface for Inductor. It enables custom + backends to be integrated with Inductor in a device-agnostic semantic. + """ + + class device: + def __new__(cls, device: _device_t): + raise NotImplementedError() + + class Worker: + """ + Worker API to query device properties that will work in multi processing + workers that cannot use the GPU APIs (due to processing fork() and + initialization time issues). Properties are recorded in the main process + before we fork the workers. 
+ """ + + @staticmethod + def set_device(device: int): + raise NotImplementedError() + + @staticmethod + def current_device() -> int: + raise NotImplementedError() + + @staticmethod + def get_device_properties(device: _device_t = None): + raise NotImplementedError() + + @staticmethod + def current_device(): + raise NotImplementedError() + + @staticmethod + def set_device(device: _device_t): + raise NotImplementedError() + + @staticmethod + def device_count(): + raise NotImplementedError() + + @staticmethod + def is_available() -> bool: + raise NotImplementedError() + + @staticmethod + def stream(stream: torch.Stream): + raise NotImplementedError() + + @staticmethod + def current_stream(): + raise NotImplementedError() + + @staticmethod + def set_stream(stream: torch.Stream): + raise NotImplementedError() + + @staticmethod + def _set_stream_by_id(stream_id: int, device_index: int, device_type: int): + raise NotImplementedError() + + @staticmethod + def get_raw_stream(): + raise NotImplementedError() + + @staticmethod + def synchronize(device: _device_t = None): + raise NotImplementedError() + + @staticmethod + def get_device_properties(device: _device_t = None): + raise NotImplementedError() + + @staticmethod + def get_compute_capability(device: _device_t = None): + raise NotImplementedError() + + +class CudaInterface(DeviceInterface): + device = torch.cuda.device + + # register Event and Stream class into the backend interface + # make sure Event and Stream are implemented and inherited from the _EventBase and _StreamBase + Event = torch.cuda.Event + Stream = torch.cuda.Stream + + class Worker: + @staticmethod + def set_device(device: int): + caching_worker_current_devices["cuda"] = device + + @staticmethod + def current_device() -> int: + if "cuda" in caching_worker_current_devices: + return caching_worker_current_devices["cuda"] + return torch.cuda.current_device() + + @staticmethod + def get_device_properties(device: _device_t = None): + if device is not None: + if isinstance(device, str): + device = torch.device(device) + assert device.type == "cuda" + if isinstance(device, torch.device): + device = device.index + if device is None: + device = CudaInterface.Worker.current_device() + + if "cuda" not in caching_worker_device_properties: + device_prop = [ + torch.cuda.get_device_properties(i) + for i in range(torch.cuda.device_count()) + ] + caching_worker_device_properties["cuda"] = device_prop + + return caching_worker_device_properties["cuda"][device] + + current_device = staticmethod(torch.cuda.current_device) + set_device = staticmethod(torch.cuda.set_device) + device_count = staticmethod(torch.cuda.device_count) + stream = staticmethod(torch.cuda.stream) # type: ignore[assignment] + current_stream = staticmethod(torch.cuda.current_stream) + set_stream = staticmethod(torch.cuda.set_stream) # type: ignore[assignment] + _set_stream_by_id = staticmethod(torch.cuda._set_stream_by_id) # type: ignore[assignment] + synchronize = staticmethod(torch.cuda.synchronize) + get_device_properties = staticmethod(torch.cuda.get_device_properties) # type: ignore[assignment] + get_raw_stream = staticmethod(get_cuda_stream) # type: ignore[arg-type] + + # Can be mock patched by @patch decorator. 
+ @staticmethod + def is_available() -> bool: + return torch.cuda.is_available() + + @staticmethod + def get_compute_capability(device: _device_t = None): + major, min = torch.cuda.get_device_capability(device) + return major * 10 + min + + +device_interfaces: Dict[str, Type[DeviceInterface]] = {} + + +def register_interface_for_device( + device: Union[str, torch.device], device_interface: Type[DeviceInterface] +): + if isinstance(device, torch.device): + device = str(device) + device_interfaces[device] = device_interface + + +def get_interface_for_device(device: Union[str, torch.device]) -> Type[DeviceInterface]: + if isinstance(device, torch.device): + device = str(device) + if device in device_interfaces: + return device_interfaces[device] + raise NotImplementedError(f"No interface for device {device}") + + +def get_registered_device_interfaces() -> Iterable[Tuple[str, Type[DeviceInterface]]]: + return device_interfaces.items() + + +register_interface_for_device("cuda", CudaInterface) +for i in range(torch.cuda.device_count()): + register_interface_for_device(f"cuda:{i}", CudaInterface) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/exc.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/exc.py new file mode 100644 index 0000000000000000000000000000000000000000..f3c75ad7e68d510e08da5ef319ba8585fa81b5a1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/exc.py @@ -0,0 +1,335 @@ +import os +import textwrap +from enum import auto, Enum +from traceback import extract_stack, format_exc, format_list, StackSummary +from typing import cast, NoReturn, Optional + +import torch._guards + +from . import config + +from .utils import counters + + +def exportdb_error_message(case_name): + return ( + "For more information about this error, see: " + + "https://pytorch.org/docs/main/generated/exportdb/index.html#" + + case_name.replace("_", "-") + ) + + +import logging + +log = logging.getLogger(__name__) +graph_breaks_log = torch._logging.getArtifactLogger(__name__, "graph_breaks") + + +class TorchDynamoException(RuntimeError): + pass + + +class InternalTorchDynamoError(TorchDynamoException): + pass + + +class RestartAnalysis(TorchDynamoException): + pass + + +class SpeculationRestartAnalysis(RestartAnalysis): + pass + + +class UnspecializeRestartAnalysis(RestartAnalysis): + pass + + +class SkipFrame(TorchDynamoException): + pass + + +class TorchRuntimeError(TorchDynamoException): + pass + + +class InvalidBackend(TorchDynamoException): + def __init__(self, name): + super().__init__( + f"Invalid backend: {name!r}, see `torch._dynamo.list_backends()` for available backends." + ) + + +class ResetRequired(TorchDynamoException): + def __init__(self): + super().__init__( + textwrap.dedent( + """ + Must call `torch._dynamo.reset()` before changing backends. Detected two calls to + `torch.compile()` with a different backend compiler arguments. 
+ """ + ) + ) + + +class BackendCompilerFailed(TorchDynamoException): + def __init__(self, backend_fn, inner_exception): + self.backend_name = getattr(backend_fn, "__name__", "?") + self.inner_exception = inner_exception + msg = f"backend={self.backend_name!r} raised:\n{type(inner_exception).__name__}: {inner_exception}" + super().__init__(msg) + + +class Unsupported(TorchDynamoException): + def __init__(self, msg): + super().__init__(msg) + self.real_stack = torch._guards.TracingContext.extract_stack() + self.msg = msg + self.category: Optional[str] = None + self.add_to_stats() + + def remove_from_stats(self): + assert self.category is not None + counters[self.category][self.msg] -= 1 + if counters[self.category][self.msg] <= 0: + del counters[self.category][self.msg] + + def add_to_stats(self, category="unimplemented"): + self.category = category + counters[category][self.msg] += 1 + + +class RecompileError(TorchDynamoException): + pass + + +class ArgsMismatchError(Unsupported): + def __init__(self, msg): + super().__init__(msg) + + +class AttributeMutationError(Unsupported): + def __init__(self, msg): + super().__init__(msg) + + +class CondOpArgsMismatchError(ArgsMismatchError): + """ + Internal error from cond() due to arguments mismatch. + """ + + def __init__(self, msg): + super().__init__(msg) + + +class UserErrorType(Enum): + DYNAMIC_CONTROL_FLOW = auto() + ANTI_PATTERN = auto() + STANDARD_LIBRARY = auto() + CONSTRAINT_VIOLATION = auto() + DYNAMIC_DIM = auto() + INVALID_INPUT = auto() + INVALID_OUTPUT = auto() + + +class UserError(Unsupported): + def __init__(self, error_type: UserErrorType, msg, case_name=None): + """ + Type of errors that would be valid in Eager, but not supported in TorchDynamo. + The error message should tell user about next actions. + + error_type: Type of user error + msg: Actionable error message + case_name: (Optional) Unique name (snake case) for the usage example in exportdb. + """ + if case_name is not None: + assert isinstance(case_name, str) + if msg.endswith("."): + msg += " " + else: + msg += "\n" + msg += exportdb_error_message(case_name) + super().__init__(msg) + self.error_type = error_type + self.message = msg + + +class UncapturedHigherOrderOpError(TorchDynamoException): + pass + + +class IncorrectUsage(Exception): + pass + + +# These exceptions are ok to fallback to eager/graph_break. +exceptions_allowed_to_be_fallback = ( + torch._subclasses.fake_tensor.DataDependentOutputException, + torch._subclasses.fake_tensor.DynamicOutputShapeException, + torch._subclasses.fake_tensor.UnsupportedOperatorException, + torch._subclasses.fake_tensor.UnsupportedFakeTensorException, +) + + +def unimplemented_with_warning(e: Exception, code, msg: str) -> NoReturn: + # This function calls unimplemented internally and eventually graph breaks + # or falls to eager. unimplemented itself does not print any user warnings, + # i.e., its very silent. This helper function is intended when an error is + # encountered in the torch.compile stack which is worth showing as warning + # to the user. For example, if AOT Autograd backend fails with a fake tensor + # exception, its ok to fallback to eager but not silently. Here, we can use + # this function to log the message and the stack trace. 
+ graph_break_msg = format_error_msg_verbose(e, code) + graph_breaks_log.debug("%s", graph_break_msg) + log.warning(msg) + raise unimplemented(msg) from e + + +def unimplemented(msg: str) -> NoReturn: + assert msg != os.environ.get("BREAK", False) + raise Unsupported(msg) + + +def warning(msg: str) -> None: + counters["warnings"][msg] += 1 + assert msg != os.environ.get("BREAK", False) + + +# KeyError has special handling for its args +# see https://github.com/python/cpython/blob/3.11/Objects/exceptions.c#L2534 for details +class KeyErrorMsg: + def __init__(self, value): + self.value = value + + def __str__(self): + return str(self.value) + + def __repr__(self) -> str: + return self.__str__() + + +def augment_exc_message(exc: Exception, msg: str = "\n", export: bool = False) -> None: + import traceback + + exc.innermost_user_frame_summary = None # type: ignore[attr-defined] + + real_stack = get_real_stack(exc) + if real_stack is not None and len(real_stack) > 0: + exc.innermost_user_frame_summary = real_stack[-1] # type: ignore[attr-defined] + msg += f"\nfrom user code:\n {''.join(traceback.format_list(real_stack))}" + + if config.replay_record_enabled and hasattr(exc, "record_filename"): + msg += f"\nLast frame execution written to {exc.record_filename}. To run only this frame while debugging, run\ + torch._dynamo.replay('{exc.record_filename}').\n" + + if not config.verbose and hasattr(exc, "real_stack"): + msg += '\nSet TORCH_LOGS="+dynamo" and TORCHDYNAMO_VERBOSE=1 for more information\n' + + if hasattr(exc, "inner_exception") and hasattr( + exc.inner_exception, "minifier_path" + ): + if hasattr(exc.inner_exception, "buck_command"): + msg += ( + f"\nMinifier script written to {exc.inner_exception.minifier_path}. Run " + f"this buck command to find the smallest traced graph " + f"which reproduces this error: {exc.inner_exception.buck_command}\n" + ) + else: + msg += ( + f"\nMinifier script written to {exc.inner_exception.minifier_path}. Run " + "this script to find the smallest traced graph which reproduces this error.\n" + ) + + if not config.suppress_errors and not export: + msg += ( + "\n\n" + "You can suppress this exception and fall back to eager by setting:\n" + " import torch._dynamo\n" + " torch._dynamo.config.suppress_errors = True\n" + ) + + old_msg = "" if len(exc.args) == 0 else str(exc.args[0]) + + if isinstance(exc, KeyError): + exc.args = (KeyErrorMsg(old_msg + msg),) + exc.args[1:] + else: + new_msg = old_msg + msg + exc.args = (new_msg,) + exc.args[1:] + + +def get_real_stack(exc: Exception, frame=None) -> Optional[StackSummary]: + real_stack = getattr(exc, "real_stack", None) + if real_stack is None: + return None + + # NB: it's possible for real_stack to be []; we still attempt to + # report a stack anyway because the stack_above_dynamo may still + # be useful for debugging + + stack_above_dynamo = [] + if frame is not None: + # NB: frame is PyInterpreterFrame on Python 3.11 and later, + # not a TRUE frame object. You can't actually feed it + # to traceback because it doesn't have enough information. + # To solve this problem, we technically should just materialize + # the frame, the same way _PyFrame_GetFrameObject would do + # (but we cannot actually do this, because this populates + # frame_obj field, which default eval frame doesn't like). + # + # Fortunately, in this case, we can hack it: there's no need + # to actually use the truly top frame, we can just extract + # from where we are right now and rely on filter_stack to + # get rid of all the dynamo frames. 
For ease of testing + # we apply this behavior to ALL Python versions + stack_above_dynamo = filter_stack(extract_stack()) + + return cast(StackSummary, stack_above_dynamo + real_stack) + + +# filter out all frames after entering dynamo +def filter_stack(stack): + user_stack = [] + for frame in stack: + if "convert_frame" in frame.filename: + break + if "eval_frame" in frame.filename or "torch._dynamo.optimize(" in frame.line: + continue + user_stack.append(frame) + + return user_stack + + +def format_error_msg_verbose( + exc: Exception, code, record_filename=None, frame=None +) -> str: + msg = ( + f"WON'T CONVERT {code.co_name} {code.co_filename} line {code.co_firstlineno}\n" + ) + msg += "=" * 10 + " TorchDynamo Stack Trace " + "=" * 10 + "\n" + msg += format_exc() + real_stack = get_real_stack(exc, frame) + if real_stack is not None: + msg += ( + "\n" + + "=" * 10 + + " The above exception occurred while processing the following code " + + "=" * 10 + + "\n\n" + ) + msg += "".join(format_list(real_stack)) + msg += "\n" + msg += "=" * 10 + + return msg + + +def format_error_msg(exc: Exception, code, record_filename=None, frame=None) -> str: + msg = os.linesep * 2 + + if config.verbose: + msg = format_error_msg_verbose(exc, code, record_filename, frame) + else: + msg = f"WON'T CONVERT {code.co_name} {code.co_filename}\ + line {code.co_firstlineno} \ndue to: \n{format_exc()}" + + return msg diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/external_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/external_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..87e1e44fbe1aac89176e2fbce6982a19f8fe2ddb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/external_utils.py @@ -0,0 +1,103 @@ +# This module contains functions that *will be allowed* by dynamo + +import functools + +import torch +import torch.utils._pytree as pytree + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + + +def is_compiling() -> bool: + """ + Indicates whether we are tracing/compiling with torch.compile() or torch.export(). + + If need to check specifically that TorchDynamo is used, then use + torch.compiler.is_dynamo_compiling(). + + TODO(khabinov): we should deprecate this function and use one of these two: + * torch.compiler.is_compiling(), + * torch.compiler.is_dynamo_compiling(). + It will depend on the context where to use what. + """ + return torch.compiler.is_compiling() + + +def wrap_inline(fn): + """ + Create an extra frame around fn that is not in skipfiles + """ + + @functools.wraps(fn) + def inner(*args, **kwargs): + return fn(*args, **kwargs) + + return inner + + +def call_hook(hook, *args): + """ + Used by compiled autograd to handle hook returning None + """ + result = hook(*args) + if result is None: + return args[0] + return result + + +def wrap_numpy(f): + r"""Decorator that turns a function from ``np.ndarray``s to ``np.ndarray``s into a function + from ``torch.Tensor``s to ``torch.Tensor``s. 
+ """ + if not np: + return f + + @functools.wraps(f) + def wrap(*args, **kwargs): + args, kwargs = pytree.tree_map_only( + torch.Tensor, lambda x: x.numpy(), (args, kwargs) + ) + out = f(*args, **kwargs) + return pytree.tree_map_only(np.ndarray, lambda x: torch.as_tensor(x), out) + + return wrap + + +class FakeContext: + def __init__(self, saved_tensors): + # this will cache the results of saved_tensors + # and will no longer call into c++ binding + self.saved_tensors = saved_tensors + + +def call_backward(backward_fn, saved_tensors, *args): + grads = backward_fn(FakeContext(saved_tensors), *args) + + # in eager, we wrap in a tuple when there's only one grad output + if type(grads) is not tuple: + grads = (grads,) + + return grads + + +def untyped_storage_size(x: torch.Tensor): + return x.untyped_storage().size() + + +def call_hook_from_backward_state(*args, bw_state, hook_name: str, **kwargs): + return getattr(bw_state, hook_name)(*args, **kwargs) + + +def call_module_hooks_from_backward_state( + _, result, *args, bw_state, hooks_name: str, module_name: str +): + module = getattr(bw_state, module_name) + hooks = getattr(bw_state, hooks_name) + for hook in hooks: + new_result = hook(module, result, *args) + if new_result is not None: + result = new_result + return result diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/funcname_cache.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/funcname_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..c43e9a230517677062b26dc7509b57c898f98143 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/funcname_cache.py @@ -0,0 +1,57 @@ +import tokenize + +from typing import Dict, List, Optional + +cache: Dict[str, Dict[int, str]] = {} + + +def clearcache() -> None: + cache.clear() + + +def _add_file(filename: str) -> None: + try: + with open(filename) as f: + tokens = list(tokenize.generate_tokens(f.readline)) + except OSError: + cache[filename] = {} + return + + # NOTE: undefined behavior if file is not valid Python source, + # since tokenize will have undefined behavior. + result: Dict[int, str] = {} + # current full funcname, e.g. xxx.yyy.zzz + cur_name = "" + cur_indent = 0 + significant_indents: List[int] = [] + + for i, token in enumerate(tokens): + if token.type == tokenize.INDENT: + cur_indent += 1 + elif token.type == tokenize.DEDENT: + cur_indent -= 1 + # possible end of function or class + if significant_indents and cur_indent == significant_indents[-1]: + significant_indents.pop() + # pop the last name + cur_name = cur_name.rpartition(".")[0] + elif ( + token.type == tokenize.NAME + and i + 1 < len(tokens) + and tokens[i + 1].type == tokenize.NAME + and (token.string == "class" or token.string == "def") + ): + # name of class/function always follows class/def token + significant_indents.append(cur_indent) + if cur_name: + cur_name += "." 
+ cur_name += tokens[i + 1].string + result[token.start[0]] = cur_name + + cache[filename] = result + + +def get_funcname(filename: str, lineno: int) -> Optional[str]: + if filename not in cache: + _add_file(filename) + return cache[filename].get(lineno, None) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/guards.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/guards.py new file mode 100644 index 0000000000000000000000000000000000000000..682610aeb908a47d756da38a18543493e753b970 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/guards.py @@ -0,0 +1,1505 @@ +from __future__ import annotations + +import ast +import builtins +import collections +import dataclasses +import enum +import functools +import importlib +import inspect +import itertools +import logging +import math +import os +import re +import sys +import textwrap +import types +import weakref +from inspect import currentframe, getframeinfo +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union +from weakref import ReferenceType + + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + +import torch +import torch.utils._device +from torch._dynamo.source import ( + is_from_local_source, + TensorProperty, + TensorPropertySource, +) + +from torch._guards import ( + DuplicateInputs, + Guard, + GuardBuilderBase, + GuardEnvExpr, + GuardSource, + Source, +) + +from torch._logging import structured +from torch.fx.experimental.symbolic_shapes import ( + EqualityConstraint, + is_symbolic, + SYMPY_INTERP, +) +from torch.utils._traceback import format_frame, report_compile_source_on_error +from torch.utils.weak import TensorWeakRef + +from . import config, convert_frame, exc, mutation_guard +from .eval_frame import set_guard_error_hook +from .source import AttrSource, DefaultsSource, LocalSource, TypeSource +from .types import CacheEntry, ExtraState, GuardedCode, GuardFail, GuardFn # noqa: F401 +from .utils import ( + common_constant_types, + dict_keys_repr, + guard_failures, + istype, + key_is_id, + key_to_id, + orig_code_map, + tensor_always_has_static_shape, + tuple_iterator_getitem, + tuple_iterator_len, +) + +log = logging.getLogger(__name__) +guards_log = torch._logging.getArtifactLogger(__name__, "guards") +recompiles_log = torch._logging.getArtifactLogger(__name__, "recompiles") +recompiles_verbose_log = torch._logging.getArtifactLogger( + __name__, "recompiles_verbose" +) +verbose_guards_log = torch._logging.getArtifactLogger(__name__, "verbose_guards") + +TensorGuards = torch._C._dynamo.guards.TensorGuards +check_obj_id = torch._C._dynamo.guards.check_obj_id +check_type_id = torch._C._dynamo.guards.check_type_id +dict_version = torch._C._dynamo.guards.dict_version + + +# For user stack printing +@functools.lru_cache(None) +def uninteresting_files(): + import torch._dynamo.external_utils + + mods = [ + torch._dynamo.external_utils, + ] + return {inspect.getfile(m) for m in mods} + + +CLOSURE_VARS = { + "___check_type_id": check_type_id, + "___check_obj_id": check_obj_id, + "___odict_getitem": collections.OrderedDict.__getitem__, + "___key_to_id": key_to_id, + "___dict_version": dict_version, + "___dict_contains": lambda a, b: a in b, + "___tuple_iterator_len": tuple_iterator_len, + "___tuple_iterator_getitem": tuple_iterator_getitem, + "__math_isnan": math.isnan, + "__numpy_isnan": None if np is None else np.isnan, + "inf": float("inf"), + "__load_module": importlib.import_module, + "utils_device": 
torch.utils._device, + "device": torch.device, + "___from_numpy": + # If not numpy array, piggy back on e.g. tensor guards to check type + (lambda a: torch.as_tensor(a) if isinstance(a, (np.generic, np.ndarray)) else a), + "torch": torch, + "inspect": inspect, +} + +if sys.version_info[:2] <= (3, 8): + # [Note: Python Version <= 3.8] + # This branch should be dropped when we drop support for Python 3.8. + # Reason: 'ast.unparse' function was introduced in Python 3.9. + + try: + import astunparse # type: ignore[import] + + def _ast_unparse(node: ast.AST) -> str: + return astunparse.unparse(node).replace("\n", "") + + HAS_UNPARSE_FUNCTIONS = True + except ImportError: + HAS_UNPARSE_FUNCTIONS = False + pass +else: + HAS_UNPARSE_FUNCTIONS = True + + def _ast_unparse(node: ast.AST) -> str: + return ast.unparse(node).replace("\n", "") + + +def strip_function_call(name): + """ + "___odict_getitem(a, 1)" => "a" + "a.layers[slice(2)][0]._xyz" ==> "a" + "getattr(a.layers[slice(2)][0]._abc, '0')" ==> "a" + "getattr(getattr(a.x[3], '0'), '3')" ==> "a" + "a.layers[slice(None, -1, None)][0]._xyz" ==> "a" + """ + # recursively find valid object name in function + valid_name = re.compile("[A-Za-z_].*") + curr = "" + for char in name: + if char in " (": + curr = "" + elif char in "),[]": + if curr and curr != "None" and valid_name.match(curr): + return strip_function_call(curr) + else: + curr += char + + return strip_getattr_getitem(name) + + +def strip_getattr_getitem(name): + """ + "a[1]" => "a" + "a.foo" => "a" + """ + return re.split(r"[.\[]", name)[0] + + +def get_verbose_code_part(code_part, guard): + extra = "" + if guard.user_stack: + for fs in reversed(guard.user_stack): + if fs.filename not in uninteresting_files(): + extra = f" # {format_frame(fs, line=True)}" + break + elif guard.stack: + extra = f" # {format_frame(guard.stack.summary()[-1])}" + + return f"{code_part:<60}{extra}" + + +def convert_to_concrete_values(size_or_stride): + converted: List[Optional[int]] = [] + for dim in size_or_stride: + if not is_symbolic(dim): + converted.append(dim) + else: + assert isinstance(dim, torch.SymInt) + converted.append(dim.node.maybe_as_int()) + return converted + + +def get_tensor_guard_code_part(value, name, sizes, strides): + pytype = type(value) + dispatch_key = ( + torch._C._dispatch_keys(value) | torch._C._dispatch_tls_local_include_set() + ) - torch._C._dispatch_tls_local_exclude_set() + dtype = value.dtype + device_index = value.device.index + requires_grad = value.requires_grad + guard_str = ( + f"check_tensor({name}, {pytype.__qualname__}, {dispatch_key}, {dtype}, " + f"device={device_index}, requires_grad={requires_grad}, size={sizes}, stride={strides})" + ) + return guard_str + + +# The ready to eval generated code (possibly multiple parts) for a guard, plus +# the original guard object that created it for provenance +@dataclasses.dataclass +class GuardCodeList: + code_list: List[str] + guard: Guard + + +class GuardBuilder(GuardBuilderBase): + def __init__( + self, + id_ref: Callable[[Any], str], + source_ref: Callable[[Source], str], + lookup_weakrefs: Callable[[object], ReferenceType[object]], + local_scope: Dict[str, object], + global_scope: Dict[str, object], + check_fn_manager: CheckFunctionManager, + ): + self.id_ref = id_ref + self.source_ref = source_ref + self.lookup_weakrefs = lookup_weakrefs + self.scope: Dict[str, Dict[str, object]] = {"L": local_scope, "G": global_scope} + self.scope["__builtins__"] = builtins.__dict__.copy() + for ( + name, + package_module, + ) in 
torch.package.package_importer._package_imported_modules.items(): + name = name.replace(">", "_").replace("<", "_").replace(".", "_dot_") + # Write the package module into the scope so that we can import it + self.scope["__builtins__"][name] = package_module + # Write the demangled name to the scope so that we can use it + self.scope[name] = package_module + + self.argnames: List[str] = [] + # Code is python expression strings generated for each guard + self.code: List[GuardCodeList] = [] + # shape_env_code is only used by builder and is used for + # shape env code. This exists only because we need to make sure + # shape env guards get run after tensor match guards (since the + # tensor match guards make sure we actually have tensors) + self.shape_env_code: List[GuardCodeList] = [] + + # [Note - On Eager Tensor Guards] + # Most of the time, we generate Python code in a guard to directly + # check various properties. However, tensors are a bit special; + # it is too slow to check their properties one-by-one in Python. + # Instead, there is a C++ function TensorGuards.check which takes + # all of the tensor arguments and checks them all against compile-time + # examples entirely in C++. Thus, every time we process a + # TENSOR_MATCH guard, we just add another entry to + # tensor_check_names/tensor_check_examples, saying "for this local, + # check it against this example", and it all ends up getting + # swept up into a single call to ___check_tensors. Invariant: + # len(tensor_check_names) == len(tensor_check_examples). + # TODO: something here + self.tensor_check_names: List[str] = [] + self.tensor_check_examples: List[torch.Tensor] = [] + self.tensor_check_guards: List[Guard] = [] + + self.check_fn_manager: CheckFunctionManager = check_fn_manager + # Keep track of weak references of objects with ID_MATCH guard. This + # info is stored alongside optimized_code and check_fn and is used to + # limit the number of cache entries with same ID_MATCH'd object. + self.id_matched_objs: Dict[str, ReferenceType[object]] = {} + + # Warning: use this with care! This lets you access what the current + # value of the value you are guarding on is. You probably don't want + # to actually durably save this value though (because it's specific + # to this frame!) Instead, you should be reading out some property + # (like its type) which is what you permanently install into the + # guard code. + def get(self, name: str) -> Any: + return eval(name, self.scope, CLOSURE_VARS) + + # Registers the usage of the source name referenced by the + # string (or stored in the Guard) as being guarded upon. It's important + # to call this before generating some code that makes use of 'guard', + # because without this call, we won't actually bind the variable + # you reference in the actual guard closure (oops!) 
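# Illustrative sketch: the guard code emitted by this builder is a plain Python
# expression evaluated against the "L" (locals) / "G" (globals) scopes plus the
# CLOSURE_VARS helpers. A TYPE_MATCH guard, for example, behaves roughly like
# this pure-Python stand-in for the C++ ___check_type_id helper.
def _check_type_id(obj, expected_type_id):
    return id(type(obj)) == expected_type_id

x = [1, 2, 3]
guard_expr = f"___check_type_id(L['x'], {id(type(x))})"
scope = {"L": {"x": x}, "___check_type_id": _check_type_id}
assert eval(guard_expr, scope)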
+ def arg_ref(self, guard: Union[str, Guard]) -> str: + name: str + if isinstance(guard, str): + name = guard + else: + name = guard.name + base = strip_getattr_getitem(strip_function_call(name)) + if base not in self.argnames: + if re.match(r"[a-zA-Z0-9_]+", base): + if re.match(r"^\d+$", base): + log.warning("invalid var name: %s", guard) + self.argnames.append(base) + + return name + + def _guard_on_attribute(self, guard: Guard, attr_name: str, guard_fn): + attr_source = AttrSource(guard.originating_source, attr_name) + # Copy the stack info + new_guard = Guard( + attr_source, guard_fn, stack=guard.stack, user_stack=guard.user_stack + ) + new_guard.create(self) + + def TYPE_MATCH(self, guard: Guard) -> None: + # ___check_type_id is same as `id(type(x)) == y` + t = type(self.get(guard.name)) + obj_id = self.id_ref(t) + code = f"___check_type_id({self.arg_ref(guard)}, {obj_id})" + self._produce_guard_code(guard, [code]) + + def DICT_VERSION(self, guard: Guard): + # ___check_dict_version is same as `dict_version(x) == y` + ref = self.arg_ref(guard) + version = dict_version(self.get(guard.name)) + code = f"___dict_version({ref}) == {version}" + self._produce_guard_code(guard, [code]) + + def DICT_CONTAINS(self, guard: Guard, key: str, invert: bool): + dict_ref = self.arg_ref(guard) + + maybe_not = "not " if invert else "" + code = f"{maybe_not}___dict_contains({key!r}, {dict_ref})" + return self._produce_guard_code(guard, [code]) + + def BOOL_FALSE(self, guard: Guard): + # Guard on the runtime value being 'False', + # can be faster than seemingly equivalent checks like DICT_KEYS for empty dict + # + # WARNING: this guard is not safe to use generally. It only works if the runtime + # value is of a type that supports bool(), and some types e.g. Tensor do not. + # Only use this guard in cases you can guarantee the runtime type will be friendly. + # (e.g. Specialized NNModule with mutation protection via setattr) + # + # Why not simply check the runtime type inside this guard? It's slow enough to defeat + # the purpose of using this guard, which itself is supposed to be a faster alternative + # to DICT_KEYS. + ref = self.arg_ref(guard) + code = f"not {ref}" + self._produce_guard_code(guard, [code]) + + def ID_MATCH(self, guard: Guard): + # ___check_obj_id is same as `id(x) == y` + if isinstance(guard.originating_source, TypeSource): + # optional optimization to produce cleaner/faster guard code + return self.TYPE_MATCH( + Guard(guard.originating_source.base, GuardBuilder.TYPE_MATCH) # type: ignore[arg-type] + ) + + ref = self.arg_ref(guard) + val = self.get(guard.name) + code = f"___check_obj_id({ref}, {self.id_ref(val)})" + self._produce_guard_code(guard, [code]) + + # Keep track of ID_MATCH'd objects. This will be used to modify the + # cache size logic + if isinstance(guard.originating_source, LocalSource): + # TODO(janimesh) - This is currently restricted to nn.Module objects + # because many other ID_MATCH'd objects fail - like DeviceMesh. + # Increase the scope of ID_MATCH'd objects. 
+ if isinstance(val, torch.nn.Module): + local_name = guard.originating_source.local_name + weak_id = self.lookup_weakrefs(val) + if weak_id is not None: + self.id_matched_objs[local_name] = weak_id + + def NAME_MATCH(self, guard: Guard): + obj = self.get(guard.name) + self._guard_on_attribute(guard, "__name__", GuardBuilder.EQUALS_MATCH) + + def DATA_PTR_MATCH(self, guard: Guard): + obj = self.get(guard.name) + code = f"{self.arg_ref(guard)}.data_ptr() == {obj.data_ptr()}" + self._produce_guard_code(guard, [code]) + + def HASATTR(self, guard: Guard): + assert isinstance( + guard.originating_source, AttrSource + ), f"invalid source {guard.name}" + base_source = guard.originating_source.base + base = base_source.name() + attr = guard.originating_source.member + + ref = self.arg_ref(base) + val = hasattr(self.get(base), attr) + code = None + if val: + code = f"hasattr({ref}, {attr!r})" + else: + code = f"not hasattr({ref}, {attr!r})" + + self._produce_guard_code(guard, [code], provided_guarded_object=self.get(base)) + + def FUNCTORCH_STACK_MATCH(self, guard: Guard): + # Invalidate functorch code if current level is different than + # the one when FX graph was generated + # if torch._C._functorch.peek_interpreter_stack() is not None: + cis = torch._functorch.pyfunctorch.retrieve_all_functorch_interpreters() + states = [ci.get_state() for ci in cis] + code = [f"torch._functorch.pyfunctorch.compare_functorch_state({states})"] + self._produce_guard_code(guard, code) + + def EQUALS_MATCH(self, guard: Guard): + ref = self.arg_ref(guard) + val = self.get(guard.name) + t = type(val) + if np: + np_types: Tuple[Type[Any], ...] = ( + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float16, + np.float32, + np.float64, + ) + else: + np_types = () + ok_types = tuple( + common_constant_types + | { + type, + list, + tuple, + set, + frozenset, + slice, + range, + torch.Size, + *np_types, + } + ) + if istype(val, dict): + assert all( + istype(x, ok_types) for x in itertools.chain(val.keys(), val.values()) + ) + else: + assert istype( + val, + ok_types, + ), f"Unexpected type {type(val)}, not in {ok_types}" + + # Special case for nan because float("nan") == float("nan") evaluates to False + if istype(val, float) and math.isnan(val): + self.TYPE_MATCH(guard) + code = list() + code.append(f"__math_isnan({ref})") + self._produce_guard_code(guard, code) + return + # Python math library doesn't support complex nan, so we need to use numpy + elif istype(val, complex) and np.isnan(val): + self.TYPE_MATCH(guard) + code = list() + code.append(f"__numpy_isnan({ref})") + self._produce_guard_code(guard, code) + return + + code = list() + + # If matching equality against list/tuple, we must also check that + # the internal types match. (TODO: what about nested lists?) + if istype(val, (list, tuple)): + # NB: SEQUENCE_LENGTH takes care of the outer __check_type_id test + self.SEQUENCE_LENGTH(guard) + + for idx, elem in enumerate(val): + code.append( + f"___check_type_id({ref}[{idx}], {self.id_ref(type(elem))})" + ) + else: + # Add type check to prevent equality check between tensor and non-tensor. 
+ self.TYPE_MATCH(guard) + + if istype(val, torch.Size): + val = tuple(val) + + # Code object can not be compared against their string representation + # I.e `eval(f"{compile('2+2','','exec')!r}")` raises SyntaxError + assert not istype(val, types.CodeType) + + # TODO: It feels like it would be better to just implement our own + # equality test in C that handles all of the necessary type checking + # and NaN tests + code.append(f"{ref} == {val!r}") + self._produce_guard_code(guard, code) + + def CONSTANT_MATCH(self, guard: Guard): + val = self.get(guard.name) + if istype(val, (bool, type(None), types.CodeType)): + self.ID_MATCH(guard) + else: + self.EQUALS_MATCH(guard) + + def NN_MODULE(self, guard: Guard): + self.ID_MATCH(guard) + ref = self.arg_ref(guard) + val = self.get(guard.name) + + def setup_guard(): + assert istype(val.training, bool) + self._guard_on_attribute(guard, "training", GuardBuilder.CONSTANT_MATCH) + + if hasattr(val, "training"): + # There are cases where a monkeypatched object has a guard made between __new__ and __init__ + setup_guard() + else: + exc.unimplemented(f"Guard setup for uninitialized class {type(val)}") + + def FUNCTION_MATCH(self, guard: Guard): + """things like torch.add and user defined functions""" + if guard.is_local(): + return self.ID_MATCH(guard) + + def CLOSURE_MATCH(self, guard: Guard): + """matches a closure by __code__ id.""" + if guard.is_local(): + val = self.get(guard.name) + # Strictly only want user-defined functions + if type(val) == types.FunctionType and hasattr(val, "__code__"): + self._guard_on_attribute(guard, "__code__", GuardBuilder.HASATTR) + self._guard_on_attribute(guard, "__code__", GuardBuilder.FUNCTION_MATCH) + else: + self.FUNCTION_MATCH(guard) + + def BUILTIN_MATCH(self, guard: Guard): + return self.FUNCTION_MATCH(guard) + + def PYMODULE_MATCH(self, guard: Guard): + return self.FUNCTION_MATCH(guard) + + def SEQUENCE_LENGTH(self, guard): + # This guard is used to check lenght of PySequence objects like list, + # tuple, collections.deque etc + ref = self.arg_ref(guard) + value = self.get(guard.name) + t = type(value) + + self.TYPE_MATCH(guard) + code = list() + if len(value) == 0: + code.append(f"not {ref}") + else: + code.append(f"len({ref}) == {len(value)}") + + self._produce_guard_code(guard, code) + + def DICT_LENGTH(self, guard): + self.SEQUENCE_LENGTH(guard) + + def TUPLE_ITERATOR_LEN(self, guard): + ref = self.arg_ref(guard) + value = self.get(guard.name) + t = type(value) + + self.TYPE_MATCH(guard) + code = list() + code.append(f"___tuple_iterator_len({ref}) == {tuple_iterator_len(value)}") + + self._produce_guard_code(guard, code) + + # TODO(voz): Deduplicate w/ AOTAutograd dupe input guards + def DUPLICATE_INPUT(self, guard, source_b): + ref_a = self.arg_ref(guard) + ref_b = self.arg_ref(source_b.name()) + + code = [f"{ref_b} is {ref_a}"] + self._produce_guard_code(guard, code) + + def DICT_KEYS(self, guard): + # Guard on the keys and their order + ref = self.arg_ref(guard) + value = self.get(guard.name) + t = type(value) + + self.TYPE_MATCH(guard) + code = list() + any_key_is_id = any(key_is_id(k) for k in value.keys()) + const_keys_repr = dict_keys_repr( + key_to_id(value), + local=is_from_local_source(guard.originating_source), + ) + if any_key_is_id: + code.append(f"___key_to_id({ref}) == {const_keys_repr}") + else: + code.append(f"list({ref}.keys()) == {const_keys_repr}") + + self._produce_guard_code(guard, code) + + def WEAKREF_ALIVE(self, guard): + self._produce_guard_code(guard, [f"{self.arg_ref(guard)} is 
not None"]) + + def NN_MODULE_PARAM_NAMES(self, guard): + ref = self.arg_ref(guard) + value = self.get(guard.name) + t = type(value) + keys = {k for k, v in value.named_parameters()} + + self.TYPE_MATCH(guard) + code = list() + code.append(f"{{k for k, v in {ref}.named_parameters()}} == {keys!r}") + + self._produce_guard_code(guard, code) + + def DICT_CONST_KEYS(self, guard): + """Constant keys match""" + ref = self.arg_ref(guard) + value = self.get(guard.name) + t = type(value) + + self.TYPE_MATCH(guard) + code = list() + code.append(f"list({ref}.keys()) == {list(value.keys())!r}") + + self._produce_guard_code(guard, code) + + def OBJECT_MUTATION(self, guard: Guard): + mutation_guard.watch(self.get(guard.name), self.check_fn_manager) + + def GRAD_MODE(self, guard: Guard): + pass # we always guard on this via GlobalStateGuard() + + def DETERMINISTIC_ALGORITHMS(self, guard: Guard): + pass # we always guard on this via GlobalStateGuard() + + def TORCH_FUNCTION_STATE(self, guard: Guard): + pass # we always guard on this via GlobalStateGuard() + + def DEFAULT_DEVICE(self, guard: Guard): + """Guard on CURRENT_DEVICE per torch.utils._device""" + assert guard.source is GuardSource.GLOBAL + import torch.utils._device as m + + self._produce_guard_code( + guard, [f"utils_device.CURRENT_DEVICE == {m.CURRENT_DEVICE!r}"] + ) + + def BACKEND_MATCH(self, guard: Guard): + """Guard on backend matching based on id of current_backend""" + assert guard.source is GuardSource.GLOBAL + backend_id = ( + f"{id(torch._dynamo.eval_frame.guarded_backend_cache.current_backend)}" + ) + code = [f"___check_current_backend({backend_id})"] + self._produce_guard_code(guard, code) + + def SHAPE_ENV(self, guard: Guard): + # Let's handle ShapeEnv guards. To do this, we will resolve + # shape variables to sources from tracked_fakes. This must happen after + # tensor checks. + assert guard.name == "" + output_graph = self.check_fn_manager.output_graph + # NB: self.output_graph can be None in the debug_nops tests + fs = output_graph.tracked_fakes + input_contexts = [a.symbolic_context for a in fs] + + def get_sources(t_id, dim): + # Looks up base sources mapped to a tensor id and uses them to create + # sources for the corresponding tensor dimension. + return [ + TensorPropertySource(source, TensorProperty.SIZE, dim) + for source in output_graph.tracked_fakes_id_to_source[t_id] + ] + + if output_graph.export_constraints: + from sympy import Symbol + + source_pairs: List[Tuple[Source, Source]] = [] + derived_equalities: List[ # type: ignore[type-arg] + Tuple[Source, Union[Source, Symbol], Callable] + ] = [] + phantom_symbols: Dict[str, Symbol] = {} + for constraint in output_graph.export_constraints: + if constraint.t_id in output_graph.tracked_fakes_id_to_source: + torch.export.dynamic_shapes._process_equalities( + constraint, + get_sources, + output_graph.shape_env, + source_pairs, + derived_equalities, + phantom_symbols, + ) + else: + log.warning("Untracked tensor used in export constraints") + equalities_inputs = EqualityConstraint( + source_pairs=source_pairs, + derived_equalities=derived_equalities, + phantom_symbols=list(phantom_symbols.values()), + warn_only=False, + ) + else: + equalities_inputs = None + guards = output_graph.shape_env.produce_guards( + [a.fake for a in fs], + [a.source for a in fs], + input_contexts=input_contexts, + equalities_inputs=equalities_inputs, + source_ref=self.source_ref, + # Export keeps static. 
+ ignore_static=(not self.check_fn_manager.output_graph.export), + ) + # When exporting, we may work with the shape constraints some more in + # postprocessing, so don't freeze yet + if not self.check_fn_manager.output_graph.export: + output_graph.shape_env.freeze() + for shape_guard in guards: + self._produce_guard_code(guard, [shape_guard], shape_env=True) + + def TENSOR_MATCH(self, guard: Guard, value=None): + if guard.is_nn_module() or guard.originating_source.is_dict_key(): + self.ID_MATCH(guard) + else: + if isinstance(value, TensorWeakRef): + value = value() + + value = value if value is not None else self.get(guard.name) + assert isinstance(value, torch.Tensor) + + tensor_name = self.arg_ref(guard) + # [Note - On Export Tensor Guards] + # + # In eager mode, tensor guards are evaluated through C++, in guards.cpp + # see [Note - On Eager Tensor Guards] for more info. + # + # In export mode, we instead maintain parallel logic between C++ and python + # here, with an exception of checking the dispatch key - with the idea that a dispatch key + # is an entirely runtime notion that would make no sense to keep in an exported graph. + # + # Now, this idea is okay, but to paraphrase @ezyang, this mental model is sufficient for now, although + # not entirely true. + # For example, suppose one of the input tensors had the negative dispatch key. + # You should end up with a graph that is specialized for tensors that have a negative dispatch key. + # If you allow a Tensor that does NOT have this bit set, you will accidentally run it "as if" it were negated. + # Now, negative key only shows up for complex numbers, and most likely, the exported to target doesn't + # support this feature at all, but the point stands that :some: tensor state only shows up on dispatch key. + # TODO(voz): Either populate a dispatch_key check into the guards, or error on users passing in an unsupported + # subset of keys during export. + # + # The list of tensor fields and calls we care about can be found in `terms` below. + # TODO(voz): We are missing storage offset in all our tensor guards? + code: List[str] = list() + if self.check_fn_manager.output_graph.export: + self.TYPE_MATCH(guard) + terms = [ + "dtype", + "device", + "requires_grad", + "ndimension()", + ] + + for term in terms: + real_value = self.get(tensor_name + "." + term) + if istype(real_value, (torch.device, torch.dtype)): + # copy pasted from EQUALS_MATCH + code.append(f"str({tensor_name}.{term}) == {str(real_value)!r}") + else: + code.append(f"{tensor_name}.{term} == {real_value}") + else: + self.tensor_check_names.append(tensor_name) + self.tensor_check_examples.append(value) + self.tensor_check_guards.append(guard) + + # A frame is valid for reuse with dynamic dimensions if the new + # (user-requested) dynamic dimensions are a subset of the old + # (already compiled) dynamic dimensions. + # + # It's a little non-obvious why you'd want this: in particular, + # if an already compiled frame matches all of the guards, why + # not just use it, why force a recompile? + # + # We force it for two reasons: + # + # - The user *required* us to compile with a new dynamic dimension, + # we should not ignore that and serve up the old, specialized + # frame. Listen to the user! + # + # - In fact, we are obligated to *raise an error* if we fail to + # make the requested dimension dynamic. If we don't + # recompile, we can't tell if that dimension can actually be + # made dynamic. 
+ # + # If the new dynamic dims are a subset of the old, we already know + # we can make them dynamic (since we made them dynamic in old). + # This is slightly unsound, because maybe your input size is + # [s0, s0, s1] and so you can do it dynamic if you say dynamic + # dims {0, 1, 2} but you can't if you only do {0, 2} (because now + # the second s0 is specialized). But we're not entirely sure if + # this is a good idea anyway lol... (if you want to try removing + # this logic, be my guest! -- ezyang 2024) + # + assert guard.source is not None + static, reason = tensor_always_has_static_shape( + value, is_tensor=True, guard_source=guard.source + ) + if not static: + if hasattr(value, "_dynamo_dynamic_indices"): + code.append( + f"(({tensor_name}._dynamo_dynamic_indices.issubset({value._dynamo_dynamic_indices})) if hasattr({tensor_name}, '_dynamo_dynamic_indices') else True)" # noqa: B950 + ) + # In the case of us not having any dynamic dimension indices, we compiled the frame with no chance of + # raising for this specific tensor - and any inputs with more dynamic user directives specified must be recompiled. + else: + code.append( + f"hasattr({tensor_name}, '_dynamo_dynamic_indices') == False" + ) + if len(code) > 0: + self._produce_guard_code(guard, code) + + # A util that appends guarded code, or, in the case of export, adds data onto guards + def _produce_guard_code( + self, guard, code_list, provided_guarded_object=None, shape_env=False + ): + # WARNING: It is important that cur_frame/caller do NOT stay in + # the current frame, because they will keep things live longer + # than they should. See TestMisc.test_release_module_memory + cur_frame = currentframe() + assert cur_frame is not None + caller = cur_frame.f_back + del cur_frame + assert caller is not None + func_name = getframeinfo(caller)[2] + del caller + # We use func_name for export, so might as well get a nice defensive check out of it + assert func_name in dir( + self.__class__ + ), f"_produce_guard_code must be called from inside GuardedCode. Called from {func_name}" + + if shape_env: + self.shape_env_code.append(GuardCodeList(code_list, guard)) + else: + self.code.append(GuardCodeList(code_list, guard)) + + # Not all guards have names, some can be installed globally (see asserts on HAS_GRAD) + if provided_guarded_object is None: + name_valid = guard.name is not None and guard.name != "" + + guarded_object = self.get(guard.name) if name_valid else None + else: + guarded_object = provided_guarded_object + + guarded_object_type = ( + weakref.ref(type(guarded_object)) if guarded_object is not None else None + ) + obj_ref = None + # Not necessary to have weakref for Enum type, but there is a bug that + # makes hasattr(guarded_object.__class__, "__weakref__") return True. + if hasattr(guarded_object.__class__, "__weakref__") and not isinstance( + guarded_object, enum.Enum + ): + obj_ref = weakref.ref(guarded_object) + + guard.set_export_info( + func_name, + guarded_object_type, + code_list, + obj_ref, + ) + + +# Common Sub-Expression Elimination for Python expressions. +# +# There are 2 steps to this pass: +# 1. Count the frequency of each sub-expression (i.e. inner +# node in the AST tree) +# +# 2. Replace those that occur more than once by a fresh variable 'v'. +# 'v' will be defined in the 'preface' list (output argument to +# 'NodeTransformer') +# +# NB: the use of 'ast.unparse' while visiting the nodes makes this pass +# quadratic on the depth of the tree. 
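+# +# A rough worked example (illustrative only): counting the expressions "x.a.b == 1" and +# "x.a.b == 2" and then replacing them yields, for the first expression, the preface +# ["_var0 = x.a", "_var1 = _var0.b"], and both expressions are rewritten in terms of "_var1".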
+# +# NB: this pass creates a new variable for each AST node that is repeated +# more than 'USE_THRESHOLD'. e.g. if 'a.b.c.d' is used 10 times, 'a.b.c' +# and 'a.b' are also used 10 times. So, there will be a new variable for +# each of them. +class PyExprCSEPass: + # Maximum number of times a given expression can be used without being + # replaced by a fresh variable. + USE_THRESHOLD = 1 + + # Ad-Hoc: AST nodes this pass focuses on. + ALLOWED_NODE_TYPES = (ast.Attribute, ast.Call, ast.Subscript) + + @dataclasses.dataclass + class Config: + expr_count: Dict[str, int] + expr_to_name: Dict[str, str] + + class ExprCounter(ast.NodeVisitor): + def __init__(self, config: PyExprCSEPass.Config) -> None: + self._config = config + + def visit(self, node: ast.AST) -> Any: + if isinstance(node, PyExprCSEPass.ALLOWED_NODE_TYPES): + self._config.expr_count[_ast_unparse(node)] += 1 + super().visit(node) + + class Replacer(ast.NodeTransformer): + def __init__( + self, + config: PyExprCSEPass.Config, + gen_name: Callable[[], str], + ) -> None: + super().__init__() + self._config = config + self._gen_name = gen_name + self.preface: List[str] = [] + + def visit(self, node: ast.AST) -> Any: + if isinstance(node, PyExprCSEPass.ALLOWED_NODE_TYPES): + expr = _ast_unparse(node) + + # Replacement only occurs if a given expression is used more + # than once. + if self._config.expr_count[expr] > PyExprCSEPass.USE_THRESHOLD: + if expr not in self._config.expr_to_name: + # Parent 'visit' is called so that we CSE the inner expressions first. + # + # The resulting expression is used as right-hand-side of the variable + # assignment. i.e. we are CSE-ing the children before the parents. + # + # Indexing still uses the old 'node', since that's what was counted + # by the 'NodeVisitor'. + node_ = super().visit(node) + expr_ = _ast_unparse(node_) + var_name = self._gen_name() + self.preface.append(f"{var_name} = {expr_}") + self._config.expr_to_name[expr] = var_name + else: + var_name = self._config.expr_to_name[expr] + return ast.Name(var_name, ast.Load()) + + return super().visit(node) + + def __init__(self) -> None: + self._counter = 0 + self._config = self.Config( + expr_count=collections.defaultdict(lambda: 0), expr_to_name={} + ) + + def _new_var(self, prefix: str = "_var") -> str: + name = f"{prefix}{self._counter}" + self._counter += 1 + return name + + def count(self, exprs: List[str]) -> None: + counter = self.ExprCounter(self._config) + for e in exprs: + try: + counter.visit(ast.parse(e)) + except SyntaxError as ex: + log.exception("Failed to visit expr at line %s.\n%s", ex.lineno, e) + raise + + def replace(self, expr: str) -> Tuple[List[str], str]: + replacer = self.Replacer(self._config, self._new_var) + new_node = replacer.visit(ast.parse(expr)) + return replacer.preface, _ast_unparse(new_node) + + +def must_add_nn_module_guards(guard): + # For config.guard_nn_modules=False, we can skip all the guards that + # originate from inside of nn module except for a few categories. + return ( + # Guard for defaults + isinstance(guard.originating_source, DefaultsSource) + # Guard using dict tags if the config flag is set + or ( + config.guard_nn_modules_using_dict_tags + and guard.create_fn is GuardBuilder.NN_MODULE + ) + ) + + +class DeletedGuardFn: + pass + + +# NB: Naively, you'd expect this to only be a function that produces +# the callable that constitutes the guard. 
However, there is some +# delicate handling for invalidating this check function when the +# locals/globals get invalidated, so there's some extra state +# we have to hold in this manager class. +class CheckFunctionManager: + def __init__( + self, + output_graph=None, + guard_fail_fn: Optional[Callable[[GuardFail], None]] = None, + ): + guards = output_graph.guards if output_graph else None + self._weakrefs: Dict[int, ReferenceType[object]] = {} + self.output_graph = output_graph + w_builder = None + + def source_ref(source): + guard_source = source.guard_source() + if guard_source is GuardSource.CONSTANT: + # No need to track constants + return source.name() + assert w_builder + r_builder = w_builder() + assert r_builder is not None + return r_builder.arg_ref(source.name()) + + builder = GuardBuilder( + self.id_ref, + source_ref, + self.lookup_weakrefs, + output_graph.local_scope, + output_graph.global_scope, + self, + ) + + # Break retain cycle. See test_release_scope_memory + def cleanup_builder(weak_b): + b = weak_b() + if b: + b.scope = None + + # Break retain cycle. See test_release_input_memory + w_builder = weakref.ref(builder, cleanup_builder) + + for guard in sorted(guards or [], key=Guard.sort_key): + if ( + not config.guard_nn_modules + and guard.is_nn_module() + # Default func args must be guarded on. + # TODO: we could make use of 'DefaultsSource' and offer a .guard.is_defaults() API + and "__defaults__" not in guard.name + and "__kwdefaults__" not in guard.name + and (config.skip_nnmodule_hook_guards or "hooks" not in guard.name) + ): + continue + + guard.create(builder) + self.check_fn = self.compile_check_fn(builder, guards, guard_fail_fn) + # Keep track of weak references of objects with ID_MATCH guard. This + # info is stored alongside optimized_code and check_fn and is used to + # limit the number of cache entries with the same ID_MATCH'd object. + # TODO(janimesh) - Currently this information is stored as an attr on + # the check_fn itself to avoid changing the CacheEntry data structure in + # eval_frame.c. In the future, we should probably replace check_fn with a + # queryable data structure such that this information is already present + # in some form. + self.check_fn.id_matched_objs = builder.id_matched_objs + + # NB - We have to be very careful about cleaning up here. Because of the + # invalidate function, we can create a weakref finalizer that keeps + # `self` alive for a very long time. Sometimes by mistake, we can run + # invalidate for a type/object (check id_ref method) that Python can + # leak by design, preventing us from calling the finalizer. In that + # case, `self` will stay alive even though the cache entry will be + # deleted (check invalidate method), which can cause a memory leak, + # e.g., not setting output_graph = None can keep hold of nn_modules. + self._weakrefs.clear() + self.output_graph = None + + def compile_check_fn(self, builder, guards_out, guard_fail_fn): + # see parallel handling of ".0" / "___implicit0" in _eval_frame.c + largs = builder.argnames + largs += ["**___kwargs_ignored"] + + guards_log.debug("GUARDS:") + + # Don't report this guard, it's always the same, useless!
+ code_parts = ["___check_global_state()"] + verbose_code_parts = code_parts[:] + structured_guard_fns = [] + + def add_code_part(code_part, guard, log_only=False): + verbose_code_part = get_verbose_code_part(code_part, guard) + guards_log.debug("%s", verbose_code_part) + + structured_guard_fns.append( + lambda: { + "code": code_part, + "stack": structured.from_traceback(guard.stack.summary()) + if guard.stack + else None, + "user_stack": structured.from_traceback(guard.user_stack) + if guard.user_stack + else None, + } + ) + + if verbose_guards_log.isEnabledFor(logging.DEBUG): + maybe_stack = "" + maybe_user_stack = "" + if guard is not None: + if guard.stack: + maybe_stack = f"\nStack:\n{''.join(guard.stack.format())}" + if guard.user_stack: + maybe_user_stack = ( + f"\nUser stack:\n{''.join(guard.user_stack.format())}" + ) + verbose_guards_log.debug( + "Guard: %s%s%s", + code_part, + maybe_stack, + maybe_user_stack, + ) + + if not log_only: + code_parts.append(code_part) + verbose_code_parts.append(verbose_code_part) + + seen = set() + for gcl in builder.code: + for code in gcl.code_list: + if code not in seen: + add_code_part(code, gcl.guard) + seen.add(code) + + tensor_check_names = builder.tensor_check_names + check_tensors_fn = None + check_tensors_verbose_fn = None + if tensor_check_names: + assert ( + not self.output_graph.export + ), "Illegal to set tensor_check_names in export." + tensor_check_examples = builder.tensor_check_examples + + dynamic_dims_sizes = [ + convert_to_concrete_values( + self.output_graph.tensor_weakref_to_sizes_strides[t]["size"] + ) + for t in tensor_check_examples + ] + + dynamic_dims_strides = [ + convert_to_concrete_values( + self.output_graph.tensor_weakref_to_sizes_strides[t]["stride"] + ) + for t in tensor_check_examples + ] + + tensor_guards = TensorGuards( + *tensor_check_examples, + dynamic_dims_sizes=dynamic_dims_sizes, + dynamic_dims_strides=dynamic_dims_strides, + ) + check_tensors_fn = tensor_guards.check + check_tensors_verbose_fn = tensor_guards.check_verbose + tensor_check_args = ", ".join( + tensor_check_names + ["tensor_check_names=tensor_check_names"] + ) + # Do this manually, to un-stagger the guards in log message + code_parts.append(f"___check_tensors({tensor_check_args})") + verbose_code_parts.append(f"___check_tensors({tensor_check_args})") + tensor_check_guards = builder.tensor_check_guards + + for i, name in enumerate(tensor_check_names): + # This is a copy of what guards.cpp checks against + # Keep this in sync with TensorCheck constructor + t = tensor_check_examples[i] + sizes = dynamic_dims_sizes[i] + strides = dynamic_dims_strides[i] + code_part = get_tensor_guard_code_part(t, name, sizes, strides) + add_code_part(code_part, tensor_check_guards[i], log_only=True) + + aotautograd_guards: List[GuardEnvExpr] = ( + self.output_graph.tracing_context.guards_context.aotautograd_guards + if self.output_graph + else [] + ) + for guard in aotautograd_guards: + if isinstance(guard, DuplicateInputs): + source_a = guard.input_source_a + source_b = guard.input_source_b + add_code_part(f"{source_a.name()} is {source_b.name()}", None) + else: + raise RuntimeError(f"Unknown GuardEnvExpr: {guard}") + + # TODO: the "guard" here is actually just the top level SHAPE_ENV + # which is useless. Get ShapeEnv to pass in more provenance. 
+ for gcl in builder.shape_env_code: + for code in gcl.code_list: + add_code_part(code, gcl.guard) + + # OK, all done generating guards + torch._logging.trace_structured( + "dynamo_guards", payload_fn=lambda: [f() for f in structured_guard_fns] + ) + + global_state = convert_frame.initial_global_state + if global_state is None: + # we should only hit this case in NopTests() + global_state = convert_frame.GlobalStateGuard() + closure_vars = { + "___check_tensors": check_tensors_fn, + "___check_tensors_verbose": check_tensors_verbose_fn, + "___check_global_state": global_state.check, + "___check_current_backend": torch._dynamo.eval_frame.check_current_backend, + "tensor_check_names": tensor_check_names, + **SYMPY_INTERP, + **CLOSURE_VARS, + } + + unique_code_parts = list(unique(code_parts)) + make_guard_fn_args = ", ".join(closure_vars.keys()) + guard_body, pycode = build_guard_function(unique_code_parts, make_guard_fn_args) + + if os.environ.get("TORCHDYNAMO_PRINT_GUARDS", None) == "1": + print("GUARDS\n", guard_body) + + out: Dict[str, Any] = dict() + + # We don't put builder.scope as the globals in exec call because + # guard_fn.__globals__ becomes equal to builder.scope. This causes + # guard_fn to hold a referece to f_locals sitting in builder.scope["L"] + globals_for_guard_fn = {"G": builder.scope["G"]} + try: + exec(pycode, globals_for_guard_fn, out) + except SyntaxError as ex: + log.exception("Failed to exec guard at line %s.\n%s", ex.lineno, pycode) + raise + guard_fn = out["___make_guard_fn"](*closure_vars.values()) + guard_fn.closure_vars = closure_vars + # TODO(whc) maybe '.code_parts' was only kept around for the guard callback? so we don't need both + guard_fn.args = largs + guard_fn.code_parts = code_parts + guard_fn.verbose_code_parts = verbose_code_parts + # Grab only G, but preserve "G" because guards access it as "G" + guard_fn.global_scope = globals_for_guard_fn + guard_fn.guard_fail_fn = guard_fail_fn + # will be populated by a non-owning reference to CacheEntry/ExtraState + # when the CacheEntry is constructed + guard_fn.cache_entry = None + guard_fn.extra_state = None + return guard_fn + + def invalidate(self): + # Some tests reveal that CheckFunctionManager has no attribute + # check_fn, but this case should not be of any concern. + # This case doesn't seem easy to repro. + if ( + hasattr(self, "check_fn") + and self.check_fn is not DeletedGuardFn + and (cache_entry := self.check_fn.cache_entry) is not None + and (extra_state := self.check_fn.extra_state) is not None + ): + assert isinstance(cache_entry, CacheEntry) + assert isinstance(extra_state, ExtraState) + extra_state.invalidate(cache_entry) + self.check_fn.cache_entry = None + self.check_fn.extra_state = None + self.check_fn = DeletedGuardFn + + def id_ref(self, obj): + """add a weakref, return the id""" + try: + if id(obj) not in self._weakrefs: + # We will clear the _weakrefs dict at the end of __init__ + # function, which will delete the callbacks as well. Therefore, + # we are using a finalizer which is kept alive. 
+ self._weakrefs[id(obj)] = weakref.ref(obj) + weakref.finalize(obj, self.invalidate) + except TypeError: + pass # cannot weakref bool object + return id(obj) + + def lookup_weakrefs(self, obj): + """Lookup the _weakrefs created in id_ref function for ID_MATCH'd objects""" + if id(obj) in self._weakrefs: + return self._weakrefs[id(obj)] + return None + + +def build_guard_function(code_parts, closure_args) -> Tuple[str, str]: + from torch._inductor.utils import IndentedBuffer + + if HAS_UNPARSE_FUNCTIONS: + csepass = PyExprCSEPass() + csepass.count(code_parts) + + def replace(expr: str) -> Tuple[List[str], str]: + return csepass.replace(expr) + + else: + + def replace(expr: str) -> Tuple[List[str], str]: + return [], expr + + # Generate the inner body of the guard function. + # i.e. if-chain of the guard expressions. + guard_body = IndentedBuffer() + for expr in code_parts: + preface, expr = replace(expr) + guard_body.writelines(preface) + guard_body.writeline(f"if not ({expr}):") + with guard_body.indent(): + guard_body.writeline("return False") + + # Wrap the inner body into the actual guard function. + guard = IndentedBuffer() + guard.writeline("def guard(L):") + with guard.indent(): + guard.splice(guard_body) + guard.writeline("return True") + + # Wrap the whole guard function into another function + # with the closure variables. + make_guard_fn = IndentedBuffer() + make_guard_fn.writeline(f"def ___make_guard_fn({closure_args}):") + with make_guard_fn.indent(): + make_guard_fn.splice(guard) + make_guard_fn.writeline("return guard") + + return guard_body.getvalue(), make_guard_fn.getvalue() + + +def is_recompiles_enabled(): + return torch._logging._internal.log_state.is_artifact_enabled("recompiles") + + +def is_recompiles_verbose_enabled(): + return torch._logging._internal.log_state.is_artifact_enabled("recompiles_verbose") + + +def get_guard_fail_reason( + guard_fn: GuardFn, + code: types.CodeType, + f_locals: Dict[str, object], +) -> str: + """ + Return the reason why `guard_fn` failed. + Updates `guard_failures` with the generated reason. + Only the first failed check of guard_fn is reported. + """ + scope = {"L": f_locals, "G": guard_fn.global_scope["G"]} + scope.update(guard_fn.closure_vars) + scope["___check_tensors"] = scope["___check_tensors_verbose"] + reasons: List[str] = [] + for part in guard_fn.verbose_code_parts: + global_scope = dict(guard_fn.global_scope) + global_scope["__compile_source__"] = part + with report_compile_source_on_error(): + try: + fail_reason = eval(part, global_scope, scope) + except Exception as e: + if is_recompiles_verbose_enabled(): + continue + else: + raise + # Only ___check_tensors knows how to return a fancy fail reason; + # for everything else we just report the code that failed + + if isinstance(fail_reason, bool) and not fail_reason: + fail_reason = part + if isinstance(fail_reason, str): + reasons.append(fail_reason) + if not is_recompiles_verbose_enabled(): + break + + reason_str = "\n".join(reasons) + guard_failures[orig_code_map[code]].append(reason_str) + + try: + if guard_fn.guard_fail_fn is not None: + guard_fn.guard_fail_fn( + GuardFail(reason_str or "unknown reason", orig_code_map[code]) + ) + except Exception as e: + log.exception( + "Failure in guard_fail_fn callback - raising here will cause a NULL Error on guard eval", + ) + + return reason_str + + +def get_and_maybe_log_recompilation_reason( + cache_entry, frame: types.FrameType +) -> List[str]: + """ + Return the list of guard failure reasons using cache_entry. 
+ Logs the recompilation reason if `recompiles` logging is enabled. + Raises a RecompileError if `config.error_on_recompile` is enabled. + """ + reasons = [] + while cache_entry is not None: + reason = get_guard_fail_reason( + cache_entry.check_fn, cache_entry.code, frame.f_locals + ) + if reason: + reasons.append(reason) + cache_entry = cache_entry.next + + code = frame.f_code + + # at least one of "recompiles" or "recompiles_verbose" is enabled + do_recompiles_log = is_recompiles_enabled() or is_recompiles_verbose_enabled() + + if do_recompiles_log or config.error_on_recompile: + if is_recompiles_verbose_enabled(): + failures = "\n\n".join( + f"guard {i} failures:\n" + textwrap.indent(reason, "- ") + for i, reason in enumerate(reasons) + ) + else: + failures = textwrap.indent("\n".join(reasons), "- ") + guard_failure_details = ( + f"triggered by the following guard failure(s):\n{failures}" + ) + message = ( + f"Recompiling function {code.co_name} in {code.co_filename}:{code.co_firstlineno}\n" + f"{textwrap.indent(guard_failure_details, ' ')}" + ) + if do_recompiles_log: + if is_recompiles_verbose_enabled(): + recompiles_verbose_log.debug(message) + else: + recompiles_log.debug(message) + if config.error_on_recompile: + raise exc.RecompileError(message) + + return reasons + + +def guard_error_hook( + guard_fn: GuardFn, + code: types.CodeType, + f_locals: Dict[str, object], + index: int, + last: bool, +): + print( + f"ERROR RUNNING GUARDS {code.co_name} {code.co_filename}:{code.co_firstlineno}" + ) + print("lambda " + ", ".join(guard_fn.args) + ":") + print(" ", " and\n ".join(guard_fn.code_parts)) + local_scope = {"L": f_locals, **guard_fn.closure_vars} + for guard in guard_fn.code_parts: + try: + eval(guard, guard_fn.global_scope, local_scope) + except: # noqa: B001,E722 + print(f"Malformed guard:\n{guard}") + + +set_guard_error_hook(guard_error_hook) + + +def unique(seq): + seen = set() + for x in seq: + if x not in seen: + yield x + seen.add(x) + + +def make_dupe_guard(obj_source, dupe_source): + # Note - we may end up in a situation where we invoke something like + # def fn(x, y) + # with fn(x, x) + # Prior to the addition of tracking to all relevant objects, we would handle this just fine by + # eagerly re-entering VB and rewrapping inputs, correctly creating graphargs and placeholders. However, + # with tracking on inputs, duplicate inputs or aliased relationships may end up getting erased here - + # In the fn(x, x) example call above look like a graph with a single input. + # In order to ensure that we do not reuse fn(x, x) for fn(x, y), we create a duplicate input guard. + + # Note - we may not have a source, that is fine, it just means we had an object that is safe to have + # leave unsourced - like a local list created and discharged entirely within a local scope. + if dupe_source and dupe_source != obj_source: + ser_source_is_local = is_from_local_source(dupe_source) + source_is_local = is_from_local_source(obj_source) + # Note - both must be local, or global, or we will run afoul of a lack of merging in how we currently + # reconcile guards builder scopes in compile_check_fn. This technically means we miss a guard here, + # so maybe we should do this refactor before we land this... + # TODO(voz): Combine local and global guard builders. + if ser_source_is_local == source_is_local: + # Note - this is a little aggressive - these being duplicate input does not always matter. + # However, this should always be a sound guard to add here. 
+ return functools.partial(GuardBuilder.DUPLICATE_INPUT, source_b=dupe_source) + return None + + +def install_guard(*guards, skip=0): + """ + Add dynamo guards to the current tracing context. + + Args: + guards: guard(s) to add + skip: number of stack frames to ignore for debug stack trace + """ + from torch._guards import TracingContext + + collect_debug_stack = guards_log.isEnabledFor( + logging.DEBUG + ) or verbose_guards_log.isEnabledFor(logging.DEBUG) + add = TracingContext.get().guards_context.dynamo_guards.add + for guard in guards: + assert isinstance(guard, Guard) + add(guard, collect_debug_stack=collect_debug_stack, skip=skip + 1) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/logging.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..1e9a820785be20a33a7eabc136eabb38c2894948 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/logging.py @@ -0,0 +1,57 @@ +import itertools +import logging + +from torch.hub import _Faketqdm, tqdm + +# Disable progress bar by default, not in dynamo config because otherwise get a circular import +disable_progress = True + + +# Return all loggers that torchdynamo/torchinductor is responsible for +def get_loggers(): + return [ + logging.getLogger("torch.fx.experimental.symbolic_shapes"), + logging.getLogger("torch._dynamo"), + logging.getLogger("torch._inductor"), + ] + + +# Creates a logging function that logs a message with a step # prepended. +# get_step_logger should be lazily called (i.e. at runtime, not at module-load time) +# so that step numbers are initialized properly. e.g.: + +# @functools.lru_cache(None) +# def _step_logger(): +# return get_step_logger(logging.getLogger(...)) + +# def fn(): +# _step_logger()(logging.INFO, "msg") + +_step_counter = itertools.count(1) + +# Update num_steps if more phases are added: Dynamo, AOT, Backend +# This is very inductor centric +# _inductor.utils.has_triton() gives a circular import error here + +if not disable_progress: + try: + import triton # noqa: F401 + + num_steps = 3 + except ImportError: + num_steps = 2 + pbar = tqdm(total=num_steps, desc="torch.compile()", delay=0) + + +def get_step_logger(logger): + if not disable_progress: + pbar.update(1) + if not isinstance(pbar, _Faketqdm): + pbar.set_postfix_str(f"{logger.name}") + + step = next(_step_counter) + + def log(level, msg, **kwargs): + logger.log(level, "Step %s: %s", step, msg, **kwargs) + + return log diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/output_graph.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/output_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..1c5beddbd0c910b13b9422186571eda31e03a2ea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/output_graph.py @@ -0,0 +1,2063 @@ +import collections +import contextlib +import copy +import functools +import itertools +import logging +import operator +import re +import sys +import traceback +import weakref +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Tuple, Union + +import sympy + +import torch._guards + +import torch._logging + +import torch.nn +import torch.utils._pytree as pytree +from torch import fx +from torch._guards import ( + Checkpointable, + GlobalContextCheckpointState, + GuardsCheckpointState, + Source, + TracingContext, +) +from torch._utils_internal import signpost_event +from 
torch.fx._lazy_graph_module import _make_graph_module # type: ignore[attr-defined] +from torch.fx.experimental._backward_state import BackwardState +from torch.fx.experimental.sym_node import SymNode +from torch.fx.experimental.symbolic_shapes import free_symbols, is_symbolic, ShapeEnv +from torch.utils._python_dispatch import is_traceable_wrapper_subclass +from torch.utils._sympy.interp import sympy_interp +from torch.utils._sympy.reference import PythonReferenceAnalysis +from torch.utils.weak import WeakTensorKeyDictionary + +from . import config, logging as torchdynamo_logging, variables +from .backends.registry import CompiledFn, CompilerFn +from .bytecode_transformation import ( + create_call_function, + create_instruction, + Instruction, + unique_id, +) +from .code_context import code_context +from .codegen import PyCodegen +from .current_scope_id import enter_new_scope +from .exc import ( + BackendCompilerFailed, + exceptions_allowed_to_be_fallback, + SkipFrame, + unimplemented, + unimplemented_with_warning, +) +from .guards import GuardBuilder, install_guard +from .mutation_guard import is_dynamic_nn_module +from .side_effects import SideEffects +from .source import ( + AttrSource, + BackwardStateSource, + ConstantSource, + GlobalStateSource, + is_constant_source, + is_from_local_source, + LocalSource, + ParamBufferSource, + ShapeEnvSource, + TensorProperty, + TensorPropertySource, +) +from .utils import ( + checkpoint_params, + CleanupHook, + clone_inputs, + count_calls, + counters, + dynamo_timed, + get_instruction_source_311, + get_static_address_type, + graph_break_reasons, + increment_op_count, + lazy_format_graph_code, + lazy_format_graph_tabular, + LazyString, + same, +) +from .variables.base import VariableTracker +from .variables.builder import ( + BackwardStateGraphArg, + GraphArg, + TrackedFake, + VariableBuilder, + wrap_fx_proxy, +) +from .variables.nn_module import NNModuleVariable +from .variables.tensor import ( + NumpyNdarrayVariable, + SymNodeVariable, + TensorVariable, + UnspecializedPythonVariable, +) + +from .variables.torch_function import TensorWithTFOverrideVariable + +log = logging.getLogger(__name__) +graph_tabular_log = torch._logging.getArtifactLogger(__name__, "graph") +graph_code_log = torch._logging.getArtifactLogger(__name__, "graph_code") +graph_sizes_log = torch._logging.getArtifactLogger(__name__, "graph_sizes") +trace_call_log = torch._logging.getArtifactLogger(__name__, "trace_call") + + +class OutputGraphState(NamedTuple): + input_source_to_var: Dict[Source, VariableTracker] + tracked_fakes: List[TrackedFake] + guard_state: GuardsCheckpointState + nn_modules: Optional[Dict[str, torch.nn.Module]] + register_finalizer_fns: List[Callable[[fx.GraphModule], None]] + global_state: Optional[Dict[str, bool]] + param_name_to_source: Optional[Dict[str, Source]] + side_effects: SideEffects + timestamp: int + non_compliant_ops: Set[torch._ops.OpOverload] + compliant_custom_ops: Set[torch._ops.OpOverload] + + def diff(self, other: "OutputGraphState", *, prefix: str = "") -> Optional[str]: + for k in self._fields: + if k == "guard_state": + r = self.guard_state.diff(other.guard_state) + if r is not None: + return r + continue + elif k == "side_effects": + r = self.side_effects.diff(other.side_effects) + if r is not None: + return r + continue + + sv = getattr(self, k) + ov = getattr(other, k) + if sv != ov: + return f"{prefix}{k} mismatch: {sv} != {ov}" + return None + + # Back compat .guards api + @property + def guards(self): + return 
self.guard_state.dynamo_guards + + +@functools.lru_cache(None) +def _step_logger(): + return torchdynamo_logging.get_step_logger(log) + + +@dataclass +class GraphCompileReason: + """Stores why a given output graph was compiled; i.e. what caused the graph break.""" + + reason: str + user_stack: List[traceback.FrameSummary] + + # Indicates if this was a graph compile reason due to graph break. + graph_break: bool = True + + def __post_init__(self): + if self.graph_break: + graph_break_reasons.append(self) + + +def _get_gen_rand_values_fn(random_calls): + def _gen_rand_values(): + return [fn(*args, **kwargs) for fn, args, kwargs in random_calls] + + return _gen_rand_values + + +class FakeRootModule(torch.nn.Module): + """Trick the constructor of fx.GraphModule""" + + def __init__(self, nn_modules: Dict[str, torch.nn.Module]): + super().__init__() + for k, v in nn_modules.items(): + setattr(self, k, v) + + def __repr__(self): + return "FakeRootModule(...)" + + +class WrapperBackend: + def __init__(self, backend: CompilerFn): + self.backend: CompilerFn = backend + + def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + self.restore = checkpoint_params(gm) + self.gm = gm + copy_gm = copy.deepcopy(self.gm) + self.candidate = self.backend(copy_gm, example_inputs) + + if self.candidate is None or self.candidate is self.gm.forward: + return self.gm.forward + + if not config.verify_correctness: + return self.candidate + + # if verify_correctness=True + try: + correct = self.gm.forward(*clone_inputs(example_inputs)) + result = self.candidate(*clone_inputs(example_inputs)) + + # TODO: replace `same` function with the one in testing + if same(correct, result): + return self.candidate + + raise RuntimeError(f"incorrect results of backend {self}") + return self.gm.forward + + except Exception: + log.exception("error in verify_correctness") + raise + finally: + self.restore() + + +Scope = Dict[str, object] + + +class OutputGraph(Checkpointable[OutputGraphState]): + """ + Wrapper class to hold outputs of InstructionTranslator. Mainly the + generated fx.Graph. + + OutputGraph is 1:1 with a frame being processed. Each frame is associated + with some root InstructionTranslator. When user code calls a function, + we construct a InliningInstructionTranslator that continues to write into + the root InstructionTranslator's OutputGraph. + """ + + def __init__( + self, + code_options: Dict[str, Any], + compiler_fn: Optional[CompilerFn], + root_tx, + export: bool, + export_constraints, + frame_state, + local_scope: Scope, + global_scope: Scope, + f_code, + ): + super().__init__() + self.tracers = [SubgraphTracer(self, export_root=export)] + # Map from graph input's `Source` to its `VariableTracker` to + # de-duplicate graph inputs by source and reuse the tracker + self.input_source_to_var: Dict[Source, VariableTracker] = {} + self.export = export + self.export_constraints = export_constraints + self.frame_state = frame_state + self.tensor_weakref_to_sizes_strides = WeakTensorKeyDictionary() + self.cleanup_hooks: List[Callable[[], Any]] = [] + # compile_id is an id number for the current torch.compile + self.compile_id: int = next(_compile_id_counter) + # Set of globals installed via install_global* APIs + self.installed_globals: Set[str] = set() + + # TODO: maybe should just pass the entire f_code in here? Not + # sure... 
+ self.co_fields = { + "co_name": f_code.co_name, + "co_filename": f_code.co_filename, + "co_firstlineno": f_code.co_firstlineno, + } + + # tracked_fakes says where any tensor that was wrapped to fake came + # from. It is similar to GraphArg, in that all GraphArgs will get + # added to TrackedFakes, but TrackedFakes also contains + # GraphArgs that got pruned, and things like Tensor attributes which + # aren't explicit graph inputs. Used by shape guards. + self.tracked_fakes: List[TrackedFake] = [] + + # List of symbols for which we have exact bindings in the arguments + # already + self.bound_symbols: Set[sympy.Symbol] = set() + + shape_env = ShapeEnv( + # Reference Cycle! + # Share a reference to the list of TrackedFake. + # + # ShapeEnv needs this in order to be able to reproduce the call + # to produce_guards at an arbitrary time point. That is because + # TrackedFake instances may have their metadata changed throughout + # the program execution. + tracked_fakes=self.tracked_fakes, + allow_scalar_outputs=config.capture_scalar_outputs, + allow_dynamic_output_shape_ops=config.capture_dynamic_output_shape_ops, + co_fields=self.co_fields, + ) + + # In export mode, we force the shape_env to strictly disallow any constraining + # of the user-marked dynamic dims + fake_mode = torch._subclasses.FakeTensorMode( + shape_env=shape_env, + # TODO (tmanlaibaatar) Remove this once we always lift params and buffers + allow_non_fake_inputs=True if self.export else False, + ) + self.tracing_context: TracingContext = TracingContext(fake_mode) + self.init_ambient_guards() + + # Map each tensor id to a list of sources. This is necessary because + # tensor ids cannot be recovered from tracked fakes (in general). + # We use this map to interpret (i.e., check for violations of) constraints, + # specifically equality constraints, which have shared tensor ids in them. + # This map should also be generally useful, e.g., for (de)serialization. + self.tracked_fakes_id_to_source: Dict[ + int, List[Source] + ] = collections.defaultdict(list) + # Stores the full fqn of a param or buffer to the relevant source. + self.param_name_to_source: Optional[Dict[str, Source]] = dict() + self.side_effects = SideEffects() + self.code_options = dict(code_options) + self.output_instructions: List[Instruction] = [] + # used to track nodes that are added between calls of copy_graphstate + # and restore_graphstate + self.timestamp = 0 + + # A list of register_finalizer_fns to apply to the output graph module + self.register_finalizer_fns: List[Callable[[fx.GraphModule], None]] = [] + + # Not checkpointed + self.compiler_fn: Optional[CompilerFn] = compiler_fn + self.global_scope = global_scope + self.local_scope = local_scope + self.root_tx = root_tx + from torch._dynamo.symbolic_convert import InstructionTranslatorBase + + # Given a source, what are the user stacks of all locations that + # accessed it? + # + # For efficiency, we only populate this: + # - During export, and + # - If the source could potentially lead to a spurious export input + # + # Feel free to populate this more frequently if other use-cases arise, + # but be aware that we have to generate full stacks for each + # recording!
+ self.source_to_user_stacks: Dict[Source, List[traceback.StackSummary]] = {} + + self._current_tx: List[InstructionTranslatorBase] = [] + self.cleanups: List[CleanupHook] = [] + self.should_exit = False + self.unspec_variable_map: Dict[str, UnspecializedPythonVariable] = {} + self.torch_function_enabled = torch._C._is_torch_function_enabled() + # Tracks if the output graph has a user-defined allowed function in the + # graph. This is used later to determine if we should fall back to eager + # for certain exceptions. The idea is that if the user has applied + # allow_in_graph, they would like to see the error instead of falling + # back for backend errors. + self.has_user_defined_allowed_in_graph = False + + # Tracks a list of called ops that were not tagged with "pt2_compliant_tag". + # This information is useful for logging. + self.non_compliant_ops: Set[torch._ops.OpOverload] = set({}) + + # Tracks a list of called custom ops that were tagged with "pt2_compliant_tag". + # This information is useful for logging. + self.compliant_custom_ops: Set[torch._ops.OpOverload] = set({}) + + # We save the global torch state here to be restored in case of graph + # breaks. The relevant issue is seen here + # https://github.com/pytorch/pytorch/pull/100570#issuecomment-1543427086 + # where inlining of a function changes the global state (because of the + # presence of torch.no_grad) and there is a graph break. + self.save_global_state() + + # Tracks the original FQNs of the constant tensors from the original graph, + # i.e. buffers and parameters. + self.dynamo_flat_name_to_original_fqn: Dict[str, str] = {} + + # All calls to random() are replaced with a single call to __gen_rand_values + # function that returns a tuple of random values for each original call. + # random_calls tracks calls to random() and random_values_var stores the name of + # the variable that stores __gen_rand_values results.
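+ # Each random_calls entry is a (fn, args, kwargs) triple; _get_gen_rand_values_fn + # above replays them as fn(*args, **kwargs) to regenerate the recorded values.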
+ self.random_calls: List[ + Tuple[Callable[..., object], Tuple[object, ...], Dict[str, object]] + ] = [] + self.random_values_var = None + + # Bytecode to insert right before we call the graph + self.pregraph_bytecode: List[Instruction] = [] + + # Use to pass values to backward hooks when using compiled autograd + self.backward_state: Dict[str, VariableTracker] = {} + self.backward_state_proxy: Optional[torch.fx.Proxy] = None + self.backward_state_var: Optional[str] = None + + def add_backward_state_hook(self, hook: VariableTracker): + name = f"hook{len(self.backward_state)}" + assert name not in self.backward_state + self.backward_state[name] = hook + return name, self.get_backward_state_proxy() + + def get_backward_state_proxy(self): + if self.backward_state_proxy is None: + if self.export: + unimplemented("backward_state does not support export") + self.backward_state_proxy = self.root_tracer.create_graph_input( + "dynamo_backward_state", BackwardState, source=BackwardStateSource() + ) + self.backward_state_proxy.node.meta["grapharg"] = BackwardStateGraphArg() + self.backward_state_proxy.node.meta["example_value"] = BackwardState() + self.backward_state_var = self.new_var() + return self.backward_state_proxy + + # This gets its own helper function so guards DEBUG logs are more informative + def init_ambient_guards(self): + # Register a SHAPE_ENV guard to make sure we setup shape guards + # that show up in ShapeEnv + self.guards.add(ShapeEnvSource().make_guard(GuardBuilder.SHAPE_ENV)) + + self.guards.add( + GlobalStateSource().make_guard(GuardBuilder.DETERMINISTIC_ALGORITHMS) + ) + + self.guards.add(GlobalStateSource().make_guard(GuardBuilder.GRAD_MODE)) + + self.guards.add(GlobalStateSource().make_guard(GuardBuilder.DEFAULT_DEVICE)) + + self.guards.add( + GlobalStateSource().make_guard(GuardBuilder.TORCH_FUNCTION_STATE) + ) + + self.guards.add(GlobalStateSource().make_guard(GuardBuilder.BACKEND_MATCH)) + + def add_cleanup_hook(self, fn: Callable[[], Any]): + self.cleanup_hooks.append(fn) + + def call_cleanup_hooks(self): + for hook in reversed(self.cleanup_hooks): + hook() + self.cleanup_hooks.clear() + + @property + def root_tracer(self): + return self.tracers[0] + + @property + def current_tracer(self): + return self.tracers[-1] + + def is_root_tracer(self): + # Helper to tell if we are inside the higher order operator tracing. + return len(self.tracers) == 1 + + @property + def graph(self): + return self.current_tracer.graph + + # TODO(rzou): can delete after we refactor speculate_subgraph to use nested GraphTracer. + @graph.setter + def graph(self, value): + self.current_tracer.graph = value + + @property + def input_name_to_proxy(self): + return self.current_tracer.input_name_to_proxy + + @property + def real_value_cache(self): + return self.current_tracer.real_value_cache + + # If you are here, and you're looking for create_graph_input, + # to avoid ambiguity, please call one of the following: + # - self.current_tracer.create_graph_input + # - self.root_tracer.create_graph_input + # See NOTE [HigherOrderOperator tracing design] for more context. 
+ + def create_proxy(self, *args, **kwargs): + return self.current_tracer.create_proxy(*args, **kwargs) + + def create_node(self, *args, **kwargs): + return self.current_tracer.create_node(*args, **kwargs) + + def remove_node(self, *args, **kwargs): + return self.current_tracer.remove_node(*args, **kwargs) + + @contextlib.contextmanager + def subtracer(self, source_target, prior_tracer): + new_scope_ctx = enter_new_scope() + try: + if prior_tracer: + # Lineage MUST stay preserved + assert prior_tracer.parent is self.current_tracer + new_scope_ctx.__enter__() + tracer = ( + prior_tracer + if prior_tracer + else SubgraphTracer( + self, parent=self.current_tracer, source_target=source_target + ) + ) + self.tracers.append(tracer) + yield tracer + finally: + new_scope_ctx.__exit__(None, None, None) + self.tracers.pop() + + @property + def output(self): + return self + + @property + def fake_mode(self): + return self.tracing_context.fake_mode + + @property + def shape_env(self): + return self.tracing_context.fake_mode.shape_env + + @property + def guards(self) -> torch._guards.GuardsSet: + return self.tracing_context.guards_context.dynamo_guards + + @property + def nn_modules(self) -> Dict[str, Any]: + return self.tracing_context.module_context.nn_modules + + def save_global_state(self, out=None): + """ + Saves to out if it is provided. Else saves to the tracing context's global_state. + """ + global_state = ( + out if out is not None else self.tracing_context.global_context.global_state + ) + + # TODO - Consider having a torch level API for torch_function_state. As + # of now, we create a ref cycle by passing the + # output.set_torch_function_state to + # output.tracing_context.global_context.global_state. In the interim, + # the problem can be solved by manually set + # output.tracing_context.global_context.global_state to None at cleanup. 
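+ # Each entry below pairs a setter with the currently observed value, e.g. + # (torch.set_grad_enabled, torch.is_grad_enabled()), so that piece of state can + # later be restored by calling the setter with the saved value.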
+ global_state["torch_function_enabled"] = ( + self.set_torch_function_state, + self.torch_function_enabled, + ) + global_state["grad_enabled"] = (torch.set_grad_enabled, torch.is_grad_enabled()) + global_state["autocast_enabled"] = ( + torch.set_autocast_enabled, + torch.is_autocast_enabled(), + ) + global_state["autocast_cpu_enabled"] = ( + torch.set_autocast_cpu_enabled, + torch.is_autocast_cpu_enabled(), + ) + global_state["autocast_gpu_dtype"] = ( + torch.set_autocast_gpu_dtype, + torch.get_autocast_gpu_dtype(), + ) + global_state["autocast_cpu_dtype"] = ( + torch.set_autocast_cpu_dtype, + torch.get_autocast_cpu_dtype(), + ) + global_state["autocast_cache_enabled"] = ( + torch.set_autocast_cache_enabled, + torch.is_autocast_cache_enabled(), + ) + + def push_tx(self, tx): + self._current_tx.append(tx) + + def pop_tx(self): + return self._current_tx.pop() + + @property + def current_tx(self): + return self.root_tx if not self._current_tx else self._current_tx[-1] + + def copy_graphstate(self) -> OutputGraphState: + """Create a checkpoint of the current state by copying everything""" + assert self.param_name_to_source is not None + guards_graph_state = self.tracing_context.guards_context.copy_graphstate() + module_state = self.tracing_context.module_context.copy_graphstate() + global_state = self.tracing_context.global_context.copy_graphstate() + state = OutputGraphState( + dict(self.input_source_to_var), + list(self.tracked_fakes), + guards_graph_state, + module_state, + list(self.register_finalizer_fns), + global_state, + dict(self.param_name_to_source), + self.side_effects.clone(), + self.timestamp, + set(self.non_compliant_ops), + set(self.compliant_custom_ops), + ) + self.timestamp += 1 + return state + + def restore_graphstate(self, state: OutputGraphState): + """Restore a checkpoint created by self.copy_graphstate()""" + ( + self.input_source_to_var, + self.tracked_fakes, + guards_state, + module_state, + self.register_finalizer_fns, + global_state, + self.param_name_to_source, + self.side_effects, + self.timestamp, + self.non_compliant_ops, + self.compliant_custom_ops, + ) = state + self.tracing_context.guards_context.restore_graphstate(guards_state) + self.tracing_context.module_context.restore_graphstate(module_state) + self.tracing_context.global_context.restore_graphstate(global_state) + + # FX deepcopy doesn't work for a partially created graph, so just remove new nodes + removed_nodes = 0 + for node in reversed(list(self.graph.nodes)): + if ( + node.meta["creation_timestamp"] > self.timestamp + # placeholders here may have been lazily added by existing objects + and node.op != "placeholder" + ): + # Erasing node alone does not remove the meta information + # So, remove the help tensor explicitly + if "example_value" in node.meta: + del node.meta["example_value"] + self.remove_node(node) + self.real_value_cache.pop(node, None) + removed_nodes += 1 + log.debug("restore_graphstate: removed %s nodes", removed_nodes) + + def add_symbol_bindings(self, arg: GraphArg): + # Insert implicit size vars as necessary. With dynamic shapes, we + # maintain the invariant that every sizevar gets a direct SymInt input + # into the graph. This means downstream graph transforms can assume + # every size variable is explicitly bound and accessible, instead of + # having to pull it out implicitly from tensors. 
+ + if self.export: + return + + assert arg.fake_tensor is not None + + def bind_symint(s, prop): + if not (is_symbolic(s) and isinstance(s.node.expr, sympy.Symbol)): + return + s0 = s.node.expr + if s0 in self.bound_symbols: + return + self.bound_symbols.add(s0) + log.debug("bind_symint %s %s", s, prop.name()) + # TODO: don't readd symint if we already have it in graph + # (this is harmless because we do remove the unused ones later) + proxy = self.root_tracer.create_graph_input( + str(s0), + torch.SymInt, + before=True, + source=prop, + ) + proxy.node.meta["example_value"] = s + proxy.node.meta["grapharg"] = GraphArg( + prop, + s, + is_unspecialized=False, + fake_tensor=None, + is_tensor=False, + ) + + def handle_tensor(t, src): + for i, s in enumerate(t.size()): + bind_symint(s, TensorPropertySource(src, TensorProperty.SIZE, i)) + for i, s in enumerate(t.stride()): + bind_symint(s, TensorPropertySource(src, TensorProperty.STRIDE, i)) + bind_symint( + t.storage_offset(), + TensorPropertySource(src, TensorProperty.STORAGE_OFFSET), + ) + if is_traceable_wrapper_subclass(t): + attrs, ctx = t.__tensor_flatten__() + for attr in attrs: + inner_t = getattr(t, attr) + handle_tensor(inner_t, AttrSource(src, attr)) + + handle_tensor(arg.fake_tensor, arg.source) + + def count_calls(self): + return count_calls(self.graph) + + def is_empty_graph(self): + return len(list(self.graph.nodes)) == 0 + + def get_submodule(self, keys): + assert keys + obj: Union[torch.nn.Module, Dict[str, torch.nn.Module]] = self.nn_modules + for k in keys.split("."): + if isinstance(obj, dict): + obj = obj[k] + else: + obj = getattr(obj, k) + return obj + + def new_var(self, name="tmp"): + existing = set(self.code_options["co_varnames"]) + for i in itertools.count(): + var = f"{name}_{i}" + if var not in existing: + self.code_options["co_varnames"] += (var,) + return var + + def update_co_names(self, name): + """Ensure self.code_options.co_names contains name""" + if name not in self.code_options["co_names"]: + self.code_options["co_names"] += (name,) + + @staticmethod + def module_key_name(*names): + # create a new unique name + name = "_".join(map(str, names)) + # Strip the guard lookup L/G access + name = re.sub(r"^[GL]\['?(.*?)'?\]$", r"\1", name) + # e.g. replace abc.xyz[123].qkv with abc.xyz_123.qkv + name = re.sub(r"\[(\d+)\]", r"_\g<1>", name) + # e.g. replace abc.xyz_123.qkv with abc_xyz_123_qkv + name = re.sub(r"[^a-zA-Z0-9]", "_", name) + + if not name or not name[0].isalpha(): + name = "sub" + name + + return name + + def register_attr_or_module( + self, + target: Union[torch.nn.Module, torch.Tensor, Any], + *names, + **options, + ): + if is_dynamic_nn_module(target): + return variables.UnspecializedNNModuleVariable(target, **options) + + options = dict(options) + assert "source" in options + source = options["source"] + assert not isinstance(source, ParamBufferSource) + + if isinstance(target, torch.Tensor): + tracer = self.current_tracer + if not self.is_root_tracer(): + # For higher order ops, we don't want to insert the get_attr in + # innermost graph. Instead, we want to raise the params/buffers + # as inputs to the higher-order graph, and register them as + # get_attrs in the root tracer. + + # Note that Dynamo will still call lift_tracked_freevar_to_input + # when these inputs are encountered for the inner graph. The + # only difference is what happens at the root tracer for + # nn.Parameters vs free inputs. 
The free inputs are registered + # as placeholders in the root graph, whereas the nn.Parameters + # are registered as get_attr nodes in the root graph. + tracer = self.root_tracer + + if not is_constant_source(source): + install_guard(source.make_guard(GuardBuilder.TENSOR_MATCH)) + + if get_static_address_type(target) == "guarded": + install_guard(source.make_guard(GuardBuilder.DATA_PTR_MATCH)) + + def wrap_name(module_key): + assert self.param_name_to_source is not None + self.param_name_to_source[module_key] = source + + return wrap_fx_proxy( + self.root_tx, + tracer.create_proxy("get_attr", module_key, tuple(), {}), + example_value=target, + **options, + ) + + elif isinstance(target, torch.nn.Module): + assert isinstance(target, torch.nn.Module) + + install_guard(source.make_guard(GuardBuilder.NN_MODULE)) + + def wrap_name(module_key): + return NNModuleVariable(type(target), module_key, target, **options) + + elif isinstance(target, (torch.SymInt, torch.SymFloat)): + # HACKY CODE REGION BEGIN + # WE ARE PIGGYBACKING ON EXISTING INFRA TO REGISTER ATTRS + # This ultimately gets written to self.nn_modules, which is unfortunate + # Attrs that are tenors and symints and such need to be migrated to have their + # own storage + # alas, this is like this for now + + def wrap_name(module_key): + return SymNodeVariable.create( + self, + self.create_proxy("get_attr", module_key, tuple(), {}), + sym_num=target, + **options, + ) + + # HACKY CODE REGION END + else: + + def wrap_name(module_key): + self.output.update_co_names(module_key) + self.global_scope[module_key] = target + return VariableBuilder(self, ConstantSource(source_name=module_key))( + target + ) + + for k, v in self.nn_modules.items(): + if v is target: + # it already exists + return wrap_name(k) + + name = OutputGraph.module_key_name(*names) + + base = name + for i in itertools.count(): + if name not in self.nn_modules: + self.nn_modules[name] = target + if isinstance(target, torch.nn.Module): + + def register_leaf_name(leaf_name): + assert self.param_name_to_source is not None + new_source = ParamBufferSource(source, leaf_name) + new_name = f"{name}.{leaf_name}" + self.param_name_to_source[new_name] = new_source + if isinstance(source, LocalSource): + self.dynamo_flat_name_to_original_fqn[ + OutputGraph.module_key_name(new_source.name()) + ] = leaf_name + + # annoying, but there are cases when we do not have parameters + # see test_nn_moduledict_contains + if hasattr(target, "_parameters"): + for leaf_name, _ in target.named_parameters(): + register_leaf_name(leaf_name) + if hasattr(target, "_buffers"): + for leaf_name, _ in target.named_buffers(): + register_leaf_name(leaf_name) + + return wrap_name(name) + name = f"{base}_{i}" + + raise AssertionError("unreachable") + + def compile_subgraph( + self, tx, partial_convert=False, reason: Optional[GraphCompileReason] = None + ): + """ + Generate a subgraph to continue execution on user code. + Automatically restore live variables. 
+ """ + assert reason is not None + + from .decorators import disable + + self.partial_convert = partial_convert + self.compile_subgraph_reason = reason + self.should_exit = True + + log.debug("COMPILING GRAPH due to %s", reason) + + if not all(block.can_restore() for block in tx.block_stack): + unimplemented("compile_subgraph with block_depth != 0") + + prefix_insts: List[Instruction] = [] + if sys.version_info >= (3, 11): + # prefix instructions (Python 3.11+) + for inst in tx.prefix_insts: + if inst.opname == "MAKE_CELL": + prefix_insts.append( + create_instruction("MAKE_CELL", argval=inst.argval) + ) + elif inst.opname == "COPY_FREE_VARS": + prefix_insts.append( + create_instruction( + "COPY_FREE_VARS", arg=len(tx.code_options["co_freevars"]) + ) + ) + else: + prefix_insts.append(copy.copy(inst)) + assert not ( + self.pregraph_bytecode and self.export + ), "export does not support pregraph_bytecode" + prefix_insts.extend(self.pregraph_bytecode) + + def append_prefix_insts(): + self.add_output_instructions(prefix_insts) + prefix_insts.clear() + + for block in reversed(tx.block_stack): + block.exit(tx) + + self.cleanup_graph() + tx.prune_dead_locals() + stack_values = list(tx.stack) + root = FakeRootModule(self.nn_modules) + # Add all the local vars to the "stack" so restore at the end + restore_vars = [] + val_to_names: Dict[VariableTracker, List[str]] = {} + if stack_values: + val_to_names[stack_values[-1]] = list() + # NB: Typically (i.e., for graph compile from RETURN_VALUE), + # symbolic_locals will be empty at this point, as prune_dead_locals + # will clear out all of symbolic_locals because RETURN_VALUE is the + # last instruction and no more locals are used. The fanciness here + # is only needed for partial graphs. + for k, v in tx.symbolic_locals.items(): + # Note! this explicitly uses .local_name for matching + # Failure to do so will cause spurious registrations in val_to_names. + # This will in turn result in spurious variables showing up in the graph. + # This was very tricky to debug. 
For an example, dump the graph at call_user_compiler + # while running test_subgraphs.py + if isinstance(v.source, LocalSource) and v.source.local_name == k: + continue # no need to restore initial state + if v not in val_to_names: + val_to_names[v] = list() + val_to_names[v].append(k) + for v in val_to_names.keys(): + restore_vars.extend(val_to_names[v]) + stack_values.extend([v] * len(val_to_names[v])) + + # to handle random calls + if len(self.random_calls) > 0: + append_prefix_insts() + random_calls_instructions = [] + self.random_values_var = self.new_var("random_values") + rand_fn = disable(_get_gen_rand_values_fn(self.random_calls)) + rand_fn_name = self.install_global("__gen_rand_values", rand_fn) + codegen = PyCodegen(tx, root) + random_calls_instructions.extend( + codegen.load_function_name(rand_fn_name, True) + ) + random_calls_instructions.extend(create_call_function(0, False)) + random_calls_instructions.append( + codegen.create_store(tx.output.random_values_var), + ) + self.add_output_instructions(random_calls_instructions) + + if ( + stack_values + and all( + not isinstance( + v, + ( + UnspecializedPythonVariable, + NumpyNdarrayVariable, + TensorWithTFOverrideVariable, + ), + ) + for v in stack_values + ) + and all(isinstance(x, TensorVariable) for x in stack_values) + and len(set(stack_values)) == len(stack_values) + and self.side_effects.is_empty() + and not len(tx.debug_locals) != 0 + and not self.backward_state + ): + append_prefix_insts() + # optimization to generate better code in a common case + self.add_output_instructions( + self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root) + + [create_instruction("UNPACK_SEQUENCE", arg=len(stack_values))] + ) + else: + graph_output_var = self.new_var("graph_out") + pass1 = PyCodegen(tx, root, graph_output_var) + self.codegen_suffix(tx, stack_values, pass1) + + # one more time now that we have established tempvars + pass2 = PyCodegen( + tx, + root, + graph_output_var, + tempvars={val: None for val, count in pass1.uses.items() if count > 1}, + ) + self.codegen_suffix(tx, stack_values, pass2) + + output = [] + if count_calls(self.graph) != 0 or len(pass2.graph_outputs) != 0: + output.extend( + self.compile_and_call_fx_graph(tx, pass2.graph_output_vars(), root) + ) + + if len(pass2.graph_outputs) != 0: + output.append(pass2.create_store(graph_output_var)) + else: + output.append(create_instruction("POP_TOP")) + append_prefix_insts() + self.add_output_instructions(output + pass2.get_instructions()) + + # restore all the live local vars + self.add_output_instructions( + [PyCodegen(tx).create_store(var) for var in reversed(restore_vars)] + ) + + def codegen_suffix(self, tx, stack_values, cg): + if self.backward_state: + assert not self.export + for name, val in self.backward_state.items(): + cg(val) + cg.append_output(cg.create_load(self.backward_state_var)) + cg.store_attr(name) + self.side_effects.codegen_hooks(cg) + self.side_effects.codegen_save_tempvars(cg) + + # Return variables used for logging at the end + for debug_var, args in tx.debug_locals: + cg(debug_var) + for arg in args: + cg(arg) + cg.extend_output(create_call_function(len(args), True)) + + cg.restore_stack(stack_values, value_from_source=not tx.export) + self.side_effects.codegen_update_mutated(cg) + + def cleanup_graph(self): + """ + Remove "creation_timestamp" from node meta + + Remove this pattern from the graph: + torch._C._set_grad_enabled(False) + torch._C._set_grad_enabled(True) + """ + assert self.should_exit + nodes = list(self.graph.nodes) 
+ for node in nodes: + node.meta.pop("creation_timestamp", None) + + grad_enabled = torch.is_grad_enabled() + for node1, node2 in zip(nodes, nodes[1:]): + if ( + node1.target is torch._C._set_grad_enabled + and tuple(node1.args) == (not grad_enabled,) + and not node1._erased + ): + grad_enabled = node1.args[0] + if ( + node2.target is torch._C._set_grad_enabled + and tuple(node2.args) == (not grad_enabled,) + and not node2._erased + ): + grad_enabled = node2.args[0] + self.graph.erase_node(node1) + self.graph.erase_node(node2) + + def get_graph_sizes_structured(self): + ret = {} + for node in self.graph.nodes: + example_value = node.meta.get("example_value", None) + if isinstance(example_value, torch._subclasses.FakeTensor): + size = example_value.size() + ret[node.name] = [s if isinstance(s, int) else repr(s) for s in size] + return ret + + def get_graph_sizes(self, name: str): + graph_sizes_str = "TRACED GRAPH TENSOR SIZES\n" + graph_sizes_str += f"===== {name} =====\n" + for node in self.graph.nodes: + example_value = node.meta.get("example_value", None) + if isinstance(example_value, torch._subclasses.FakeTensor): + size = example_value.size() + graph_sizes_str += f"{node.name}: {tuple(size)}\n" + concrete_size = [] + has_symint = False + for sz in size: + if isinstance(sz, int): + concrete_size.append(sz) + elif isinstance(sz, torch.SymInt): + has_symint = True + concrete_size.append(sz.node.hint) + else: + break + else: + if has_symint: + graph_sizes_str += ( + f"{node.name} (concrete): {tuple(concrete_size)}\n" + ) + return graph_sizes_str + + @contextlib.contextmanager + def restore_global_state(self): + """ + Momentarily restores the global state to what it was prior to tracing the current output + """ + prior_global_state = self.tracing_context.global_context.copy_graphstate() + current_global_state: Dict[str, Tuple[Any, bool]] = {} + self.save_global_state(out=current_global_state) + try: + # Set to state prior to tracing the graph + self.tracing_context.global_context.restore_graphstate(prior_global_state) + yield + finally: + # Reset to state at the current time (e.g. before calling the user compiler) + self.tracing_context.global_context.restore_graphstate( + GlobalContextCheckpointState(current_global_state) + ) + + @torch._guards.TracingContext.clear_frame() + def compile_and_call_fx_graph(self, tx, rv, root): + """ + Generate code from self.graph and return the Instruction()s to + call that generated code. 
+ """ + from .decorators import disable + + assert self.should_exit + + name = unique_id("__compiled_fn") + + assert isinstance(rv, list) + assert isinstance(root, FakeRootModule) + self.create_node( + "output", + "output", + (self.current_tracer.create_arg(tuple(x.as_proxy() for x in rv)),), + {}, + ) + self.insert_deferred_runtime_asserts(root, name) + # NB: deferred runtime asserts can keep graphargs live, so make sure + # those are inserted before pruning + self.remove_unused_graphargs() + ncalls = count_calls(self.graph) + counters["stats"]["calls_captured"] += ncalls + + # free a bit of memory + self.real_value_cache.clear() + + gm = _make_graph_module(root, self.graph) + for register_finalizer in self.register_finalizer_fns: + register_finalizer(gm) + + gm.compile_subgraph_reason = self.compile_subgraph_reason + gm.meta[ + "dynamo_flat_name_to_original_fqn" + ] = self.dynamo_flat_name_to_original_fqn.copy() + + graph_code_log.debug("%s", lazy_format_graph_code(name, gm)) + torch._logging.trace_structured( + "dynamo_output_graph", + lambda: {"sizes": self.get_graph_sizes_structured()}, + payload_fn=lambda: gm.print_readable(print_output=False), + ) + graph_tabular_log.debug("%s", lazy_format_graph_tabular(name, gm)) + graph_sizes_log.debug("%s", LazyString(lambda: self.get_graph_sizes(name))) + self.call_cleanup_hooks() + old_fake_mode = self.tracing_context.fake_mode + if not self.export: + # TODO(voz): The way export uses gm, and fake tensors, is not supported with us resetting + backend_fake_mode = torch._subclasses.FakeTensorMode( + shape_env=old_fake_mode.shape_env, + ) + # TODO(voz): Ostensibily, this should be scoped and + # restore back to old_fake_mode, but doing so currently violates + # a lot of fake_tensor ownership assumptions and runs afoul of detect_fake_mode + self.tracing_context.fake_mode = backend_fake_mode + + with self.restore_global_state(): + compiled_fn = self.call_user_compiler(gm) + compiled_fn = disable(compiled_fn) + + counters["stats"]["unique_graphs"] += 1 + # This is safe because we pre-process name to be unique + self.install_global_unsafe(name, compiled_fn) + + cg = PyCodegen(tx) + cg.make_call_generated_code(name) + return cg.get_instructions() + + @property + def placeholders(self) -> List[fx.Node]: + r = [] + for node in self.graph.nodes: + if node.op == "placeholder": + r.append(node) + continue + break + return r + + @property + def graphargs(self) -> List[GraphArg]: + return [node.meta["grapharg"] for node in self.placeholders] + + @dynamo_timed(phase_name="backend_compile") + def call_user_compiler(self, gm: fx.GraphModule) -> CompiledFn: + assert self.compiler_fn is not None + tot = 0 + placeholders = [] + for node in gm.graph.nodes: + if node.op in ("call_function", "call_method", "call_module"): + tot += 1 + if node.op == "placeholder": + placeholders.append(node) + increment_op_count(tot) + for pl in placeholders: + arg = pl.meta["grapharg"] + # TODO: Why isn't this stored in meta :think: + pl._dynamo_source = arg.source + + gm._param_name_to_source = self.param_name_to_source # type: ignore[assignment] + gm._source_to_user_stacks = self.source_to_user_stacks # type: ignore[assignment] + + try: + name = ( + self.compiler_fn.__name__ + if hasattr(self.compiler_fn, "__name__") + else "" + ) + _step_logger()(logging.INFO, f"calling compiler function {name}") + compiler_fn = self.compiler_fn + if config.verify_correctness: + compiler_fn = WrapperBackend(compiler_fn) + compiled_fn = compiler_fn(gm, self.example_inputs()) + 
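+            # The backend may return any callable to stand in for gm.forward;
+            # callability is asserted right after the log line below.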
_step_logger()(logging.INFO, f"done compiler function {name}") + assert callable(compiled_fn), "compiler_fn did not return callable" + except exceptions_allowed_to_be_fallback as e: + if self.has_user_defined_allowed_in_graph: + raise BackendCompilerFailed(self.compiler_fn, e).with_traceback( + e.__traceback__ + ) from None + msg = ( + "Backend compiler failed with a fake tensor exception at \n" + f"{self.root_tx.format_frame_summary()}" + "Adding a graph break." + ) + unimplemented_with_warning(e, self.root_tx.f_code, msg) + except SkipFrame as e: + # The backend compiler has requested that we skip the frame, instead of + # aborting execution. + raise e + except Exception as e: + raise BackendCompilerFailed(self.compiler_fn, e).with_traceback( + e.__traceback__ + ) from None + + signpost_event( + "dynamo", + "OutputGraph.call_user_compiler", + { + **self.co_fields, + "op_count": tot, + "node_count": len(gm.graph.nodes), + "input_count": len(placeholders), + }, + ) + + return compiled_fn + + def example_inputs(self) -> List[torch.Tensor]: + result = [] + for arg in self.graphargs: + result.append(arg.example) + return result + + def remove_unused_graphargs(self) -> None: + assert self.should_exit + # Miniature DCE pass, but only for obviously trivial operations + for node in reversed(list(self.graph.nodes)): + if len(list(node.users)) == 0: + if node.op == "get_attr": + self.remove_node(node) + elif node.op == "call_function" and node.target is operator.getitem: + self.remove_node(node) + + def placeholder_binds_symbol(node): + arg = node.meta["grapharg"] + example = arg.example + if isinstance(example, torch.SymInt) and isinstance( + example.node.expr, sympy.Symbol + ): + return example.node.expr + return None + + def remove_unused(node): + log.debug("REMOVE UNUSED GRAPHARG %s", node.meta["grapharg"].source.name()) + # I'm not really sure why you need to delete these from the + # node since the node is going to get removed + del node.meta["grapharg"] + self.remove_node(node) + self.real_value_cache.pop(node, None) + + used_symbols = set() + recheck_placeholders = [] + for node in self.placeholders: + binds_symbol = placeholder_binds_symbol(node) is not None + # Don't delete symbol bindings yet + if binds_symbol: + if not node.users: + recheck_placeholders.append(node) + else: + if not node.users and not isinstance( + node.meta["grapharg"], BackwardStateGraphArg + ): + remove_unused(node) + else: + # Register the free symbols as uses + arg = node.meta["grapharg"] + if isinstance(arg, BackwardStateGraphArg): + continue + fake = ( + arg.fake_tensor if arg.fake_tensor is not None else arg.example + ) + used_symbols |= free_symbols(fake) + + # After removing unused graphargs, prune unused binds_symbol + for node in recheck_placeholders: + symbol = placeholder_binds_symbol(node) + if symbol is not None: + if symbol not in used_symbols: + remove_unused(node) + else: + # Make sure we delete later occurrences of the same symbol + used_symbols.remove(symbol) + + # TODO: this is a generic pass that should live outside of Dynamo + def insert_deferred_runtime_asserts(self, root, name) -> None: + """ + During tracing, we may have discovered that some data-dependent values + had runtime assert on them; e.g., torch.empty(x.item()) induces a runtime + that x.item() >= 0. This asserts can happen unpredictably during fake + tensor propagation, so we cannot conveniently insert them into the FX graph + when they occur. 
Instead, we accumulate them in the ShapeEnv, and in this + pass insert them into the graph as proper tests. + """ + # TODO: Request simplification on runtime asserts before emitting them + ras_by_symbol = self.shape_env.deferred_runtime_asserts.copy() + + if not any(ras for ras in ras_by_symbol.values()): + return + + gm = fx.GraphModule(root, self.graph) + graph_code_log.debug( + "%s", + lazy_format_graph_code(f"pre insert_deferred_runtime_asserts {name}", gm), + ) + + # We are going to mutate the dict + symbol_to_proxy = {} + placeholders = set() + last_placeholder = None + for node in self.graph.nodes: + if node.op != "placeholder": + last_placeholder = node + break + placeholders.add(node) + assert last_placeholder is not None + + # Identify what symbols we need to reify. This isn't strictly needed + # but helps reduce churn on the graph + needed_symbols: Set[sympy.Symbol] = set() + for ras in ras_by_symbol.values(): + for ra in ras: + needed_symbols.update(free_symbols(ra.expr)) + + log.debug("needed_symbols = %s", needed_symbols) + + for node in self.graph.nodes: + # Placeholders can match symbols, but when we destructure them + # with size we have to make sure we insert the nodes after all + # the placeholders + with self.graph.inserting_before( + node.next if node not in placeholders else last_placeholder.next + ): + if "example_value" not in node.meta: + continue + + defs = [] + + # For every new unbacked symbol, we need an fx.Node representing + # precisely this value. There are a few places where the unbacked + # symbol could have come from, and we will check them to setup + # these nodes. + # + # For a case like item(), this is trivial (no new node is added.) + # + # For nonzero(), we need to add something like i0 = out.size(0) + # + # We could end up with duplicate nodes this way but it is not a + # big deal. + # + # We also do this to setup backed SymInts, but those are all going + # to be matched from placeholders + def match_symbol(symint, cb): + if ( + isinstance(symint, torch.SymInt) + and isinstance(symint.node, SymNode) + and isinstance(s := symint.node.expr, sympy.Symbol) + and s not in symbol_to_proxy + and s in needed_symbols + ): + symbol_to_proxy[s] = fx.Proxy(cb()) + log.debug("symbol_to_proxy[%s] = %s", s, symbol_to_proxy[s]) + defs.append(s) + + match_symbol(node.meta["example_value"], lambda: node) + if isinstance(t := node.meta["example_value"], torch.Tensor): + for i, s in enumerate(t.size()): + match_symbol( + s, lambda: self.graph.call_method("size", (node, i)) + ) + for i, s in enumerate(t.stride()): + match_symbol( + s, lambda: self.graph.call_method("stride", (node, i)) + ) + match_symbol( + t.storage_offset(), + lambda: self.graph.call_method("storage_offset", (node,)), + ) + + for i0 in defs: + ras = ras_by_symbol.pop(i0, []) + # Before we perform any asserts, first apply range + # refinement. This is important, because if we are going + # to retrace the graph (and we typically are if we send + # the graph to AOTAutograd), we need to make sure we apply + # range refinement (ala _check_is_size) first, BEFORE we + # run any of the asserts. Otherwise, we may decide to + # perform substitutions based on the asserts which we then + # can't back out, because value ranges can only be applied + # to asserts.) + # + # A perhaps better long term plan is to avoid this order + # dependence by making it possible to refine ranges on + # arbitrary expressions, not just symbols. 
But it is not + # so easy to make use of this information, see + # https://twitter.com/ezyang/status/1745801370299482492 + # We actually made an attempt at this in + # https://github.com/pytorch/pytorch/pull/119043 + # which didn't work. + # + # Another ideas for how to do this: + # - Have bound_sympy be the source of truth of the ranges of any expression + # - Cache intermediate results for every subexpression of bound_sympy + # - This cache should be possible to edit to refine ranges + # + # One issue with this proposal is that if + # we have a bound on 2x, we are not going to be able to + # apply it for 4x. Similarly, we may have bounds for an + # equivalent expression that we are not applying because + # it's not a perfect match (e.g. x < y vs y > x)". + # + # The first issue we already have it and it's impossible + # to solve in general, so any implementation on a best + # effort basis should do. + # + # The second issue is a preexisting one. It can be mitigated + # with a normalisation algorithm. In general, it may also + # be on a best effort basis, but since our grammar is not + # terribly difficult, chances are we could even fully + # normalise SymPy expressions... who knows. + + if i0 in self.shape_env.size_like: + self.graph.call_function( + torch._check_is_size, (symbol_to_proxy[i0].node,) + ) + + vr = self.shape_env.var_to_range[i0] + if not self.shape_env._default_unspecified_value_range().issubset( + vr + ): + # The runtime range is constrained, so add a runtime + # assert and also explicitly refine the range + # (refinement should not be necessary once runtime + # asserts cause refinement, but that's NYI) + def convert(s): + try: + return int(s) + except TypeError: + return None + + self.graph.call_function( + torch._constrain_as_value, + ( + symbol_to_proxy[i0].node, + convert(vr.lower), + convert(vr.upper), + ), + ) + + for ra in ras: + log.debug("inserting runtime assert %s", ra.expr) + # Need to process ALL free symbols, not just unbacked ones + fvs = free_symbols(ra.expr) + missing = fvs - symbol_to_proxy.keys() + if missing: + i1 = sorted(missing)[0] + # TODO: Remove relaxing assert on unbacked_symint https://github.com/pytorch/pytorch/issues/119689 + # assert self.shape_env.is_unbacked_symint(i1), i1 + ras_by_symbol.setdefault(i1, []).append(ra) + else: + # Convert the sympy expression into a sequence of FX + # nodes + res = sympy_interp( + PythonReferenceAnalysis, symbol_to_proxy, ra.expr + ).node + self.graph.call_function( + torch.ops.aten._assert_scalar.default, + # TODO: use ra.msg here, but it's pretty + # useless right now + ( + res, + f"Deferred runtime assertion failed {ra.expr}", + ), + ) + + def add_output_instructions(self, prefix: List[Instruction]) -> None: + """ + We call this on the creation of a new compiled subgraph that is inserted + before user code. + """ + self.output_instructions.extend(prefix) + self.should_exit = True + + def install_global_unsafe(self, name, value) -> None: + """ + WARNING: prefer the safer `install_global_by_id/install_global`. + torch.compile instances should be independent of each other; + one footgun is to have one instance depend on the existence of + a global installed by another instance. This can happen if we mangle + a global the same way across both instances. 
+ """ + assert name not in self.installed_globals + self.installed_globals.add(name) + self.cleanups.append(CleanupHook.create(self.global_scope, name, value)) + + def install_global_by_id(self, prefix, value) -> str: + """ + Installs a global if it hasn't been installed already. + This is determined by (prefix, id(value)) pair. + + Returns the name of the newly installed global. + """ + # NB: need self.compile_id to distinguish this global + # from another global created in a different torch.compile instance + name = f"{prefix}_{id(value)}_c{self.compile_id}" + if name in self.installed_globals: + return name + self.install_global_unsafe(name, value) + return name + + def install_global(self, prefix, value) -> str: + """ + Installs a global, generating a unique name for it. + + Returns the name of the newly installed global. + """ + # NB: unique_id is unique, even across torch.compile instances + name = unique_id(prefix) + self.install_global_unsafe(name, value) + return name + + def cleanup(self) -> None: + # There is a reference cycle between tracer and OutputGraph, causing + # some of the tensor objects to be held alive for longer than necessary. + self.root_tx = None + self.nn_modules.clear() + self.param_name_to_source = None + + for node in self.graph.nodes: + if "grapharg" in node.meta: + del node.meta["grapharg"] + self.real_value_cache.clear() + self.input_name_to_proxy.clear() + self.side_effects.clear() + self.register_finalizer_fns.clear() + self.dynamo_flat_name_to_original_fqn.clear() + self.tracing_context.clear() + + def set_torch_function_state(self, enabled: bool) -> None: + self.torch_function_enabled = enabled + + def add_graph_finalizer( + self, register_finalizer: Callable[[fx.GraphModule], None] + ) -> None: + self.register_finalizer_fns.append(register_finalizer) + + def example_value_from_input_node(self, node: torch.fx.Node): + """Extract the non-fake example tensor""" + if node.op == "placeholder": + return node.meta["grapharg"].example + assert node.op == "get_attr" + return self.nn_modules[node.target] # type: ignore[index] + + +err_epilogue = ( + "With the current config, we will graph break " + "(and fall back to eager-mode PyTorch) on all ops " + "that have do not have the 'pt2_compliant_tag'. " + "Please see the following doc for how to mark this op as PT2 compliant " + "https://docs.google.com/document/d/1W--T6wz8IY8fOI0Vm8BF44PdBgs283QvpelJZWieQWQ" +) + + +def check_pt2_compliant_op(output_graph, kind, target, args, kwargs): + if kind != "call_function": + return + + def encountered_compliant_op(target): + if target.namespace in {"prim", "prims", "aten"}: + return + output_graph.compliant_custom_ops.add(target) + + def encountered_non_compliant_op(target, msg): + output_graph.non_compliant_ops.add(target) + if config.only_allow_pt2_compliant_ops: + unimplemented(msg + " " + err_epilogue) + + if isinstance(target, torch._ops.OpOverload): + if torch.Tag.pt2_compliant_tag in target.tags: + encountered_compliant_op(target) + return + encountered_non_compliant_op( + target, + f"Encountered the torch.ops.OpOverload {target} " + f"that is not PT2 compliant.", + ) + return + + if isinstance(target, torch._ops.OpOverloadPacket): + overloads = tuple(target.overloads()) + # Optimization: Overload resolution is expensive. + # If there's only one overload, we know what it will resolve to. 
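+        # Otherwise, the packet is resolved against fake arguments further below
+        # via torch._C._jit_resolve_packet.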
+ if len(overloads) == 1: + op = getattr(target, overloads[0]) + if torch.Tag.pt2_compliant_tag in op.tags: + encountered_compliant_op(op) + return + encountered_non_compliant_op( + op, + f"Encountered the non-overloaded " + f"torch.ops.OpOverloadPacket {target} " + f"that is not PT2 compliant. ", + ) + return + + args, kwargs = torch._dynamo.utils.get_fake_values_from_nodes( + output_graph.current_tx, (args, kwargs), False + ) + try: + overload = torch._C._jit_resolve_packet( + target._qualified_op_name, *args, **kwargs + ) + except RuntimeError as e: + unimplemented(str(e)) + + op = getattr(target, overload) + if torch.Tag.pt2_compliant_tag in op.tags: + encountered_compliant_op(op) + else: + encountered_non_compliant_op( + op, + f"Encountered the torch.ops.OpOverloadPacket {target} " + f"which resolves to the overload ({overload}) that is " + f"not PT2 compliant.", + ) + + +_compile_id_counter = itertools.count() + + +class SubgraphTracer(fx.Tracer): + """ + Holds an FX graph that is being traced. OutputGraph owns a SubgraphTracer + and the separation of responsibilities is that SubgraphTracer is + responsible for building the graph while OutputGraph is responsible for + compiling and executing the graph. + """ + + def __init__( + self, output_graph, parent=None, export_root=False, source_target=None + ): + super().__init__() + self.output_graph = weakref.proxy(output_graph) + self.graph = torch.fx.Graph() + + # The export is only ever set for the ROOT tracer. It controls + # whether or not certain inputs are allowed to be added or not. + # Look at call sites of create_graph_input to see how it is used. + if export_root: + assert parent is None + self.export_root = export_root + # Map from graph input name to its placeholder proxy object, where the + # map's keys give all current placeholder node names and can be used to + # create unique node names + self.input_name_to_proxy: Dict[str, fx.Proxy] = {} + # Node => computed real value (see utils.get_real_value) + self.real_value_cache: Dict[fx.Node, torch.Tensor] = {} + + # SubgraphTracers can be nested. See NOTE [HigherOrderOperator tracing design] + self.parent = parent + # A dict mapping previously free variables (Proxy objects) + # to new Proxy objects that wrap inputs to this subgraph. + # + # This dict serves two purposes: + # - Proxies are associated with VariableTrackers. If we see + # the same VariableTracker twice (and it is a free variable), + # then we want to use the same Proxy in the current subgraph to + # record the tracing. + # - If we are tracing a HigherOrderOperator's body_fn, then we + # need to keep track of what free variables were lifted so we can + # rewrite the HigherOrderOperator call using the traced body_fn. + # Dicts maintain the order of args for the HigherOrderOperator call. + self.lifted_freevars = {} + self.prev_inst = None + + self._cur_code = None + self._orig_gm_meta = None + self._orig_gm_lineno_map = None + self._orig_gm_firstlineno = None + # Each SubgraphTracer is associated with a source target, which indicates + # which operator this subgraph is attached to. We compute a source_fn_stack + # based on the source target. For the root tracer, it's set to []. + # This is useful for debugging and transforming the exported graph. 
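+        # Illustrative only: for a body function traced under, say, torch.cond,
+        # the nested tracer's source_fn_stack would hold one ("cond", cond_op)
+        # entry on top of its parent's stack; the root tracer always keeps [].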
+ if self.parent is None: + self.source_fn_stack = [] + else: + self.source_fn_stack = self.parent.source_fn_stack + [ + (self.graph._target_to_str(source_target), source_target) + ] + + def create_proxy( + self, + kind, + target, + args, + kwargs, + name=None, + type_expr=None, + proxy_factory_fn=None, + ): + # NOTE: [Nested SubgraphTracer and free_variable handling] + # -------------------------------------------------------- + # Read NOTE [HigherOrderOperator tracing design] first. + # + # Let's say we're in the middle of introspecting the body of a possibly + # nested HigherOrderOperator, and we see a free variable. + # + # There are two cases: + # 1. We see a free variable that is already tracked by Dynamo. + # 2. We see a free variable that has not been tracked by Dynamo + # + # In case 1, we call `maybe_lift_tracked_freevar_to_input` (below) + # which will lift the freevar to be an input of this subgraph + # and also recursively lift it to be an input on the parent(s). + # + # In case 2, before the call to `create_proxy`, the InstructionTranslator + # will see the freevar when it gets loaded by Python bytecode. + # E.g. for Python 3.11 the bytecodes that may do this are LOAD_DEREF or + # LOAD_GLOBAL. + # There, the InstructionTranslator asks Dynamo to begin tracking the + # freevar by building a new Variable. + # Building a new Variable automatically lifts the freevar to be an + # input of the root SubgraphTracer. + # + # The implications for the code below are: + # - We will always be in Case 1 when we get to this code. + # - Any "free variable" we encounter here is guaranteed to already be + # bound, that is, it is either a graph input of the root graph, or + # some local variable of the root graph or a subgraph. + # - The additional work we need to do here is *only* that we need to + # lift this free variable into inputs (recursively) of each nested + # higher-order-op subgraph until we hit the subgraph where the free + # variable is bound + if self.parent is not None: + flat_args, tree_spec = pytree.tree_flatten((args, kwargs)) + new_flat_args = [] + for arg in flat_args: + maybe_new_arg = self.maybe_lift_tracked_freevar_to_input(arg) + new_flat_args.append(maybe_new_arg) + + args, kwargs = pytree.tree_unflatten(new_flat_args, tree_spec) + + rv = super().create_proxy( + kind, target, args, kwargs, name, type_expr, proxy_factory_fn + ) + + # append stack trace to fx node + tx = self.output_graph.current_tx + + # log detailed location of line of code in 3.11 + if sys.version_info >= (3, 11) and kind in ( + "call_function", + "call_method", + "call_module", + ): + cur_inst = tx.current_instruction + if ( + cur_inst is not self.prev_inst + and cur_inst.positions is not None + and cur_inst.positions.lineno is not None + ): + tx_code = tx.f_code + header = tx.get_line_of_code_header(lineno=cur_inst.positions.lineno) + + def get_trace_call_log_str(): + line = get_instruction_source_311(tx_code, cur_inst).rstrip() + return f"TRACE FX call {rv.node.name} from {header}\n{line}" + + trace_call_log.debug("%s", LazyString(get_trace_call_log_str)) + self.prev_inst = cur_inst + + # update reference to original meta if we're tracing a new code object + is_retracing = False + if tx.f_code is not self._cur_code: + orig_graphmodule_maybe = code_context.get_context(tx.f_code).get( + "orig_graphmodule", lambda: None + )() + if isinstance(orig_graphmodule_maybe, torch.fx.GraphModule): + is_retracing = True + self._orig_gm_meta = [ + nd.meta for nd in orig_graphmodule_maybe.graph.nodes + ] + 
self._orig_gm_lineno_map = orig_graphmodule_maybe._lineno_map + self._orig_gm_firstlineno = ( + orig_graphmodule_maybe.forward.__code__.co_firstlineno + ) + else: + self._orig_gm_meta = None + self._orig_gm_lineno_map = None + self._orig_gm_firstlineno = None + nn_module_stack = tx.nn_module_stack + if nn_module_stack: + rv.node.meta["nn_module_stack"] = nn_module_stack.copy() + + if kind in {"call_function", "call_method"}: + rv.node.meta["source_fn_stack"] = self.source_fn_stack + [ + (rv.node.name, target) + ] + elif kind == "call_module": + if self.parent is not None: + unimplemented("Invoking an nn.Module inside HigherOrderOperator") + # For modules we store the class + rv.node.meta["source_fn_stack"] = self.source_fn_stack + [ + ( + rv.node.name, + rv.node.meta["nn_module_stack"][target][1], + ) + ] + + # preserve original meta if it is available + if ( + self._orig_gm_meta + and self._orig_gm_lineno_map + and self._orig_gm_firstlineno + ): + lineno = tx.current_instruction.starts_line + node_idx = None + if lineno is not None: + node_idx = self._orig_gm_lineno_map.get( + lineno - self._orig_gm_firstlineno, None + ) + if node_idx is not None: + meta = self._orig_gm_meta[node_idx] + for field in fx.proxy._COPY_META_FIELDS: + if field in meta: + rv.node.meta[field] = meta[field] + if "stack_trace" in meta: + rv.node.meta["stack_trace"] = meta["stack_trace"] + + if not is_retracing: + if "nn_module_stack" not in rv.node.meta: + nn_module_stack = tx.nn_module_stack + if nn_module_stack: + rv.node.meta["nn_module_stack"] = nn_module_stack.copy() + + if "source_fn_stack" not in rv.node.meta: + if kind in {"call_function", "call_method"}: + rv.node.meta["source_fn_stack"] = self.source_fn_stack + [ + (rv.node.name, target) + ] + elif kind == "call_module": + if self.parent is not None: + unimplemented( + "Invoking an nn.Module inside HigherOrderOperator" + ) + # For modules we store the class + rv.node.meta["source_fn_stack"] = self.source_fn_stack + [ + ( + rv.node.name, + rv.node.meta["nn_module_stack"][target][1], + ) + ] + + if "stack_trace" not in rv.node.meta: + frame_summaries: List[traceback.FrameSummary] = [] + while tx: + frame_summaries.append(tx.frame_summary()) + tx = getattr(tx, "parent", None) + # Reverse the frame_summaries, such that the innermost frame is at the last + frame_summaries.reverse() + + # official from_list stub doesn't have new-style type + msgs = traceback.StackSummary.from_list(frame_summaries).format() + rv.node.stack_trace = "".join(msgs) + + return rv + + def create_node( + self, op, target, args=None, kwargs=None, name=None, type_expr=None + ): + check_pt2_compliant_op(self.output_graph, op, target, args, kwargs) + if self.parent is not None: + flat_args = pytree.arg_tree_leaves(*args, **kwargs) + for arg in flat_args: + if not isinstance(arg, torch.fx.Node): + continue + assert ( + arg.graph == self.graph + ), "create_node using arg not from this SubgraphTracer" + + node = super().create_node(op, target, args, kwargs, name, type_expr) + node.meta["creation_timestamp"] = self.output_graph.timestamp + return node + + # Note: we did not override erase_node since + # we call self.graph.erase_node elsewhere + def remove_node(self, node): + if len(node.users) > 0: + user_graph_nodes: List[torch.fx.Node] = [] + for user in node.users.keys(): + # For the case where user.graph == self.graph, that is a real bug and will raise + # properly. + if user.graph != self.graph: + # This is a nested graph, which needs to be deleted. 
+ # If we do not do this, we will raise on attempting to remove this. + # As we only get here during restoration cleanup, this is sound. + user_graph_nodes.extend(reversed(list(user.graph.nodes))) + for other_graph_node in user_graph_nodes: + other_graph_node.graph.erase_node(other_graph_node) + self.graph.erase_node(node) + self.input_name_to_proxy.pop(node.name, None) + + # when before=True, we will insert this input before the most recent + # inserted proxy. This is a hack to get around an ordering problem, + # where we first insert a tensor argument, and then insert bindings + # for SymInts that may occur in the tensor argument. + # Remove this if https://github.com/pytorch/pytorch/issues/99007 gets + # fixed. + def create_graph_input(self, name, type_expr=None, before=False, source=None): + log.debug( + "create_graph_input %s %s", + name, + source.name() if source is not None else "(none)", + ) + if source is None: + assert ( + self.parent is not None + ), "you are required to provide a source for inputs on the root tracer" + + # In eager, we are generally OK with adding graph inputs whenever we + # want, because we take care of writing the bytecode that knows how + # to source all the inputs. + # + # In export, this is bad, because you want a self-contained export + # object which only depends on the inputs you explicitly passed to it. + # So we are a bit more strict about what sources can become inputs + # in export + if self.export_root: + if not is_from_local_source(source, allow_cell_or_freevar=False): + self.output_graph.source_to_user_stacks.setdefault(source, []).append( + TracingContext.extract_stack() + ) + + # unique + if name in self.input_name_to_proxy: + for i in itertools.count(): + candidate_name = f"{name}_{i}" + if candidate_name not in self.input_name_to_proxy: + name = candidate_name + break + + if self.input_name_to_proxy: + prev_name = next(reversed(self.input_name_to_proxy)) + node = self.input_name_to_proxy[prev_name].node + if before: + ctx = self.graph.inserting_before(node) + else: + ctx = self.graph.inserting_after(node) + else: + ctx = self.graph.inserting_before(None) + with ctx: + proxy = self.create_proxy("placeholder", name, (), {}, type_expr=type_expr) + if self.input_name_to_proxy and before: + k, v = self.input_name_to_proxy.popitem() + self.input_name_to_proxy[name] = proxy + self.input_name_to_proxy[k] = v + else: + self.input_name_to_proxy[name] = proxy + return proxy + + # See NOTE: [Nested SubgraphTracer and free_variable handling] for more details + def lift_tracked_freevar_to_input(self, proxy): + # You're doing something wrong if we are the root SubgraphTracer because + # Dynamo adds tensors to graph inputs before creating a proxy for them. + assert ( + self.parent is not None + ), "lift_tracked_freevar_to_input should not be called on root SubgraphTracer" + # Proxys are associated with VariableTracker. + # It is possible that we've already lifted the Proxy to be an input. + # If that is the case, just return the already lifted Proxy. + if proxy in self.lifted_freevars: + return self.lifted_freevars[proxy] + new_proxy = self.create_graph_input(proxy.node.name) + new_proxy.node.meta["example_value"] = proxy.node.meta["example_value"] + self.lifted_freevars[proxy] = new_proxy + if self.parent is not None and proxy.tracer != self.parent: + self.parent.lift_tracked_freevar_to_input(proxy) + return new_proxy + + def maybe_lift_tracked_freevar_to_input(self, arg): + """ + If arg is a free variable, then lift it to be an input. 
+ Returns the new lifted arg (if arg was a freevar), else the + original arg. + """ + if not isinstance(arg, torch.fx.Proxy): + return arg + elif arg.tracer == self: + return arg + return self.lift_tracked_freevar_to_input(arg) + + +# NOTE: [HigherOrderOperator tracing design] +# Ignoring HigherOrderOperators for a moment, +# OutputGraph represents the graph being built by Dynamo that may be compiled +# and executed. It holds a root SubgraphTracer where the FX graph is built. +# +# HigherOrderOperators are operators that take functions as their arguments. +# When Dynamo encounters a HigherOrderOperator, then it attempts to introspect +# the function passed to it (call this the "body function"), capture it into a +# GraphModule, and rewrite the call to the HigherOrderOperator to use the +# GraphModule. +# +# The way we handle the capture of body functions is through having +# (possibly nested) SubgraphTracers, one per body function. +# +# Mechanically, we do the introspection by: +# - Creating a new SubgraphTracer via OutputGraph.subtracer +# - Executing the body function. +# This constructs the graph of the body function in the new SubgraphTracer +# while modifying the state of the OutputGraph. For example: +# - the OutputGraph can receive new GraphArgs (if we discover any new +# untracked Tensors) +# - side effects from the body function get accumulated into +# OutputGraph.side_effects +# - guards produced by the body function get accumulated into OutputGraph.guards +# +# The traced function has some special properties that make it easier for us +# to transform later down the line: +# - we lift all free variables to being inputs. +# +# If the introspection fails (due to the existence of graph breaks), then +# we roll back the current OutputGraph state and graph break on the +# HigherOrderOperator. diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/polyfill.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/polyfill.py new file mode 100644 index 0000000000000000000000000000000000000000..fc82d90c4c8a0344e9e027f683fee4c9e6195b44 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/polyfill.py @@ -0,0 +1,47 @@ +# mypy: ignore-errors + +""" +Python polyfills for common builtins. 
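+These pure-Python implementations can be inlined and traced by Dynamo, unlike
+the C builtins they stand in for; e.g. any(x) here behaves like the builtin
+any() but runs as ordinary bytecode that the tracer can follow.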
+""" +import math + +import torch + + +def all(iterator): + for elem in iterator: + if not elem: + return False + return True + + +def any(iterator): + for elem in iterator: + if elem: + return True + return False + + +def index(iterator, item, start=0, end=None): + for i, elem in enumerate(list(iterator))[start:end]: + if item == elem: + return i + # This will not run in dynamo + raise ValueError(f"{item} is not in {type(iterator)}") + + +def repeat(item, count): + for i in range(count): + yield item + + +def radians(x): + return math.pi / 180.0 * x + + +def accumulate_grad(x, new_grad): + new_grad = torch.clone(new_grad) + if x.grad is None: + x.grad = new_grad + else: + x.grad.add_(new_grad) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/profiler.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..b52551c67137519ce65b49c4416fd08c814545ed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/profiler.py @@ -0,0 +1,155 @@ +import dataclasses +import os +from typing import Any, List + +import torch + +from .utils import print_once + + +@dataclasses.dataclass +class ProfileMetrics: + microseconds: float = 0.0 + operators: int = 0 + fusions: int = 0 + graphs: int = 0 + + def __iadd__(self, other: "ProfileMetrics"): + self.microseconds += other.microseconds + self.operators += other.operators + self.fusions += other.fusions + return self + + def __add__(self, other: "ProfileMetrics"): + assert isinstance(other, ProfileMetrics) + return ProfileMetrics( + self.microseconds + other.microseconds, + self.operators + other.operators, + self.fusions + other.fusions, + ) + + def __truediv__(self, other): + if isinstance(other, int): + other = ProfileMetrics(other, other, other) + return ProfileMetrics( + self.microseconds / max(1, other.microseconds), + self.operators / max(1, other.operators), + self.fusions / max(1, other.fusions), + ) + + def __str__(self): + return f"{self.operators:4.0%} ops {self.microseconds:4.0%} time" + + def tocsv(self): + return [self.operators, self.microseconds] + + +class ProfileResult: + def __init__(self, captured, total, unique_graphs): + self.captured: ProfileMetrics = captured or ProfileMetrics() + self.total: ProfileMetrics = total or ProfileMetrics() + self.unique_graphs: int = unique_graphs + + def __iadd__(self, other: "ProfileResult"): + self.captured += other.captured + self.total += other.total + self.unique_graphs += other.unique_graphs + return self + + def percent(self): + return self.captured / self.total + + def __str__(self): + return ( + f"{self.unique_graphs:2} graphs {self.captured.graphs:2} graph calls " + f"{self.captured.operators:4}/{self.total.operators:4} = " + + str(self.percent()) + ) + + def tocsv(self): + return [ + self.unique_graphs, + self.captured.graphs, + self.captured.operators, + self.total.operators, + ] + self.percent().tocsv() + + +def should_print_missing(): + return os.environ.get("TORCHDYNAMO_PRINT_MISSING") == "1" + + +def print_missing(stack): + if any("/torch/autograd/profiler.py" in x for x in stack): + return + stack = [ + x for x in stack if ("> ".join(stack[-3:])) + + +class Profiler: + unique_graphs = 0 + + def __init__(self): + self.prof = torch.profiler.profile( + activities=[torch.profiler.ProfilerActivity.CPU], + with_stack=should_print_missing(), + ) + + def results(self): + captured_regions = 0 + captured_ops = 0 + captured_microseconds = 0 + total_ops = 0 + total_microseconds = 0 + 
+ last_op_end_time = -1 + captured_region_end_time = -1 + events = sorted(self.prof.events(), key=lambda x: x.time_range.start) + for e in events: + if e.name == "TORCHDYNAMO": + captured_region_end_time = e.time_range.end + captured_regions += 1 + # ignore `handle = torch.zeros(1)` in record_function.__init__() + total_ops -= 1 + elif e.time_range.start >= last_op_end_time: + last_op_end_time = e.time_range.end + if e.time_range.end <= captured_region_end_time: + captured_ops += 1 + captured_microseconds += e.time_range.elapsed_us() + elif should_print_missing(): + print_missing(e.stack) + total_ops += 1 + total_microseconds += e.time_range.elapsed_us() + else: + pass # ops recursively called from other ops (ignored) + + unique_graphs = Profiler.unique_graphs + Profiler.unique_graphs = 0 + # we counted one extra op that is part of the profiler setup code + total_ops -= 1 + + return ProfileResult( + captured=ProfileMetrics( + microseconds=captured_microseconds, + operators=captured_ops, + fusions=captured_ops - captured_regions, + graphs=captured_regions, + ), + total=ProfileMetrics( + microseconds=total_microseconds, + operators=total_ops, + fusions=total_ops - 1, + ), + unique_graphs=unique_graphs, + ) + + +def fx_insert_profiling(gm: torch.fx.GraphModule, example_inputs: List[Any]): + def _wrapped(*args): + with torch.profiler.record_function("TORCHDYNAMO"): + return gm.forward(*args) + + Profiler.unique_graphs += 1 + return _wrapped diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py new file mode 100644 index 0000000000000000000000000000000000000000..b0b565aa275a2822e373fada9f043994d8c0eebe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/resume_execution.py @@ -0,0 +1,648 @@ +import copy +import dataclasses +import sys +import types +from typing import Any, cast, Dict, List, Optional, Tuple + +from .bytecode_transformation import ( + create_call_function, + create_call_method, + create_dup_top, + create_instruction, + create_jump_absolute, + Instruction, + InstructionExnTabEntry, + transform_code_object, + unique_id, +) +from .utils import ExactWeakKeyDictionary + +# taken from code.h in cpython +CO_OPTIMIZED = 0x0001 +CO_NEWLOCALS = 0x0002 +CO_VARARGS = 0x0004 +CO_VARKEYWORDS = 0x0008 +CO_NESTED = 0x0010 +CO_GENERATOR = 0x0020 +CO_NOFREE = 0x0040 +CO_COROUTINE = 0x0080 +CO_ITERABLE_COROUTINE = 0x0100 +CO_ASYNC_GENERATOR = 0x0200 + + +@dataclasses.dataclass(frozen=True) +class ReenterWith: + stack_index: int + target_values: Optional[Tuple[Any, ...]] = None + + # If we do not want to destroy the stack, we can do the same thing as a + # `SETUP_WITH` block, only that we store the context manager in a local_symbol + def try_except(self, code_options, cleanup: List[Instruction]): + """ + Codegen based off of: + load args + enter context + try: + (rest) + finally: + exit context + """ + load_args = [] + if self.target_values: + load_args = [ + create_instruction("LOAD_CONST", argval=val) + for val in self.target_values + ] + ctx_name = unique_id(f"___context_manager_{self.stack_index}") + if ctx_name not in code_options["co_varnames"]: + code_options["co_varnames"] += (ctx_name,) + for name in ["__enter__", "__exit__"]: + if name not in code_options["co_names"]: + code_options["co_names"] += (name,) + + except_jump_target = create_instruction( + "NOP" if sys.version_info < (3, 11) else "PUSH_EXC_INFO" + ) + cleanup_complete_jump_target = 
create_instruction("NOP") + + setup_finally = [ + *load_args, + *create_call_function(len(load_args), True), + create_instruction("STORE_FAST", argval=ctx_name), + create_instruction("LOAD_FAST", argval=ctx_name), + create_instruction("LOAD_METHOD", argval="__enter__"), + *create_call_method(0), + create_instruction("POP_TOP"), + ] + + if sys.version_info < (3, 11): + setup_finally.append( + create_instruction("SETUP_FINALLY", target=except_jump_target) + ) + else: + exn_tab_begin = create_instruction("NOP") + exn_tab_end = create_instruction("NOP") + exn_tab_begin.exn_tab_entry = InstructionExnTabEntry( + exn_tab_begin, + exn_tab_end, + except_jump_target, + self.stack_index + 1, + False, + ) + setup_finally.append(exn_tab_begin) + + def create_reset(): + return [ + create_instruction("LOAD_FAST", argval=ctx_name), + create_instruction("LOAD_METHOD", argval="__exit__"), + create_instruction("LOAD_CONST", argval=None), + create_dup_top(), + create_dup_top(), + *create_call_method(3), + create_instruction("POP_TOP"), + ] + + if sys.version_info < (3, 9): + epilogue = [ + create_instruction("POP_BLOCK"), + create_instruction("BEGIN_FINALLY"), + except_jump_target, + *create_reset(), + create_instruction("END_FINALLY"), + ] + elif sys.version_info < (3, 11): + epilogue = [ + create_instruction("POP_BLOCK"), + *create_reset(), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + except_jump_target, + *create_reset(), + create_instruction("RERAISE"), + cleanup_complete_jump_target, + ] + else: + finally_exn_tab_end = create_instruction("RERAISE", arg=0) + finally_exn_tab_target = create_instruction("COPY", arg=3) + except_jump_target.exn_tab_entry = InstructionExnTabEntry( + except_jump_target, + finally_exn_tab_end, + finally_exn_tab_target, + self.stack_index + 2, + True, + ) + epilogue = [ + exn_tab_end, + *create_reset(), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + except_jump_target, # PUSH_EXC_INFO + *create_reset(), + finally_exn_tab_end, # RERAISE 0 + finally_exn_tab_target, # COPY 3 + create_instruction("POP_EXCEPT"), + create_instruction("RERAISE", arg=1), + cleanup_complete_jump_target, + ] + + cleanup[:] = epilogue + cleanup + return setup_finally + + def __call__(self, code_options, cleanup): + """ + Codegen based off of: + with ctx(args): + (rest) + """ + load_args = [] + if self.target_values: + load_args = [ + create_instruction("LOAD_CONST", argval=val) + for val in self.target_values + ] + if sys.version_info < (3, 9): + with_cleanup_start = create_instruction("WITH_CLEANUP_START") + begin_finally = create_instruction("BEGIN_FINALLY") + cleanup[:] = [ + create_instruction("POP_BLOCK"), + begin_finally, + with_cleanup_start, + create_instruction("WITH_CLEANUP_FINISH"), + create_instruction("END_FINALLY"), + ] + cleanup + + return [ + *load_args, + create_instruction("CALL_FUNCTION", arg=len(load_args)), + create_instruction("SETUP_WITH", target=with_cleanup_start), + create_instruction("POP_TOP"), + ], None + elif sys.version_info < (3, 11): + with_except_start = create_instruction("WITH_EXCEPT_START") + pop_top_after_with_except_start = create_instruction("POP_TOP") + + cleanup_complete_jump_target = create_instruction("NOP") + + cleanup[:] = [ + create_instruction("POP_BLOCK"), + create_instruction("LOAD_CONST", argval=None), + create_instruction("DUP_TOP"), + create_instruction("DUP_TOP"), + create_instruction("CALL_FUNCTION", arg=3), + create_instruction("POP_TOP"), + create_instruction("JUMP_FORWARD", 
target=cleanup_complete_jump_target), + with_except_start, + create_instruction( + "POP_JUMP_IF_TRUE", target=pop_top_after_with_except_start + ), + create_instruction("RERAISE"), + pop_top_after_with_except_start, + create_instruction("POP_TOP"), + create_instruction("POP_TOP"), + create_instruction("POP_EXCEPT"), + create_instruction("POP_TOP"), + cleanup_complete_jump_target, + ] + cleanup + + return [ + *load_args, + create_instruction("CALL_FUNCTION", arg=len(load_args)), + create_instruction("SETUP_WITH", target=with_except_start), + create_instruction("POP_TOP"), + ], None + else: + pop_top_after_with_except_start = create_instruction("POP_TOP") + cleanup_complete_jump_target = create_instruction("NOP") + + def create_load_none(): + return create_instruction("LOAD_CONST", argval=None) + + exn_tab_1_begin = create_instruction("POP_TOP") + exn_tab_1_end = create_instruction("NOP") + exn_tab_1_target = create_instruction("PUSH_EXC_INFO") + exn_tab_2_end = create_instruction("RERAISE", arg=2) + exn_tab_2_target = create_instruction("COPY", arg=3) + + exn_tab_1_begin.exn_tab_entry = InstructionExnTabEntry( + exn_tab_1_begin, + exn_tab_1_end, + exn_tab_1_target, + self.stack_index + 1, + True, + ) + exn_tab_1_target.exn_tab_entry = InstructionExnTabEntry( + exn_tab_1_target, + exn_tab_2_end, + exn_tab_2_target, + self.stack_index + 3, + True, + ) + pop_top_after_with_except_start.exn_tab_entry = InstructionExnTabEntry( + pop_top_after_with_except_start, + pop_top_after_with_except_start, + exn_tab_2_target, + self.stack_index + 3, + True, + ) + + cleanup[:] = [ + exn_tab_1_end, + create_load_none(), + create_load_none(), + create_load_none(), + *create_call_function(2, False), + create_instruction("POP_TOP"), + create_instruction("JUMP_FORWARD", target=cleanup_complete_jump_target), + exn_tab_1_target, # PUSH_EXC_INFO + create_instruction("WITH_EXCEPT_START"), + create_instruction( + "POP_JUMP_FORWARD_IF_TRUE", + target=pop_top_after_with_except_start, + ), + exn_tab_2_end, # RERAISE 2 + exn_tab_2_target, # COPY 3 + create_instruction("POP_EXCEPT"), + create_instruction("RERAISE", arg=1), + pop_top_after_with_except_start, + create_instruction("POP_EXCEPT"), + create_instruction("POP_TOP"), + create_instruction("POP_TOP"), + cleanup_complete_jump_target, + ] + cleanup + + return [ + *load_args, + *create_call_function(len(load_args), True), + create_instruction("BEFORE_WITH"), + exn_tab_1_begin, # POP_TOP + ], exn_tab_1_target + + +@dataclasses.dataclass +class ResumeFunctionMetadata: + code: types.CodeType + instructions: List[Instruction] = dataclasses.field(default_factory=list) + # Python 3.11+ fields + # NOTE: Python 3.11 removed blocks, but for our purposes, a "block" consists + # of instructions of all exception table entries that have the same target. + + # map from PUSH_EXC_INFO's in the prefix to original block target offset + prefix_block_target_offset_remap: List[int] = dataclasses.field( + default_factory=list + ) + # map from new block target offsets to original block target offsets + block_target_offset_remap: Optional[Dict[int, int]] = None + + +def _filter_iter(l1, l2, cond): + """ + Two-pointer conditional filter. + e.g. 
_filter_iter(insts, sorted_offsets, lambda i, o: i.offset == o) + returns the instructions with offsets in sorted_offsets + """ + it = iter(l2) + res = [] + try: + cur = next(it) + for val in l1: + if cond(val, cur): + res.append(val) + cur = next(it) + except StopIteration: + pass + return res + + +class ContinueExecutionCache: + cache = ExactWeakKeyDictionary() + generated_code_metadata = ExactWeakKeyDictionary() + + @classmethod + def lookup(cls, code, lineno, *key): + if code not in cls.cache: + cls.cache[code] = dict() + key = tuple(key) + if key not in cls.cache[code]: + cls.cache[code][key] = cls.generate(code, lineno, *key) + return cls.cache[code][key] + + @classmethod + def generate( + cls, + code, + lineno, + offset: int, + setup_fn_target_offsets: Tuple[int], # only used in Python 3.11+ + nstack: int, + argnames: Tuple[str], + setup_fns: Tuple[ReenterWith], + null_idxes: Tuple[int], + ) -> types.CodeType: + assert offset is not None + assert not ( + code.co_flags + & (CO_GENERATOR | CO_COROUTINE | CO_ITERABLE_COROUTINE | CO_ASYNC_GENERATOR) + ) + assert code.co_flags & CO_OPTIMIZED + if code in ContinueExecutionCache.generated_code_metadata: + return cls.generate_based_on_original_code_object( + code, + lineno, + offset, + setup_fn_target_offsets, + nstack, + argnames, + setup_fns, + null_idxes, + ) + + is_py311_plus = sys.version_info >= (3, 11) + meta = ResumeFunctionMetadata(code) + + def update(instructions: List[Instruction], code_options: Dict[str, Any]): + meta.instructions = copy.deepcopy(instructions) + + args = [f"___stack{i}" for i in range(nstack)] + args.extend(v for v in argnames if v not in args) + freevars = tuple(code_options["co_cellvars"] or []) + tuple( + code_options["co_freevars"] or [] + ) + code_options[ + "co_name" + ] = f"torch_dynamo_resume_in_{code_options['co_name']}_at_{lineno}" + if is_py311_plus: + qualified_path = code_options["co_qualname"].rsplit(".", maxsplit=1) + if len(qualified_path) == 1: + code_options["co_qualname"] = code_options["co_name"] + else: + assert len(qualified_path) == 2 + module_name, co_name = qualified_path + code_options[ + "co_qualname" + ] = f"{module_name}.torch_dynamo_resume_in_{co_name}_at_{lineno}" + code_options["co_firstlineno"] = lineno + code_options["co_cellvars"] = tuple() + code_options["co_freevars"] = freevars + code_options["co_argcount"] = len(args) + code_options["co_posonlyargcount"] = 0 + code_options["co_kwonlyargcount"] = 0 + code_options["co_varnames"] = tuple( + args + [v for v in code_options["co_varnames"] if v not in args] + ) + code_options["co_flags"] = code_options["co_flags"] & ~( + CO_VARARGS | CO_VARKEYWORDS + ) + target = next(i for i in instructions if i.offset == offset) + + prefix = [] + if is_py311_plus: + if freevars: + prefix.append( + create_instruction("COPY_FREE_VARS", arg=len(freevars)) + ) + prefix.append(create_instruction("RESUME", arg=0)) + + cleanup: List[Instruction] = [] + hooks = {fn.stack_index: fn for fn in setup_fns} + hook_target_offsets = { + fn.stack_index: setup_fn_target_offsets[i] + for i, fn in enumerate(setup_fns) + } + offset_to_inst = {inst.offset: inst for inst in instructions} + # map old hook targets to new targets generated by the hook + old_hook_target_remap = {} + null_idxes_i = 0 + for i in range(nstack): + while ( + null_idxes_i < len(null_idxes) + and null_idxes[null_idxes_i] == i + null_idxes_i + ): + prefix.append(create_instruction("PUSH_NULL")) + null_idxes_i += 1 + prefix.append(create_instruction("LOAD_FAST", argval=f"___stack{i}")) + if i in 
hooks: + hook = hooks.pop(i) + hook_insts, exn_target = hook(code_options, cleanup) + prefix.extend(hook_insts) + if is_py311_plus: + hook_target_offset = hook_target_offsets.pop(i) + old_hook_target = offset_to_inst[hook_target_offset] + meta.prefix_block_target_offset_remap.append(hook_target_offset) + old_hook_target_remap[old_hook_target] = exn_target + if is_py311_plus: + # reverse the mapping since targets of later/nested contexts are inserted + # into the mapping later, but show up earlier in the prefix. + meta.prefix_block_target_offset_remap = list( + reversed(meta.prefix_block_target_offset_remap) + ) + + assert not hooks + + prefix.append(create_jump_absolute(target)) + + # because the line number table monotonically increases from co_firstlineno + # remove starts_line for any instructions before the graph break instruction + # this will ensure the instructions after the break have the correct line numbers + for inst in instructions: + if inst.offset == target.offset: + break + inst.starts_line = None + if sys.version_info >= (3, 11): + inst.positions = None + + if cleanup: + prefix.extend(cleanup) + prefix.extend(cls.unreachable_codes(code_options)) + + # remap original instructions' exception table entries + if old_hook_target_remap: + assert is_py311_plus + for inst in instructions: + if ( + inst.exn_tab_entry + and inst.exn_tab_entry.target in old_hook_target_remap + ): + inst.exn_tab_entry.target = old_hook_target_remap[ + inst.exn_tab_entry.target + ] + + # TODO(jansel): add dead code elimination here + instructions[:] = prefix + instructions + + new_code = transform_code_object(code, update) + ContinueExecutionCache.generated_code_metadata[new_code] = meta + return new_code + + @staticmethod + def unreachable_codes(code_options) -> List[Instruction]: + """Codegen a `raise None` to make analysis work for unreachable code""" + return [ + create_instruction("LOAD_CONST", argval=None), + create_instruction("RAISE_VARARGS", arg=1), + ] + + @classmethod + def generate_based_on_original_code_object( + cls, code, lineno, offset: int, setup_fn_target_offsets: Tuple[int, ...], *args + ): + """ + This handles the case of generating a resume into code generated + to resume something else. We want to always generate starting + from the original code object so that if control flow paths + converge we only generated 1 resume function (rather than 2^n + resume functions). + """ + + meta: ResumeFunctionMetadata = ContinueExecutionCache.generated_code_metadata[ + code + ] + new_offset = None + + def find_new_offset( + instructions: List[Instruction], code_options: Dict[str, Any] + ): + nonlocal new_offset + (target,) = (i for i in instructions if i.offset == offset) + # match the functions starting at the last instruction as we have added a prefix + (new_target,) = ( + i2 + for i1, i2 in zip(reversed(instructions), reversed(meta.instructions)) + if i1 is target + ) + assert target.opcode == new_target.opcode + new_offset = new_target.offset + + transform_code_object(code, find_new_offset) + + if sys.version_info >= (3, 11): + # setup_fn_target_offsets currently contains the target offset of + # each setup_fn, based on `code`. When we codegen the resume function + # based on the original code object, `meta.code`, the offsets in + # setup_fn_target_offsets must be based on `meta.code` instead. 
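+            # The offset remap below is computed once per original code object and
+            # cached on the ResumeFunctionMetadata, so later resumes built from the
+            # same code reuse it.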
+ if not meta.block_target_offset_remap: + block_target_offset_remap = meta.block_target_offset_remap = {} + + def remap_block_offsets( + instructions: List[Instruction], code_options: Dict[str, Any] + ): + # NOTE: each prefix block generates exactly one PUSH_EXC_INFO, + # so we can tell which block a prefix PUSH_EXC_INFO belongs to, + # by counting. Then we can use meta.prefix_block-target_offset_remap + # to determine where in the original code the PUSH_EXC_INFO offset + # replaced. + prefix_blocks: List[Instruction] = [] + for inst in instructions: + if len(prefix_blocks) == len( + meta.prefix_block_target_offset_remap + ): + break + if inst.opname == "PUSH_EXC_INFO": + prefix_blocks.append(inst) + + # offsets into prefix + for inst, o in zip( + prefix_blocks, meta.prefix_block_target_offset_remap + ): + block_target_offset_remap[cast(int, inst.offset)] = o + + # old bytecode targets are after the prefix PUSH_EXC_INFO's + old_start_offset = ( + cast(int, prefix_blocks[-1].offset) if prefix_blocks else -1 + ) + # offsets into old bytecode + old_inst_offsets = sorted( + n for n in setup_fn_target_offsets if n > old_start_offset + ) + targets = _filter_iter( + instructions, old_inst_offsets, lambda inst, o: inst.offset == o + ) + new_targets = _filter_iter( + zip(reversed(instructions), reversed(meta.instructions)), + targets, + lambda v1, v2: v1[0] is v2, + ) + for new, old in zip(new_targets, targets): + block_target_offset_remap[old.offset] = new[1].offset + + transform_code_object(code, remap_block_offsets) + + # if offset is not in setup_fn_target_offsets, it is an error + setup_fn_target_offsets = tuple( + meta.block_target_offset_remap[n] for n in setup_fn_target_offsets + ) + return ContinueExecutionCache.lookup( + meta.code, lineno, new_offset, setup_fn_target_offsets, *args + ) + + +""" +# partially finished support for with statements + +def convert_locals_to_cells( + instructions: List[Instruction], + code_options: Dict[str, Any]): + + code_options["co_cellvars"] = tuple( + var + for var in code_options["co_varnames"] + if var not in code_options["co_freevars"] + and not var.startswith("___stack") + ) + cell_and_free = code_options["co_cellvars"] + code_options["co_freevars"] + for inst in instructions: + if str(inst.argval).startswith("___stack"): + continue + elif inst.opname == "LOAD_FAST": + inst.opname = "LOAD_DEREF" + elif inst.opname == "STORE_FAST": + inst.opname = "STORE_DEREF" + elif inst.opname == "DELETE_FAST": + inst.opname = "DELETE_DEREF" + else: + continue + inst.opcode = dis.opmap[inst.opname] + assert inst.argval in cell_and_free, inst.argval + inst.arg = cell_and_free.index(inst.argval) + +def patch_setup_with( + instructions: List[Instruction], + code_options: Dict[str, Any] +): + nonlocal need_skip + need_skip = True + target_index = next( + idx for idx, i in enumerate(instructions) if i.offset == offset + ) + assert instructions[target_index].opname == "SETUP_WITH" + convert_locals_to_cells(instructions, code_options) + + stack_depth_before = nstack + stack_effect(instructions[target_index].opcode, + instructions[target_index].arg) + + inside_with = [] + inside_with_resume_at = None + stack_depth = stack_depth_before + idx = target_index + 1 + for idx in range(idx, len(instructions)): + inst = instructions[idx] + if inst.opname == "BEGIN_FINALLY": + inside_with_resume_at = inst + break + elif inst.target is not None: + unimplemented("jump from with not supported") + elif inst.opname in ("BEGIN_FINALLY", "WITH_CLEANUP_START", "WITH_CLEANUP_FINISH", 
"END_FINALLY", + "POP_FINALLY", "POP_EXCEPT", + "POP_BLOCK", "END_ASYNC_FOR"): + unimplemented("block ops not supported") + inside_with.append(inst) + stack_depth += stack_effect(inst.opcode, inst.arg) + assert inside_with_resume_at + + instructions = [ + create_instruction("LOAD_FAST", f"___stack{i}") for i in range(nstack) + ] + [ + create_instruction("SETUP_WITH", target=instructions[target_index].target) + ... call the function ... + unpack_tuple + ] + [ + create_instruction("JUMP_ABSOLUTE", target=inside_with_resume_at) + ] +""" diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/side_effects.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/side_effects.py new file mode 100644 index 0000000000000000000000000000000000000000..80bd9e0b0c1a5f3e4e599b12cfc5a71a93913ec3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/side_effects.py @@ -0,0 +1,542 @@ +import inspect +from typing import Any, Dict, List, Optional, Union + +import torch.nn + +from . import utils, variables +from .bytecode_transformation import ( + create_call_function, + create_call_method, + create_instruction, +) +from .codegen import PyCodegen +from .exc import unimplemented +from .source import LocalSource, Source +from .utils import nn_module_new, object_new +from .variables.base import ( + is_side_effect_safe, + MutableLocalBase, + MutableLocalSource, + VariableTracker, +) + + +class MutableSideEffects(MutableLocalBase): + """ + VariableTracker.mutable_local marker to indicate a list passed as + an input that if we mutate we need to re-apply those mutations after + the graph runs. + """ + + def __init__(self, source: Source, is_modified: bool = False): + super().__init__(MutableLocalSource.Existing) + self.source = source + self.is_modified = is_modified + + +class AttributeMutation(MutableLocalBase): + """ + VariableTracker.mutable_local marker to track changes to attributes + """ + + def __init__(self, typ: MutableLocalSource, source: Optional[Source]): + super().__init__(typ) + self.source = source + + +class AttributeMutationExisting(AttributeMutation): + def __init__(self, source: Source): + super().__init__(MutableLocalSource.Existing, source) + self.source = source + + +class AttributeMutationNew(AttributeMutation): + def __init__(self, source: Optional[Source], cls_source: Optional[Source]): + super().__init__(MutableLocalSource.Local, source) + self.cls_source = cls_source + + +class SideEffects: + """ + Track side effects (list mutation, setattr, etc) that need to be + applied after an FX graph is run. 
+ """ + + id_to_variable: Dict[int, VariableTracker] + store_attr_mutations: Dict[MutableLocalBase, Dict[str, VariableTracker]] + keepalive: List[Any] + + def __init__( + self, + id_to_variable=None, + store_attr_mutations=None, + keepalive=None, + save_for_backward=None, + tensor_hooks=None, + ): + super().__init__() + self.id_to_variable = id_to_variable or {} + self.store_attr_mutations = store_attr_mutations or {} + self.keepalive = keepalive or [] + self.save_for_backward = save_for_backward or [] + self.tensor_hooks = tensor_hooks or {} + + def __eq__(self, other: object) -> bool: + assert isinstance(other, SideEffects) + # NB: do NOT test keepalive + return ( + self.id_to_variable == other.id_to_variable + and self.store_attr_mutations == other.store_attr_mutations + and self.save_for_backward == other.save_for_backward + and self.tensor_hooks == other.tensor_hooks + ) + + def diff(self, other: "SideEffects") -> Optional[str]: + if self.id_to_variable != other.id_to_variable: + sk_itv = self.id_to_variable.keys() + ok_itv = other.id_to_variable.keys() + if sk_itv != ok_itv: + return f"id_to_variable keys: {sk_itv} != {ok_itv}" + # Feel free to augment this with more fancy diffing logic + # if needed for debugging + return "id_to_variable: unknown diff" + elif self.store_attr_mutations != other.store_attr_mutations: + sk_sam = self.store_attr_mutations.keys() + ok_sam = other.store_attr_mutations.keys() + if sk_sam != ok_sam: + return f"store_attr_mutations keys: {sk_sam} != {ok_sam}" + return "store_attr_mutations: unknown diff" + elif self.save_for_backward != other.save_for_backward: + return "save_for_backward" + elif self.tensor_hooks != other.tensor_hooks: + return "tensor_hooks" + else: + return None + + def clone(self): + """Create a shallow copy""" + return self.__class__( + id_to_variable=dict(self.id_to_variable), + store_attr_mutations={ + k: dict(v) for k, v in self.store_attr_mutations.items() + }, + keepalive=list(self.keepalive), + save_for_backward=self.save_for_backward, + tensor_hooks=self.tensor_hooks, + ) + + def apply(self, fn, cache=None, skip_fn=lambda _: False): + if cache is None: + cache = dict() + + self.id_to_variable = { + k: VariableTracker.apply(fn, v, cache, skip_fn) + for k, v in self.id_to_variable.items() + } + self.store_attr_mutations = { + k: VariableTracker.apply(fn, v, cache, skip_fn) + for k, v in self.store_attr_mutations.items() + } + self.save_for_backward = VariableTracker.apply( + fn, self.save_for_backward, cache, skip_fn + ) + self.tensor_hooks = VariableTracker.apply(fn, self.tensor_hooks, cache, skip_fn) + + def __contains__(self, item): + return id(item) in self.id_to_variable + + def __getitem__(self, item): + return self.id_to_variable[id(item)] + + def check_allowed_side_effect(self, item): + from torch._dynamo.variables.misc import AutogradFunctionContextVariable + + # People do things like self.dim = dim inside autograd.Function. + # These are benign. 
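+        # A minimal sketch of the benign pattern mentioned above (illustrative;
+        # the class and attribute names are arbitrary):
+        #
+        #     class Scale(torch.autograd.Function):
+        #         @staticmethod
+        #         def forward(ctx, x, dim):
+        #             ctx.dim = dim   # attribute write on the autograd ctx
+        #             return x.sum(dim)
+        #
+        # Attribute writes on the ctx object are permitted here even when the
+        # variable does not belong to the current (sub)scope.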
+ if isinstance(item, AutogradFunctionContextVariable): + return True + if not is_side_effect_safe(item.mutable_local): + unimplemented( + "HigherOrderOperator: Mutating a variable not in the current scope (SideEffects)" + ) + + def store_attr(self, item: VariableTracker, name: str, value: VariableTracker): + assert self.is_attribute_mutation(item) + self.check_allowed_side_effect(item) + if item.mutable_local not in self.store_attr_mutations: + self.store_attr_mutations[item.mutable_local] = {} + self.store_attr_mutations[item.mutable_local][name] = value + + def load_attr(self, item, name, deleted_ok=False): + assert self.is_attribute_mutation(item) + result = self.store_attr_mutations[item.mutable_local][name] + if not deleted_ok and isinstance(result, variables.DeletedVariable): + unimplemented("read deleted attribute") + return result + + def store_cell(self, cellvar, value): + assert isinstance(cellvar, variables.NewCellVariable) + assert isinstance(value, variables.VariableTracker) + self.store_attr(cellvar, "cell_contents", value) + + def load_cell(self, cellvar): + assert isinstance(cellvar, variables.NewCellVariable) + return self.load_attr(cellvar, "cell_contents") + + def load_global(self, gvar: VariableTracker, name: str): + assert isinstance(gvar, variables.VariableTracker) + return self.load_attr(gvar, name) + + def store_global(self, gvar: VariableTracker, name: str, value: VariableTracker): + assert isinstance(gvar, variables.VariableTracker) + assert isinstance(value, variables.VariableTracker) + self.store_attr(gvar, name, value) + + @staticmethod + def cls_supports_mutation_side_effects(cls): + return inspect.getattr_static(cls, "__setattr__", None) in ( + object.__setattr__, + torch.nn.Module.__setattr__, + ) + + def is_attribute_mutation(self, item): + return isinstance(item.mutable_local, AttributeMutation) + + def has_pending_mutation(self, item): + return self.is_attribute_mutation(item) and bool( + self.store_attr_mutations.get(item.mutable_local) + ) + + def is_modified(self, item): + if isinstance(item.mutable_local, AttributeMutationNew): + return True + if self.is_attribute_mutation(item): + return item.mutable_local in self.store_attr_mutations + return item.mutable_local.is_modified + + def _track_obj( + self, + item: Any, + variable: VariableTracker, + mutable_cls=MutableSideEffects, + ): + """Start tracking a new variable for mutation""" + assert variable.source is not None + variable.mutable_local = mutable_cls(variable.source) + self.id_to_variable[id(item)] = variable + self.keepalive.append(item) + return variable + + track_mutable = _track_obj + + def track_object_existing( + self, + item: Any, + variable: VariableTracker, + ): + return self._track_obj(item, variable, mutable_cls=AttributeMutationExisting) + + def track_object_new( + self, + cls_source: Source, + user_cls: Any, + variable_cls: Any, + options, + ): + if user_cls is torch.autograd.function.FunctionCtx: + obj = torch.autograd.Function() + elif issubclass(user_cls, torch.nn.Module): + obj = nn_module_new(user_cls) + else: + obj = object_new(user_cls) + variable = variable_cls( + obj, + mutable_local=AttributeMutationNew(None, cls_source), + **options, + ) + self.id_to_variable[id(obj)] = variable + self.keepalive.append(obj) + return variable + + def track_cell_new( + self, + ): + obj = object() + variable = variables.NewCellVariable( + mutable_local=AttributeMutationNew(None, None), + ) + self.id_to_variable[id(obj)] = variable + self.keepalive.append(obj) + return variable + + def 
track_cell_existing(self, source: Source, item: Any): + variable = variables.NewCellVariable( + mutable_local=AttributeMutationExisting(source), + ) + self.id_to_variable[id(item)] = variable + self.keepalive.append(item) + return variable + + def track_global_existing(self, source: Source, item: Any): + variable = variables.NewGlobalVariable( + mutable_local=AttributeMutationExisting(source), + ) + self.id_to_variable[id(item)] = variable + self.keepalive.append(item) + return variable + + def track_save_for_backward(self, ctx, args): + assert isinstance(ctx, variables.AutogradFunctionContextVariable) + self.save_for_backward.append((ctx, args)) + + def track_tensor_variables_from_runahead_side_effects(self, other): + # In higher order ops we want to keep track of tensors seen in the + # speculate_subgraph so that we don't lift them again as a new input in + # other speculate_subgraph or in the root tracer. + for other_item in other.keepalive: + other_id = id(other_item) + other_variable = other.id_to_variable[other_id] + if other_id not in self.id_to_variable and isinstance( + other_variable, variables.TensorVariable + ): + self.track_object_existing(other_item, other_variable) + + def prune_dead_object_new(self, tx): + live_new_objects = set() + skip_obj = None + + def visit(var: VariableTracker): + if ( + isinstance(var.mutable_local, AttributeMutationNew) + and var.mutable_local is not skip_obj + ): + live_new_objects.add(var.mutable_local) + return var + + def is_live(var: Union[MutableLocalBase, VariableTracker]): + if isinstance(var, AttributeMutationNew): + return var in live_new_objects + if isinstance(var, VariableTracker): + return is_live(var.mutable_local) + return True + + VariableTracker.apply(visit, (tx.stack, tx.symbolic_locals)) + for var in self.id_to_variable.values(): + if not isinstance(var.mutable_local, AttributeMutationNew): + VariableTracker.apply(visit, var) + + for skip_obj, setattrs in self.store_attr_mutations.items(): + VariableTracker.apply(visit, setattrs) + + self.id_to_variable = { + k: v for k, v in self.id_to_variable.items() if is_live(v) + } + self.store_attr_mutations = { + k: v for k, v in self.store_attr_mutations.items() if is_live(k) + } + + def mutation(self, var): + self.check_allowed_side_effect(var) + if isinstance(var.mutable_local, MutableSideEffects): + var.mutable_local = MutableSideEffects(var.mutable_local.source, True) + + def _get_modified_vars(self): + return [var for var in self.id_to_variable.values() if self.is_modified(var)] + + def codegen_save_tempvars(self, cg: PyCodegen): + for var in self._get_modified_vars(): + if isinstance( + var.mutable_local, (AttributeMutationExisting, AttributeMutationNew) + ) and isinstance(var, variables.NewCellVariable): + cg.load_import_from(utils.__name__, "make_cell") + cg.extend_output(create_call_function(0, True)) + cg.add_cache(var) + if isinstance(var.mutable_local, AttributeMutationNew): + var.mutable_local.source = LocalSource(cg.tempvars[var]) # type: ignore[attr-defined] + elif isinstance(var.mutable_local, AttributeMutationNew): + if isinstance(var, variables.AutogradFunctionContextVariable): + unimplemented("AutogradFunctionContextVariable escaped") + if "__call_nn_module_init" in self.store_attr_mutations.get( + var.mutable_local, {} + ): + assert isinstance(var, variables.UnspecializedNNModuleVariable) + cg.load_import_from(utils.__name__, "nn_module_new") + else: + cg.load_import_from(utils.__name__, "object_new") + cg(var.mutable_local.cls_source) + 
cg.extend_output(create_call_function(1, True)) + cg.add_cache(var) + var.mutable_local.source = LocalSource(cg.tempvars[var]) + elif var in cg.tempvars: + assert cg.tempvars.get(var) is None + # subsequent usage should point to the original variable + cg(var.mutable_local.source) + cg.add_cache(var) + + for ctx, args in self.save_for_backward: + cg(ctx.source) + cg.extend_output( + [create_instruction("LOAD_METHOD", argval="save_for_backward")] + ) + for arg in args: + cg(arg) + cg.extend_output( + [ + *create_call_method(len(args)), + create_instruction("POP_TOP"), + ] + ) + + def register_hook(self, tensor, hook, handle, name): + assert isinstance(tensor, variables.TensorVariable) + assert isinstance(hook, variables.VariableTracker) + assert ( + isinstance(handle, variables.RemovableHandleVariable) + and handle.mutable_local + ) + assert hasattr(torch.Tensor, name) + idx = len(self.tensor_hooks.keys()) + # duplicate index possible because of self.remove_hook() + while idx in self.tensor_hooks: + idx += 1 + self.tensor_hooks[idx] = (tensor, hook, handle, name) + assert not handle.idx + handle.idx = idx + + def remove_hook(self, idx): + del self.tensor_hooks[idx] + + def codegen_hooks(self, cg): + for ( + tensor, + hook, + handle, + name, + ) in self.tensor_hooks.values(): + # Note: [On tensor.register_hook] + # + # register_hook on a tensor, AKA backward hooks, have slightly nuanced differences in how they are implemented + # when it comes to hooks on objects with sources (inputs, params) vs objects without sources (intermediaries). + # + # For tensors with a source, we bypass direct inclusion of register_hook calls in the graph. + # Instead, these are tracked and stashed as a global variable, enabling their association with tensors in + # the residuals. During dynamo's frame creation, these hooks are invoked seamlessly on known reconstructible/fetch-able + # tensors. Because a source indicates knowledge of this object outside the torch compile region, and + # because we are running residuals firmly before .backward() can be run, it is sound to invoke + # `register_hook` on a known tensor. + # + # For tensors without a source, we support a limited subset of hooks. Global functions only, and + # compiled_autograd must be enabled or we will graph break. + # + # Handling the Handle: When a user retains the register_hook result in a handle, we intercept the + # STORE_FAST operation to record the user-designated local variable name. This ensures the reconstructed + # bytecode retains this name. If no handle is defined, we simply pop the generated value to keep the + # stack intact. + # + # Dynamo Tensor Hooks Workflow: + # - Functions passed to register_hook are lifted globally. + # - For tensors with sources: + # - In the "side_effects" phase of codegen, we iterate over tensors with hooks to: + # - Generate the tensor. + # - Issue a register_hook call on the tensor, linking to the globally stored function. + # - Incorporate a handle if one was established in the eager phase. + # - For tensors without sources: + # - We don't generate any instructions for registering a hook. + # - Handles from intermediary hooks are NYI. + # - We produce a call function that utilizes the trace_wrapped higher order op, closing over it. + # - We then manually insert the call function above into the graph. + # - The handle's exact user-specified name, "user_code_variable_name", is discerned and associated during STORE_FAST. 
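+            # A minimal sketch of the "tensor with a source" case handled below
+            # (illustrative; the backend choice and hook body are arbitrary):
+            #
+            #     @torch.compile(backend="eager")
+            #     def fn(x):
+            #         handle = x.register_hook(lambda grad: grad * 2)
+            #         return x.sin()
+            #
+            # `x` is a graph input (it has a source), so the register_hook call is not
+            # traced into the FX graph; it is re-emitted in the residual bytecode
+            # generated below, and `handle` is reconstructed via cg.add_cache(handle).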
+ assert tensor.source, "Hooks on non input tensors NYI - should not get here" + cg(tensor) + cg.extend_output([cg.create_load_attr(name)]) + cg(hook) + cg.extend_output(create_call_function(1, True)) + + # Adding the handle to the cache means RemovableHandleVariable().reconstruct() will + # be associated with the return value of register_hook(). This consumes the top of stack. + cg.add_cache(handle) + + def codegen_update_mutated(self, cg: PyCodegen): + suffixes = [] + for var in self._get_modified_vars(): + if isinstance(var, variables.ListVariable): + # old[:] = new + cg(var, allow_cache=False) + cg(var.mutable_local.source) # type: ignore[attr-defined] + cg.extend_output( + [ + cg.create_load_const(None), + cg.create_load_const(None), + create_instruction("BUILD_SLICE", arg=2), + ] + ) + suffixes.append([create_instruction("STORE_SUBSCR")]) + elif isinstance(var, variables.ConstDictVariable): + cg.tx.output.update_co_names("clear") + cg.tx.output.update_co_names("update") + + cg(var.mutable_local.source) # type: ignore[attr-defined] + cg.extend_output([create_instruction("LOAD_METHOD", argval="update")]) + cg(var, allow_cache=False) + + cg(var.mutable_local.source) # type: ignore[attr-defined] + cg.extend_output([create_instruction("LOAD_METHOD", argval="clear")]) + + suffixes.append( + [ + *create_call_method(0), # clear + create_instruction("POP_TOP"), + *create_call_method(1), # update + create_instruction("POP_TOP"), + ] + ) + elif self.is_attribute_mutation(var): + for name, value in self.store_attr_mutations.get( + var.mutable_local, {} + ).items(): + if isinstance(var, variables.NewGlobalVariable): + cg.tx.output.update_co_names(name) + cg(value) + suffixes.append( + [create_instruction("STORE_GLOBAL", argval=name)] + ) + elif name == "__call_nn_module_init": + pass # handled in codegen_save_tempvars + elif isinstance(value, variables.DeletedVariable): + if isinstance( + var.mutable_local, AttributeMutationExisting + ) and hasattr(getattr(var, "value", None), name): + cg.tx.output.update_co_names(name) + cg(var.mutable_local.source) + suffixes.append( + [create_instruction("DELETE_ATTR", argval=name)] + ) + else: + cg.tx.output.update_co_names(name) + cg(value) + cg(var.mutable_local.source) + suffixes.append([create_instruction("STORE_ATTR", argval=name)]) + elif isinstance(var, variables.TupleIteratorVariable): + for _ in range(var.index): + cg.load_import_from(utils.__name__, "iter_next") + cg(var.mutable_local.source) # type: ignore[attr-defined] + cg.extend_output(create_call_function(1, True)) + cg.append_output(create_instruction("POP_TOP")) + else: + raise AssertionError(type(var)) + + # do all the actual mutations at the very end to handle dependencies + for suffix in reversed(suffixes): + cg.extend_output(suffix) + + def is_empty(self): + return not ( + any(map(self.is_modified, self.id_to_variable.values())) + or self.tensor_hooks + or self.save_for_backward + or self.tensor_hooks + ) + + def clear(self): + self.keepalive.clear() + self.id_to_variable.clear() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..6701115cae873402cd8fac9aafe888f49a621be0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/symbolic_convert.py @@ -0,0 +1,2603 @@ +import collections +import contextlib +import copy +import dataclasses +import dis +import functools +import importlib +import 
inspect +import itertools +import linecache +import logging +import operator +import sys +import textwrap +import threading +import traceback +import types +import typing +import weakref +from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple, Type +from unittest.mock import patch + +import torch +import torch._logging +from torch._guards import Checkpointable, tracing, TracingContext + +from . import config, exc, logging as torchdynamo_logging, trace_rules, variables +from .bytecode_analysis import ( + get_indexof, + JUMP_OPNAMES, + livevars_analysis, + propagate_line_nums, +) +from .bytecode_transformation import ( + cleaned_instructions, + create_call_function, + create_instruction, + create_jump_absolute, + Instruction, + is_generator, + unique_id, +) +from .code_context import code_context +from .codegen import PyCodegen +from .current_scope_id import current_scope_id +from .exc import ArgsMismatchError, BackendCompilerFailed, unimplemented, Unsupported +from .funcname_cache import get_funcname +from .guards import GuardBuilder, install_guard +from .output_graph import GraphCompileReason, OutputGraph, OutputGraphState +from .replay_record import DummyModule, ExecutionRecorder +from .resume_execution import ContinueExecutionCache, ReenterWith +from .source import ( + AttrSource, + GetItemSource, + GlobalSource, + GlobalWeakRefSource, + LocalSource, + Source, +) +from .trace_rules import is_builtin_constant, is_forbidden +from .utils import ( + counters, + get_fake_value, + get_instruction_source_311, + graph_break_dup_warning_checker, + istype, + LazyString, + proxy_args_kwargs, +) +from .variables.base import ( + _is_top_level_scope, + is_side_effect_safe, + MutableLocal, + typestr, + VariableTracker, +) +from .variables.builder import VariableBuilder, wrap_fx_proxy +from .variables.builtin import BuiltinVariable +from .variables.constant import ConstantVariable +from .variables.ctx_manager import ( + ContextWrappingVariable, + GenericContextWrappingVariable, + WithExitFunctionVariable, +) +from .variables.dicts import ConstDictVariable, SetVariable +from .variables.functions import ( + BaseUserFunctionVariable, + NestedUserFunctionVariable, + SkipFunctionVariable, + UserFunctionVariable, + UserMethodVariable, +) +from .variables.lists import ( + BaseListVariable, + ListIteratorVariable, + ListVariable, + SliceVariable, + TupleVariable, +) +from .variables.misc import ( + ClosureVariable, + GetAttrVariable, + InlinedClosureVariable, + NullVariable, + PythonModuleVariable, + UnknownVariable, +) +from .variables.nn_module import NNModuleVariable +from .variables.tensor import ( + supported_const_comparison_ops, + supported_tensor_comparison_ops, + SymNodeVariable, + TensorVariable, +) +from .variables.user_defined import ( + RemovableHandleVariable, + UserDefinedClassVariable, + UserDefinedObjectVariable, + UserDefinedVariable, +) + +log = logging.getLogger(__name__) +graph_break_log = torch._logging.getArtifactLogger(__name__, "graph_breaks") +trace_call_log = torch._logging.getArtifactLogger(__name__, "trace_call") +trace_source_log = torch._logging.getArtifactLogger(__name__, "trace_source") +tls = threading.local() + + +@dataclasses.dataclass +class SpeculationEntry: + filename: str + lineno: int + instruction_pointer: int + failed: bool = False + reason: Optional[GraphCompileReason] = None + + def fail_and_restart_analysis(self): + """ + Start tracing of the current frame over again, and don't take this branch. 
+ """ + self.failed = True + raise exc.SpeculationRestartAnalysis() + + +@dataclasses.dataclass +class SpeculationLog: + """ + SpeculationLog replaces the prior copy_graphstate/restore_graphstate + checkpointing. Rather than saving/restoring state, we restart the + dynamo conversion process over from the beginning -- but when we + hit the start of the speculation that failed, we instead generate + a graph break. + """ + + entries: List[SpeculationEntry] = dataclasses.field(default_factory=list) + index: int = 0 + + def restart(self): + self.index = 0 + + def clear(self): + self.entries.clear() + self.index = 0 + + def next(self, filename: str, lineno: int, instruction_pointer) -> SpeculationEntry: + """ + Lookup or create a SpeculationEntry() that is shared across + RestartAnalysis calls. Args are used only for debug checks. + """ + if len(self.entries) == self.index: + self.entries.append(SpeculationEntry(filename, lineno, instruction_pointer)) + entry = self.entries[self.index] + self.index += 1 + assert ( + entry.instruction_pointer == instruction_pointer + and entry.filename == filename + and entry.lineno == lineno + ), textwrap.dedent( + f""" + SpecuationLog diverged at {self.index} of {len(self.entries)}: + - Run1: {entry.filename}:{entry.lineno} (ip={entry.instruction_pointer}) + - Run2: {filename}:{lineno} (ip={instruction_pointer}) + Please submit a bug report. + """ + ) + return entry + + +@functools.lru_cache(None) +def _step_logger(): + return torchdynamo_logging.get_step_logger(log) + + +@dataclasses.dataclass +class BlockStackEntry: + target: Instruction + stack_index: Optional[int] = None + with_context: Optional[ContextWrappingVariable] = None + + def can_restore(self): + return self.with_context is not None + + def resume_fn(self): + assert self.stack_index is not None + if self.with_context and self.with_context.target_values: + return ReenterWith(self.stack_index, tuple(self.with_context.target_values)) + else: + return ReenterWith(self.stack_index) + + def exit(self, tx): + assert self.with_context is not None + return self.with_context.exit(tx) + + +class InstructionTranslatorGraphState(NamedTuple): + output: OutputGraphState + symbolic_locals: Dict[str, VariableTracker] + stack: List[VariableTracker] + block_stack: List[BlockStackEntry] + instruction_pointer: Optional[int] + current_instruction: Instruction + next_instruction: Optional[Instruction] + lineno: int + + def diff(self, other: "InstructionTranslatorGraphState") -> Optional[str]: + for k in self._fields: + if k == "output": + return self.output.diff(other.output, prefix=f"{k}.") + sv = getattr(self, k) + ov = getattr(other, k) + if sv != ov: + return f"{k} mismatch: {sv} != {ov}" + return None + + +def stack_op(fn: typing.Callable[..., object]): + nargs = len(inspect.signature(fn).parameters) + fn_var = BuiltinVariable(fn) + + @functools.wraps(fn) + def impl(self: "InstructionTranslatorBase", inst: Instruction): + self.push(fn_var.call_function(self, self.popn(nargs), {})) + + return impl + + +def _detect_and_normalize_assert_statement( + self: "InstructionTranslatorBase", + truth_fn: typing.Callable[[object], bool], + push: bool, +): + # Detect if this jump instruction is assert and normalize the assert + # by pushing dummy error message when nothing is given. 
+ # + # Python 3.9 assertion is in following format: + # 18 POP_JUMP_IF_TRUE 28 + # 20 LOAD_ASSERTION_ERROR + # 22 LOAD_CONST 3 ('Assert message') -> optional instruction + # 24 CALL_FUNCTION 1 -> optional instruction + # 26 RAISE_VARARGS + # + # Python 3.8 assertion is in following format: + # 18 POP_JUMP_IF_TRUE 28 + # 20 LOAD_GLOBAL 0 (Assertion type) + # 22 LOAD_CONST 3 ('Assert message') -> optional instruction + # 24 CALL_FUNCTION 1 -> optional instruction + # 26 RAISE_VARARGS 1 + + if (truth_fn is not operator.truth) or push: + return False + + assert isinstance(self.instruction_pointer, int) + current_instruction_pointer = self.instruction_pointer + inst = self.instructions[current_instruction_pointer] + # Detect LOAD_ASSERTION_ERROR or LOAD_GLOBAL 0 + if sys.version_info < (3, 9): + if inst.opname != "LOAD_GLOBAL" or inst.argval != "AssertionError": + return False + else: + if inst.opname != "LOAD_ASSERTION_ERROR": + return False + + current_instruction_pointer += 1 + + # Use dummy error message if its hard to extract + error_msg = "assertion error" + + inst = self.instructions[current_instruction_pointer] + # DETECT RAISE_VARARGS or LOAD CONST + if inst.opname == "LOAD_CONST": + if not isinstance(inst.argval, str): + return False + error_msg = inst.argval + + # if it is LOAD_CONSTANT, it must be followed by CALL_FUNCTION + # (PRECALL for Python 3.11+) + current_instruction_pointer += 1 + inst = self.instructions[current_instruction_pointer] + if inst.opname not in ("CALL_FUNCTION", "PRECALL"): + return False + + # for Python 3.11+, PRECALL should be followed by CALL, then RAISE_VARARGS + # for Python < 3.11, CALL_FUNCTION should be followed by RAISE_VARARGS + current_instruction_pointer += 1 + if inst.opname == "PRECALL": + current_instruction_pointer += 1 + inst = self.instructions[current_instruction_pointer] + + if inst.opname != "RAISE_VARARGS": + return False + + self.push(ConstantVariable.create(error_msg)) + + return True + + +def generic_jump(truth_fn: typing.Callable[[object], bool], push: bool): + def inner(self: "InstructionTranslatorBase", inst: Instruction): + value: VariableTracker = self.pop() + if ( + config.rewrite_assert_with_torch_assert + and _detect_and_normalize_assert_statement(self, truth_fn, push) + ): + error_msg: VariableTracker = self.pop() + # Skip over things like `assert True` + if value.is_python_constant() and bool(value.as_python_constant()): + self.jump(inst) + return + + # TODO maybe should respect DtoH sync intention of users later?? + # Manually insert torch._assert_async instead of python assert and jump over + # assert related instructions as we don't need them anymore. + + # if we see Tensor as assert statement, no need to call scalar_tensor + if isinstance(value, TensorVariable): + self.output.create_proxy( + "call_function", + torch._assert_async, + *proxy_args_kwargs((value, error_msg), {}), + ) + self.jump(inst) + return + + if isinstance(value, SymNodeVariable): + # if the assertion is normal shape expression. + # just install guard and bail out. + sym_expr = value.sym_num + if not isinstance(sym_expr, torch.SymBool): + sym_expr = sym_expr != 0 + + result = torch.fx.experimental.symbolic_shapes.expect_true(sym_expr) + if not result: + raise unimplemented( + "Assertion failed on symbolic shapes. Did you make sure eager mode succeeds?" 
+ ) + self.jump(inst) + return + + scalar_to_tensor_proxy = self.output.create_proxy( + "call_function", torch.scalar_tensor, *proxy_args_kwargs((value,), {}) + ) + + scalar_to_tensor = wrap_fx_proxy( + self, + scalar_to_tensor_proxy, + example_value=get_fake_value(scalar_to_tensor_proxy.node, self), + ) + + self.output.create_proxy( + "call_function", + torch._assert_async, + *proxy_args_kwargs((scalar_to_tensor, error_msg), {}), + ) + self.jump(inst) + return + + if value.is_python_constant(): + if truth_fn(value.as_python_constant()): + push and self.push(value) + self.jump(inst) + elif ( + isinstance(value, (TensorVariable)) and self.should_compile_partial_graph() + ): + # compile a partial subgraph prefix then jump into user code + if self.has_backedge(): + msg = ( + "Skipping frame because there is a graph break in a for/while loop\n" + f"{self.frame_summary()}" + ) + log.info(msg) + raise exc.SkipFrame(msg) + + self.push(value) + log.debug("generic_jump triggered compile") + self.output.compile_subgraph( + self, + reason=GraphCompileReason( + f"generic_jump {typestr(value)}", [self.frame_summary()] + ), + ) + self.pop() + + if_next = self.create_call_resume_at(self.next_instruction) + push and self.push(value) + if_jump = self.create_call_resume_at(inst.target) + + self.output.add_output_instructions( + [create_instruction(inst.opname, target=if_jump[0])] + if_next + if_jump + ) + elif isinstance(value, NNModuleVariable): + # Equivalent of "self.nn_module is not None" + mod = self.output.get_submodule(value.module_key) + if truth_fn(mod): + push and self.push(value) + self.jump(inst) + elif isinstance(value, UserDefinedObjectVariable): + x = value.var_getattr(self, "__bool__") + # if __bool__ is missing, trying __len__ to infer a truth value. + if isinstance(x, GetAttrVariable): + x = value.var_getattr(self, "__len__") + + # __bool__ or __len__ is function + if isinstance(x, UserMethodVariable): + result = x.call_function(self, [], {}) + if isinstance(result, ConstantVariable) and isinstance( + result.value, (bool, int) + ): + if truth_fn(result.value): + push and self.push(value) + self.jump(inst) + else: + unimplemented( + "generic_jump on UserDefined with __bool__ returning non-constant" + ) + # __bool__ or __len__ is non-function or not existed in the user defined object + else: + if truth_fn(True): + push and self.push(value) + self.jump(inst) + elif not isinstance(value, TensorVariable) and value.has_unpack_var_sequence( + self + ): + if truth_fn(len(value.unpack_var_sequence(self))): + push and self.push(value) + self.jump(inst) + elif isinstance(value, SymNodeVariable): + eval_result = value.evaluate_expr(self.output) + if truth_fn(eval_result): + push and self.push(value) + self.jump(inst) + elif isinstance(value, variables.BackwardHookVariable): + if truth_fn(True): + push and self.push(value) + self.jump(inst) + else: + from .source import is_constant_source + + if value.source is not None and is_constant_source(value.source): + if truth_fn(value.get_real_value()): # type: ignore[attr-defined] + push and self.push(value) + self.jump(inst) + else: + # TODO link the torch.cond doc later + raise exc.UserError( + exc.UserErrorType.DYNAMIC_CONTROL_FLOW, + "Dynamic control flow is not supported at the moment. 
Please use " + "functorch.experimental.control_flow.cond to explicitly capture the control flow.", + case_name="cond_operands", + ) + + return inner + + +explain = False + + +def break_graph_if_unsupported(*, push): + def decorator(inner_fn): + @functools.wraps(inner_fn) + def wrapper(self: "InstructionTranslatorBase", inst: Instruction): + speculation = self.speculate() + if speculation.failed: + assert speculation.reason is not None + return handle_graph_break(self, inst, speculation.reason) + try: + TracingContext.set_current_loc( + self.f_code.co_filename, self.lineno, self.f_code.co_name + ) + return inner_fn(self, inst) + except Unsupported as excp: + if self.generic_context_manager_depth > 0: + # We don't support graph break under GenericContextWrappingVariable, + # If there is, we roll back to the checkpoint and fall back. + excp.remove_from_stats() + unimplemented("Graph break under GenericContextWrappingVariable") + + if isinstance(excp, exc.UncapturedHigherOrderOpError): + raise + + if not self.should_compile_partial_graph(): + raise + + user_stack = excp.real_stack + # TODO: Also report the traceback from the parent frame + user_stack_formatted = "".join(traceback.format_list(user_stack)) + frame_loc = (user_stack[-1].filename, user_stack[-1].lineno) + # torch._dynamo.explain() formats this a little nicer, and presents a slightly + # more actionable user code pointer + if ( + graph_break_log.isEnabledFor(logging.DEBUG) + and not explain + and graph_break_dup_warning_checker.add(frame_loc) + ): + # This log line is exercised from + # python test/dynamo/test_exc.py -k test_graph_break_log + graph_break_log.debug( + "Graph break: from user code at:\n%s", + user_stack_formatted, + exc_info=True, + ) + else: + # This log line MUST NOT contain the string "Graph break", + # exercised by + # python test/dynamo/test_misc.py -k test_duplicate_graph_break_log + log.debug( + "Unsupported break in user code at %s:%s (details suppressed)", + *frame_loc, + ) + + if self.has_backedge(): + msg = ( + "Skipping frame because there is a graph break in a for/while loop\n" + f"{self.frame_summary()}" + ) + log.info(msg) + raise exc.SkipFrame(msg) from excp + + excp.remove_from_stats() + excp.add_to_stats("graph_break") + speculation.reason = GraphCompileReason(excp.msg, user_stack) + speculation.fail_and_restart_analysis() + + def handle_graph_break( + self: "InstructionTranslatorBase", + inst: Instruction, + reason: GraphCompileReason, + ): + self.output.compile_subgraph(self, reason=reason) + cg = PyCodegen(self) + cleanup: List[Instruction] = [] + # Reconstruct the context variables in the block stack + for b in self.block_stack: + assert b.with_context is not None + cg(b.with_context) + cg.extend_output(b.resume_fn().try_except(cg.code_options, cleanup)) + self.output.add_output_instructions(cg.get_instructions()) + del cg + + if sys.version_info >= (3, 11) and inst.opname == "CALL": + kw_names = ( + self.kw_names.as_python_constant() + if self.kw_names is not None + else () + ) + if len(kw_names) > 0: + self.output.add_output_instructions( + [create_instruction("KW_NAMES", argval=kw_names)] + ) + self.output.add_output_instructions( + create_call_function(inst.arg, False) + ) + else: + # copy instruction, but without exception table data + assert inst.target is None + inst_copy = copy.copy(inst) + inst_copy.exn_tab_entry = None + self.output.add_output_instructions([inst_copy]) + + self.output.add_output_instructions(cleanup) + + if sys.version_info >= (3, 11) and inst.opname == "CALL": + # 
stack effect for PRECALL + CALL is split between the two instructions + stack_effect = dis.stack_effect( + dis.opmap["PRECALL"], inst.arg + ) + dis.stack_effect(dis.opmap["CALL"], inst.arg) + else: + stack_effect = dis.stack_effect(inst.opcode, inst.arg) + self.popn(push - stack_effect) + + for _ in range(push): + self.push(UnknownVariable()) + self.output.add_output_instructions( + self.create_call_resume_at(self.next_instruction) + ) + + return wrapper + + return decorator + + +class InstructionTranslatorBase(Checkpointable[InstructionTranslatorGraphState]): + output: OutputGraph + symbolic_locals: Dict[str, VariableTracker] + symbolic_globals: Dict[str, VariableTracker] + stack: List[VariableTracker] + instruction_pointer: Optional[int] + current_instruction: Instruction + next_instruction: Optional[Instruction] + block_stack: List[BlockStackEntry] + lineno: int + kw_names: Optional[ConstantVariable] + accept_prefix_inst: bool + prefix_insts: List[Instruction] + inline_depth: int + inconsistent_side_effects: bool + current_speculation: Optional[SpeculationEntry] + + def mark_inconsistent_side_effects(self): + """ + InstructionTranslator has encountered instructions which may cause + dynamo to see a different version of history from eager + See: https://github.com/pytorch/pytorch/issues/110765 + """ + self.inconsistent_side_effects = True + + def has_backedge(self): + cur_offset = self.current_instruction.offset + assert self.instruction_pointer is not None + for inst in self.instructions[self.instruction_pointer :]: + if inst.opname in JUMP_OPNAMES: + jump_offset = inst.argval + if jump_offset < cur_offset: + return True + return False + + def cell_and_freevars(self): + if not hasattr(self, "_cell_and_freevars"): + self._cell_and_freevars = tuple( + self.code_options["co_cellvars"] or [] + ) + tuple(self.code_options["co_freevars"] or []) + return self._cell_and_freevars + + def prune_dead_locals(self): + reads = livevars_analysis(self.instructions, self.current_instruction) + # implicit use by super() + # reads = reads | {"__class__"} + # output variables? + reads = reads | set(self.cell_and_freevars()) + self.symbolic_locals = { + k: v for k, v in self.symbolic_locals.items() if k in reads + } + self.output.side_effects.prune_dead_object_new(self) + + def call_function( + self, + fn: VariableTracker, + args: List[VariableTracker], + kwargs: Dict[str, VariableTracker], + ): + assert isinstance(fn, VariableTracker) + assert isinstance(args, list) + assert isinstance(kwargs, dict) + assert all( + isinstance(x, VariableTracker) + for x in itertools.chain(args, kwargs.values()) + ) + inner_fn = None + if hasattr(fn, "value"): + inner_fn = fn.value + if hasattr(fn, "fn"): + inner_fn = fn.fn + if inner_fn and callable(inner_fn) and is_forbidden(inner_fn): + raise AssertionError(f"Attempt to trace forbidden callable {inner_fn}") + self.push(fn.call_function(self, args, kwargs)) + + def inline_user_function_return(self, fn, args, kwargs): + """ + A call to some user defined function by inlining it. 
+ """ + return InliningInstructionTranslator.inline_call(self, fn, args, kwargs) + + def get_line_of_code_header(self, lineno=None): + if lineno is None: + lineno = self.lineno + inline_depth_str = ( + f" (inline depth: {self.inline_depth})" if self.inline_depth > 0 else "" + ) + funcname = get_funcname(self.f_code.co_filename, lineno) + funcname_str = "" if funcname is None else f" ({funcname})" + return f"{self.f_code.co_filename}:{lineno} in {self.f_code.co_name}{funcname_str}{inline_depth_str}" + + def get_log_starts_line_log_str(self): + log_str = f"TRACE starts_line {self.get_line_of_code_header()}\n" + line = linecache.getline(self.f_code.co_filename, self.lineno).rstrip() + log_str += f" {line}" + return log_str + + def log_starts_line(self): + trace_source_log.debug("%s", LazyString(self.get_log_starts_line_log_str)) + + def step(self): + """Process exactly one instruction, return False we should exit""" + assert isinstance(self.instruction_pointer, int) + inst = self.instructions[self.instruction_pointer] + self.current_instruction = inst + self.instruction_pointer += 1 + if self.instruction_pointer < len(self.instructions): + self.next_instruction = self.instructions[self.instruction_pointer] + else: + self.instruction_pointer = None + self.next_instruction = None + if inst.starts_line and self.lineno != inst.starts_line: + self.lineno = inst.starts_line + self.log_starts_line() + + if ( + len(self.stack) == 0 + and self.should_compile_partial_graph() + and self.is_non_empty_graph() + ): + self.current_speculation = self.speculate() + if self.current_speculation.failed: + return self.step_graph_break(inst) + + log.debug("TRACE %s %s %s", inst.opname, inst.argval, self.stack) + + # 3.11 no longer uses a block stack, but we still keep track of one + # so that we know which contexts are currently active. + # For our purposes, all exception table entries with the same target + # are considered to be part of the same "block". + if sys.version_info >= (3, 11): + entry = inst.exn_tab_entry + if not ( + # still in the same block + self.block_stack + and entry + and self.block_stack[-1].target is entry.target + ): + if not entry: + # no longer in any block + # It is possible for NOPs to be between two instructions + # in the same block, but the NOPs are not covered by an + # exception table entry. In this case, assume that we + # are still in the same block. + if self.block_stack and inst.opname != "NOP": + # If we really escape from a block and the current + # instruction is not in another block, then there + # should be no other nested blocks that we are in. + assert len(self.block_stack) == 1 + self.block_stack.pop() + elif ( + # current instruction is in the previous block + len(self.block_stack) > 1 + and self.block_stack[-2].target is entry.target + ): + # exit the current block + self.block_stack.pop() + else: + # current instruction is in a new block + # push block to stack - note, BEFORE_WITH blocks won't + # be pushed here since BEFORE_WITH pushes the block, and + # the current instruction would be counted as being in that block. 
+ self.block_stack.append( + BlockStackEntry(entry.target, len(self.stack)) + ) + + try: + if not hasattr(self, inst.opname): + unimplemented(f"missing: {inst.opname}") + TracingContext.set_current_loc( + self.f_code.co_filename, self.lineno, self.f_code.co_name + ) + getattr(self, inst.opname)(inst) + + return inst.opname != "RETURN_VALUE" + except Unsupported: + if self.current_speculation is None: + log.debug("empty checkpoint") + raise + log.debug("step triggered compile", exc_info=True) + + self.current_speculation.fail_and_restart_analysis() + + def step_graph_break(self, continue_inst): + # generate code from checkpoint + assert not self.output.output_instructions + assert self.current_speculation is not None + self.output.compile_subgraph( + self, + partial_convert=True, + reason=GraphCompileReason("step_unsupported", [self.frame_summary()]), + ) + self.output.add_output_instructions( + [create_jump_absolute(continue_inst)] + self.instructions + ) + + def run_ctx_mgr(self): + # NB: Don't push the top level frame summary; set_current_loc will + # take care of it. However, DO make sure we attach real_stack to + # exceptions + return TracingContext.current_frame(None) + + def run(self): + with self.run_ctx_mgr(): + try: + self.output.push_tx(self) + while ( + self.instruction_pointer is not None + and not self.output.should_exit + and self.step() + ): + pass + except BackendCompilerFailed: + raise + except Exception as e: + if config.replay_record_enabled: + e.exec_record = self.exec_recorder.get_record() # type: ignore[attr-defined] + raise + finally: + self.output.pop_tx() + # Cleanup the outputGraph to delete the held tensors. We perform the + # cleanup only for InstructionTranslator and not + # InliningInstructionTranslator. The InliningInstructionTranslator + # mutates the output object and is restored to original state if + # there was an exception. 
+ if isinstance(self, InstructionTranslator): + self.output.cleanup() + + def push(self, val: Optional[VariableTracker]): + assert val is None or isinstance( + val, VariableTracker + ), f"push expects VariableTracker, got {typestr(val)}" + self.stack.append(val) # type: ignore[arg-type] + + def push_many(self, vals: List[VariableTracker]): + for val in vals: + self.push(val) + + def pop(self) -> VariableTracker: + return self.stack.pop() + + def popn(self, n: int) -> List[VariableTracker]: + assert n >= 0 + return list(reversed([self.pop() for _ in range(n)])) + + def LOAD_FAST(self, inst): + name = inst.argval + if name in self.f_locals and config.replay_record_enabled: + self.exec_recorder.add_local_var(name, self.f_locals[name]) + + if name.startswith(".") and name not in self.symbolic_locals: + # This happens in dict/list comprehensions + name = name.replace(".", "implicit") + assert name not in self.cell_and_freevars() + if name not in self.symbolic_locals: + unimplemented("undefined LOAD_FAST") + self.push(self.symbolic_locals[name]) + if name.startswith("___stack"): + self.symbolic_locals.pop(name) + + def LOAD_DEREF(self, inst): + assert inst.argval in self.cell_and_freevars() + + if inst.argval in self.f_locals and config.replay_record_enabled: + self.exec_recorder.add_local_var(inst.argval, self.f_locals[inst.argval]) + + if inst.argval not in self.symbolic_locals: + unimplemented(f"undefined LOAD_DEREF {inst.argval}") + self.push(self.symbolic_locals[inst.argval]) + + def STORE_FAST(self, inst): + loaded_vt = self.pop() + name = inst.argval + # Only rename at the top-level scope, this is to avoid the confusion between + # mutating a variable vs renaming it (e.g. a = b) during speculating a higher order op, + # where mutation is prohibited and it's difficult to differentiate it with renaming. 
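+        # A minimal illustration of the rename-vs-mutation distinction above
+        # (variable names are arbitrary):
+        #
+        #     y = x          # STORE_FAST y: at top level, just rename the tracker
+        #     lst.append(v)  # an actual mutation, tracked through SideEffects instead
+        #
+        # Inside a speculated higher-order-op subgraph the rename is skipped so a
+        # simple rebinding is not mistaken for (prohibited) mutation.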
+ if _is_top_level_scope(current_scope_id()): + loaded_vt = loaded_vt.rename(self, name) + self.symbolic_locals[name] = loaded_vt + + def DELETE_FAST(self, inst): + del self.symbolic_locals[inst.argval] + + STORE_DEREF = STORE_FAST + + def LOAD_CLOSURE(self, inst): + self.push(ClosureVariable(name=inst.argval)) + + def LOAD_CONST(self, inst): + # For empty tuples, create empty TupleVariable + if isinstance(inst.argval, tuple) and not inst.argval: + self.push(TupleVariable([])) + else: + self.push(ConstantVariable.create(value=inst.argval)) + + def get_global_source(self, name): + source: Source + if self.output.global_scope is self.f_globals: + source = GlobalSource(name) + else: + if "__name__" in self.f_globals: + source = AttrSource( + self.import_source(self.f_globals["__name__"]), name + ) + else: + mangled_name = self.output.install_global_by_id( + "___unnamed_scope", self.f_globals + ) + source = GetItemSource(GlobalSource(mangled_name), name) + return source + + def LOAD_GLOBAL(self, inst): + if sys.version_info >= (3, 11): + if inst.arg % 2: + self.PUSH_NULL(inst) + + name = inst.argval + + if config.replay_record_enabled: + if name in self.f_globals: + self.exec_recorder.add_global_var(name, self.f_globals[name]) + else: + assert name in self.f_builtins + self.exec_recorder.builtins[name] = self.f_builtins[name] + + if inst.argval == "AssertionError": + unimplemented("assert with non-string message") + + if name in self.symbolic_globals: + variable = self.output.side_effects[self.symbolic_globals[name]] + self.push(self.output.side_effects.load_global(variable, name)) + return + + try: + value = self.f_globals[name] + except KeyError: + return self.load_builtin(inst) + + source = self.get_global_source(name) + self.push(VariableBuilder(self, source)(value)) + + def STORE_GLOBAL(self, inst): + value = self.pop() + name = inst.argval + source = self.get_global_source(name) + if name not in self.symbolic_globals: + self.symbolic_globals[name] = object() # type: ignore[assignment] # sentinel object + variable = self.output.side_effects.track_global_existing( + source, self.symbolic_globals[name] + ) + if isinstance(value, RemovableHandleVariable): + unimplemented("Storing handles in globals - NYI") + self.output.side_effects.store_global(variable, name, value) + + def import_source(self, module_name): + """Create an alias to a module for use in guards""" + if "torch_package" in module_name: + value = torch.package.package_importer._package_imported_modules[ + module_name + ] + alias = ( + module_name.replace(">", "_").replace("<", "_").replace(".", "_dot_") + ) + else: + value = importlib.import_module(module_name) + alias = f"__import_{module_name.replace('.', '_dot_')}" + f_globals = self.output.global_scope + assert alias not in f_globals or f_globals[alias] is value + f_globals[alias] = value + self.output.update_co_names(alias) + return GlobalSource(alias) + + def resolve_name(self, name, package, level): + """ + Copied from the Cpython implementation of __import__ + Resolve a relative module name to an absolute one. 
+ https://github.com/python/cpython/blob/5a094f0255eea1db58fb2cf14c200971e64ec36e/Lib/importlib/_bootstrap.py#L902 + """ + bits = package.rsplit(".", level - 1) + if len(bits) < level: + raise ImportError("attempted relative import beyond top-level package") + base = bits[0] + return f"{base}.{name}" if name else base + + def calc_package(self): + """ + Copied from the Cpython implementation of __import__ + https://github.com/python/cpython/blob/5a094f0255eea1db58fb2cf14c200971e64ec36e/Lib/importlib/_bootstrap.py#L1090 + """ + package = self.f_globals.get("__package__") + spec = self.f_globals.get("__spec__") + if package is not None: + if spec is not None and package != spec.parent: + log.warning( + "__package__ != __spec__.parent (%r != %r)", + package, + spec.parent, + stacklevel=3, + ) + return package + elif spec is not None: + return spec.parent + else: + log.warning( + "can't resolve package from __spec__ or __package__, " + "falling back on __name__ and __path__", + stacklevel=3, + ) + package = self.f_globals["__name__"] + if "__path__" not in self.f_globals: + package = package.rpartition(".")[0] + return package + + def IMPORT_NAME(self, inst): + level, fromlist = self.popn(2) + level = level.as_python_constant() + fromlist = fromlist.as_python_constant() + module_name = inst.argval + + # Are we replaying? if so, load recorded module + recorded_name = ( + f"{ExecutionRecorder.LOCAL_MOD_PREFIX}_{level}_{fromlist}_{module_name}" + ) + if recorded_name in self.f_globals: + value = self.f_globals[recorded_name] + source = GlobalSource(recorded_name) + else: + value = __import__( + module_name, + fromlist=fromlist, + level=level, + globals=self.f_globals, + ) + + if level != 0: + pkg = self.calc_package() + module_name = self.resolve_name(module_name, pkg, level) + + # For __import__, when the name variable is of the form package.module, + # normally, the top-level package (the name up till the first dot) is + # returned, not the module named by module_name. However, when a + # non-empty fromlist argument is given, the module named by name is + # returned. Therefore, we set the source correctly here. 
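+            # Illustrative examples of the rule above (module names are hypothetical):
+            #
+            #     import pkg.mod              # empty fromlist      -> top-level package "pkg"
+            #     from pkg.mod import thing   # fromlist=("thing",) -> module "pkg.mod"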
+ if not fromlist: + top_level_module_name = module_name.partition(".")[0] + source = self.import_source(top_level_module_name) + else: + source = self.import_source(module_name) + + if config.replay_record_enabled: + self.exec_recorder.add_local_mod(recorded_name, value) + + if istype(value, (types.ModuleType, DummyModule)): + self.push(PythonModuleVariable(value, source=source)) + else: + unimplemented(f"IMPORT_NAME {typestr(value)}") + + def IMPORT_FROM(self, inst): + self.DUP_TOP(inst) + self.LOAD_ATTR(inst) + + def load_builtin(self, inst): + if inst.argval not in self.f_builtins: + raise NameError(f"name '{inst.argval}' is not defined") + val = self.f_builtins[inst.argval] + + if callable(val): + self.push(VariableBuilder(self, GlobalSource(inst.argval))(val)) + else: + assert is_builtin_constant(val) + self.push(ConstantVariable.create(value=val)) + + def jump(self, inst): + self.instruction_pointer = self.indexof[inst.target] + + JUMP_FORWARD = jump + JUMP_ABSOLUTE = jump + + POP_JUMP_IF_FALSE = generic_jump(operator.not_, False) + POP_JUMP_IF_TRUE = generic_jump(operator.truth, False) + JUMP_IF_FALSE_OR_POP = generic_jump(operator.not_, True) + JUMP_IF_TRUE_OR_POP = generic_jump(operator.truth, True) + + def SETUP_LOOP(self, inst): + # only exists in python<=3.7 + self.block_stack.append(BlockStackEntry(inst.target)) + + def SETUP_EXCEPT(self, inst): + # only exists in python<=3.7 + self.block_stack.append(BlockStackEntry(inst.target)) + + def POP_BLOCK(self, inst): + self.block_stack.pop() + + def SETUP_WITH(self, inst): + self.setup_or_before_with(inst) + + def SETUP_FINALLY(self, inst): + self.block_stack.append(BlockStackEntry(inst.target)) + + def BEGIN_FINALLY(self, inst): + self.push(None) + + def WITH_CLEANUP_START(self, inst): + exit, exc = self.popn(2) + assert exc is None + self.push(exc) + self.push(exit.call_function(self, [ConstantVariable.create(None)] * 3, {})) + + def WITH_CLEANUP_FINISH(self, inst): + self.popn(2) + self.push(None) + + def CALL_FINALLY(self, inst): + """ + pushes the address of the next instruction onto the stack and increments + bytecode counter by delta + """ + # Python 3.8 only + assert self.next_instruction is not None + addr = self.indexof[self.next_instruction] + self.push(ConstantVariable.create(addr)) + self.instruction_pointer = self.indexof[inst.target] + + def END_FINALLY(self, inst): + # Python 3.8 only + # https://docs.python.org/3.8/library/dis.html#opcode-END_FINALLY + tos = self.pop() + if isinstance(tos, ConstantVariable): + self.instruction_pointer = tos.as_python_constant() + else: + pass + + def POP_FINALLY(self, inst): + # Python 3.8 only + preserve_tos = inst.argval + if preserve_tos: + tos = self.pop() + _ = self.pop() + if preserve_tos: + self.push(tos) # type: ignore[possibly-undefined] + + def FOR_ITER(self, inst): + it = self.pop().realize() + if isinstance(it, (variables.ListIteratorVariable, variables.IteratorVariable)): + try: + val, next_iter = it.next_variables(self) + self.push(next_iter) + self.push(val) + except StopIteration: + self.jump(inst) + else: + unimplemented(f"FOR_ITER {typestr(it)}") + + def COMPARE_OP(self, inst): + left, right = self.popn(2) + op = inst.argval + supported_any = dict( + itertools.chain( + supported_tensor_comparison_ops.items(), + supported_const_comparison_ops.items(), + ) + ) + if ( + isinstance( + left, + ( + TensorVariable, + SymNodeVariable, + NNModuleVariable, + BaseListVariable, + UserDefinedVariable, + BaseUserFunctionVariable, + ConstDictVariable, + ), + ) + and 
isinstance(right, ConstantVariable) + and right.value is None + and op in supported_const_comparison_ops + ): + # is None + self.push( + ConstantVariable.create( + supported_const_comparison_ops[op](object(), right.value) + ) + ) + + elif ( + left.is_python_constant() + and right.is_python_constant() + and op in supported_any + ): + # constant fold + self.push( + ConstantVariable.create( + supported_any[op]( + left.as_python_constant(), right.as_python_constant() + ), + ) + ) + elif op in ("in", "not in"): + self.push(right.call_method(self, "__contains__", [left], {})) + if op == "not in": + self.UNARY_NOT(inst) + else: + self.push( + BuiltinVariable(supported_any[op]).call_function( + self, [left, right], {} + ) + ) + + def GET_ITER(self, inst): + self.call_function(BuiltinVariable(iter), [self.pop()], {}) + + @break_graph_if_unsupported(push=1) + def CALL_FUNCTION(self, inst): + args = self.popn(inst.argval) + fn = self.pop() + self.call_function(fn, args, {}) + + @break_graph_if_unsupported(push=1) + def CALL_FUNCTION_EX(self, inst): + kwargsvars: VariableTracker + if inst.argval == 0: + kwargsvars = ConstDictVariable({}) + argsvars = self.pop() + elif inst.argval == 1: + kwargsvars = self.pop() + argsvars = self.pop() + else: + unimplemented("CALL_FUNCTION_EX") + fn = self.pop() + if sys.version_info >= (3, 11): + null = self.pop() + assert isinstance(null, NullVariable) + + if ( + isinstance(fn, GetAttrVariable) + and isinstance(fn.obj, TensorVariable) + and fn.name == "view" + and isinstance(argsvars, (ConstantVariable, TensorVariable)) + ): + # Hack to handle special case in some bert models. Converts + # x.view(*shape) into x.view(shape), which is correct for view() + # but not generally. See test_transpose_for_scores(). + argsvars = TupleVariable([argsvars]) + + if not isinstance( + argsvars, BaseListVariable + ) and argsvars.has_unpack_var_sequence(self): + argsvars = TupleVariable(argsvars.unpack_var_sequence(self)) + + if not isinstance(argsvars, BaseListVariable) or not isinstance( + kwargsvars, ConstDictVariable + ): + unimplemented(f"non-static call {typestr(argsvars)} {typestr(kwargsvars)}") + + # Map to a dictionary of str -> VariableTracker + kwargsvars = kwargsvars.keys_as_python_constant() + self.call_function(fn, argsvars.items, kwargsvars) + + @break_graph_if_unsupported(push=1) + def CALL_FUNCTION_KW(self, inst): + argnames = self.pop() + args = self.popn(inst.argval) + fn = self.pop() + assert isinstance(argnames, TupleVariable) and argnames.is_python_constant() + argnames = argnames.as_python_constant() + args, kwargs_list = args[: -len(argnames)], args[-len(argnames) :] + kwargs = dict(zip(argnames, kwargs_list)) + assert len(kwargs) == len(argnames) + self.call_function(fn, args, kwargs) + + def LOAD_METHOD_SUPER(self, inst): + self.CALL_FUNCTION(dataclasses.replace(inst, argval=2)) + arg = inst.argval[0] + argval = self.code_options["co_names"][arg] + if sys.version_info < (3, 11): + self.LOAD_ATTR(dataclasses.replace(inst, argval=argval)) + else: + self.LOAD_METHOD(dataclasses.replace(inst, argval=argval)) + + def LOAD_ATTR_SUPER(self, inst): + self.CALL_FUNCTION(dataclasses.replace(inst, argval=2)) + arg = inst.argval[0] + argval = self.code_options["co_names"][arg] + self.LOAD_ATTR(dataclasses.replace(inst, argval=argval)) + + def LOAD_METHOD(self, inst): + self.LOAD_ATTR(inst) + obj = self.pop() + if sys.version_info >= (3, 11): + # always follow the NULL + fn convention, since if obj + # is actually a method, self is already bound to it, so it + # doesn't 
need to be passed in as an arg. + self.PUSH_NULL(inst) + self.push(obj) + else: + self.push(obj) + self.push(None) + + def CALL_METHOD(self, inst): + args = self.popn(inst.argval) + dummy = self.pop() + assert dummy is None + fn = self.pop() + self.call_function(fn, args, {}) + + def LOAD_ATTR(self, inst): + obj = self.pop() + result = BuiltinVariable(getattr).call_function( + self, [obj, ConstantVariable.create(inst.argval)], {} + ) + self.push(result) + + def STORE_ATTR(self, inst): + speculation = self.speculate() + if speculation.failed: + return self.store_attr_graph_break(inst) + val, obj = self.popn(2) + + if isinstance(obj, NNModuleVariable): + # We don't allow side effects during export + # https://github.com/pytorch/torchdynamo/issues/1475 + assert ( + not self.export + ), f"Mutating module attribute {inst.argval} during export." + + try: + BuiltinVariable(setattr).call_function( + self, [obj, ConstantVariable.create(inst.argval), val], {} + ) + return + except Unsupported as e: + if not self.should_compile_partial_graph(): + raise + log.debug("STORE_ATTR triggered compile", exc_info=True) + e.remove_from_stats() + e.add_to_stats("graph_break") + speculation.fail_and_restart_analysis() + + def store_attr_graph_break(self, inst): + self.output.compile_subgraph( + self, reason=GraphCompileReason("store_attr", [self.frame_summary()]) + ) + self.output.add_output_instructions([copy.copy(inst)]) + self.popn(2) + self.output.add_output_instructions( + self.create_call_resume_at(self.next_instruction) + ) + + def DELETE_ATTR(self, inst): + obj = self.pop() + BuiltinVariable(delattr).call_function( + self, [obj, ConstantVariable.create(inst.argval)], {} + ) + + def create_call_resume_at(self, offset): + raise AssertionError( + f"create_call_resume_at not overridden by subclass {type(self)}" + ) + + def should_compile_partial_graph(self) -> bool: + raise AssertionError( + f"should_compile_partial_graph not overridden by subclass {type(self)}" + ) + + @break_graph_if_unsupported(push=0) + def STORE_SUBSCR(self, inst): + val, obj, key = self.popn(3) + result = obj.call_method(self, "__setitem__", [key, val], {}) + + def BUILD_TUPLE(self, inst): + items = self.popn(inst.argval) + self.push(TupleVariable(items)) + + def BUILD_SLICE(self, inst): + items = self.popn(inst.argval) + self.push(SliceVariable(items)) + + def BUILD_LIST(self, inst): + items = self.popn(inst.argval) + self.push(ListVariable(items, mutable_local=MutableLocal())) + + def BUILD_SET(self, inst): + if config.inject_BUILD_SET_unimplemented_TESTING_ONLY: + unimplemented("missing: BUILD_SET") + items = self.popn(inst.argval) + new_set = SetVariable(items, mutable_local=MutableLocal()) + self.push(new_set) + + def BUILD_LIST_UNPACK(self, inst, cls=ListVariable): + seqs = self.popn(inst.argval) + items = list() + for seq in seqs: + try: + items.extend(seq.unpack_var_sequence(self)) + except NotImplementedError: + unimplemented(f"BUILD_LIST_UNPACK {seq}") + self.push(cls(items, mutable_local=MutableLocal())) + + def BUILD_TUPLE_UNPACK(self, inst): + self.BUILD_LIST_UNPACK(inst, cls=TupleVariable) + + BUILD_TUPLE_UNPACK_WITH_CALL = BUILD_TUPLE_UNPACK + + def BUILD_MAP(self, inst): + items = self.popn(inst.argval * 2) + d = dict(zip(items[::2], items[1::2])) + self.push(ConstDictVariable(d, mutable_local=MutableLocal())) + + def BUILD_MAP_UNPACK(self, inst): + items = self.popn(inst.argval) + # ensure everything is a dict + items = [BuiltinVariable(dict).call_function(self, [x], {}) for x in items] + result = dict() + for x in 
items: + assert isinstance(x, ConstDictVariable) + result.update(x.items) + self.push( + ConstDictVariable( + result, + mutable_local=MutableLocal(), + ) + ) + + BUILD_MAP_UNPACK_WITH_CALL = BUILD_MAP_UNPACK + + def BUILD_CONST_KEY_MAP(self, inst): + keys = self.pop() + values = self.popn(inst.argval) + assert isinstance(keys, TupleVariable) + assert keys.is_python_constant() + + keys = keys.unpack_var_sequence(self) + assert len(keys) == len(values) + + self.push( + ConstDictVariable( + dict(zip(keys, values)), + mutable_local=MutableLocal(), + ) + ) + + def MAP_ADD(self, inst): + k, v = self.popn(2) + assert inst.argval > 0 + obj = self.stack[-inst.arg].realize() + assert isinstance(obj, ConstDictVariable) + obj.call_method(self, "__setitem__", (k, v), {}) # type: ignore[arg-type] + + def SET_ADD(self, inst): + v = self.pop() + assert inst.argval > 0 + obj = self.stack[-inst.arg] + assert isinstance(obj, SetVariable) + assert obj.mutable_local + return obj.call_method(self, "add", [v], {}) + + def LIST_APPEND(self, inst): + v = self.pop() + assert inst.argval > 0 + obj = self.stack[-inst.arg].realize() + assert isinstance(obj, ListVariable) + assert obj.mutable_local + self.output.side_effects.mutation(obj) + obj.items.append(v) + + def MAKE_FUNCTION(self, inst): + flags = inst.arg + old_stack = list(self.stack) + if sys.version_info < (3, 11): + fn_name = self.pop() + code = self.pop() + if sys.version_info >= (3, 11): + # MAKE_FUNCTION behavior actually changed in 3.11, see + # https://github.com/python/cpython/pull/93189/ + assert hasattr(code.value, "co_qualname") # type: ignore[attr-defined] + fn_name = ConstantVariable.create(value=code.value.co_qualname) # type: ignore[attr-defined] + defaults = None + closure = None + annotations = None + kwdefaults = None + + if flags & 0x08: + closure = self.pop() + if flags & 0x04: + annotations = self.pop() + if flags & 0x02: + kwdefaults = self.pop() + if flags & 0x01: + defaults = self.pop() + + self.push( + NestedUserFunctionVariable( + fn_name, + code, + self.f_globals, + defaults, + kwdefaults, + annotations, + closure, + closure_scope=self, + ) + ) + + def UNPACK_SEQUENCE(self, inst): + seq = self.pop() + if isinstance(seq, TensorVariable): + val = seq.unpack_var_sequence(self, idxes=range(inst.argval)) + elif isinstance(seq, GetAttrVariable) and isinstance(seq.obj, TensorVariable): + # x, y = a.shape + proxy = getattr(seq.obj.as_proxy(), seq.name) + val = [wrap_fx_proxy(self, proxy[i]) for i in range(inst.argval)] + elif seq.has_unpack_var_sequence(self): + val = seq.unpack_var_sequence(self) + else: + unimplemented(f"UNPACK_SEQUENCE {seq}") + if len(val) != inst.argval: + unimplemented("UNPACK_SEQUENCE length mismatch") + for i in reversed(val): + self.push(i) + + def UNPACK_EX(self, inst): + assert 0 <= inst.argval <= 0xFFFF + prefix = inst.argval & 0xFF # low byte + suffix = inst.argval >> 8 # high byte + seq = self.pop() + if seq.has_unpack_var_sequence(self): + vals = list(seq.unpack_var_sequence(self)) + assert len(vals) >= prefix + suffix + vals_prefix = vals[:prefix] + vals_list = vals[prefix : len(vals) - suffix] + vals_suffix = vals[len(vals) - suffix :] + for item in reversed(vals_suffix): + self.push(item) + self.push(TupleVariable(vals_list)) + for item in reversed(vals_prefix): + self.push(item) + else: + unimplemented(f"UNPACK_EX {seq}") + + def NOP(self, inst): + pass + + def POP_TOP(self, inst): + self.pop() + + def ROT_TWO(self, inst): + a = self.pop() + b = self.pop() + self.push(a) + self.push(b) + + def 
ROT_THREE(self, inst): + a = self.pop() + b = self.pop() + c = self.pop() + self.push(a) + self.push(c) + self.push(b) + + def ROT_FOUR(self, inst): + a = self.pop() + b = self.pop() + c = self.pop() + d = self.pop() + self.push(a) + self.push(d) + self.push(c) + self.push(b) + + def DUP_TOP(self, inst): + a = self.pop() + self.push(a) + self.push(a) + + def DUP_TOP_TWO(self, inst): + a = self.pop() + b = self.pop() + self.push(b) + self.push(a) + self.push(b) + self.push(a) + + def FORMAT_VALUE(self, inst): + flags = inst.arg + if (flags & 0x04) == 0x04: + fmt_spec = self.pop() + else: + fmt_spec = ConstantVariable.create("") + + value = self.pop() + if isinstance(value, SymNodeVariable): + value = ConstantVariable.create(str(value.sym_num)) + if (flags & 0x03) == 0x01: + value = BuiltinVariable(str).call_function(self, [value], {}) + elif (flags & 0x03) == 0x02: + value = BuiltinVariable(repr).call_function(self, [value], {}) + elif (flags & 0x03) == 0x03: + value = BuiltinVariable(ascii).call_function(self, [value], {}) + + fmt_var = ConstantVariable.create("{:" + fmt_spec.as_python_constant() + "}") + + self.call_function(BuiltinVariable(str.format), [fmt_var, value], {}) + + def BUILD_STRING(self, inst): + format_string_parts: List[str] = [] + args: List[VariableTracker] = [] + kwargs: Dict[str, VariableTracker] = {} + for part in self.popn(inst.arg): + if isinstance(part, ConstantVariable): + format_string_parts.append("{}") + args.append(part) + elif isinstance(part, variables.StringFormatVariable): + format_string_parts.append(part.format_string) + args.extend(part.sym_args) + if set(kwargs.keys()) & set(part.sym_kwargs.keys()): + unimplemented( + f"BUILD_STRING key conflict {kwargs} & {part.sym_kwargs}" + ) + kwargs.update(part.sym_kwargs) + else: + unimplemented(f"BUILD_STRING {part}") + self.push( + variables.StringFormatVariable.create( + "".join(format_string_parts), args, kwargs + ) + ) + + def IS_OP(self, inst): + assert inst.argval == 0 or inst.argval == 1 + if inst.argval == 0: + new_argval = "is" + else: + new_argval = "is not" + new_inst = create_instruction("COMPARE_OP", argval=new_argval) + self.COMPARE_OP(new_inst) + + def CONTAINS_OP(self, inst): + assert inst.argval == 0 or inst.argval == 1 + left, right = self.popn(2) + op = inst.argval + self.push(right.call_method(self, "__contains__", [left], {})) + if op == 1: + self.UNARY_NOT(inst) + + def LIST_EXTEND(self, inst): + v = self.pop() + assert inst.argval > 0 + obj = self.stack[-inst.arg] + assert isinstance(obj, ListVariable) + assert obj.mutable_local + obj.call_method(self, "extend", [v], {}) + + def LIST_TO_TUPLE(self, inst): + self.push(BuiltinVariable(tuple).call_function(self, [self.pop()], {})) + + def DICT_MERGE(self, inst): + v = self.pop() + assert inst.argval > 0 + obj = self.stack[-inst.arg].realize() + assert isinstance(obj, ConstDictVariable) + assert obj.mutable_local + obj.call_method(self, "update", [v], {}) + + DICT_UPDATE = DICT_MERGE + + def GEN_START(self, inst): + self.pop() + + def GET_LEN(self, inst): + tos = self.stack[-1] + if tos.is_python_constant(): + self.push(ConstantVariable.create(len(tos.as_python_constant()))) + else: + self.push(tos.call_method(self, "__len__", [], {})) + + def MATCH_MAPPING(self, inst): + tos = self.stack[-1] + assert isinstance(tos, ConstDictVariable) + if isinstance(tos.items, collections.abc.Mapping): + self.push(ConstantVariable.create(True)) + else: + self.push(ConstantVariable.create(False)) + + def MATCH_SEQUENCE(self, inst): + tos = self.stack[-1] + 
assert tos.is_python_constant() + tos_value = tos.as_python_constant() + if isinstance(tos_value, collections.abc.Sequence) and not isinstance( + tos_value, (str, bytes, bytearray) + ): + self.push(ConstantVariable.create(True)) + else: + self.push(ConstantVariable.create(False)) + + def MATCH_KEYS(self, inst): + tos = self.stack[-1] + tos1 = self.stack[-2] + assert isinstance(tos1, ConstDictVariable) + + if all(k in tos1 for k in tos): # type: ignore[attr-defined] + self.push(TupleVariable([tos1.getitem_const(k) for k in tos])) # type: ignore[attr-defined] + if sys.version_info < (3, 11): + self.push(ConstantVariable.create(True)) + else: + self.push(ConstantVariable.create(None)) + if sys.version_info < (3, 11): + self.push(ConstantVariable.create(False)) + + def LOAD_ASSERTION_ERROR(self, inst): + unimplemented("assert with non-string message") + + UNARY_POSITIVE = stack_op(operator.pos) + UNARY_NEGATIVE = stack_op(operator.neg) + UNARY_NOT = stack_op(operator.not_) + UNARY_INVERT = stack_op(operator.invert) + + BINARY_POWER = stack_op(operator.pow) + BINARY_MULTIPLY = stack_op(operator.mul) + BINARY_MATRIX_MULTIPLY = stack_op(operator.matmul) + BINARY_FLOOR_DIVIDE = stack_op(operator.floordiv) + BINARY_TRUE_DIVIDE = stack_op(operator.truediv) + BINARY_MODULO = stack_op(operator.mod) + BINARY_REMAINDER = stack_op(operator.mod) + BINARY_ADD = stack_op(operator.add) + BINARY_SUBTRACT = stack_op(operator.sub) + BINARY_SUBSCR = break_graph_if_unsupported(push=1)(stack_op(operator.getitem)) + BINARY_LSHIFT = stack_op(operator.lshift) + BINARY_RSHIFT = stack_op(operator.rshift) + BINARY_AND = stack_op(operator.and_) + BINARY_OR = stack_op(operator.or_) + BINARY_XOR = stack_op(operator.xor) + + INPLACE_POWER = stack_op(operator.ipow) + INPLACE_MULTIPLY = stack_op(operator.imul) + INPLACE_MATRIX_MULTIPLY = stack_op(operator.imatmul) + INPLACE_FLOOR_DIVIDE = stack_op(operator.ifloordiv) + INPLACE_TRUE_DIVIDE = stack_op(operator.itruediv) + INPLACE_MODULO = stack_op(operator.imod) + INPLACE_REMAINDER = stack_op(operator.imod) + INPLACE_ADD = stack_op(operator.iadd) + INPLACE_SUBTRACT = stack_op(operator.isub) + INPLACE_LSHIFT = stack_op(operator.ilshift) + INPLACE_RSHIFT = stack_op(operator.irshift) + INPLACE_AND = stack_op(operator.iand) + INPLACE_XOR = stack_op(operator.ixor) + INPLACE_OR = stack_op(operator.ior) + + # 3.11 opcodes + def RESUME(self, inst): + if inst.arg == 0: + self.append_prefix_inst(inst) + self.accept_prefix_inst = False + else: + assert not self.accept_prefix_inst + + def BINARY_OP(self, inst): + if sys.version_info >= (3, 11): + opname = dis._nb_ops[inst.arg][0][3:] # type: ignore[attr-defined] + if opname.startswith("INPLACE"): + return getattr(self, "INPLACE_" + opname[8:])(inst) + return getattr(self, "BINARY_" + opname)(inst) + else: + unimplemented("BINARY_OP requires Python 3.11+") + + def PRECALL(self, inst): + pass + + def KW_NAMES(self, inst): + kw_names = self.code_options["co_consts"][inst.arg] + assert isinstance(kw_names, tuple) + for name in kw_names: + assert isinstance(name, str) + assert self.kw_names is None + self.kw_names = ConstantVariable.create(value=kw_names) # type: ignore[assignment] + + def PUSH_NULL(self, inst): + self.push(NullVariable()) + + @break_graph_if_unsupported(push=1) + def CALL(self, inst): + # see https://docs.python.org/3.11/library/dis.html#opcode-CALL + # for convention + contents = self.popn(inst.arg + 2) + if isinstance(contents[0], NullVariable): + fn = contents[1] + args = [] + else: + fn = contents[0] + args = [contents[1]] 
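+        # A preceding KW_NAMES instruction may have recorded keyword-argument names in
+        # self.kw_names; if so, the trailing stack entries are those keyword arguments and
+        # are split off from the positional args below before dispatching call_function.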
+ kw_names = self.kw_names.value if self.kw_names else () + if kw_names: + args = args + contents[2 : -len(kw_names)] + kwargs_list = contents[-len(kw_names) :] + kwargs = dict(zip(kw_names, kwargs_list)) + assert len(kwargs) == len(kw_names) + else: + args = args + contents[2:] + kwargs = {} + self.call_function(fn, args, kwargs) + self.kw_names = None + + def COPY(self, inst): + self.push(self.stack[-inst.arg]) + + def SWAP(self, inst): + self.stack[-1], self.stack[-inst.arg] = self.stack[-inst.arg], self.stack[-1] + + JUMP_BACKWARD = jump + JUMP_BACKWARD_NO_INTERRUPT = jump + + POP_JUMP_FORWARD_IF_TRUE = generic_jump(operator.truth, False) + POP_JUMP_BACKWARD_IF_TRUE = generic_jump(operator.truth, False) + POP_JUMP_FORWARD_IF_FALSE = generic_jump(operator.not_, False) + POP_JUMP_BACKWARD_IF_FALSE = generic_jump(operator.not_, False) + + def CACHE(self, inst): + pass + + def BEFORE_WITH(self, inst): + self.setup_or_before_with(inst) + + def setup_or_before_with(self, inst): + ctx = self.pop() + if not isinstance(ctx, ContextWrappingVariable): + unimplemented(f"{inst.opname} {ctx}") + + if isinstance(ctx, GenericContextWrappingVariable): + self.generic_context_manager_depth += 1 + + exit = WithExitFunctionVariable( + ctx, + inst.target, + ) + if sys.version_info >= (3, 11): + # see create_call_resume_at for block stack details + assert self.next_instruction + assert self.next_instruction.exn_tab_entry + target = self.next_instruction.exn_tab_entry.target + else: + target = inst.target + if isinstance(self, InstructionTranslator): + self.block_stack.append(BlockStackEntry(target, len(self.stack), ctx)) + else: + self.block_stack.append(BlockStackEntry(target)) + + self.push(exit) + self.push(ctx.enter(self)) + + def append_prefix_inst(self, inst): + assert self.accept_prefix_inst + self.prefix_insts.append(inst) + + def MAKE_CELL(self, inst): + self.append_prefix_inst(inst) + + def COPY_FREE_VARS(self, inst): + self.append_prefix_inst(inst) + + def RETURN_GENERATOR(self, inst): + self.append_prefix_inst(inst) + + def copy_graphstate(self) -> InstructionTranslatorGraphState: + """Create a checkpoint of the current state by copying everything""" + return InstructionTranslatorGraphState( + self.output.copy_graphstate(), + dict(self.symbolic_locals), + list(self.stack), + list(self.block_stack), + self.instruction_pointer, + self.current_instruction, + self.next_instruction, + self.lineno, + ) + + def restore_graphstate(self, state: InstructionTranslatorGraphState): + """Restore a checkpoint created by self.copy_graphstate()""" + ( + output_state, + self.symbolic_locals, + self.stack, + self.block_stack, + self.instruction_pointer, + self.current_instruction, + self.next_instruction, + self.lineno, + ) = state + self.output.restore_graphstate(output_state) + + def is_non_empty_graph(self): + if self.output.count_calls() > 1: + # perf optimization only + self.is_non_empty_graph = lambda: True # type: ignore[method-assign] + return True + return False + + def format_frame_summary(self, additional_stack_frames=None): + if additional_stack_frames is None: + additional_stack_frames = [] + return "".join( + traceback.format_list( + [self.frame_summary()] + list(reversed(additional_stack_frames)) + ) + ) + + def frame_summary(self): + return traceback.FrameSummary( + getattr(self.f_code, "co_filename", ""), + self.lineno, + getattr(self.f_code, "co_name", ""), + lookup_line=False, + ) + + def store_global_weakref_by_id(self, prefix, value): + global_name = self.output.install_global_by_id(prefix, 
weakref.ref(value)) + install_guard( + GlobalWeakRefSource(global_name).make_guard(GuardBuilder.WEAKREF_ALIVE) + ) + return global_name + + @property + def fake_mode(self): + return self.output.tracing_context.fake_mode + + def find_symbolic_locals_name(self, tensor_variable): + for key, value in self.symbolic_locals.items(): + if value is tensor_variable: + return key + return None + + @contextlib.contextmanager + def strict_translation_mode(self): + self.strict_checks_enabled = True + try: + yield + finally: + self.strict_checks_enabled = False + + def speculate(self) -> SpeculationEntry: + return self.speculation_log.next( + self.f_code.co_filename, self.lineno, self.instruction_pointer + ) + + def __init__( + self, + output: OutputGraph, + instructions: List[Instruction], + f_locals: Dict[str, Any], + f_globals: Dict[str, Any], + f_builtins: Dict[str, Any], + code_options: Dict[str, Any], + symbolic_locals: Dict[str, VariableTracker], + symbolic_globals: Dict[str, VariableTracker], + f_code: types.CodeType, + export: bool, + inline_depth: int, + speculation_log: SpeculationLog, + ): + super().__init__() + self.speculation_log = speculation_log + + # Mutable state checkpointed by copy_graphstate() + self.output = output + self.symbolic_locals = symbolic_locals + self.symbolic_globals = symbolic_globals + self.stack = [] + self.instruction_pointer = 0 + self.current_instruction = create_instruction("NOP") + self.next_instruction = None + self.block_stack = [] + # states before SETUP_WITH for checkpointing and fallback + self.generic_context_manager_depth = 0 + self.lineno = code_options["co_firstlineno"] + self.kw_names = None + self.accept_prefix_inst = True + self.prefix_insts = [] + + # Properties of the input/output code + self.instructions: List[Instruction] = instructions + self.indexof: Dict[Instruction, int] = get_indexof(self.instructions) + self.f_locals: Dict[ + str, Any + ] = f_locals # needed for recording accessed locals for replay + self.f_globals: Dict[str, Any] = f_globals + self.f_builtins: Dict[str, Any] = f_builtins + self.code_options: Dict[str, Any] = code_options + self.f_code: types.CodeType = f_code + + # Execution record for replaying errors + self.exec_recorder = ExecutionRecorder(code=f_code, code_options=code_options) + # Stack of module being parsed, current nn.module is at the end of ordered dict. + # The first field of tuple is the fully qualified name of current module + # in original hierarchy. The second field is the type of current nn.module + self.nn_module_stack: Dict[str, Tuple[str, Type[Any]]] = {} + # Flag to indicate whether tracing is used for export. 
+ self.export = export + + self.current_speculation = None + + self.strict_checks_enabled = False + + if sys.version_info >= (3, 10): + from .resume_execution import ( + CO_ASYNC_GENERATOR, + CO_COROUTINE, + CO_GENERATOR, + CO_ITERABLE_COROUTINE, + ) + + if f_code.co_flags & ( + CO_GENERATOR | CO_COROUTINE | CO_ITERABLE_COROUTINE | CO_ASYNC_GENERATOR + ): + self.push(BuiltinVariable(None)) + + self.inline_depth = inline_depth + self.inconsistent_side_effects = False + linecache.lazycache(f_code.co_filename, f_globals) + self.log_starts_line() + + +class InstructionTranslator(InstructionTranslatorBase): + mutated_closure_cell_contents: Set[str] + + @staticmethod + def current_tx() -> "InstructionTranslator": + return tls.current_tx + + @contextlib.contextmanager + def set_current_tx(self): + prior = getattr(tls, "current_tx", None) + tls.current_tx = self + try: + yield + finally: + tls.current_tx = prior + + def __init__( + self, + instructions: List[Instruction], + f_code, + f_locals, + f_globals, + f_builtins, + code_options, + compiler_fn, + one_graph, + export, + export_constraints, + mutated_closure_cell_contents: Set[str], + frame_state, + speculation_log: SpeculationLog, + ): + _step_logger()( + logging.INFO, + f"torchdynamo start tracing {f_code.co_name} {code_options['co_filename']}:{code_options['co_firstlineno']}", + ) + super().__init__( + output=OutputGraph( + code_options, + compiler_fn, + self, + export, + export_constraints, + frame_state, + local_scope=f_locals, + global_scope=f_globals, + f_code=f_code, + ), + instructions=instructions, + f_locals=f_locals, + f_globals=f_globals, + f_builtins=f_builtins, + code_options=code_options, + symbolic_locals={}, # set below + # A global var is inserted only after a STORE_GLOBAL happens to it + symbolic_globals={}, + f_code=f_code, + export=export, + inline_depth=0, + speculation_log=speculation_log, + ) + + self._throw_if_in_functorch() + + # as soon as we create the tracing context we should keep it active, so any calls + # into dynamo apis can rely on finding it + with tracing(self.output.tracing_context), self.set_current_tx(): + self.one_graph: bool = one_graph + self.export = export + self.mutated_closure_cell_contents = mutated_closure_cell_contents + if self.export: + assert ( + self.one_graph + ), "Export without one graph - something has gone wrong." 
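+            # The block below seeds symbolic_locals with lazy VariableTrackers for every
+            # varname, cell, and freevar that is actually present in f_locals.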
+ + vars = list(code_options["co_varnames"]) + cells_and_freevars = [x for x in self.cell_and_freevars() if x not in vars] + vars.extend(cells_and_freevars) + cells_and_freevars_set = set(cells_and_freevars) + + self.symbolic_locals = { + k: variables.LazyVariableTracker.create( + f_locals[k], + source=LocalSource(k, cell_or_freevar=k in cells_and_freevars_set), + ) + for k in vars + if k in f_locals + } + self.debug_locals: List[Tuple[VariableTracker, List[VariableTracker]]] = [] + if export: + # export gets confused if we never realize unused inputs + # in export mode just eagerly realize everything + self.symbolic_locals = VariableTracker.apply( + lambda x: x.realize(), self.symbolic_locals + ) + + self._freevars_ids = dict() + for name in self.code_options["co_freevars"]: + if name in f_locals: + self._freevars_ids[name] = id(f_locals[name]) + + def _throw_if_in_functorch(self): + # Fallback to eager in case of a graph break inside vmap + eager = torch._dynamo.lookup_backend("eager") + compiler_fn = inspect.getattr_static( + self.output.compiler_fn, "compiler_fn", self.output.compiler_fn + ) + ci = torch._C._functorch.peek_interpreter_stack() + forbidden_keys = ( + torch._C._functorch.TransformType.Vmap, + torch._C._functorch.TransformType.Grad, + ) + if ci is not None and ci.key() in forbidden_keys and compiler_fn is not eager: + # if it reaches here, it means Dynamo failed to inline a functorch function + name = ci.key().name.lower() + msg = f"torch.func.{name}(fn) requires the function to be inlined by dynamo" + unimplemented(msg) + + def get_example_value(self, source: Source): + if isinstance(source, LocalSource): + return self.f_locals[source.local_name] + if isinstance(source, GlobalSource): + return self.f_globals[source.global_name] + raise KeyError() + + def run(self): + super().run() + + def match_nested_cell(self, name, cell): + """Match a cell in this method to one in a function we are inlining""" + try: + value = cell.cell_contents + except ValueError: + return None + # TODO(jansel): check the id of the cell rather than the contents + if id(value) != self._freevars_ids.get(name): + return None + return self.symbolic_locals[name] + + def should_compile_partial_graph(self): + return ( + all(b.can_restore() for b in self.block_stack) + and not self.one_graph + and self.generic_context_manager_depth == 0 + ) + + def create_call_resume_at(self, inst): + self.instruction_pointer = None + + if inst.opname == "RETURN_VALUE": + return [create_instruction("RETURN_VALUE")] + + reads = livevars_analysis(self.instructions, inst) + argnames = tuple( + k + for k in self.symbolic_locals.keys() + if k in reads and k not in self.cell_and_freevars() + ) + + cg = PyCodegen(self) + + # Python does not allow null to be an arg to a function, so + # we remove nulls from the stack and restore them in the + # prologue of the resume function + + # sorted list of indices of nulls on the stack + null_idxes: List[int] = [] + if sys.version_info >= (3, 11): + # find indices of NullVariables + for i, var in enumerate(self.stack): + if isinstance(var, NullVariable): + null_idxes.append(i) + # generate bytecode to pop the nulls + null_cnt = 0 + for i, var in enumerate(reversed(self.stack)): + if isinstance(var, NullVariable): + for j in range(2, i + 2 - null_cnt): + cg.append_output(create_instruction("SWAP", arg=j)) + cg.extend_output(cg.pop_null()) + null_cnt += 1 + + # we popped all nulls from the stack at runtime, + # so we should not count NullVariables + stack_len = len(self.stack) - len(null_idxes) 
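+        # The generated resume call passes the surviving stack values (with NULLs popped at
+        # runtime, so they are excluded from stack_len) followed by the live locals in argnames.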
+ nargs = stack_len + len(argnames) + + name = unique_id(f"__resume_at_{inst.offset}") + + new_code: types.CodeType = ContinueExecutionCache.lookup( + self.f_code, + self.lineno, + inst.offset, + tuple(b.target.offset for b in self.block_stack), + stack_len, + argnames, + tuple(b.resume_fn() for b in self.block_stack), + tuple(null_idxes), + ) + + # Add original GraphModule context to the resume function to handle + # the case of a graph break while tracing a GraphModule + orig_graphmodule_maybe = code_context.get_context(self.f_code).get( + "orig_graphmodule", lambda: None + )() + if orig_graphmodule_maybe is not None: + code_context.get_context(new_code)["orig_graphmodule"] = weakref.ref( + orig_graphmodule_maybe + ) + + if new_code.co_freevars: + cg.make_function_with_closure(name, new_code, True, stack_len) + else: + # This is safe: we pre-generate a unique name + self.output.install_global_unsafe( + name, types.FunctionType(new_code, self.f_globals, name) + ) + cg.extend_output(cg.load_function_name(name, True, stack_len)) + + cg.extend_output([cg.create_load(k) for k in argnames]) + cg.extend_output(create_call_function(nargs, False)) + cg.append_output(create_instruction("RETURN_VALUE")) + return cg.get_instructions() + + def symbolic_locals_contain_module_class(self): + for v in self.symbolic_locals.values(): + if isinstance(v, UserDefinedClassVariable) and issubclass( + v.as_python_constant(), torch.nn.Module + ): + return True + return False + + def RETURN_VALUE(self, inst): + if ( + self.output.count_calls() == 0 + and not self.inconsistent_side_effects + and not self.symbolic_locals_contain_module_class() + and not self.export + ): + raise exc.SkipFrame("because no content in function call") + self.instruction_pointer = None + _step_logger()( + logging.INFO, + f"torchdynamo done tracing {self.f_code.co_name} (RETURN_VALUE)", + ) + log.debug("RETURN_VALUE triggered compile") + self.output.compile_subgraph( + self, + reason=GraphCompileReason( + "return_value", [self.frame_summary()], graph_break=False + ), + ) + self.output.add_output_instructions([create_instruction("RETURN_VALUE")]) + + +class InliningInstructionTranslator(InstructionTranslatorBase): + """Trace and inline a called method""" + + symbolic_result: Optional[TensorVariable] + + @classmethod + def inline_call(cls, parent, func, args, kwargs): + with patch.dict(counters, {"unimplemented": counters["inline_call"]}): + return cls.inline_call_(parent, func, args, kwargs) + + @staticmethod + def check_inlineable(func): + if func.has_self(): + unimplemented("inline with __self__") + + result = trace_rules.check_verbose(func, is_inlined_call=True) + if result.skipped: + from torch._dynamo.variables.misc import produce_trampoline_autograd_apply + + # _origin marks this as coming from an internal dynamo known function that is safe to + # trace through. 
+ if hasattr(getattr(func, "fn", None), "_origin") and func.fn._origin in [ + produce_trampoline_autograd_apply, + ]: + # Known sound + return trace_rules.SkipResult( + False, "allowlist in dynamo known function" + ) + fn_qualname = func.fn.__qualname__ if hasattr(func, "fn") else "" + unimplemented( + f"'inline in skipfiles: {fn_qualname} | {func.get_name()} {func.get_filename()}, {result.reason}'" + ) + + if isinstance(func, UserFunctionVariable) and inspect.getattr_static( + func.get_function(), "_torchdynamo_disable", False + ): + unimplemented( + f"call torch._dynamo.disable() wrapped function {func.get_function()}" + ) + else: + return result + + @staticmethod + def inline_call_( + parent, func: VariableTracker, args: List[VariableTracker], kwargs + ): + if isinstance(func, SkipFunctionVariable): + unimplemented("inline with functions in skip files") + assert isinstance( + func, + (UserFunctionVariable, NestedUserFunctionVariable), + ) + result = InliningInstructionTranslator.check_inlineable(func) + assert result.skipped is False + try: + sub_locals, closure_cells = func.bind_args(parent, args, kwargs) + except TypeError as e: + # Wrap the general TypeError during bind_args() to the internal ArgsMismatchError with detailed info + raise ArgsMismatchError( # noqa: TRY200 + "{reason}.\n func = {func}, args = {args}, kwargs = {kwargs}".format( + reason=str(e), + func=f"'{func.get_name()}' {func.get_filename()}:{func.get_code().co_firstlineno}", + args=[arg.python_type() for arg in args], + kwargs=kwargs, + ), + ) + + for v in itertools.chain(sub_locals.values(), closure_cells.values()): + if not isinstance(v, VariableTracker): + unimplemented(f"unconverted arg {v}") + + code: types.CodeType = func.get_code() + if code.co_name in ("__setitem__", "__setattr__") and not ( + args is not None + and len(args) > 0 + and isinstance(args[0], variables.CustomizedDictVariable) + ): + unimplemented(f"inline {code.co_name}") + + suffix = "" + # TODO: mlazos, add support for enabling multiple artifact logs + # with a single alias + if torch._logging._internal.log_state.is_artifact_enabled("output_code"): + suffix = f"\n{dis.Bytecode(code).dis()}" + if sys.version_info >= (3, 11): + cur_inst = parent.current_instruction + parent_code = parent.f_code + header = parent.get_line_of_code_header(lineno=cur_inst.positions.lineno) + + def get_trace_call_log_str(): + line = get_instruction_source_311(parent_code, cur_inst).rstrip() + return f"TRACE inlined call {code.co_name} from {header}\n{line}" + + trace_call_log.debug("%s", LazyString(get_trace_call_log_str)) + log.debug("INLINING %s%s, %s", code, suffix, result.reason) + + # Detect inline GraphModule calls in order to propagate node metadata, + # by checking if the first argument (self) is a variable tracking a GraphModule. + if args and isinstance(args[0], NNModuleVariable): + module = parent.output.get_submodule(args[0].module_key) + if isinstance(module, torch.fx.GraphModule): + # The inline call might not actually be a call to `forward`, + # but it is enough to add a context for `forward` in case it is called. 
+ code_context.get_context(module.forward.__code__)[ + "orig_graphmodule" + ] = weakref.ref(module) + + tracer: InliningInstructionTranslator + if is_generator(code): + tracer = InliningGeneratorInstructionTranslator( + parent, code, sub_locals, parent.symbolic_globals, closure_cells, func + ) + else: + tracer = InliningInstructionTranslator( + parent, code, sub_locals, parent.symbolic_globals, closure_cells, func + ) + + strict_ctx: Any = contextlib.nullcontext() + if parent.strict_checks_enabled: + strict_ctx = tracer.strict_translation_mode() + try: + with strict_ctx: + tracer.run() + except exc.SkipFrame as e: + msg = f"SKIPPED INLINING {code}: {e}" + log.debug(msg) + raise Unsupported(msg) from e + except Exception as e: + log.debug("FAILED INLINING %s", code) + raise + assert tracer.symbolic_result is not None + func.export_freevars(parent, tracer) + + if tracer.f_globals is parent.f_globals: + # Merge symbolic_globals back if parent and child are in the same namespace + parent.symbolic_globals.update(tracer.symbolic_globals) + + parent.inconsistent_side_effects |= tracer.inconsistent_side_effects + + log.debug("DONE INLINING %s", code) + + if is_generator(code): + assert isinstance(tracer, InliningGeneratorInstructionTranslator) + assert tracer.symbolic_result.as_python_constant() is None + return ListIteratorVariable( + tracer.generated_items, + mutable_local=MutableLocal(), + ) + else: + return tracer.symbolic_result + + def __init__( + self, + parent: InstructionTranslatorBase, + code: types.CodeType, + symbolic_locals: Dict[str, VariableTracker], + symbolic_globals: Dict[str, VariableTracker], + closure_cells: Dict[str, VariableTracker], + funcvar: BaseUserFunctionVariable, + ): + f_globals = funcvar.get_globals() # type: ignore[attr-defined] + f_builtins = f_globals["__builtins__"] + if not isinstance(f_builtins, dict): + f_builtins = f_builtins.__dict__ + instructions = cleaned_instructions(code) + propagate_line_nums(instructions) + super().__init__( + output=parent.output, + f_locals={}, + f_globals=f_globals, + f_builtins=f_builtins, + symbolic_locals=symbolic_locals, + symbolic_globals=symbolic_globals, + instructions=instructions, + code_options={k: getattr(code, k) for k in dir(code)}, + f_code=code, + export=parent.export, + inline_depth=parent.inline_depth + 1, + speculation_log=parent.speculation_log, + ) + self.parent = parent + self.symbolic_result = None + self.closure_cells = closure_cells + self.nn_module_stack = parent.nn_module_stack.copy() + + @property + def fake_mode(self): + return self.parent.fake_mode + + def run_ctx_mgr(self): + return TracingContext.current_frame(self.parent.frame_summary()) + + def STORE_DEREF(self, inst): + if inst.argval in self.closure_cells: + cell = self.closure_cells[inst.argval] + val = self.pop() + if isinstance(cell, ClosureVariable): + if not self.output.is_root_tracer(): + unimplemented( + "HigherOrderOperator: Mutating a variable not in the current scope (ClosureVariable)" + ) + self.output.root_tx.symbolic_locals[cell.name] = val + else: + self.output.side_effects.store_cell(cell, val) + else: + maybe_cell = self.symbolic_locals.get(inst.argval) + if isinstance( + maybe_cell, + variables.NewCellVariable, + ): + self.output.side_effects.store_cell( + self.symbolic_locals[inst.argval], self.pop() + ) + else: + if ( + maybe_cell is not None + and maybe_cell.source.name() + not in self.output.root_tx.mutated_closure_cell_contents + ): + # Why is the source name here unique? 
+ # mutated_closure_cell_contents is a per-frame + # concept, and sources identify, e.g., particular + # locals from the frame. If you had two locals, + # they'll get different source names, and therefore + # differ here. + self.output.root_tx.mutated_closure_cell_contents.add( + maybe_cell.source.name() + ) + raise exc.UnspecializeRestartAnalysis() + unimplemented("write to __closure__ while inlining") + + def LOAD_DEREF(self, inst): + if inst.argval in self.closure_cells: + cell = self.closure_cells[inst.argval] + if isinstance(cell, ClosureVariable): + self.push(self.output.root_tx.symbolic_locals[cell.name]) + else: + self.push(self.output.side_effects.load_cell(cell)) + else: + maybe_sym_local = self.symbolic_locals.get(inst.argval, None) + if isinstance(maybe_sym_local, variables.NewCellVariable): + self.push(self.output.side_effects.load_cell(maybe_sym_local)) + else: + super().LOAD_DEREF(inst) + + def LOAD_CLOSURE(self, inst): + assert inst.argval in self.cell_and_freevars() + if inst.argval in self.closure_cells: + self.push(self.closure_cells[inst.argval]) + else: + self.push(InlinedClosureVariable(name=inst.argval)) + + def check_replace_is_safe(self, oldvar): + if not is_side_effect_safe(oldvar.mutable_local): + unimplemented( + "HigherOrderOperator: Mutating a variable not in the current scope (replace_all)" + ) + + def should_compile_partial_graph(self): + return False # inlining functions is all-or-nothing + + def create_call_resume_at(self, offset): + unimplemented("cant resume while inlining") + + def RETURN_VALUE(self, inst): + self.symbolic_result = self.pop() # type: ignore[assignment] + self.instruction_pointer = None + + +class InliningGeneratorInstructionTranslator(InliningInstructionTranslator): + generated_items: List[VariableTracker] + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.generated_items = [] + + def YIELD_VALUE(self, inst: Instruction): + self.generated_items.append(self.pop()) + # TODO(jansel): figure out why this is needed, it isn't in the docs for YIELD_VALUE + self.push(ConstantVariable.create(None)) + + def GET_YIELD_FROM_ITER(self, inst): + tos = self.stack[-1] + if not isinstance(tos, ListIteratorVariable): + self.pop() + res = BuiltinVariable(iter).call_function(self, [tos], {}) + self.push(res) + return self.YIELD_FROM(inst) + + def YIELD_FROM(self, inst): + while True: + tos = self.stack[-1].realize() + if isinstance(tos, ConstantVariable) and tos.value is None: + self.pop() + return + if isinstance( + tos, (variables.ListIteratorVariable, variables.IteratorVariable) + ): + try: + val, next_iter = tos.next_variables(self) + self.push(val) + # TODO(voz): Unclear if we need the push None in YIELD_VALUE? + self.YIELD_VALUE(inst) + self.pop() + self.push(next_iter) + except StopIteration: + return + else: + unimplemented(f"YIELD_FROM {typestr(tos)}") + + def SEND(self, inst): + assert len(self.stack) >= 2 + val = self.pop() + tos = self.stack[-1] + if isinstance(tos, ListIteratorVariable): + if isinstance(val, ConstantVariable) and val.value is None: + self.push(val) + self.instruction_pointer = self.indexof[inst.target] + else: + # invoke send + # Unreachable code - if you hit this, you are implementing generator support and have + # lifted the `unimplemented("generator")` in frame conversion. 
This codepath handles + # subgenerator and lines up with this line in Python 3.11 + # https://github.com/python/cpython/blob/3.11/Python/ceval.c#L2597 + unimplemented("Unreachable sub-generator code") + else: + unimplemented(f"SEND {typestr(tos)}") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/tensor_version_op.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/tensor_version_op.py new file mode 100644 index 0000000000000000000000000000000000000000..f12ed95b58c0fe809103b35140b16aaf7c168d6b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/tensor_version_op.py @@ -0,0 +1,57 @@ +import torch +from torch._prims import _make_prim, RETURN_TYPE +from torch._subclasses import FakeTensorMode +from torch._subclasses.functional_tensor import FunctionalTensorMode + +_tensor_version = _make_prim( + schema="_tensor_version(Tensor self) -> SymInt", + return_type=RETURN_TYPE.NEW, + meta=torch.ops.aten._version.default, + impl_aten=torch.ops.aten._version.default, + doc="Tracable unbacked SymInt version of torch.Tensor._version", +) + + +@_tensor_version.py_impl(FakeTensorMode) +def _tensor_version_fake(self): + """ + The initial dynamo capture of _tensor_version + _unsafe_set_version_counter turns the + `._version` into an unbacked SymInt so that we don't need to specialize on the `._version` + of input tensors to the graph. + """ + return self.fake_mode.shape_env.create_unbacked_symint() + + +_unsafe_set_version_counter = _make_prim( + schema="_unsafe_set_version_counter(Tensor self, SymInt version) -> ()", + return_type=RETURN_TYPE.NEW, + meta=lambda self, version: None, + impl_aten=torch._C._autograd._unsafe_set_version_counter, + doc="Tracable+SymInt version of torch._C._autograd._unsafe_set_version_counter", +) +torch.fx.node.has_side_effect(_unsafe_set_version_counter) + + +""" +When we functionalize _tensor_version + _unsafe_set_version_counter, +the ops disappear from the traced graph. We run them eagerly on the +fake tensors used for tracing, in order to get past asserts that would +fail in autograd. + +Why is this ok? +1) Versions on functional tensors don't make any sense since you can't mutate a functional tensor. +2) The whole point of version munging is to trick autograd into doing what we want, and after + AotAtuograd there is no longer any need for these ops. + +Note this is similar to how no_grad is handled. 
+""" + + +@_tensor_version.py_impl(FunctionalTensorMode) +def _tensor_version_functional(self): + return self._version + + +@_unsafe_set_version_counter.py_impl(FunctionalTensorMode) +def _unsafe_set_version_counter_functional(self, version): + torch._C._autograd._unsafe_set_version_counter(self, version) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/test_minifier_common.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/test_minifier_common.py new file mode 100644 index 0000000000000000000000000000000000000000..d12e5a92315a498e8f90586a47c5a93de1217df0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/test_minifier_common.py @@ -0,0 +1,244 @@ +import dataclasses +import io +import logging +import os +import re +import shutil +import subprocess +import sys +import tempfile +import traceback +from typing import Optional +from unittest.mock import patch + +import torch +import torch._dynamo +import torch._dynamo.test_case +from torch.utils._traceback import report_compile_source_on_error + + +@dataclasses.dataclass +class MinifierTestResult: + minifier_code: str + repro_code: str + + def _get_module(self, t): + match = re.search(r"class Repro\(torch\.nn\.Module\):\s+([ ].*\n| *\n)+", t) + assert match is not None, "failed to find module" + r = match.group(0) + r = re.sub(r"\s+$", "\n", r, flags=re.MULTILINE) + r = re.sub(r"\n{3,}", "\n\n", r) + return r.strip() + + def minifier_module(self): + return self._get_module(self.minifier_code) + + def repro_module(self): + return self._get_module(self.repro_code) + + +class MinifierTestBase(torch._dynamo.test_case.TestCase): + DEBUG_DIR = tempfile.mkdtemp() + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls._exit_stack.enter_context( # type: ignore[attr-defined] + torch._dynamo.config.patch(debug_dir_root=cls.DEBUG_DIR) + ) + # These configurations make new process startup slower. Disable them + # for the minification tests to speed them up. + cls._exit_stack.enter_context( # type: ignore[attr-defined] + torch._inductor.config.patch( + { + # https://github.com/pytorch/pytorch/issues/100376 + "pattern_matcher": False, + # multiprocess compilation takes a long time to warmup + "compile_threads": 1, + # https://github.com/pytorch/pytorch/issues/100378 + "cpp.vec_isa_ok": False, + } + ) + ) + + @classmethod + def tearDownClass(cls): + if os.getenv("PYTORCH_KEEP_TMPDIR", "0") != "1": + shutil.rmtree(cls.DEBUG_DIR) + else: + print(f"test_minifier_common tmpdir kept at: {cls.DEBUG_DIR}") + cls._exit_stack.close() # type: ignore[attr-defined] + + def _gen_codegen_fn_patch_code(self, device, bug_type): + assert bug_type in ("compile_error", "runtime_error", "accuracy") + return f"""\ +{torch._dynamo.config.codegen_config()} +{torch._inductor.config.codegen_config()} +torch._inductor.config.{"cpp" if device == "cpu" else "triton"}.inject_relu_bug_TESTING_ONLY = {bug_type!r} +""" + + def _maybe_subprocess_run(self, args, *, isolate, cwd=None): + if not isolate: + assert len(args) >= 2, args + assert args[0] == "python3", args + if args[1] == "-c": + assert len(args) == 3, args + code = args[2] + args = ["-c"] + else: + assert len(args) >= 2, args + with open(args[1]) as f: + code = f.read() + args = args[1:] + + # WARNING: This is not a perfect simulation of running + # the program out of tree. We only interpose on things we KNOW we + # need to handle for tests. If you need more stuff, you will + # need to augment this appropriately. 
+ + # NB: Can't use save_config because that will omit some fields, + # but we must save and reset ALL fields + dynamo_config = torch._dynamo.config.shallow_copy_dict() + inductor_config = torch._inductor.config.shallow_copy_dict() + try: + stderr = io.StringIO() + log_handler = logging.StreamHandler(stderr) + log = logging.getLogger("torch._dynamo") + log.addHandler(log_handler) + try: + prev_cwd = os.getcwd() + if cwd is not None: + os.chdir(cwd) + with patch("sys.argv", args), report_compile_source_on_error(): + exec(code, {"__name__": "__main__", "__compile_source__": code}) + rc = 0 + except Exception: + rc = 1 + traceback.print_exc(file=stderr) + finally: + log.removeHandler(log_handler) + if cwd is not None: + os.chdir(prev_cwd) # type: ignore[possibly-undefined] + # Make sure we don't leave buggy compiled frames lying + # around + torch._dynamo.reset() + finally: + torch._dynamo.config.load_config(dynamo_config) + torch._inductor.config.load_config(inductor_config) + + # TODO: return a more appropriate data structure here + return subprocess.CompletedProcess( + args, + rc, + b"", + stderr.getvalue().encode("utf-8"), + ) + else: + return subprocess.run(args, capture_output=True, cwd=cwd, check=False) + + # Run `code` in a separate python process. + # Returns the completed process state and the directory containing the + # minifier launcher script, if `code` outputted it. + def _run_test_code(self, code, *, isolate): + proc = self._maybe_subprocess_run( + ["python3", "-c", code], isolate=isolate, cwd=self.DEBUG_DIR + ) + + print("test stdout:", proc.stdout.decode("utf-8")) + print("test stderr:", proc.stderr.decode("utf-8")) + repro_dir_match = re.search( + r"(\S+)minifier_launcher.py", proc.stderr.decode("utf-8") + ) + if repro_dir_match is not None: + return proc, repro_dir_match.group(1) + return proc, None + + # Runs the minifier launcher script in `repro_dir` + def _run_minifier_launcher(self, repro_dir, isolate, *, minifier_args=()): + self.assertIsNotNone(repro_dir) + launch_file = os.path.join(repro_dir, "minifier_launcher.py") + with open(launch_file) as f: + launch_code = f.read() + self.assertTrue(os.path.exists(launch_file)) + + args = ["python3", launch_file, "minify", *minifier_args] + if not isolate: + args.append("--no-isolate") + launch_proc = self._maybe_subprocess_run(args, isolate=isolate, cwd=repro_dir) + print("minifier stdout:", launch_proc.stdout.decode("utf-8")) + stderr = launch_proc.stderr.decode("utf-8") + print("minifier stderr:", stderr) + self.assertNotIn("Input graph did not fail the tester", stderr) + + return launch_proc, launch_code + + # Runs the repro script in `repro_dir` + def _run_repro(self, repro_dir, *, isolate=True): + self.assertIsNotNone(repro_dir) + repro_file = os.path.join(repro_dir, "repro.py") + with open(repro_file) as f: + repro_code = f.read() + self.assertTrue(os.path.exists(repro_file)) + + repro_proc = self._maybe_subprocess_run( + ["python3", repro_file], isolate=isolate, cwd=repro_dir + ) + print("repro stdout:", repro_proc.stdout.decode("utf-8")) + print("repro stderr:", repro_proc.stderr.decode("utf-8")) + return repro_proc, repro_code + + # Template for testing code. + # `run_code` is the code to run for the test case. 
+ # `patch_code` is the code to be patched in every generated file; usually + # just use this to turn on bugs via the config + def _gen_test_code(self, run_code, repro_after, repro_level): + return f"""\ +import torch +import torch._dynamo +{torch._dynamo.config.codegen_config()} +{torch._inductor.config.codegen_config()} +torch._dynamo.config.repro_after = "{repro_after}" +torch._dynamo.config.repro_level = {repro_level} +torch._dynamo.config.debug_dir_root = "{self.DEBUG_DIR}" +{run_code} +""" + + # Runs a full minifier test. + # Minifier tests generally consist of 3 stages: + # 1. Run the problematic code + # 2. Run the generated minifier launcher script + # 3. Run the generated repro script + # + # If possible, you should run the test with isolate=False; use + # isolate=True only if the bug you're testing would otherwise + # crash the process + def _run_full_test( + self, run_code, repro_after, expected_error, *, isolate, minifier_args=() + ) -> Optional[MinifierTestResult]: + if isolate: + repro_level = 3 + elif expected_error is None or expected_error == "AccuracyError": + repro_level = 4 + else: + repro_level = 2 + test_code = self._gen_test_code(run_code, repro_after, repro_level) + print("running test", file=sys.stderr) + test_proc, repro_dir = self._run_test_code(test_code, isolate=isolate) + if expected_error is None: + # Just check that there was no error + self.assertEqual(test_proc.returncode, 0) + self.assertIsNone(repro_dir) + return None + # NB: Intentionally do not test return code; we only care about + # actually generating the repro, we don't have to crash + self.assertIn(expected_error, test_proc.stderr.decode("utf-8")) + self.assertIsNotNone(repro_dir) + print("running minifier", file=sys.stderr) + minifier_proc, minifier_code = self._run_minifier_launcher( + repro_dir, isolate=isolate, minifier_args=minifier_args + ) + print("running repro", file=sys.stderr) + repro_proc, repro_code = self._run_repro(repro_dir, isolate=isolate) + self.assertIn(expected_error, repro_proc.stderr.decode("utf-8")) + self.assertNotEqual(repro_proc.returncode, 0) + return MinifierTestResult(minifier_code=minifier_code, repro_code=repro_code) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/testing.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..fac20cf55508011f0d06511b30d846c1b26eeb4b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/testing.py @@ -0,0 +1,378 @@ +import contextlib +import dis +import functools +import logging +import os.path +import random +import re +import sys +import types +import unittest +from typing import List, Optional, Sequence, Union +from unittest.mock import patch + +np: Optional[types.ModuleType] = None +try: + import numpy as np +except ModuleNotFoundError: + np = None + +import torch +from torch import fx +from torch._dynamo.output_graph import OutputGraph + +from . 
import config, eval_frame, optimize_assert, reset +from .bytecode_transformation import ( + create_instruction, + debug_checks, + is_generator, + transform_code_object, +) +from .guards import CheckFunctionManager, GuardedCode +from .utils import same + +unsupported = eval_frame.unsupported +three = 3 + +log = logging.getLogger(__name__) + + +def clone_me(x): + if x is None: + return None + return x.detach().clone().requires_grad_(x.requires_grad) + + +def named_parameters_for_optimized_module(mod): + assert isinstance(mod, eval_frame.OptimizedModule) + return mod._orig_mod.named_parameters + + +def named_buffers_for_optimized_module(mod): + assert isinstance(mod, eval_frame.OptimizedModule) + return mod._orig_mod.named_buffers + + +def remove_optimized_module_prefix(name) -> str: + return re.sub(r"^_orig_mod[.]", "", name) + + +def collect_results(model, prediction, loss, example_inputs): + results = [] + results.append(prediction) + results.append(loss) + # if isinstance(loss, torch.Tensor) and loss.item() > 1: + # log.warning( + # f"High loss value alert - {loss:.2f}. Can result in unstable gradients." + # ) + + grads = dict() + params = dict() + for name, param in model.named_parameters(): + if isinstance(model, eval_frame.OptimizedModule): + name = remove_optimized_module_prefix(name) + param_copy = param + grad = param.grad + # Treat None and zero grad as same + if param.grad is None: + grad = torch.zeros_like(param) + grads[name + ".grad"] = grad + params[name] = param_copy + results.append(grads) + results.append(params) + buffers = dict() + for name, buffer in model.named_buffers(): + if isinstance(model, eval_frame.OptimizedModule): + name = remove_optimized_module_prefix(name) + buffers[name] = buffer + results.append(buffers) + for example in example_inputs: + if isinstance(example, (tuple, list)): + for inp in example: + if isinstance(inp, torch.Tensor): + results.append(inp.grad) + else: + if isinstance(example, torch.Tensor): + results.append(example.grad) + return results + + +def requires_bwd_pass(out): + if isinstance(out, torch.Tensor): + return out.requires_grad + elif isinstance(out, (list, tuple)): + return any(requires_bwd_pass(x) for x in out) + elif out is None: + return False + elif isinstance(out, int): + return False + raise NotImplementedError("Don't know how to reduce", type(out)) + + +def reduce_to_scalar_loss(out): + """Reduce the output of a model to get scalar loss""" + if isinstance(out, torch.Tensor): + # Mean does not work on integer tensors + return out.sum() / out.numel() + elif isinstance(out, (list, tuple)): + return sum([reduce_to_scalar_loss(x) for x in out]) / len(out) + elif type(out).__name__ in ( + "MaskedLMOutput", + "Seq2SeqLMOutput", + "CausalLMOutputWithCrossAttentions", + ): + return reduce_to_scalar_loss(out.logits) + elif type(out).__name__ == "SquashedNormal": + return out.mean.sum() + elif isinstance(out, dict): + return sum([reduce_to_scalar_loss(value) for value in out.values()]) / len( + out.keys() + ) + raise NotImplementedError("Don't know how to reduce", type(out)) + + +def debug_dir() -> str: + path = os.path.join(os.path.dirname(__file__), "../debug") + if not os.path.exists(path): + os.mkdir(path) + return path + + +def debug_dump(name, code: types.CodeType, extra="") -> None: + with open(os.path.join(debug_dir(), name), "w") as fd: + fd.write( + f"{dis.Bytecode(code).info()}\n\n{dis.Bytecode(code).dis()}\n\n{extra}\n" + ) + + +def debug_insert_nops( + frame, cache_size, hooks, _, *, skip: int = 0 +) -> Optional[GuardedCode]: 
+ """used to debug jump updates""" + + def insert_nops(instructions, code_options): + instructions.insert(0, create_instruction("NOP")) + instructions.insert(0, create_instruction("NOP")) + + if is_generator(frame.f_code): + return None + + debug_checks(frame.f_code) + code = transform_code_object(frame.f_code, insert_nops) + graph = OutputGraph( + code_options={}, + compiler_fn=None, + root_tx=None, + export=False, + export_constraints=None, + frame_state={"_id": 0}, + # TODO: shouldn't this be f_locals/f_globals from frame? + local_scope=locals(), + global_scope=globals(), + f_code=frame.f_code, + ) + + return GuardedCode(code, CheckFunctionManager(graph).check_fn) + + +class CompileCounter: + def __init__(self): + self.frame_count = 0 + self.op_count = 0 + + def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + self.frame_count += 1 + for node in gm.graph.nodes: + if "call" in node.op: + self.op_count += 1 + return gm.forward + + def clear(self): + self.frame_count = 0 + self.op_count = 0 + + +class CompileCounterWithBackend: + def __init__(self, backend): + self.frame_count = 0 + self.op_count = 0 + self.backend = backend + self.graphs = [] + + def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + from .backends.registry import lookup_backend + + self.frame_count += 1 + for node in gm.graph.nodes: + if "call" in node.op: + self.op_count += 1 + self.graphs.append(gm) + return lookup_backend(self.backend)(gm, example_inputs) + + +# Equivalent to backend="eager", but also records graphs that +# we can assert on +class EagerAndRecordGraphs: + def __init__(self): + self.graphs = [] + + def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + self.graphs.append(gm) + return gm + + +def strip_comment(code) -> str: + code = str(code) + return re.sub(r"(?m)^ *#.*\n?", "", code) + + +def remove_trailing_space(code) -> str: + return "\n".join([line.rstrip() for line in code.split("\n")]) + + +def normalize_gm(gm_str) -> str: + # strip comments as comments have path to files which may differ from + # system to system. 
+ return remove_trailing_space(strip_comment(gm_str)) + + +def standard_test( + self, + fn, + nargs, + expected_ops=None, + expected_ops_dynamic=None, + expected_frame_count=1, +): + if not config.assume_static_by_default and expected_ops_dynamic is not None: + expected_ops = expected_ops_dynamic + + actual = CompileCounter() + + args1 = [torch.randn(10, 10) for _ in range(nargs)] + args2 = [torch.randn(10, 10) for _ in range(nargs)] + correct1 = fn(*args1) + correct2 = fn(*args2) + reset() + opt_fn = optimize_assert(actual)(fn) + val1a = opt_fn(*args1) + val2a = opt_fn(*args2) + val1b = opt_fn(*args1) + val2b = opt_fn(*args2) + reset() + self.assertTrue(same(val1a, correct1)) + self.assertTrue(same(val1b, correct1)) + self.assertTrue(same(val2a, correct2)) + self.assertTrue(same(val2b, correct2)) + self.assertEqual(actual.frame_count, expected_frame_count) + if expected_ops is not None: + self.assertEqual(actual.op_count, expected_ops) + + +def dummy_fx_compile(gm: fx.GraphModule, example_inputs): + return gm.forward + + +def format_speedup(speedup, pvalue, is_correct=True, pvalue_threshold=0.1): + if not is_correct: + return "ERROR" + if pvalue > pvalue_threshold: + return f"{speedup:.3f}x SAME" + return f"{speedup:.3f}x p={pvalue:.2f}" + + +def rand_strided( + size: Sequence[int], + stride: Sequence[int], + dtype: torch.dtype = torch.float32, + device: Union[str, torch.device] = "cpu", + extra_size: int = 0, +): + needed_size = ( + sum((shape - 1) * stride for shape, stride in zip(size, stride)) + + 1 + + extra_size + ) + if dtype.is_floating_point: + buffer = torch.randn(needed_size, dtype=dtype, device=device) + else: + buffer = torch.zeros(size=[needed_size], dtype=dtype, device=device) + return torch.as_strided(buffer, size, stride) + + +def _make_fn_with_patches(fn, *patches): + @functools.wraps(fn) + def _fn(*args, **kwargs): + with contextlib.ExitStack() as stack: + for module, attr, val in patches: + stack.enter_context(patch.object(module, attr, val)) + + return fn(*args, **kwargs) + + return _fn + + +def make_test_cls_with_patches(cls, cls_prefix, fn_suffix, *patches, xfail_prop=None): + DummyTestClass = type(f"{cls_prefix}{cls.__name__}", cls.__bases__, {}) + DummyTestClass.__qualname__ = DummyTestClass.__name__ + + for name in dir(cls): + if name.startswith("test_"): + fn = getattr(cls, name) + if not callable(fn): + setattr(DummyTestClass, name, getattr(cls, name)) + continue + new_name = f"{name}{fn_suffix}" + new_fn = _make_fn_with_patches(fn, *patches) + new_fn.__name__ = new_name + if xfail_prop is not None and hasattr(fn, xfail_prop): + new_fn = unittest.expectedFailure(new_fn) + setattr(DummyTestClass, new_name, new_fn) + # NB: Doesn't handle slots correctly, but whatever + elif not hasattr(DummyTestClass, name): + setattr(DummyTestClass, name, getattr(cls, name)) + + return DummyTestClass + + +# test Python 3.11+ specific features +def skipIfNotPy311(fn): + if sys.version_info >= (3, 11): + return fn + return unittest.skip(fn) + + +def xfailIfPy311(fn): + if sys.version_info >= (3, 11): + return unittest.expectedFailure(fn) + return fn + + +# Controls tests generated in test/inductor/test_torchinductor_dynamic_shapes.py +# and test/dynamo/test_dynamic_shapes.py +def expectedFailureDynamic(fn): + fn._expected_failure_dynamic = True + return fn + + +# Controls tests generated in test/inductor/test_torchinductor_codegen_dynamic_shapes.py +def expectedFailureCodegenDynamic(fn): + fn._expected_failure_codegen_dynamic = True + return fn + + +# Controls test generated in 
test/inductor/test_cpp_wrapper.py +def expectedFailureDynamicWrapper(fn): + fn._expected_failure_dynamic_wrapper = True + return fn + + +def reset_rng_state(use_xla=False): + torch.manual_seed(1337) + random.seed(1337) + if np: + np.random.seed(1337) + if use_xla: + import torch_xla.core.xla_model as xm + + xm.set_rng_state(1337, str(xm.xla_device())) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..6210160607cd2feb3b0ce0fff9181e37e4a68f08 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/trace_rules.py @@ -0,0 +1,3460 @@ +import _collections_abc +import _weakrefset +import abc +import builtins +import collections +import contextlib +import copy +import copyreg +import dataclasses +import enum +import functools +import importlib +import inspect +import itertools +import linecache +import logging +import multiprocessing +import operator +import os +import posixpath +import random +import re +import selectors +import signal +import sys +import tempfile +import threading +import tokenize +import traceback +import types +import typing +import unittest +import weakref +from collections import defaultdict +from typing import Any, Callable, cast, Dict, List, Optional, Set, Union + +np: Optional[types.ModuleType] = None +try: + import numpy as np +except ModuleNotFoundError: + pass + +import torch +import torch._inductor.test_operators +import torch.distributed +import torch.utils._content_store +from ..utils import _config_module +from .utils import getfile, hashable, NP_SUPPORTED_MODULES, unwrap_if_wrapper + +from .variables import ( + BuiltinVariable, + FunctorchHigherOrderVariable, + NestedUserFunctionVariable, + SkipFunctionVariable, + TorchInGraphFunctionVariable, + UserFunctionVariable, + UserMethodVariable, +) + +from .variables.base import VariableTracker + + +""" +Map of function objects to their tracing rules (Dynamo variables). +* TorchInGraphFunctionVariable: The functions should be put into the FX graph or can be constant folded. E.g., + - torch.add: should be put into the FX graph. + - torch.is_floating_point: constant folded. +* SkipFunctionVariable: The objects should be skipped from tracing. +* UserFunctionVariable: The functions should be inlined. + +For developers: If you add/remove a torch level API, it may trigger failures from +test/dynamo/test_trace_rules.py:test_torch_name_rule_map_updated. To fix the failures: +If you are adding a new torch level API or Dynamo implementation: +* Add the name with the corresponding tracing rule to this map + if you are adding a new in graph function or Dynamo implementation for an existing function. +* Remove the object name from test/dynamo/test_trace_rules.ignored_c_binding_in_graph_function_names if it's there. + +If you are removing an existing torch level API: +* Remove the entry represented the API from this map or test/dynamo/test_trace_rules.ignored_c_binding_in_graph_function_names + depends on where it is. 
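+
+For illustration only, a minimal sketch of how a rule can be looked up from the
+name-keyed map defined below. The helper name _rule_for is hypothetical and this is
+not the lookup path Dynamo actually uses; it only shows what the map encodes:
+
+    def _rule_for(qualified_name):
+        # Return the Dynamo variable class registered for this torch-level API, if any.
+        return manual_torch_name_rule_map.get(qualified_name)
+
+    assert _rule_for("torch.jit.is_scripting") is TorchInGraphFunctionVariable
+    assert _rule_for("torch.manual_seed") is SkipFunctionVariable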
+ + +""" +manual_torch_name_rule_map = { + "torch.onnx.is_in_onnx_export": TorchInGraphFunctionVariable, + "torch.onnx.operators.shape_as_tensor": TorchInGraphFunctionVariable, + "torch.overrides.is_tensor_like": TorchInGraphFunctionVariable, + "torch.jit.is_scripting": TorchInGraphFunctionVariable, + "torch.jit.is_tracing": TorchInGraphFunctionVariable, + "torch.jit.annotate": TorchInGraphFunctionVariable, + "torch.distributed.is_available": TorchInGraphFunctionVariable, + "torch.distributed.is_initialized": TorchInGraphFunctionVariable, + "torch.distributed.get_rank": TorchInGraphFunctionVariable, + "torch.distributed.get_world_size": TorchInGraphFunctionVariable, + "torch.distributed._tensor.api.DTensor#from_local": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d._get_group_size_by_name": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d._resolve_group_name_by_ranks_and_tag": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d._get_group_tag": TorchInGraphFunctionVariable, + "torch.distributed.distributed_c10d.get_process_group_ranks": TorchInGraphFunctionVariable, + "torch._utils.is_compiling": TorchInGraphFunctionVariable, + "torch.overrides.get_default_nowrap_functions": TorchInGraphFunctionVariable, + "torch.fx._symbolic_trace.is_fx_tracing": TorchInGraphFunctionVariable, + "torch._dynamo.external_utils.is_compiling": TorchInGraphFunctionVariable, + "torch.compiler.is_compiling": TorchInGraphFunctionVariable, + "torch.compiler.is_dynamo_compiling": TorchInGraphFunctionVariable, + "torch.autograd._profiler_enabled": SkipFunctionVariable, + # We graph break on RNG state setters or getters like + # `torch.get_rng_state` or `torch.set_rng_state`. These functions + # are not aten operations and therefore they are completely ignored + # by the AOT dispatcher. As a result, the AOT graph does not have + # these setter or getter functions, producing an incorrect graph + # when it comes to rng states. 
+ "torch.default_generator#get_state": SkipFunctionVariable, + "torch._C.Generator#get_state": SkipFunctionVariable, + "torch.get_rng_state": SkipFunctionVariable, + "torch.cuda.get_rng_state": SkipFunctionVariable, + "torch.default_generator#set_state": SkipFunctionVariable, + "torch._C.Generator#set_state": SkipFunctionVariable, + "torch.set_rng_state": SkipFunctionVariable, + "torch.cuda.set_rng_state": SkipFunctionVariable, + # https://github.com/pytorch/pytorch/issues/107187 + "torch.manual_seed": SkipFunctionVariable, + # https://github.com/pytorch/pytorch/issues/93501 + "torch.nn.utils.rnn.pack_padded_sequence": SkipFunctionVariable, + "torch.nn.Parameter": TorchInGraphFunctionVariable, + "torch._nested_tensor_from_mask": SkipFunctionVariable, + "torch._nested_from_padded": SkipFunctionVariable, + # symbol operators implemented in Python + "torch.sym_not": TorchInGraphFunctionVariable, + "torch.sym_float": TorchInGraphFunctionVariable, + "torch.sym_int": TorchInGraphFunctionVariable, + "torch.sym_max": TorchInGraphFunctionVariable, + "torch.sym_min": TorchInGraphFunctionVariable, + "torch.sym_sqrt": TorchInGraphFunctionVariable, + "torch.sym_ite": TorchInGraphFunctionVariable, + "torch.Tensor#_make_wrapper_subclass": SkipFunctionVariable, + "torch.Tensor#__init__": SkipFunctionVariable, + "torch.cuda.set_device": SkipFunctionVariable, + "torch.cuda.current_device": SkipFunctionVariable, + "torch._C.autocast_decrement_nesting": SkipFunctionVariable, + "torch._C.autocast_increment_nesting": SkipFunctionVariable, + "torch.autograd.grad": SkipFunctionVariable, + "torch._C.clear_autocast_cache": SkipFunctionVariable, + "torch.distributions.constraints.is_dependent": SkipFunctionVariable, + "torch.jit.isinstance": SkipFunctionVariable, + "torch._C.set_anomaly_enabled": SkipFunctionVariable, + "torch._C.set_autocast_cache_enabled": SkipFunctionVariable, + "torch._C.set_autocast_cpu_dtype": SkipFunctionVariable, + "torch._C.set_autocast_cpu_enabled": SkipFunctionVariable, + "torch._C.set_autocast_enabled": SkipFunctionVariable, + "torch._C.set_autocast_gpu_dtype": SkipFunctionVariable, + "torch._C.set_autocast_ipu_dtype": SkipFunctionVariable, + "torch._C.set_autocast_ipu_enabled": SkipFunctionVariable, + "torch._C.set_autocast_xla_dtype": SkipFunctionVariable, + "torch._C.set_autocast_xla_enabled": SkipFunctionVariable, + "torch.resize_as_": SkipFunctionVariable, + "torch.resize_as_sparse_": SkipFunctionVariable, + "torch.get_default_device": TorchInGraphFunctionVariable, + # functorch/vmap + "torch._functorch.vmap._check_int_or_none": UserFunctionVariable, + "torch._functorch.vmap._check_out_dims_is_int_or_int_pytree": UserFunctionVariable, + "torch._functorch.vmap._check_randomness_arg": UserFunctionVariable, + "torch._functorch.vmap._chunked_vmap": UserFunctionVariable, + "torch._functorch.vmap._concat_chunked_outputs": UserFunctionVariable, + "torch._functorch.vmap._create_batched_inputs": UserFunctionVariable, + "torch._functorch.vmap._flat_vmap": UserFunctionVariable, + "torch._functorch.vmap._flatten_chunks_output": UserFunctionVariable, + "torch._functorch.vmap._get_chunked_inputs": UserFunctionVariable, + "torch._functorch.vmap._get_name": UserFunctionVariable, + "torch._functorch.vmap._maybe_remove_batch_dim": UserFunctionVariable, + "torch._functorch.vmap._num_outputs": UserFunctionVariable, + "torch._functorch.vmap._process_batched_inputs": UserFunctionVariable, + "torch._functorch.vmap._unwrap_batched": UserFunctionVariable, + 
"torch._functorch.vmap._validate_and_get_batch_size": UserFunctionVariable, + "torch._functorch.vmap.doesnt_support_saved_tensors_hooks": UserFunctionVariable, + "torch._functorch.vmap.get_chunk_sizes": UserFunctionVariable, + # lazy_load_decompositions uses a lock that is not supported yet in dynamo + # "torch._functorch.vmap.lazy_load_decompositions": UserFunctionVariable, + "torch._functorch.vmap.restore_vmap": UserFunctionVariable, + "torch._functorch.apis.vmap": UserFunctionVariable, + "torch._functorch.vmap.unwrap_batched": UserFunctionVariable, + "torch._functorch.vmap.vmap_impl": FunctorchHigherOrderVariable, + "torch._functorch.vmap.wrap_batched": UserFunctionVariable, + # functorch/grad + "torch._functorch.eager_transforms.grad_impl": FunctorchHigherOrderVariable, + "torch._functorch.apis.grad_and_value": UserFunctionVariable, + "torch._functorch.eager_transforms._as_tuple": UserFunctionVariable, + "torch._functorch.eager_transforms._check_unique_non_empty": UserFunctionVariable, + "torch._functorch.eager_transforms._create_differentiable": UserFunctionVariable, + "torch._functorch.eager_transforms._slice_argnums": UserFunctionVariable, + "torch._functorch.eager_transforms._undo_create_differentiable": UserFunctionVariable, + "torch._functorch.eager_transforms._validate_and_wrap_argnum": UserFunctionVariable, + "torch._functorch.eager_transforms._validate_and_wrap_argnums": UserFunctionVariable, + "torch._functorch.eager_transforms._wrap_all_tensors": UserFunctionVariable, + "torch._functorch.eager_transforms._wrap_tensor_for_grad": UserFunctionVariable, + # functorch/jacrev + "torch._functorch.eager_transforms.jacrev": UserFunctionVariable, + "torch._functorch.eager_transforms.error_if_complex": UserFunctionVariable, + "torch._functorch.eager_transforms._chunked_standard_basis_for_": UserFunctionVariable, + "torch._functorch.eager_transforms._safe_zero_index": UserFunctionVariable, + # functorch/vjp + "torch._functorch.eager_transforms.vjp": UserFunctionVariable, + "torch._functorch.eager_transforms._vjp_with_argnums": UserFunctionVariable, + "torch._functorch.eager_transforms.assert_non_empty_tensor_output": UserFunctionVariable, + "torch._constrain_as_size": UserFunctionVariable, + "torch._constrain_as_value": UserFunctionVariable, + "torch._tensor._convert": UserFunctionVariable, + "torch.jit._unwrap_optional": UserFunctionVariable, + "torch.backends.mha.get_fastpath_enabled": UserFunctionVariable, + "torch._C._functorch._add_batch_dim": TorchInGraphFunctionVariable, + "torch._C._functorch._remove_batch_dim": TorchInGraphFunctionVariable, + "torch._C._functorch._wrap_for_grad": TorchInGraphFunctionVariable, + "torch._C._functorch._unwrap_for_grad": TorchInGraphFunctionVariable, + "torch._C._functorch.is_batchedtensor": TorchInGraphFunctionVariable, + "torch._dynamo.mark_static": UserFunctionVariable, + "torch.fx.experimental.symbolic_shapes.guard_size_oblivious": TorchInGraphFunctionVariable, + "torch.cuda._get_device_properties": TorchInGraphFunctionVariable, + "torch.utils.hooks.BackwardHook": TorchInGraphFunctionVariable, + "torch.sparse_bsc_tensor": SkipFunctionVariable, + "torch.sparse_bsr_tensor": SkipFunctionVariable, + "torch.sparse_csc_tensor": SkipFunctionVariable, + "torch.sparse_csr_tensor": SkipFunctionVariable, + "torch.sparse_compressed_tensor": SkipFunctionVariable, + "torch._C._autograd._unsafe_set_version_counter": TorchInGraphFunctionVariable, +} + + +# In graph functions (including constant folding) that are C bindings +torch_c_binding_in_graph_functions = 
dict.fromkeys( + [ + "math.acos", + "math.acosh", + "math.asin", + "math.asinh", + "math.atan", + "math.atan2", + "math.atanh", + "math.ceil", + "math.comb", + "math.copysign", + "math.cos", + "math.cosh", + "math.degrees", + "math.dist", + "math.erf", + "math.erfc", + "math.exp", + "math.expm1", + "math.fabs", + "math.factorial", + "math.floor", + "math.fmod", + "math.frexp", + "math.fsum", + "math.gamma", + "math.gcd", + "math.hypot", + "math.isclose", + "math.isfinite", + "math.isinf", + "math.isnan", + "math.isqrt", + "math.ldexp", + "math.lgamma", + "math.log", + "math.log10", + "math.log1p", + "math.log2", + "math.modf", + "math.nextafter", + "math.perm", + "math.pow", + "math.prod", + "math.radians", + "math.remainder", + "math.sin", + "math.sinh", + "math.tan", + "math.tanh", + "math.trunc", + "math.ulp", + "torch._adaptive_avg_pool2d", + "torch._adaptive_avg_pool3d", + "torch._add_batch_dim", + "torch._add_relu_", + "torch._add_relu", + "torch._addmm_activation", + "torch._aminmax", + "torch._amp_foreach_non_finite_check_and_unscale_", + "torch._amp_update_scale_", + "torch._assert_async", + "torch._assert_tensor_metadata", + "torch._batch_norm_impl_index", + "torch._C._activate_cuda_trace", + "torch._C._add_cached_tensor", + "torch._C._add_docstr", + "torch._C._are_functorch_transforms_active", + "torch._C._autograd_init", + "torch._C._awaitable_nowait", + "torch._C._awaitable_wait", + "torch._C._awaitable", + "torch._C._backport_for_mobile_from_buffer_to_buffer", + "torch._C._backport_for_mobile_from_buffer", + "torch._C._backport_for_mobile_to_buffer", + "torch._C._backport_for_mobile", + "torch._C._broadcast_coalesced", + "torch._C._broadcast_out", + "torch._C._broadcast", + "torch._C._c10d_init", + "torch._C._calculate_package_version_based_on_upgraders", + "torch._C._can_use_flash_attention", + "torch._C._can_use_mem_efficient_attention", + "torch._C._check_onnx_proto", + "torch._C._check_sparse_tensor_invariants", + "torch._C._collect_all", + "torch._C._commit_update", + "torch._C._compile_graph_to_code_table", + "torch._C._construct_CUDA_Tensor_From_Storage_And_Metadata", + "torch._C._construct_storage_from_data_pointer", + "torch._C._conv_determine_backend_memory_format", + "torch._C._cpu._is_cpu_support_vnni", + "torch._C._crash_if_aten_asan", + "torch._C._crash_if_csrc_asan", + "torch._C._crash_if_csrc_ubsan", + "torch._C._crash_if_debug_asserts_fail", + "torch._C._crash_if_vptr_ubsan", + "torch._C._create_function_from_graph", + "torch._C._create_function_from_trace_with_dict", + "torch._C._create_function_from_trace", + "torch._C._create_graph_by_tracing", + "torch._C._create_module_with_type", + "torch._C._create_object_with_type", + "torch._C._cuda_attach_out_of_memory_observer", + "torch._C._cuda_beginAllocateCurrentStreamToPool", + "torch._C._cuda_canDeviceAccessPeer", + "torch._C._cuda_changeCurrentAllocator", + "torch._C._cuda_checkPoolLiveAllocations", + "torch._C._cuda_clearCublasWorkspaces", + "torch._C._cuda_cudaCachingAllocator_raw_alloc", + "torch._C._cuda_cudaCachingAllocator_raw_delete", + "torch._C._cuda_cudaCachingAllocator_set_allocator_settings", + "torch._C._cuda_cudaHostAllocator", + "torch._C._cuda_customAllocator", + "torch._C._cuda_emptyCache", + "torch._C._cuda_endAllocateCurrentStreamToPool", + "torch._C._cuda_exchangeDevice", + "torch._C._cuda_get_conv_benchmark_empty_cache", + "torch._C._cuda_get_cudnn_benchmark_limit", + "torch._C._cuda_get_sync_debug_mode", + "torch._C._cuda_getAllocator", + "torch._C._cuda_getAllocatorBackend", + 
"torch._C._cuda_getArchFlags", + "torch._C._cuda_getCheckpointState", + "torch._C._cuda_getCompiledVersion", + "torch._C._cuda_getCurrentBlasHandle", + "torch._C._cuda_getCurrentRawStream", + "torch._C._cuda_getCurrentStream", + "torch._C._cuda_getDefaultStream", + "torch._C._cuda_getDevice", + "torch._C._cuda_getDeviceCount", + "torch._C._cuda_hasPrimaryContext", + "torch._C._cuda_init", + "torch._C._cuda_ipc_collect", + "torch._C._cuda_isCurrentStreamCapturing", + "torch._C._cuda_isHistoryEnabled", + "torch._C._cuda_isInBadFork", + "torch._C._cuda_jiterator_compile_and_launch_kernel", + "torch._C._cuda_lock_mutex", + "torch._C._cuda_maybeExchangeDevice", + "torch._C._cuda_memorySnapshot", + "torch._C._cuda_memoryStats", + "torch._C._cuda_record_memory_history_legacy", + "torch._C._cuda_record_memory_history", + "torch._C._cuda_releasePool", + "torch._C._cuda_resetAccumulatedMemoryStats", + "torch._C._cuda_resetPeakMemoryStats", + "torch._C._cuda_set_cudnn_benchmark_limit", + "torch._C._cuda_set_sync_debug_mode", + "torch._C._cuda_setCheckpointPoolState", + "torch._C._cuda_setDevice", + "torch._C._cuda_setMemoryFraction", + "torch._C._cuda_setStream", + "torch._C._cuda_sleep", + "torch._C._cuda_synchronize", + "torch._C._cuda_unlock_mutex", + "torch._C._cudnn_set_conv_benchmark_empty_cache", + "torch._C._cudnn.getCompileVersion", + "torch._C._cudnn.getRuntimeVersion", + "torch._C._cudnn.getVersionInt", + "torch._C._current_autograd_node", + "torch._C._current_graph_task_execution_order", + "torch._C._current_graph_task_id", + "torch._C._cxx_flags", + "torch._C._debug_get_fusion_group_inlining", + "torch._C._debug_only_are_vmap_fallback_warnings_enabled", + "torch._C._debug_only_display_vmap_fallback_warnings", + "torch._C._debug_set_autodiff_subgraph_inlining", + "torch._C._debug_set_fusion_group_inlining", + "torch._C._demangle", + "torch._C._disabled_torch_dispatch_impl", + "torch._C._disabled_torch_function_impl", + "torch._C._dispatch_call_boxed", + "torch._C._dispatch_check_all_invariants", + "torch._C._dispatch_check_invariants", + "torch._C._dispatch_dump_table", + "torch._C._dispatch_dump", + "torch._C._dispatch_find_dangling_impls", + "torch._C._dispatch_find_schema_or_throw", + "torch._C._dispatch_get_all_op_names", + "torch._C._dispatch_get_backend_keyset_from_autograd", + "torch._C._dispatch_get_registrations_for_dispatch_key", + "torch._C._dispatch_has_backend_fallback", + "torch._C._dispatch_has_computed_kernel_for_dispatch_key", + "torch._C._dispatch_has_kernel_for_any_dispatch_key", + "torch._C._dispatch_has_kernel_for_dispatch_key", + "torch._C._dispatch_has_kernel", + "torch._C._dispatch_is_alias_key", + "torch._C._dispatch_is_included_in_alias", + "torch._C._dispatch_is_main_interpreter", + "torch._C._dispatch_isTensorSubclassLike", + "torch._C._dispatch_key_for_device", + "torch._C._dispatch_key_name", + "torch._C._dispatch_key_parse", + "torch._C._dispatch_key_set", + "torch._C._dispatch_keys", + "torch._C._dispatch_keyset_full_after", + "torch._C._dispatch_keyset_full", + "torch._C._dispatch_keyset_to_string", + "torch._C._dispatch_library", + "torch._C._dispatch_num_backends", + "torch._C._dispatch_print_registrations_for_dispatch_key", + "torch._C._dispatch_pystub", + "torch._C._dispatch_set_report_error_callback", + "torch._C._dispatch_tls_is_dispatch_key_excluded", + "torch._C._dispatch_tls_is_dispatch_key_included", + "torch._C._dispatch_tls_local_exclude_set", + "torch._C._dispatch_tls_local_include_set", + "torch._C._dispatch_tls_set_dispatch_key_excluded", + 
"torch._C._dispatch_tls_set_dispatch_key_included", + "torch._C._dist_autograd_init", + "torch._C._dump_local_tls_set", + "torch._C._dump_upgraders_map", + "torch._C._enable_mobile_interface_call_export", + "torch._C._enter_dual_level", + "torch._C._error_if_any_worker_fails", + "torch._C._exit_dual_level", + "torch._C._export_operator_list", + "torch._C._export_opnames", + "torch._C._faulty_agent_init", + "torch._C._fft.fft_fft", + "torch._C._fft.fft_fft2", + "torch._C._fft.fft_fftfreq", + "torch._C._fft.fft_fftn", + "torch._C._fft.fft_fftshift", + "torch._C._fft.fft_hfft", + "torch._C._fft.fft_hfft2", + "torch._C._fft.fft_hfftn", + "torch._C._fft.fft_ifft", + "torch._C._fft.fft_ifft2", + "torch._C._fft.fft_ifftn", + "torch._C._fft.fft_ifftshift", + "torch._C._fft.fft_ihfft", + "torch._C._fft.fft_ihfft2", + "torch._C._fft.fft_ihfftn", + "torch._C._fft.fft_irfft", + "torch._C._fft.fft_irfft2", + "torch._C._fft.fft_irfftn", + "torch._C._fft.fft_rfft", + "torch._C._fft.fft_rfft2", + "torch._C._fft.fft_rfftfreq", + "torch._C._fft.fft_rfftn", + "torch._C._free_And_Remove_DeleterFn", + "torch._C._freeze_module", + "torch._C._from_dlpack", + "torch._C._functionality_to_backend_keys", + "torch._C._functionalization_reapply_views_tls", + "torch._C._fuse_to_static_module", + "torch._C._gather_out", + "torch._C._gather", + "torch._C._generate_upgraders_graph", + "torch._C._get_autograd_fallback_mode", + "torch._C._get_backcompat_broadcast_warn", + "torch._C._get_backcompat_keepdim_warn", + "torch._C._get_caught_jit_exception_class_name", + "torch._C._get_caught_jit_exception_original_msg", + "torch._C._get_constant_bool_symnode", + "torch._C._get_cpp_backtrace", + "torch._C._get_cpu_capability", + "torch._C._get_cublas_allow_bf16_reduced_precision_reduction", + "torch._C._get_cublas_allow_fp16_reduced_precision_reduction", + "torch._C._get_cublas_allow_tf32", + "torch._C._get_cudnn_allow_tf32", + "torch._C._get_cudnn_benchmark", + "torch._C._get_cudnn_deterministic", + "torch._C._get_cudnn_enabled", + "torch._C._get_custom_class_python_wrapper", + "torch._C._get_default_device", + "torch._C._get_deterministic_algorithms_warn_only", + "torch._C._get_deterministic_algorithms", + "torch._C._get_deterministic_fill_uninitialized_memory", + "torch._C._get_dispatch_mode", + "torch._C._get_dispatch_stack_at", + "torch._C._get_file_format", + "torch._C._get_flash_sdp_enabled", + "torch._C._get_float32_matmul_precision", + "torch._C._get_function_stack_at", + "torch._C._get_graph_executor_optimize", + "torch._C._get_linalg_preferred_backend", + "torch._C._get_math_sdp_enabled", + "torch._C._get_max_operator_version", + "torch._C._get_mem_efficient_sdp_enabled", + "torch._C._get_mkldnn_enabled", + "torch._C._get_cudnn_sdp_enabled", + "torch._C._set_sdp_use_cudnn", + "torch._C._get_mobile_model_contained_types_from_buffer", + "torch._C._get_mobile_model_contained_types", + "torch._C._get_model_bytecode_version_from_buffer", + "torch._C._get_model_bytecode_version", + "torch._C._get_model_extra_files_from_buffer", + "torch._C._get_model_extra_files", + "torch._C._get_model_ops_and_info_from_buffer", + "torch._C._get_model_ops_and_info", + "torch._C._get_module_info_from_flatbuffer", + "torch._C._get_nnpack_enabled", + "torch._C._get_obj_in_tls", + "torch._C._get_operation_overload", + "torch._C._get_operator_version_map", + "torch._C._get_privateuse1_backend_name", + "torch._C._get_qengine", + "torch._C._get_schema", + "torch._C._get_nested_int", + "torch._C._get_tensor_metadata", + 
"torch._C._get_tracing_state", + "torch._C._get_upgrader_ranges", + "torch._C._get_upgraders_entry_map", + "torch._C._get_upgraders_map_size", + "torch._C._get_value_trace", + "torch._C._get_version_calculator_flag", + "torch._C._get_warnAlways", + "torch._C._graph_pool_handle", + "torch._C._group_tensors_by_device_and_dtype", + "torch._C._hack_do_not_use_clone_module_with_class", + "torch._C._has_distributed", + "torch._C._has_Standard_Deleter", + "torch._C._has_storage", + "torch._C._has_tensorexpr_cpp_tests", + "torch._C._run_tensorexpr_cpp_tests", + "torch._C._has_torch_function_unary", + "torch._C._has_torch_function_variadic", + "torch._C._has_torch_function", + "torch._C._import_ir_module_from_package", + "torch._C._increment_version", + "torch._C._infer_size", + "torch._C._init_names", + "torch._C._initExtension", + "torch._C._is_alias_of", + "torch._C._is_any_autocast_enabled", + "torch._C._is_cached_tensor", + "torch._C._is_fwd_grad_enabled", + "torch._C._is_key_in_tls", + "torch._C._is_multithreading_enabled", + "torch._C._is_torch_function_enabled", + "torch._C._is_torch_function_mode_enabled", + "torch._C._is_tracing", + "torch._C._is_view_replay_enabled", + "torch._C._is_xnnpack_enabled", + "torch._C._itt.is_available", + "torch._C._itt.mark", + "torch._C._itt.rangePop", + "torch._C._itt.rangePush", + "torch._C._ivalue_debug_python_object", + "torch._C._ivalue_tags_match", + "torch._C._jit_assert_is_instance", + "torch._C._jit_can_fuse_on_cpu_legacy", + "torch._C._jit_can_fuse_on_cpu", + "torch._C._jit_can_fuse_on_gpu", + "torch._C._jit_cat_wo_conditionals", + "torch._C._jit_check_alias_annotation", + "torch._C._jit_clear_class_registry", + "torch._C._jit_debug_fuser_num_cached_kernel_specs", + "torch._C._jit_debug_module_iterators", + "torch._C._jit_decay_packed_param_input_types", + "torch._C._jit_decomposition_graph_for_node", + "torch._C._jit_differentiate", + "torch._C._jit_erase_non_input_shape_information", + "torch._C._jit_flatten", + "torch._C._jit_fuser_get_fused_kernel_code", + "torch._C._jit_get_all_schemas", + "torch._C._jit_get_custom_class_schemas", + "torch._C._jit_get_emit_hooks", + "torch._C._jit_get_inline_everything_mode", + "torch._C._jit_get_logging_option", + "torch._C._jit_get_num_profiled_runs", + "torch._C._jit_get_operation", + "torch._C._jit_get_schemas_for_operator", + "torch._C._jit_get_te_cuda_pointwise_block_count", + "torch._C._jit_get_te_cuda_pointwise_block_size", + "torch._C._jit_get_te_cuda_pointwise_loop_levels", + "torch._C._jit_get_te_generate_block_code", + "torch._C._jit_get_te_must_use_llvm_cpu", + "torch._C._jit_get_tracer_state_warn", + "torch._C._jit_has_cpp_tests", + "torch._C._jit_init", + "torch._C._jit_interpret_graph", + "torch._C._jit_is_onnx_log_enabled", + "torch._C._jit_is_script_object", + "torch._C._jit_llga_enabled", + "torch._C._jit_nvfuser_can_be_enabled", + "torch._C._jit_nvfuser_clear_comparison_callback", + "torch._C._jit_nvfuser_enabled", + "torch._C._jit_nvfuser_horizontal_mode", + "torch._C._jit_nvfuser_set_comparison_callback", + "torch._C._jit_nvfuser_single_node_mode", + "torch._C._jit_object_is_non_holding", + "torch._C._jit_onnx_convert_pattern_from_subblock", + "torch._C._jit_onnx_create_full_scope_name", + "torch._C._jit_onnx_list_model_parameters", + "torch._C._jit_onnx_log", + "torch._C._jit_opt_conditionals", + "torch._C._jit_override_can_fuse_on_cpu_legacy", + "torch._C._jit_override_can_fuse_on_cpu", + "torch._C._jit_override_can_fuse_on_gpu", + "torch._C._jit_pass_autocast", + 
"torch._C._jit_pass_batch_mm", + "torch._C._jit_pass_canonicalize_graph_fuser_ops", + "torch._C._jit_pass_canonicalize", + "torch._C._jit_pass_complete_shape_analysis", + "torch._C._jit_pass_concat_frozen_linear", + "torch._C._jit_pass_constant_loop_unrolling", + "torch._C._jit_pass_constant_pooling", + "torch._C._jit_pass_constant_propagation_immutable_types", + "torch._C._jit_pass_constant_propagation", + "torch._C._jit_pass_convert_frozen_ops_to_mkldnn", + "torch._C._jit_pass_create_autodiff_subgraphs", + "torch._C._jit_pass_create_functional_graphs", + "torch._C._jit_pass_cse", + "torch._C._jit_pass_custom_pattern_based_rewrite_graph", + "torch._C._jit_pass_custom_pattern_based_rewrite", + "torch._C._jit_pass_dbr_quant_remove_redundant_aliases", + "torch._C._jit_pass_dce_allow_deleting_nodes_with_side_effects", + "torch._C._jit_pass_dce", + "torch._C._jit_pass_decompose_ops", + "torch._C._jit_pass_dedup_module_uses", + "torch._C._jit_pass_erase_number_types", + "torch._C._jit_pass_erase_shape_information", + "torch._C._jit_pass_filter_non_tensor_arguments", + "torch._C._jit_pass_fixup_onnx_controlflow_node", + "torch._C._jit_pass_fold_convbn", + "torch._C._jit_pass_fold_frozen_conv_add_or_sub", + "torch._C._jit_pass_fold_frozen_conv_bn", + "torch._C._jit_pass_fold_frozen_conv_mul_or_div", + "torch._C._jit_pass_fold_frozen_linear_bn", + "torch._C._jit_pass_fold_prepacking_ops", + "torch._C._jit_pass_functional_to_inplace_activation", + "torch._C._jit_pass_fuse_add_relu", + "torch._C._jit_pass_fuse_addmm", + "torch._C._jit_pass_fuse_clamp_w_prepacked_linear_conv", + "torch._C._jit_pass_fuse_frozen_conv_add_relu", + "torch._C._jit_pass_fuse_linear", + "torch._C._jit_pass_fuse_quantized_add_relu", + "torch._C._jit_pass_fuse_tensorexprs", + "torch._C._jit_pass_fuse", + "torch._C._jit_pass_inline_fork_wait", + "torch._C._jit_pass_inline_functional_graphs", + "torch._C._jit_pass_inline", + "torch._C._jit_pass_inplace_to_functional_activation", + "torch._C._jit_pass_insert_observer_method_for_ondevice_ptq", + "torch._C._jit_pass_insert_observers", + "torch._C._jit_pass_insert_prepack_unpack", + "torch._C._jit_pass_insert_prepacked_ops", + "torch._C._jit_pass_insert_quant_dequant_for_ondevice_ptq", + "torch._C._jit_pass_insert_quant_dequant", + "torch._C._jit_pass_integer_value_refinement", + "torch._C._jit_pass_lint", + "torch._C._jit_pass_loop_unrolling", + "torch._C._jit_pass_lower_all_tuples", + "torch._C._jit_pass_lower_graph", + "torch._C._jit_pass_metal_fold_prepacking_ops", + "torch._C._jit_pass_metal_fuse_clamp_w_prepacked_conv", + "torch._C._jit_pass_metal_insert_prepacked_ops", + "torch._C._jit_pass_metal_optimize_for_mobile", + "torch._C._jit_pass_onnx_assign_output_shape", + "torch._C._jit_pass_onnx_assign_scoped_names_for_node_and_value", + "torch._C._jit_pass_onnx_autograd_function_process", + "torch._C._jit_pass_onnx_block", + "torch._C._jit_pass_onnx_cast_all_constant_to_floating", + "torch._C._jit_pass_onnx_clear_scope_records", + "torch._C._jit_pass_onnx_constant_fold", + "torch._C._jit_pass_onnx_deduplicate_initializers", + "torch._C._jit_pass_onnx_eliminate_unused_items", + "torch._C._jit_pass_onnx_eval_peephole", + "torch._C._jit_pass_onnx_function_extraction", + "torch._C._jit_pass_onnx_function_substitution", + "torch._C._jit_pass_onnx_graph_shape_type_inference", + "torch._C._jit_pass_onnx_lint", + "torch._C._jit_pass_onnx_node_shape_type_inference", + "torch._C._jit_pass_onnx_peephole", + "torch._C._jit_pass_onnx_preprocess_caffe2", + 
"torch._C._jit_pass_onnx_preprocess", + "torch._C._jit_pass_onnx_quantization_insert_permutes", + "torch._C._jit_pass_onnx_remove_inplace_ops_for_onnx", + "torch._C._jit_pass_onnx_remove_print", + "torch._C._jit_pass_onnx_scalar_type_analysis", + "torch._C._jit_pass_onnx_set_dynamic_input_shape", + "torch._C._jit_pass_onnx_track_scope_attributes", + "torch._C._jit_pass_onnx_unpack_quantized_weights", + "torch._C._jit_pass_onnx", + "torch._C._jit_pass_optimize_for_inference", + "torch._C._jit_pass_optimize_for_mobile", + "torch._C._jit_pass_optimize_frozen_graph", + "torch._C._jit_pass_pattern_based_rewrite", + "torch._C._jit_pass_peephole_list_idioms", + "torch._C._jit_pass_peephole", + "torch._C._jit_pass_prepare_division_for_onnx", + "torch._C._jit_pass_propagate_device", + "torch._C._jit_pass_propagate_dtype", + "torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute", + "torch._C._jit_pass_propagate_shapes_on_graph", + "torch._C._jit_pass_quant_finalize_for_ondevice_ptq", + "torch._C._jit_pass_quant_finalize", + "torch._C._jit_pass_quant_fusion", + "torch._C._jit_pass_refine_integer_values", + "torch._C._jit_pass_refine_tuple_types", + "torch._C._jit_pass_remove_dropout", + "torch._C._jit_pass_remove_expands", + "torch._C._jit_pass_remove_inplace_ops", + "torch._C._jit_pass_remove_mutation", + "torch._C._jit_pass_replace_old_ops_with_upgraders", + "torch._C._jit_pass_replicate_dequantize", + "torch._C._jit_pass_run_decompositions", + "torch._C._jit_pass_specialize_autogradzero", + "torch._C._jit_pass_swap_functional_linear", + "torch._C._jit_pass_transform_conv1d_to_conv2d", + "torch._C._jit_pass_transpose_frozen_linear", + "torch._C._jit_pass_vulkan_fold_prepacking_ops", + "torch._C._jit_pass_vulkan_fuse_clamp_w_prepacked_conv", + "torch._C._jit_pass_vulkan_insert_prepacked_ops", + "torch._C._jit_pass_vulkan_optimize_for_mobile", + "torch._C._jit_register_decomposition_for_schema", + "torch._C._jit_register_shape_compute_graph_for_node", + "torch._C._jit_resolve_packet", + "torch._C._jit_run_cpp_tests", + "torch._C._jit_script_class_compile", + "torch._C._jit_script_compile_overload", + "torch._C._jit_script_compile", + "torch._C._jit_script_interface_compile", + "torch._C._jit_set_autocast_mode", + "torch._C._jit_set_bailout_depth", + "torch._C._jit_set_emit_hooks", + "torch._C._jit_set_fusion_strategy", + "torch._C._jit_set_inline_everything_mode", + "torch._C._jit_set_llga_enabled", + "torch._C._jit_set_logging_option", + "torch._C._jit_set_logging_stream", + "torch._C._jit_set_num_profiled_runs", + "torch._C._jit_set_nvfuser_enabled", + "torch._C._jit_set_nvfuser_guard_mode", + "torch._C._jit_set_nvfuser_horizontal_mode", + "torch._C._jit_set_nvfuser_single_node_mode", + "torch._C._jit_set_nvfuser_skip_node_kind", + "torch._C._jit_set_onnx_log_enabled", + "torch._C._jit_set_onnx_log_output_stream", + "torch._C._jit_set_profiling_executor", + "torch._C._jit_set_profiling_mode", + "torch._C._jit_set_symbolic_shapes_test_mode", + "torch._C._jit_set_te_cuda_pointwise_block_count", + "torch._C._jit_set_te_cuda_pointwise_block_size", + "torch._C._jit_set_te_cuda_pointwise_loop_levels", + "torch._C._jit_set_te_generate_block_code", + "torch._C._jit_set_te_must_use_llvm_cpu", + "torch._C._jit_set_texpr_dynamic_shape_enabled", + "torch._C._jit_set_texpr_fuser_enabled", + "torch._C._jit_set_texpr_reductions_enabled", + "torch._C._jit_set_tracer_state_warn", + "torch._C._jit_set_utf8_decoding_ignore", + "torch._C._jit_shape_compute_graph_for_node", + 
"torch._C._jit_symbolic_shapes_test_mode_enabled", + "torch._C._jit_texpr_dynamic_shape_enabled", + "torch._C._jit_texpr_fallback_allowed", + "torch._C._jit_texpr_fuser_enabled", + "torch._C._jit_texpr_reductions_enabled", + "torch._C._jit_texpr_set_fallback_allowed", + "torch._C._jit_to_backend_selective", + "torch._C._jit_to_backend", + "torch._C._jit_to_static_module", + "torch._C._jit_trace_graph", + "torch._C._jit_trace_module", + "torch._C._jit_tree_views.FalseLiteral", + "torch._C._jit_tree_views.NoneLiteral", + "torch._C._jit_tree_views.TrueLiteral", + "torch._C._jit_try_infer_type", + "torch._C._jit_unflatten", + "torch._C._last_executed_optimized_graph", + "torch._C._len_torch_dispatch_stack", + "torch._C._len_torch_function_stack", + "torch._C._linalg._linalg_eigvals", + "torch._C._linalg.linalg_cholesky_ex", + "torch._C._linalg.linalg_cholesky", + "torch._C._linalg.linalg_cond", + "torch._C._linalg.linalg_cross", + "torch._C._linalg.linalg_det", + "torch._C._linalg.linalg_diagonal", + "torch._C._linalg.linalg_eig", + "torch._C._linalg.linalg_eigh", + "torch._C._linalg.linalg_eigvals", + "torch._C._linalg.linalg_eigvalsh", + "torch._C._linalg.linalg_householder_product", + "torch._C._linalg.linalg_inv_ex", + "torch._C._linalg.linalg_inv", + "torch._C._linalg.linalg_ldl_factor_ex", + "torch._C._linalg.linalg_ldl_factor", + "torch._C._linalg.linalg_ldl_solve", + "torch._C._linalg.linalg_lstsq", + "torch._C._linalg.linalg_lu_factor_ex", + "torch._C._linalg.linalg_lu_factor", + "torch._C._linalg.linalg_lu_solve", + "torch._C._linalg.linalg_lu", + "torch._C._linalg.linalg_matmul", + "torch._C._linalg.linalg_matrix_exp", + "torch._C._linalg.linalg_matrix_norm", + "torch._C._linalg.linalg_matrix_power", + "torch._C._linalg.linalg_matrix_rank", + "torch._C._linalg.linalg_multi_dot", + "torch._C._linalg.linalg_norm", + "torch._C._linalg.linalg_pinv", + "torch._C._linalg.linalg_qr", + "torch._C._linalg.linalg_slogdet", + "torch._C._linalg.linalg_solve_ex", + "torch._C._linalg.linalg_solve_triangular", + "torch._C._linalg.linalg_solve", + "torch._C._linalg.linalg_svd", + "torch._C._linalg.linalg_svdvals", + "torch._C._linalg.linalg_tensorinv", + "torch._C._linalg.linalg_tensorsolve", + "torch._C._linalg.linalg_vander", + "torch._C._linalg.linalg_vecdot", + "torch._C._linalg.linalg_vector_norm", + "torch._C._llvm_enabled", + "torch._C._load_for_lite_interpreter_from_buffer", + "torch._C._load_for_lite_interpreter", + "torch._C._load_jit_module_from_bytes", + "torch._C._load_jit_module_from_file", + "torch._C._load_mobile_module_from_bytes", + "torch._C._load_mobile_module_from_file", + "torch._C._log_api_usage_metadata", + "torch._C._log_api_usage_once", + "torch._C._logging_set_logger", + "torch._C._meta_in_tls_dispatch_include", + "torch._C._mps_acquireEvent", + "torch._C._mps_currentAllocatedMemory", + "torch._C._mps_deviceSynchronize", + "torch._C._mps_driverAllocatedMemory", + "torch._C._mps_elapsedTimeOfEvents", + "torch._C._mps_emptyCache", + "torch._C._mps_get_default_generator", + "torch._C._mps_is_available", + "torch._C._mps_is_in_bad_fork", + "torch._C._mps_is_on_macos_13_or_newer", + "torch._C._mps_profilerStartTrace", + "torch._C._mps_profilerStopTrace", + "torch._C._mps_queryEvent", + "torch._C._mps_recordEvent", + "torch._C._mps_releaseEvent", + "torch._C._mps_setMemoryFraction", + "torch._C._mps_synchronizeEvent", + "torch._C._mps_waitForEvent", + "torch._C._multiprocessing_init", + "torch._C._nccl_all_gather", + "torch._C._nccl_all_reduce", + "torch._C._nccl_broadcast", + 
"torch._C._nccl_init_rank", + "torch._C._nccl_reduce_scatter", + "torch._C._nccl_reduce", + "torch._C._nccl_unique_id", + "torch._C._nccl_version_suffix", + "torch._C._nccl_version", + "torch._C._nested.nested_tensor", + "torch._C._nested.nested_to_padded_tensor", + "torch._C._new_symbolic_shape_symbol", + "torch._C._nn_module_to_mobile", + "torch._C._nn._conv_depthwise2d", + "torch._C._nn._pad_circular", + "torch._C._nn._pad_enum", + "torch._C._nn._parse_to", + "torch._C._nn._test_ambiguous_defaults", + "torch._C._nn._test_optional_filled_intlist", + "torch._C._nn._test_optional_floatlist", + "torch._C._nn._test_optional_intlist", + "torch._C._nn._test_string_default", + "torch._C._nn._test_warn_in_autograd", + "torch._C._nn._upsample_bicubic2d_aa", + "torch._C._nn._upsample_bilinear2d_aa", + "torch._C._nn._upsample_nearest_exact1d", + "torch._C._nn._upsample_nearest_exact2d", + "torch._C._nn._upsample_nearest_exact3d", + "torch._C._nn.adaptive_avg_pool2d", + "torch._C._nn.adaptive_avg_pool3d", + "torch._C._nn.adaptive_max_pool2d", + "torch._C._nn.adaptive_max_pool3d", + "torch._C._nn.avg_pool2d", + "torch._C._nn.avg_pool3d", + "torch._C._nn.binary_cross_entropy", + "torch._C._nn.col2im", + "torch._C._nn.conv_depthwise3d", + "torch._C._nn.cross_entropy_loss", + "torch._C._nn.elu_", + "torch._C._nn.elu", + "torch._C._nn.flatten_dense_tensors", + "torch._C._nn.fractional_max_pool2d", + "torch._C._nn.fractional_max_pool3d", + "torch._C._nn.gelu_", + "torch._C._nn.gelu", + "torch._C._nn.glu", + "torch._C._nn.hardsigmoid_", + "torch._C._nn.hardsigmoid", + "torch._C._nn.hardswish_", + "torch._C._nn.hardswish", + "torch._C._nn.hardtanh_", + "torch._C._nn.hardtanh", + "torch._C._nn.huber_loss", + "torch._C._nn.im2col", + "torch._C._nn.l1_loss", + "torch._C._nn.leaky_relu_", + "torch._C._nn.leaky_relu", + "torch._C._nn.linear", + "torch._C._nn.log_sigmoid", + "torch._C._nn.max_pool2d_with_indices", + "torch._C._nn.max_pool3d_with_indices", + "torch._C._nn.max_unpool2d", + "torch._C._nn.max_unpool3d", + "torch._C._nn.mish_", + "torch._C._nn.mish", + "torch._C._nn.mkldnn_linear", + "torch._C._nn.mkldnn_reorder_conv2d_weight", + "torch._C._nn.mkldnn_reorder_conv3d_weight", + "torch._C._nn.mse_loss", + "torch._C._nn.multi_margin_loss", + "torch._C._nn.multilabel_margin_loss", + "torch._C._nn.nll_loss_nd", + "torch._C._nn.nll_loss", + "torch._C._nn.nll_loss2d", + "torch._C._nn.one_hot", + "torch._C._nn.pad_sequence", + "torch._C._nn.pad", + "torch._C._nn.reflection_pad1d", + "torch._C._nn.reflection_pad2d", + "torch._C._nn.reflection_pad3d", + "torch._C._nn.relu6_", + "torch._C._nn.relu6", + "torch._C._nn.replication_pad1d", + "torch._C._nn.replication_pad2d", + "torch._C._nn.replication_pad3d", + "torch._C._nn.rrelu_with_noise_", + "torch._C._nn.rrelu_with_noise", + "torch._C._nn.scaled_dot_product_attention", + "torch._C._nn.silu_", + "torch._C._nn.silu", + "torch._C._nn.slow_conv_dilated2d", + "torch._C._nn.slow_conv_dilated3d", + "torch._C._nn.slow_conv_transpose2d", + "torch._C._nn.slow_conv_transpose3d", + "torch._C._nn.slow_conv3d", + "torch._C._nn.smooth_l1_loss", + "torch._C._nn.soft_margin_loss", + "torch._C._nn.softplus", + "torch._C._nn.softshrink", + "torch._C._nn.thnn_conv2d", + "torch._C._nn.unflatten_dense_tensors", + "torch._C._nn.upsample_bicubic2d", + "torch._C._nn.upsample_bilinear2d", + "torch._C._nn.upsample_linear1d", + "torch._C._nn.upsample_nearest1d", + "torch._C._nn.upsample_nearest2d", + "torch._C._nn.upsample_nearest3d", + "torch._C._nn.upsample_trilinear3d", + 
"torch._C._non_sym_sizes", + "torch._C._overlaps", + "torch._C._parallel_info", + "torch._C._parse_dispatch_key", + "torch._C._parse_source_def", + "torch._C._pop_torch_dispatch_stack", + "torch._C._pop_torch_function_stack", + "torch._C._propagate_and_assign_input_shapes", + "torch._C._propagate_shapes", + "torch._C._propagate_xla_data", + "torch._C._push_on_torch_dispatch_stack", + "torch._C._push_on_torch_function_stack", + "torch._C._quantize_ondevice_ptq_dynamic", + "torch._C._register_py_class_for_device", + "torch._C._remove_cached_tensor", + "torch._C._remove_worker_pids", + "torch._C._rename_privateuse1_backend", + "torch._C._replace_", + "torch._C._replace_overloaded_method_decl", + "torch._C._resolve_type_from_object", + "torch._C._resolve_type", + "torch._C._rocm_is_backward_pass", + "torch._C._rpc_init", + "torch._C._run_emit_module_hook", + "torch._C._save_jit_module_to_bytes", + "torch._C._save_jit_module", + "torch._C._save_mobile_module_to_bytes", + "torch._C._save_mobile_module", + "torch._C._save_parameters", + "torch._C._scatter_out", + "torch._C._scatter", + "torch._C._select_conv_backend", + "torch._C._set_autograd_fallback_mode", + "torch._C._set_backcompat_broadcast_warn", + "torch._C._set_backcompat_keepdim_warn", + "torch._C._set_cached_tensors_enabled", + "torch._C._set_check_sparse_tensor_invariants", + "torch._C._set_conj", + "torch._C._set_cublas_allow_bf16_reduced_precision_reduction", + "torch._C._set_cublas_allow_fp16_reduced_precision_reduction", + "torch._C._set_cublas_allow_tf32", + "torch._C._set_cudnn_allow_tf32", + "torch._C._set_cudnn_benchmark", + "torch._C._set_cudnn_deterministic", + "torch._C._set_cudnn_enabled", + "torch._C._set_default_dtype", + "torch._C._set_default_mobile_cpu_allocator", + "torch._C._set_default_tensor_type", + "torch._C._set_deterministic_algorithms", + "torch._C._set_deterministic_fill_uninitialized_memory", + "torch._C._set_dispatch_mode", + "torch._C._set_float32_matmul_precision", + "torch._C._set_fwd_grad_enabled", + "torch._C._set_grad_enabled", + "torch._C._set_graph_executor_optimize", + "torch._C._set_linalg_preferred_backend", + "torch._C._set_meta_in_tls_dispatch_include", + "torch._C._set_mkldnn_enabled", + "torch._C._set_multithreading_enabled", + "torch._C._set_neg", + "torch._C._set_nnpack_enabled", + "torch._C._set_print_stack_traces_on_fatal_signal", + "torch._C._set_qengine", + "torch._C._set_sdp_use_flash", + "torch._C._set_sdp_use_math", + "torch._C._set_sdp_use_mem_efficient", + "torch._C._set_should_use_format_with_string_table", + "torch._C._set_storage_access_error_msg", + "torch._C._set_tensor_metadata", + "torch._C._set_tracing_state", + "torch._C._set_value_trace", + "torch._C._set_view_replay_enabled", + "torch._C._set_warnAlways", + "torch._C._set_worker_pids", + "torch._C._set_worker_signal_handlers", + "torch._C._should_allow_numbers_as_tensors", + "torch._C._show_config", + "torch._C._sparse._sparse_addmm", + "torch._C._sparse._sparse_log_softmax", + "torch._C._sparse._sparse_mm_reduce_impl", + "torch._C._sparse._sparse_mm", + "torch._C._sparse._sparse_softmax", + "torch._C._sparse._spdiags", + "torch._C._sparse.sparse_sampled_addmm", + "torch._C._special.special_airy_ai", + "torch._C._special.special_bessel_j0", + "torch._C._special.special_bessel_j1", + "torch._C._special.special_bessel_y0", + "torch._C._special.special_bessel_y1", + "torch._C._special.special_chebyshev_polynomial_t", + "torch._C._special.special_chebyshev_polynomial_u", + "torch._C._special.special_chebyshev_polynomial_v", 
+ "torch._C._special.special_chebyshev_polynomial_w", + "torch._C._special.special_digamma", + "torch._C._special.special_entr", + "torch._C._special.special_erf", + "torch._C._special.special_erfc", + "torch._C._special.special_erfcx", + "torch._C._special.special_erfinv", + "torch._C._special.special_exp2", + "torch._C._special.special_expit", + "torch._C._special.special_expm1", + "torch._C._special.special_gammainc", + "torch._C._special.special_gammaincc", + "torch._C._special.special_gammaln", + "torch._C._special.special_hermite_polynomial_h", + "torch._C._special.special_hermite_polynomial_he", + "torch._C._special.special_i0", + "torch._C._special.special_i0e", + "torch._C._special.special_i1", + "torch._C._special.special_i1e", + "torch._C._special.special_laguerre_polynomial_l", + "torch._C._special.special_legendre_polynomial_p", + "torch._C._special.special_log_ndtr", + "torch._C._special.special_log_softmax", + "torch._C._special.special_log1p", + "torch._C._special.special_logit", + "torch._C._special.special_logsumexp", + "torch._C._special.special_modified_bessel_i0", + "torch._C._special.special_modified_bessel_i1", + "torch._C._special.special_modified_bessel_k0", + "torch._C._special.special_modified_bessel_k1", + "torch._C._special.special_multigammaln", + "torch._C._special.special_ndtr", + "torch._C._special.special_ndtri", + "torch._C._special.special_polygamma", + "torch._C._special.special_psi", + "torch._C._special.special_round", + "torch._C._special.special_scaled_modified_bessel_k0", + "torch._C._special.special_scaled_modified_bessel_k1", + "torch._C._special.special_shifted_chebyshev_polynomial_t", + "torch._C._special.special_shifted_chebyshev_polynomial_u", + "torch._C._special.special_shifted_chebyshev_polynomial_v", + "torch._C._special.special_shifted_chebyshev_polynomial_w", + "torch._C._special.special_sinc", + "torch._C._special.special_softmax", + "torch._C._special.special_spherical_bessel_j0", + "torch._C._special.special_xlog1py", + "torch._C._special.special_xlogy", + "torch._C._special.special_zeta", + "torch._C._stash_obj_in_tls", + "torch._C._storage_id", + "torch._C._storage_Use_Count", + "torch._C._supported_qengines", + "torch._C._te.abs", + "torch._C._te.acos", + "torch._C._te.annotate_input_shapes", + "torch._C._te.asin", + "torch._C._te.atan", + "torch._C._te.atan2", + "torch._C._te.ceil", + "torch._C._te.Compute", + "torch._C._te.Compute2", + "torch._C._te.construct_codegen", + "torch._C._te.cos", + "torch._C._te.cosh", + "torch._C._te.erf", + "torch._C._te.erfc", + "torch._C._te.exp", + "torch._C._te.expm1", + "torch._C._te.fixup_missing_shape_info", + "torch._C._te.floor", + "torch._C._te.fmod", + "torch._C._te.frac", + "torch._C._te.ifThenElse", + "torch._C._te.is_graph_compilable", + "torch._C._te.isnan", + "torch._C._te.lgamma", + "torch._C._te.log", + "torch._C._te.log10", + "torch._C._te.log1p", + "torch._C._te.log2", + "torch._C._te.lower", + "torch._C._te.make_shapes_symbolic", + "torch._C._te.pow", + "torch._C._te.Reduce", + "torch._C._te.remainder", + "torch._C._te.remove_graph_output", + "torch._C._te.remove_unused_self_argument", + "torch._C._te.replace_list_output_with_tuple", + "torch._C._te.round", + "torch._C._te.rsqrt", + "torch._C._te.sigmoid", + "torch._C._te.simplify", + "torch._C._te.sin", + "torch._C._te.sinh", + "torch._C._te.sqrt", + "torch._C._te.tan", + "torch._C._te.tanh", + "torch._C._te.trim_graph", + "torch._C._te.trunc", + "torch._C._tensor_impl_raw_handle", + 
"torch._C._test_only_add_entry_to_op_version_map", + "torch._C._test_only_populate_upgraders", + "torch._C._test_only_remove_entry_to_op_version_map", + "torch._C._test_only_remove_upgraders", + "torch._C._to_dlpack", + "torch._C._to_functionality_key", + "torch._C._tracer_set_force_outplace", + "torch._C._tracer_set_get_unique_name_fn", + "torch._C._tracer_warn_use_python", + "torch._C._unset_default_mobile_cpu_allocator", + "torch._C._unset_dispatch_mode", + "torch._C._valgrind_supported_platform", + "torch._C._valgrind_toggle_and_dump_stats", + "torch._C._valgrind_toggle", + "torch._C._verbose.mkl_set_verbose", + "torch._C._verbose.mkldnn_set_verbose", + "torch._C._vmapmode_decrement_nesting", + "torch._C._vmapmode_increment_nesting", + "torch._C._warn_deprecation", + "torch._C._warn", + "torch._C._will_engine_execute_node", + "torch._C._wrap_tensor_impl", + "torch._C.fork", + "torch._C.get_autocast_cpu_dtype", + "torch._C.get_autocast_gpu_dtype", + "torch._C.get_autocast_ipu_dtype", + "torch._C.get_autocast_xla_dtype", + "torch._C.get_default_dtype", + "torch._C.get_num_interop_threads", + "torch._C.get_num_threads", + "torch._C.import_ir_module_from_buffer", + "torch._C.import_ir_module", + "torch._C.init_num_threads", + "torch._C.is_anomaly_check_nan_enabled", + "torch._C.is_anomaly_enabled", + "torch._C.is_autocast_cache_enabled", + "torch._C.is_autocast_cpu_enabled", + "torch._C.is_autocast_enabled", + "torch._C.is_autocast_ipu_enabled", + "torch._C.is_autocast_xla_enabled", + "torch._C.is_grad_enabled", + "torch._C.is_inference_mode_enabled", + "torch._C.merge_type_from_type_comment", + "torch._C.parse_ir", + "torch._C.parse_schema", + "torch._C.parse_type_comment", + "torch._C.read_vitals", + "torch._C.set_flush_denormal", + "torch._C.set_num_interop_threads", + "torch._C.set_num_threads", + "torch._C.set_vital", + "torch._C.unify_type_list", + "torch._C.vitals_enabled", + "torch._C.wait", + "torch._cast_Byte", + "torch._cast_Char", + "torch._cast_Double", + "torch._cast_Float", + "torch._cast_Half", + "torch._cast_Int", + "torch._cast_Long", + "torch._cast_Short", + "torch._choose_qparams_per_tensor", + "torch._chunk_cat", + "torch._coalesce", + "torch._compute_linear_combination", + "torch._conj_copy", + "torch._conj_physical", + "torch._conj", + "torch._convert_indices_from_coo_to_csr", + "torch._convert_indices_from_csr_to_coo", + "torch._convert_weight_to_int4pack", + "torch._convolution_mode", + "torch._convolution", + "torch._copy_from_and_resize", + "torch._copy_from", + "torch._cslt_compress", + "torch._cslt_sparse_mm", + "torch._ctc_loss", + "torch._cudnn_ctc_loss", + "torch._cudnn_init_dropout_state", + "torch._cudnn_rnn_flatten_weight", + "torch._cudnn_rnn", + "torch._cufft_clear_plan_cache", + "torch._cufft_get_plan_cache_max_size", + "torch._cufft_get_plan_cache_size", + "torch._cufft_set_plan_cache_max_size", + "torch._cummax_helper", + "torch._cummin_helper", + "torch._debug_has_internal_overlap", + "torch._dim_arange", + "torch._dirichlet_grad", + "torch._disable_functionalization", + "torch._efficientzerotensor", + "torch._embedding_bag_forward_only", + "torch._embedding_bag", + "torch._empty_affine_quantized", + "torch._empty_per_channel_affine_quantized", + "torch._enable_functionalization", + "torch._euclidean_dist", + "torch._fake_quantize_learnable_per_channel_affine", + "torch._fake_quantize_learnable_per_tensor_affine", + "torch._fake_quantize_per_tensor_affine_cachemask_tensor_qparams", + "torch._fft_c2c", + "torch._fft_c2r", + "torch._fft_r2c", + 
"torch._fill_mem_eff_dropout_mask_", + "torch._foobar", + "torch._foreach_abs_", + "torch._foreach_abs", + "torch._foreach_acos_", + "torch._foreach_acos", + "torch._foreach_add_", + "torch._foreach_add", + "torch._foreach_addcdiv_", + "torch._foreach_addcdiv", + "torch._foreach_addcmul_", + "torch._foreach_addcmul", + "torch._foreach_asin_", + "torch._foreach_asin", + "torch._foreach_atan_", + "torch._foreach_atan", + "torch._foreach_ceil_", + "torch._foreach_ceil", + "torch._foreach_clamp_max_", + "torch._foreach_clamp_max", + "torch._foreach_clamp_min_", + "torch._foreach_clamp_min", + "torch._foreach_copy_", + "torch._foreach_cos_", + "torch._foreach_cos", + "torch._foreach_cosh_", + "torch._foreach_cosh", + "torch._foreach_div_", + "torch._foreach_div", + "torch._foreach_erf_", + "torch._foreach_erf", + "torch._foreach_erfc_", + "torch._foreach_erfc", + "torch._foreach_exp_", + "torch._foreach_exp", + "torch._foreach_expm1_", + "torch._foreach_expm1", + "torch._foreach_floor_", + "torch._foreach_floor", + "torch._foreach_frac_", + "torch._foreach_frac", + "torch._foreach_lerp_", + "torch._foreach_lerp", + "torch._foreach_lgamma_", + "torch._foreach_lgamma", + "torch._foreach_log_", + "torch._foreach_log", + "torch._foreach_log10_", + "torch._foreach_log10", + "torch._foreach_log1p_", + "torch._foreach_log1p", + "torch._foreach_log2_", + "torch._foreach_log2", + "torch._foreach_maximum_", + "torch._foreach_maximum", + "torch._foreach_minimum_", + "torch._foreach_minimum", + "torch._foreach_mul_", + "torch._foreach_mul", + "torch._foreach_neg_", + "torch._foreach_neg", + "torch._foreach_norm", + "torch._foreach_pow_", + "torch._foreach_pow", + "torch._foreach_reciprocal_", + "torch._foreach_reciprocal", + "torch._foreach_round_", + "torch._foreach_round", + "torch._foreach_sigmoid_", + "torch._foreach_sigmoid", + "torch._foreach_sign_", + "torch._foreach_sign", + "torch._foreach_sin_", + "torch._foreach_sin", + "torch._foreach_sinh_", + "torch._foreach_sinh", + "torch._foreach_sqrt_", + "torch._foreach_sqrt", + "torch._foreach_sub_", + "torch._foreach_sub", + "torch._foreach_tan_", + "torch._foreach_tan", + "torch._foreach_tanh_", + "torch._foreach_tanh", + "torch._foreach_trunc_", + "torch._foreach_trunc", + "torch._foreach_zero_", + "torch._freeze_functional_tensor", + "torch._from_functional_tensor", + "torch._functional_assert_async", + "torch._functional_sym_constrain_range_for_size", + "torch._functional_sym_constrain_range", + "torch._functionalize_are_all_mutations_hidden_from_autograd", + "torch._functionalize_commit_update", + "torch._functionalize_enable_reapply_views", + "torch._functionalize_has_data_mutation", + "torch._functionalize_has_metadata_mutation", + "torch._functionalize_is_multi_output_view", + "torch._functionalize_mark_mutation_hidden_from_autograd", + "torch._functionalize_replace", + "torch._functionalize_sync", + "torch._functionalize_was_storage_changed", + "torch._fused_adam_", + "torch._fused_adamw_", + "torch._fused_dropout", + "torch._fused_moving_avg_obs_fq_helper", + "torch._fused_sdp_choice", + "torch._fw_primal_copy", + "torch._grid_sampler_2d_cpu_fallback", + "torch._has_compatible_shallow_copy_type", + "torch._histogramdd_bin_edges", + "torch._histogramdd_from_bin_cts", + "torch._histogramdd_from_bin_tensors", + "torch._index_put_impl_", + "torch._indices_copy", + "torch._int_mm", + "torch._is_all_true", + "torch._is_any_true", + "torch._is_functional_tensor", + "torch._is_zerotensor", + "torch._linalg_check_errors", + "torch._linalg_det", + 
"torch._linalg_eigh", + "torch._linalg_slogdet", + "torch._linalg_solve_ex", + "torch._linalg_svd", + "torch._log_softmax_backward_data", + "torch._log_softmax", + "torch._logcumsumexp", + "torch._lstm_mps", + "torch._lu_with_info", + "torch._make_dep_token", + "torch._make_dual_copy", + "torch._make_dual", + "torch._make_per_channel_quantized_tensor", + "torch._make_per_tensor_quantized_tensor", + "torch._masked_scale", + "torch._masked_softmax", + "torch._mirror_autograd_meta_to", + "torch._mixed_dtypes_linear", + "torch._mkldnn_reshape", + "torch._mkldnn_transpose_", + "torch._mkldnn_transpose", + "torch._mps_convolution_transpose", + "torch._mps_convolution", + "torch._native_batch_norm_legit_no_training", + "torch._native_batch_norm_legit", + "torch._native_multi_head_attention", + "torch._neg_view_copy", + "torch._neg_view", + "torch._nested_from_padded_and_nested_example", + "torch._nested_tensor_from_mask_left_aligned", + "torch._nested_tensor_from_tensor_list", + "torch._nested_tensor_softmax_with_shape", + "torch._nested_view_from_buffer_copy", + "torch._nested_view_from_buffer", + "torch._nnpack_available", + "torch._nnpack_spatial_convolution", + "torch._pack_padded_sequence", + "torch._pad_packed_sequence", + "torch._pin_memory", + "torch._prelu_kernel", + "torch._propagate_xla_data", + "torch._remove_batch_dim", + "torch._reshape_alias_copy", + "torch._reshape_from_tensor", + "torch._resize_output_", + "torch._rowwise_prune", + "torch._sample_dirichlet", + "torch._saturate_weight_to_fp16", + "torch._scaled_dot_product_attention_math", + "torch._scaled_dot_product_efficient_attention", + "torch._scaled_dot_product_flash_attention", + "torch._scaled_dot_product_flash_attention_for_cpu", + "torch._scaled_dot_product_cudnn_attention", + "torch._scaled_mm", + "torch._shape_as_tensor", + "torch._sobol_engine_draw", + "torch._sobol_engine_ff_", + "torch._sobol_engine_initialize_state_", + "torch._sobol_engine_scramble_", + "torch._softmax_backward_data", + "torch._softmax", + "torch._sparse_broadcast_to_copy", + "torch._sparse_broadcast_to", + "torch._sparse_csr_prod", + "torch._sparse_csr_sum", + "torch._sparse_log_softmax_backward_data", + "torch._sparse_semi_structured_linear", + "torch._sparse_softmax_backward_data", + "torch._sparse_sparse_matmul", + "torch._sparse_sum", + "torch._stack", + "torch._standard_gamma_grad", + "torch._standard_gamma", + "torch._test_autograd_multiple_dispatch_view_copy", + "torch._test_autograd_multiple_dispatch_view", + "torch._test_autograd_multiple_dispatch", + "torch._test_check_tensor", + "torch._test_functorch_fallback", + "torch._test_serialization_subcmul", + "torch._to_cpu", + "torch._to_functional_tensor", + "torch._to_sparse_semi_structured", + "torch._transform_bias_rescale_qkv", + "torch._transformer_encoder_layer_fwd", + "torch._trilinear", + "torch._triton_multi_head_attention", + "torch._triton_scaled_dot_attention", + "torch._unique", + "torch._unique2", + "torch._unpack_dual", + "torch._unsafe_index_put", + "torch._unsafe_index", + "torch._use_cudnn_ctc_loss", + "torch._use_cudnn_rnn_flatten_weight", + "torch._values_copy", + "torch._weight_int4pack_mm", + "torch._weight_int8pack_mm", + "torch._weight_norm_interface", + "torch._weight_norm", + "torch.abs_", + "torch.abs", + "torch.absolute", + "torch.acos_", + "torch.acos", + "torch.acosh_", + "torch.acosh", + "torch.adaptive_avg_pool1d", + "torch.adaptive_max_pool1d", + "torch.add", + "torch.addbmm", + "torch.addcdiv", + "torch.addcmul", + "torch.addmm", + "torch.addmv_", + 
"torch.addmv", + "torch.addr", + "torch.adjoint", + "torch.affine_grid_generator", + "torch.alias_copy", + "torch.all", + "torch.allclose", + "torch.alpha_dropout_", + "torch.alpha_dropout", + "torch.amax", + "torch.amin", + "torch.aminmax", + "torch.angle", + "torch.any", + "torch.arange", + "torch.arccos_", + "torch.arccos", + "torch.arccosh_", + "torch.arccosh", + "torch.arcsin_", + "torch.arcsin", + "torch.arcsinh_", + "torch.arcsinh", + "torch.arctan_", + "torch.arctan", + "torch.arctan2", + "torch.arctanh_", + "torch.arctanh", + "torch.argmax", + "torch.argmin", + "torch.argsort", + "torch.argwhere", + "torch.as_strided_", + "torch.as_strided_copy", + "torch.as_strided_scatter", + "torch.as_strided", + "torch.as_tensor", + "torch.asarray", + "torch.asin_", + "torch.asin", + "torch.asinh_", + "torch.asinh", + "torch.atan_", + "torch.atan", + "torch.atan2", + "torch.atanh_", + "torch.atanh", + "torch.avg_pool1d", + "torch.baddbmm", + "torch.bartlett_window", + "torch.batch_norm_backward_elemt", + "torch.batch_norm_backward_reduce", + "torch.batch_norm_elemt", + "torch.batch_norm_gather_stats_with_counts", + "torch.batch_norm_gather_stats", + "torch.batch_norm_stats", + "torch.batch_norm_update_stats", + "torch.batch_norm", + "torch.bernoulli", + "torch.bilinear", + "torch.binary_cross_entropy_with_logits", + "torch.bincount", + "torch.binomial", + "torch.bitwise_and", + "torch.bitwise_left_shift", + "torch.bitwise_not", + "torch.bitwise_or", + "torch.bitwise_right_shift", + "torch.bitwise_xor", + "torch.blackman_window", + "torch.bmm", + "torch.broadcast_to", + "torch.bucketize", + "torch.can_cast", + "torch.cat", + "torch.ccol_indices_copy", + "torch.ceil_", + "torch.ceil", + "torch.celu_", + "torch.celu", + "torch.channel_shuffle", + "torch.cholesky_inverse", + "torch.cholesky_solve", + "torch.cholesky", + "torch.choose_qparams_optimized", + "torch.chunk", + "torch.clamp_", + "torch.clamp_max_", + "torch.clamp_max", + "torch.clamp_min_", + "torch.clamp_min", + "torch.clamp", + "torch.clip_", + "torch.clip", + "torch.clone", + "torch.col_indices_copy", + "torch.column_stack", + "torch.combinations", + "torch.complex", + "torch.concat", + "torch.concatenate", + "torch.conj_physical_", + "torch.conj_physical", + "torch.conj", + "torch.constant_pad_nd", + "torch.conv_tbc", + "torch.conv_transpose1d", + "torch.conv_transpose2d", + "torch.conv_transpose3d", + "torch.conv1d", + "torch.conv2d", + "torch.conv3d", + "torch.convolution", + "torch.copysign", + "torch.corrcoef", + "torch.cos_", + "torch.cos", + "torch.cosh_", + "torch.cosh", + "torch.cosine_embedding_loss", + "torch.cosine_similarity", + "torch.count_nonzero", + "torch.cov", + "torch.cross", + "torch.crow_indices_copy", + "torch.ctc_loss", + "torch.cudnn_affine_grid_generator", + "torch.cudnn_batch_norm", + "torch.cudnn_convolution_add_relu", + "torch.cudnn_convolution_relu", + "torch.cudnn_convolution_transpose", + "torch.cudnn_convolution", + "torch.cudnn_grid_sampler", + "torch.cudnn_is_acceptable", + "torch.cummax", + "torch.cummin", + "torch.cumprod", + "torch.cumsum", + "torch.cumulative_trapezoid", + "torch.deg2rad_", + "torch.deg2rad", + "torch.dequantize", + "torch.det", + "torch.detach_", + "torch.detach_copy", + "torch.detach", + "torch.diag_embed", + "torch.diag", + "torch.diagflat", + "torch.diagonal_copy", + "torch.diagonal_scatter", + "torch.diagonal", + "torch.diff", + "torch.digamma", + "torch.dist", + "torch.div", + "torch.divide", + "torch.dot", + "torch.dropout_", + "torch.dropout", + "torch.dsmm", + 
"torch.dsplit", + "torch.dstack", + "torch.embedding_bag", + "torch.embedding_renorm_", + "torch.embedding", + "torch.empty_like", + "torch.empty_permuted", + "torch.empty_quantized", + "torch.empty_strided", + "torch.empty", + "torch.eq", + "torch.equal", + "torch.erf_", + "torch.erf", + "torch.erfc_", + "torch.erfc", + "torch.erfinv", + "torch.exp_", + "torch.exp", + "torch.exp2_", + "torch.exp2", + "torch.expand_copy", + "torch.expm1_", + "torch.expm1", + "torch.eye", + "torch.fake_quantize_per_channel_affine", + "torch.fake_quantize_per_tensor_affine", + "torch.fbgemm_linear_fp16_weight_fp32_activation", + "torch.fbgemm_linear_fp16_weight", + "torch.fbgemm_linear_int8_weight_fp32_activation", + "torch.fbgemm_linear_int8_weight", + "torch.fbgemm_linear_quantize_weight", + "torch.fbgemm_pack_gemm_matrix_fp16", + "torch.fbgemm_pack_quantized_matrix", + "torch.feature_alpha_dropout_", + "torch.feature_alpha_dropout", + "torch.feature_dropout_", + "torch.feature_dropout", + "torch.fill_", + "torch.fill", + "torch.fix_", + "torch.fix", + "torch.flatten", + "torch.flip", + "torch.fliplr", + "torch.flipud", + "torch.float_power", + "torch.floor_", + "torch.floor_divide", + "torch.floor", + "torch.fmax", + "torch.fmin", + "torch.fmod", + "torch.frac_", + "torch.frac", + "torch.frexp", + "torch.frobenius_norm", + "torch.from_file", + "torch.from_numpy", + "torch.frombuffer", + "torch.full_like", + "torch.full", + "torch.fused_moving_avg_obs_fake_quant", + "torch.gather", + "torch.gcd_", + "torch.gcd", + "torch.ge", + "torch.geqrf", + "torch.ger", + "torch.get_device", + "torch.gradient", + "torch.greater_equal", + "torch.greater", + "torch.grid_sampler_2d", + "torch.grid_sampler_3d", + "torch.grid_sampler", + "torch.group_norm", + "torch.gru_cell", + "torch.gru", + "torch.gt", + "torch.hamming_window", + "torch.hann_window", + "torch.hardshrink", + "torch.heaviside", + "torch.hinge_embedding_loss", + "torch.histc", + "torch.histogram", + "torch.histogramdd", + "torch.hsmm", + "torch.hsplit", + "torch.hspmm", + "torch.hstack", + "torch.hypot", + "torch.i0_", + "torch.i0", + "torch.igamma", + "torch.igammac", + "torch.imag", + "torch.index_add", + "torch.index_copy", + "torch.index_fill", + "torch.index_put_", + "torch.index_put", + "torch.index_reduce", + "torch.index_select", + "torch.indices_copy", + "torch.inner", + "torch.instance_norm", + "torch.int_repr", + "torch.inverse", + "torch.is_complex", + "torch.is_conj", + "torch.is_distributed", + "torch.is_floating_point", + "torch.is_inference", + "torch.is_neg", + "torch.is_nonzero", + "torch.is_same_size", + "torch.is_signed", + "torch.is_vulkan_available", + "torch.isclose", + "torch.isfinite", + "torch.isin", + "torch.isinf", + "torch.isnan", + "torch.isneginf", + "torch.isposinf", + "torch.isreal", + "torch.istft", + "torch.kaiser_window", + "torch.kl_div", + "torch.kron", + "torch.kthvalue", + "torch.layer_norm", + "torch.lcm_", + "torch.lcm", + "torch.ldexp_", + "torch.ldexp", + "torch.le", + "torch.lerp", + "torch.less_equal", + "torch.less", + "torch.lgamma", + "torch.linspace", + "torch.log_", + "torch.log_softmax", + "torch.log", + "torch.log10_", + "torch.log10", + "torch.log1p_", + "torch.log1p", + "torch.log2_", + "torch.log2", + "torch.logaddexp", + "torch.logaddexp2", + "torch.logcumsumexp", + "torch.logdet", + "torch.logical_and", + "torch.logical_not", + "torch.logical_or", + "torch.logical_xor", + "torch.logit_", + "torch.logit", + "torch.logspace", + "torch.logsumexp", + "torch.lstm_cell", + "torch.lstm", + "torch.lt", + 
"torch.lu_solve", + "torch.lu_unpack", + "torch.margin_ranking_loss", + "torch.masked_fill", + "torch.masked_scatter", + "torch.masked_select", + "torch.matmul", + "torch.matrix_exp", + "torch.matrix_power", + "torch.max_pool1d_with_indices", + "torch.max_pool1d", + "torch.max_pool2d", + "torch.max_pool3d", + "torch.max", + "torch.maximum", + "torch.mean", + "torch.median", + "torch.min", + "torch.minimum", + "torch.miopen_batch_norm", + "torch.miopen_convolution_add_relu", + "torch.miopen_convolution_relu", + "torch.miopen_convolution_transpose", + "torch.miopen_convolution", + "torch.miopen_depthwise_convolution", + "torch.miopen_rnn", + "torch.mkldnn_adaptive_avg_pool2d", + "torch.mkldnn_convolution", + "torch.mkldnn_linear_backward_weights", + "torch.mkldnn_max_pool2d", + "torch.mkldnn_max_pool3d", + "torch.mkldnn_rnn_layer", + "torch.mm", + "torch.mode", + "torch.moveaxis", + "torch.movedim", + "torch.msort", + "torch.mul", + "torch.multinomial", + "torch.multiply", + "torch.mv", + "torch.mvlgamma", + "torch.nan_to_num_", + "torch.nan_to_num", + "torch.nanmean", + "torch.nanmedian", + "torch.nanquantile", + "torch.nansum", + "torch.narrow_copy", + "torch.narrow", + "torch.native_batch_norm", + "torch.native_channel_shuffle", + "torch.native_dropout", + "torch.native_group_norm", + "torch.native_layer_norm", + "torch.native_norm", + "torch.ne", + "torch.neg_", + "torch.neg", + "torch.negative_", + "torch.negative", + "torch.nextafter", + "torch.nonzero_static", + "torch.nonzero", + "torch.norm_except_dim", + "torch.normal", + "torch.not_equal", + "torch.nuclear_norm", + "torch.numel", + "torch.obj", + "torch.ones_like", + "torch.ones", + "torch.orgqr", + "torch.ormqr", + "torch.outer", + "torch.pairwise_distance", + "torch.pdist", + "torch.permute_copy", + "torch.permute", + "torch.pinverse", + "torch.pixel_shuffle", + "torch.pixel_unshuffle", + "torch.poisson_nll_loss", + "torch.poisson", + "torch.polar", + "torch.polygamma", + "torch.positive", + "torch.pow", + "torch.prelu", + "torch._print", + "torch.prod", + "torch.promote_types", + "torch.put", + "torch.q_per_channel_axis", + "torch.q_per_channel_scales", + "torch.q_per_channel_zero_points", + "torch.q_scale", + "torch.q_zero_point", + "torch.qr", + "torch.quantile", + "torch.quantize_per_channel", + "torch.quantize_per_tensor_dynamic", + "torch.quantize_per_tensor", + "torch.quantized_batch_norm", + "torch.quantized_gru_cell", + "torch.quantized_lstm_cell", + "torch.quantized_max_pool1d", + "torch.quantized_max_pool2d", + "torch.quantized_max_pool3d", + "torch.quantized_rnn_relu_cell", + "torch.quantized_rnn_tanh_cell", + "torch.rad2deg_", + "torch.rad2deg", + "torch.rand_like", + "torch.rand", + "torch.randint_like", + "torch.randint", + "torch.randn_like", + "torch.randn", + "torch.randperm", + "torch.range", + "torch.ravel", + "torch.real", + "torch.reciprocal_", + "torch.reciprocal", + "torch.relu_", + "torch.relu", + "torch.remainder", + "torch.renorm", + "torch.repeat_interleave", + "torch.reshape", + "torch.resolve_conj", + "torch.resolve_neg", + "torch.result_type", + "torch.rnn_relu_cell", + "torch.rnn_relu", + "torch.rnn_tanh_cell", + "torch.rnn_tanh", + "torch.roll", + "torch.rot90", + "torch.round_", + "torch.round", + "torch.row_indices_copy", + "torch.row_stack", + "torch.rrelu_", + "torch.rrelu", + "torch.rsqrt_", + "torch.rsqrt", + "torch.rsub", + "torch.saddmm", + "torch.scalar_tensor", + "torch.scatter_add", + "torch.scatter_reduce", + "torch.scatter", + "torch.searchsorted", + "torch.segment_reduce", + 
"torch.select_copy", + "torch.select_scatter", + "torch.select", + "torch.selu_", + "torch.selu", + "torch.sgn", + "torch.sigmoid_", + "torch.sigmoid", + "torch.sign", + "torch.signal.windows.windows.sqrt", + "torch.signbit", + "torch.sin_", + "torch.sin", + "torch.sinc_", + "torch.sinc", + "torch.sinh_", + "torch.sinh", + "torch.slice_copy", + "torch.slice_scatter", + "torch.slogdet", + "torch.smm", + "torch.softmax", + "torch.sort", + "torch.split_copy", + "torch.split_with_sizes_copy", + "torch.split_with_sizes", + "torch.spmm", + "torch.sqrt_", + "torch.sqrt", + "torch.square_", + "torch.square", + "torch.squeeze_copy", + "torch.squeeze", + "torch.sspaddmm", + "torch.stack", + "torch.std_mean", + "torch.std", + "torch.sub", + "torch.subtract", + "torch.sum", + "torch.svd", + "torch.swapaxes", + "torch.swapdims", + "torch.sym_constrain_range_for_size", + "torch.sym_constrain_range", + "torch.t_copy", + "torch.t", + "torch.take_along_dim", + "torch.take", + "torch.tan_", + "torch.tan", + "torch.tanh_", + "torch.tanh", + "torch.tensor_split", + "torch.tensor", + "torch.threshold_", + "torch.threshold", + "torch.tile", + "torch.topk", + "torch.trace", + "torch.transpose_copy", + "torch.transpose", + "torch.trapezoid", + "torch.trapz", + "torch.triangular_solve", + "torch.tril_indices", + "torch.tril", + "torch.triplet_margin_loss", + "torch.triu_indices", + "torch.triu", + "torch.true_divide", + "torch.trunc_", + "torch.trunc", + "torch.unbind_copy", + "torch.unbind", + "torch.unflatten", + "torch.unfold_copy", + "torch.unsafe_chunk", + "torch.unsafe_split_with_sizes", + "torch.unsafe_split", + "torch.unsqueeze_copy", + "torch.unsqueeze", + "torch.values_copy", + "torch.vander", + "torch.var_mean", + "torch.var", + "torch.vdot", + "torch.view_as_complex_copy", + "torch.view_as_complex", + "torch.view_as_real_copy", + "torch.view_as_real", + "torch.view_copy", + "torch.vsplit", + "torch.vstack", + "torch.where", + "torch.xlogy_", + "torch.xlogy", + "torch.zero_", + "torch.zeros", + "torch._fused_sgd_", + "torch.slice_inverse", + "torch._assert_scalar", + "torch._functional_assert_scalar", + ], + TorchInGraphFunctionVariable, +) + + +if sys.version_info >= (3, 9): + torch_c_binding_in_graph_functions["math.lcm"] = TorchInGraphFunctionVariable +if sys.version_info >= (3, 11): + torch_c_binding_in_graph_functions["math.exp2"] = TorchInGraphFunctionVariable + torch_c_binding_in_graph_functions["math.cbrt"] = TorchInGraphFunctionVariable + + +# In graph functions (including constant folding) that are not C bindings +torch_non_c_binding_in_graph_functions = dict.fromkeys( + [ + "torch.__future__.get_overwrite_module_params_on_conversion", + "torch.__future__.set_overwrite_module_params_on_conversion", + "torch.__getattr__", + "torch._assert", + "torch._check_index", + "torch._check_is_size", + "torch._check_not_implemented", + "torch._check_tensor_all_with", + "torch._check_tensor_all", + "torch._check_type", + "torch._check_value", + "torch._check_with", + "torch._check", + "torch._compile._disable_dynamo", + "torch._functorch.apis.chunk_vmap", + "torch._functorch.autograd_function.custom_function_call_functionalize", + "torch._functorch.autograd_function.custom_function_call_grad", + "torch._functorch.autograd_function.custom_function_call_vmap_generate_rule", + "torch._functorch.autograd_function.custom_function_call_vmap", + "torch._functorch.autograd_function.generate_single_level_function", + "torch._functorch.autograd_function.get_tangents_in_dims", + 
"torch._functorch.autograd_function.has_overriden_vmap_rule", + "torch._functorch.autograd_function.reductify_leaf", + "torch._functorch.autograd_function.reductify", + "torch._functorch.autograd_function.validate_vmap_returns_tuple_of_two_elements", + "torch._functorch.autograd_function.vmapify_autograd_function", + "torch._functorch.autograd_function.wrap_outputs_maintaining_identity", + "torch._functorch.batch_norm_replacement.batch_norm_without_running_stats", + "torch._functorch.batch_norm_replacement.replace_all_batch_norm_modules_", + "torch._functorch.deprecated.combine_state_for_ensemble", + "torch._functorch.deprecated.functionalize", + "torch._functorch.deprecated.get_warning", + "torch._functorch.deprecated.grad_and_value", + "torch._functorch.deprecated.hessian", + "torch._functorch.deprecated.jacfwd", + "torch._functorch.deprecated.jacrev", + "torch._functorch.deprecated.jvp", + "torch._functorch.deprecated.make_functional_with_buffers", + "torch._functorch.deprecated.make_functional", + "torch._functorch.deprecated.setup_docs", + "torch._functorch.deprecated.vjp", + "torch._functorch.deprecated.warn_deprecated", + "torch._functorch.eager_transforms._any_differentiable", + "torch._functorch.eager_transforms._autograd_grad", + "torch._functorch.eager_transforms._construct_standard_basis_for", + "torch._functorch.eager_transforms._vjp_treespec_compare", + "torch._functorch.eager_transforms._set_tensor_requires_grad", + "torch._functorch.eager_transforms._is_differentiable", + "torch._functorch.eager_transforms._jvp_with_argnums", + "torch._functorch.eager_transforms._maybe_unwrap_functional_tensor", + "torch._functorch.eager_transforms._maybe_wrap_functional_tensor", + "torch._functorch.eager_transforms._replace_args", + "torch._functorch.eager_transforms._unwrap_all_tensors_from_functional", + "torch._functorch.eager_transforms._wrap_all_tensors_to_functional", + "torch._functorch.eager_transforms.assert_flat_tuple_of_tensors", + "torch._functorch.eager_transforms.assert_non_empty_list_of_tensors", + "torch._functorch.eager_transforms.assert_output_is_tensor_or_tensors", + "torch._functorch.eager_transforms.functionalize", + "torch._functorch.eager_transforms.hessian", + "torch._functorch.eager_transforms.jacfwd", + "torch._functorch.eager_transforms.jvp", + "torch._functorch.eager_transforms.lazy_dynamo_disable", + "torch._functorch.eager_transforms.linearize", + "torch._functorch.eager_transforms.noop", + "torch._functorch.eager_transforms.safe_unflatten", + "torch._functorch.eager_transforms.safe_unpack_dual", + "torch._functorch.functional_call.construct_stacked_leaf", + "torch._functorch.functional_call.functional_call", + "torch._functorch.functional_call.stack_module_state", + "torch._functorch.pyfunctorch.coerce_cinterpreter", + "torch._functorch.pyfunctorch.dispatch_functorch", + "torch._functorch.pyfunctorch.nested", + "torch._functorch.pyfunctorch.retrieve_current_functorch_interpreter", + "torch._functorch.pyfunctorch.temporarily_pop_interpreter_stack", + "torch._functorch.utils.enable_single_level_autograd_function", + "torch._functorch.utils.exposed_in", + "torch._functorch.utils.unwrap_dead_wrappers", + "torch._functorch.vmap.lazy_load_decompositions", + "torch._guards.compile_context", + "torch._guards.detect_fake_mode", + "torch._guards.tracing", + "torch._higher_order_ops.map._has_potential_branch_input_alias", + "torch._higher_order_ops.map._has_potential_branch_input_mutation", + "torch._higher_order_ops.map._stack_pytree", + 
"torch._higher_order_ops.map._unstack_pytree", + "torch._higher_order_ops.map.create_fw_bw_graph", + "torch._higher_order_ops.map.map_autograd", + "torch._higher_order_ops.map.map_dense", + "torch._higher_order_ops.map.map_fake_tensor_mode", + "torch._higher_order_ops.map.map_functionalize", + "torch._higher_order_ops.map.map_proxy_torch_dispatch_mode", + "torch._higher_order_ops.map.map_wrapper", + "torch._higher_order_ops.map.trace_map", + "torch._higher_order_ops.out_dtype.elementwise_dtypes", + "torch._higher_order_ops.out_dtype.is_int_mm", + "torch._higher_order_ops.out_dtype.out_dtype_dense", + "torch._higher_order_ops.out_dtype.out_dtype_fake_tensor_mode", + "torch._higher_order_ops.out_dtype.out_dtype_fallback", + "torch._higher_order_ops.out_dtype.out_dtype_func", + "torch._higher_order_ops.out_dtype.out_dtype_proxy", + "torch._higher_order_ops.out_dtype.trace_out_dtype", + "torch._higher_order_ops.utils.autograd_not_implemented_inner", + "torch._higher_order_ops.utils.autograd_not_implemented", + "torch._linalg_utils._symeig", + "torch._linalg_utils.basis", + "torch._linalg_utils.bform", + "torch._linalg_utils.conjugate", + "torch._linalg_utils.eig", + "torch._linalg_utils.get_floating_dtype", + "torch._linalg_utils.is_sparse", + "torch._linalg_utils.lstsq", + "torch._linalg_utils.matmul", + "torch._linalg_utils.matrix_rank", + "torch._linalg_utils.qform", + "torch._linalg_utils.solve", + "torch._linalg_utils.symeig", + "torch._linalg_utils.transjugate", + "torch._linalg_utils.transpose", + "torch._load_global_deps", + "torch._lowrank._svd_lowrank", + "torch._lowrank.get_approximate_basis", + "torch._lowrank.pca_lowrank", + "torch._lowrank.svd_lowrank", + "torch._ops._compute_keyset", + "torch._ops._get_tensors", + "torch._ops._to_flat_tuple", + "torch._ops.add_cached_op", + "torch._ops.dl_open_guard", + "torch._ops.get_cached_ops", + "torch._ops.key_extractor", + "torch._ops.reset_cached_ops", + "torch._ops.resolve_key", + "torch._preload_cuda_deps", + "torch._register_device_module", + "torch._running_with_deploy", + "torch._utils._dummy_type", + "torch._weights_only_unpickler._get_allowed_globals", + "torch._weights_only_unpickler.load", + "torch.align_tensors", + "torch.amp.autocast_mode._enter_autocast", + "torch.amp.autocast_mode._exit_autocast", + "torch.amp.autocast_mode.autocast_decorator", + "torch.are_deterministic_algorithms_enabled", + "torch.atleast_1d", + "torch.atleast_2d", + "torch.atleast_3d", + "torch.autograd._calculate_shape", + "torch.autograd._is_checkpoint_valid", + "torch.autograd._make_grads", + "torch.autograd._register_py_tensor_class_for_device", + "torch.autograd._tensor_or_tensors_to_tuple", + "torch.autograd.backward", + "torch.autograd.forward_ad.enter_dual_level", + "torch.autograd.forward_ad.exit_dual_level", + "torch.autograd.forward_ad.make_dual", + "torch.autograd.forward_ad.unpack_dual", + "torch.autograd.function._iter_filter", + "torch.autograd.function._iter_jit_values", + "torch.autograd.function._iter_None_tensors", + "torch.autograd.function._iter_tensors_permissive", + "torch.autograd.function._iter_tensors", + "torch.autograd.function._jit_unwrap_structured", + "torch.autograd.function._map_tensor_data", + "torch.autograd.function._nested_map", + "torch.autograd.function._unflatten", + "torch.autograd.function.once_differentiable", + "torch.autograd.function.traceable", + "torch.autograd.functional._as_tuple_nocheck", + "torch.autograd.functional._as_tuple", + "torch.autograd.functional._autograd_grad", + 
"torch.autograd.functional._check_requires_grad", + "torch.autograd.functional._construct_standard_basis_for", + "torch.autograd.functional._fill_in_zeros", + "torch.autograd.functional._grad_postprocess", + "torch.autograd.functional._grad_preprocess", + "torch.autograd.functional._jacfwd", + "torch.autograd.functional._tuple_postprocess", + "torch.autograd.functional._validate_v", + "torch.autograd.functional.hessian", + "torch.autograd.functional.hvp", + "torch.autograd.functional.jacobian", + "torch.autograd.functional.jvp", + "torch.autograd.functional.vhp", + "torch.autograd.functional.vjp", + "torch.autograd.grad_mode._enter_inference_mode", + "torch.autograd.grad_mode._exit_inference_mode", + "torch.autograd.graph._get_sid", + "torch.autograd.graph._get_tid", + "torch.autograd.graph.allow_mutation_on_saved_tensors", + "torch.autograd.graph.get_gradient_edge", + "torch.autograd.graph.increment_version", + "torch.autograd.graph.register_multi_grad_hook", + "torch.autograd.variable", + "torch.backends.__allow_nonbracketed_mutation", + "torch.backends.cpu.get_cpu_capability", + "torch.backends.cuda.can_use_efficient_attention", + "torch.backends.cuda.can_use_flash_attention", + "torch.backends.cuda.enable_flash_sdp", + "torch.backends.cuda.enable_math_sdp", + "torch.backends.cuda.enable_mem_efficient_sdp", + "torch.backends.cuda.flash_sdp_enabled", + "torch.backends.cuda.is_built", + "torch.backends.cuda.math_sdp_enabled", + "torch.backends.cuda.mem_efficient_sdp_enabled", + "torch.backends.cuda.cudnn_sdp_enabled", + "torch.backends.cuda.enable_cudnn_sdp", + "torch.backends.cuda.preferred_linalg_library", + "torch.backends.cuda.sdp_kernel", + "torch.backends.cudnn._init", + "torch.backends.cudnn.flags", + "torch.backends.cudnn.is_acceptable", + "torch.backends.cudnn.is_available", + "torch.backends.cudnn.set_flags", + "torch.backends.cudnn.version", + "torch.backends.disable_global_flags", + "torch.backends.flags_frozen", + "torch.backends.mkl.is_available", + "torch.backends.mkldnn.flags", + "torch.backends.mkldnn.is_available", + "torch.backends.mkldnn.set_flags", + "torch.backends.mps._init", + "torch.backends.mps.is_available", + "torch.backends.mps.is_built", + "torch.backends.mps.is_macos13_or_newer", + "torch.backends.openmp.is_available", + "torch.backends.quantized._get_qengine_id", + "torch.backends.quantized._get_qengine_str", + "torch.block_diag", + "torch.broadcast_tensors", + "torch.cartesian_prod", + "torch.cdist", + "torch.chain_matmul", + "torch.compile", + "torch.compiled_with_cxx11_abi", + "torch.cpu._is_cpu_support_vnni", + "torch.cpu.current_device", + "torch.cpu.current_stream", + "torch.cpu.device_count", + "torch.cpu.is_available", + "torch.cpu.set_device", + "torch.cpu.stream", + "torch.cpu.synchronize", + "torch.cuda._check_capability", + "torch.cuda._check_cubins", + "torch.cuda._device_count_nvml", + "torch.cuda._get_device", + "torch.cuda._get_generator", + "torch.cuda._get_nvml_device_index", + "torch.cuda._get_pynvml_handler", + "torch.cuda._get_rng_state_offset", + "torch.cuda._is_compiled", + "torch.cuda._lazy_call", + "torch.cuda._lazy_init", + "torch.cuda._memory_viz._block_extra_legacy", + "torch.cuda._memory_viz._block_extra", + "torch.cuda._memory_viz._format_size", + "torch.cuda._memory_viz._format_viz", + "torch.cuda._memory_viz._frame_filter", + "torch.cuda._memory_viz._frame_fmt", + "torch.cuda._memory_viz._frames_fmt", + "torch.cuda._memory_viz._profile_to_snapshot", + "torch.cuda._memory_viz._report_free", + 
"torch.cuda._memory_viz._write_blocks", + "torch.cuda._memory_viz.calc_active", + "torch.cuda._memory_viz.compare", + "torch.cuda._memory_viz.format_flamegraph", + "torch.cuda._memory_viz.memory", + "torch.cuda._memory_viz.profile_plot", + "torch.cuda._memory_viz.segment_plot", + "torch.cuda._memory_viz.segments", + "torch.cuda._memory_viz.segsum", + "torch.cuda._memory_viz.trace_plot", + "torch.cuda._memory_viz.trace", + "torch.cuda._nvml_based_avail", + "torch.cuda._parse_visible_devices", + "torch.cuda._raw_device_count_nvml", + "torch.cuda._raw_device_uuid_nvml", + "torch.cuda._register_triton_kernels", + "torch.cuda._set_rng_state_offset", + "torch.cuda._set_stream_by_id", + "torch.cuda._sleep", + "torch.cuda._transform_uuid_to_ordinals", + "torch.cuda._utils._get_device_index", + "torch.cuda.amp.autocast_mode._cast", + "torch.cuda.amp.autocast_mode.custom_bwd", + "torch.cuda.amp.autocast_mode.custom_fwd", + "torch.cuda.amp.common.amp_definitely_not_available", + "torch.amp.grad_scaler._refresh_per_optimizer_state", + "torch.cuda.can_device_access_peer", + "torch.cuda.check_error", + "torch.cuda.clock_rate", + "torch.cuda.cudart", + "torch.cuda.current_blas_handle", + "torch.cuda.current_stream", + "torch.cuda.default_stream", + "torch.cuda.device_count", + "torch.cuda.get_arch_list", + "torch.cuda.get_device_capability", + "torch.cuda.get_device_name", + "torch.cuda.get_device_properties", + "torch.cuda.get_gencode_flags", + "torch.cuda.get_sync_debug_mode", + "torch.cuda.graphs.graph_pool_handle", + "torch.cuda.graphs.is_current_stream_capturing", + "torch.cuda.graphs.make_graphed_callables", + "torch.cuda.init", + "torch.cuda.ipc_collect", + "torch.cuda.is_available", + "torch.cuda.is_bf16_supported", + "torch.cuda.is_initialized", + "torch.cuda.jiterator._create_jit_fn", + "torch.cuda.jiterator._create_multi_output_jit_fn", + "torch.cuda.memory_usage", + "torch.cuda.memory._dump_snapshot", + "torch.cuda.memory._free_mutex", + "torch.cuda.memory._get_current_allocator", + "torch.cuda.memory._host_allocator", + "torch.cuda.memory._record_memory_history_impl", + "torch.cuda.memory._record_memory_history_legacy", + "torch.cuda.memory._record_memory_history", + "torch.cuda.memory._save_memory_usage", + "torch.cuda.memory._save_segment_usage", + "torch.cuda.memory._set_allocator_settings", + "torch.cuda.memory._snapshot", + "torch.cuda.memory.caching_allocator_alloc", + "torch.cuda.memory.caching_allocator_delete", + "torch.cuda.memory.change_current_allocator", + "torch.cuda.memory.empty_cache", + "torch.cuda.memory.get_allocator_backend", + "torch.cuda.memory.list_gpu_processes", + "torch.cuda.memory.max_memory_allocated", + "torch.cuda.memory.max_memory_cached", + "torch.cuda.memory.max_memory_reserved", + "torch.cuda.memory.mem_get_info", + "torch.cuda.memory.memory_allocated", + "torch.cuda.memory.memory_cached", + "torch.cuda.memory.memory_reserved", + "torch.cuda.memory.memory_snapshot", + "torch.cuda.memory.memory_stats_as_nested_dict", + "torch.cuda.memory.memory_stats", + "torch.cuda.memory.memory_summary", + "torch.cuda.memory.reset_accumulated_memory_stats", + "torch.cuda.memory.reset_max_memory_allocated", + "torch.cuda.memory.reset_max_memory_cached", + "torch.cuda.memory.reset_peak_memory_stats", + "torch.cuda.memory.set_per_process_memory_fraction", + "torch.cuda.nccl._check_sequence_type", + "torch.cuda.nccl.all_gather", + "torch.cuda.nccl.all_reduce", + "torch.cuda.nccl.broadcast", + "torch.cuda.nccl.init_rank", + "torch.cuda.nccl.is_available", + 
"torch.cuda.nccl.reduce_scatter", + "torch.cuda.nccl.reduce", + "torch.cuda.nccl.unique_id", + "torch.cuda.nccl.version", + "torch.cuda.nvtx.mark", + "torch.cuda.nvtx.range_end", + "torch.cuda.nvtx.range_pop", + "torch.cuda.nvtx.range_push", + "torch.cuda.nvtx.range_start", + "torch.cuda.nvtx.range", + "torch.cuda.power_draw", + "torch.cuda.profiler.init", + "torch.cuda.profiler.profile", + "torch.cuda.profiler.start", + "torch.cuda.profiler.stop", + "torch.cuda.random.get_rng_state_all", + "torch.cuda.random.initial_seed", + "torch.cuda.random.manual_seed_all", + "torch.cuda.random.manual_seed", + "torch.cuda.random.seed_all", + "torch.cuda.random.seed", + "torch.cuda.random.set_rng_state_all", + "torch.cuda.set_stream", + "torch.cuda.set_sync_debug_mode", + "torch.cuda.stream", + "torch.cuda.synchronize", + "torch.cuda.temperature", + "torch.cuda.utilization", + "torch.einsum", + "torch.functional._check_list_size", + "torch.functional._consecutive_return_counts", + "torch.functional._consecutive_return_inverse_false", + "torch.functional._consecutive_return_inverse_true", + "torch.functional._consecutive_return_inverse", + "torch.functional._consecutive_return_output", + "torch.functional._lu_impl", + "torch.functional._lu_no_infos", + "torch.functional._lu_with_infos", + "torch.functional._meshgrid", + "torch.functional._return_counts", + "torch.functional._return_inverse_false", + "torch.functional._return_inverse_true", + "torch.functional._return_inverse", + "torch.functional._return_output", + "torch.functional._unique_consecutive_impl", + "torch.functional._unique_impl", + "torch.functional._unravel_index", + "torch.functional.broadcast_shapes", + "torch.functional.lu", + "torch.functional.unique", + "torch.functional.unravel_index", + "torch.futures.collect_all", + "torch.futures.wait_all", + "torch.get_deterministic_debug_mode", + "torch.get_float32_matmul_precision", + "torch.is_deterministic_algorithms_warn_only_enabled", + "torch.is_storage", + "torch.is_tensor", + "torch.is_warn_always_enabled", + "torch.masked._ops._any", + "torch.masked._ops._apply_docstring_templates", + "torch.masked._ops._canonical_dim", + "torch.masked._ops._combine_input_and_mask", + "torch.masked._ops._generate_docstring", + "torch.masked._ops._input_mask", + "torch.masked._ops._output_mask", + "torch.masked._ops._reduction_identity", + "torch.masked._ops._sparse_coo_flatten_indices", + "torch.masked._ops._sparse_coo_scatter_reduction_helper", + "torch.masked._ops._sparse_coo_where", + "torch.masked._ops._sparse_csr_segment_reduction_helper", + "torch.masked._ops._sparse_csr_where", + "torch.masked._ops._std_var", + "torch.masked._ops._where", + "torch.masked._ops.amax", + "torch.masked._ops.amin", + "torch.masked._ops.argmax", + "torch.masked._ops.argmin", + "torch.masked._ops.corresponding_real_dtype", + "torch.masked._ops.cumprod", + "torch.masked._ops.cumsum", + "torch.masked._ops.log_softmax", + "torch.masked._ops.logaddexp", + "torch.masked._ops.logsumexp", + "torch.masked._ops.mean", + "torch.masked._ops.median", + "torch.masked._ops.norm", + "torch.masked._ops.normalize", + "torch.masked._ops.prod", + "torch.masked._ops.softmax", + "torch.masked._ops.softmin", + "torch.masked._ops.std", + "torch.masked._ops.sum", + "torch.masked._ops.var", + "torch.meshgrid", + "torch.mps._get_default_mps_generator", + "torch.mps.current_allocated_memory", + "torch.mps.driver_allocated_memory", + "torch.mps.empty_cache", + "torch.mps.get_rng_state", + "torch.mps.manual_seed", + "torch.mps.profiler.profile", + 
"torch.mps.profiler.start", + "torch.mps.profiler.stop", + "torch.mps.seed", + "torch.mps.set_per_process_memory_fraction", + "torch.mps.set_rng_state", + "torch.mps.synchronize", + "torch.nested._internal.nested_tensor.get_tensor_symint", + "torch.nested._internal.nested_tensor.is_expandable_to", + "torch.nested._internal.nested_tensor.jagged_from_list", + "torch.nested._internal.nested_tensor.jagged_from_tensor_and_lengths", + "torch.nested._internal.nested_tensor.nested_view_from_values_offsets", + "torch.nested._internal.nested_tensor.nested_view_from_values_offsets_lengths", + "torch.nested.as_nested_tensor", + "torch.nested.narrow", + "torch.nested.nested_tensor", + "torch.nn._reduction.get_enum", + "torch.nn._reduction.legacy_get_enum", + "torch.nn._reduction.legacy_get_string", + "torch.nn.factory_kwargs", + "torch.nn.functional._adaptive_max_pool1d", + "torch.nn.functional._adaptive_max_pool2d", + "torch.nn.functional._adaptive_max_pool3d", + "torch.nn.functional._canonical_mask", + "torch.nn.functional._fractional_max_pool2d", + "torch.nn.functional._fractional_max_pool3d", + "torch.nn.functional._get_softmax_dim", + "torch.nn.functional._in_projection_packed", + "torch.nn.functional._in_projection", + "torch.nn.functional._is_integer", + "torch.nn.functional._max_pool1d", + "torch.nn.functional._max_pool2d", + "torch.nn.functional._max_pool3d", + "torch.nn.functional._mha_shape_check", + "torch.nn.functional._no_grad_embedding_renorm_", + "torch.nn.functional._none_or_dtype", + "torch.nn.functional._threshold", + "torch.nn.functional._unpool_output_size", + "torch.nn.functional._verify_batch_size", + "torch.nn.functional._verify_spatial_size", + "torch.nn.functional.adaptive_avg_pool2d", + "torch.nn.functional.adaptive_avg_pool3d", + "torch.nn.functional.adaptive_max_pool1d_with_indices", + "torch.nn.functional.adaptive_max_pool1d", + "torch.nn.functional.adaptive_max_pool2d_with_indices", + "torch.nn.functional.adaptive_max_pool2d", + "torch.nn.functional.adaptive_max_pool3d_with_indices", + "torch.nn.functional.adaptive_max_pool3d", + "torch.nn.functional.affine_grid", + "torch.nn.functional.alpha_dropout", + "torch.nn.functional.assert_int_or_pair", + "torch.nn.functional.batch_norm", + "torch.nn.functional.binary_cross_entropy_with_logits", + "torch.nn.functional.binary_cross_entropy", + "torch.nn.functional.celu", + "torch.nn.functional.cosine_embedding_loss", + "torch.nn.functional.cross_entropy", + "torch.nn.functional.ctc_loss", + "torch.nn.functional.dropout", + "torch.nn.functional.dropout1d", + "torch.nn.functional.dropout2d", + "torch.nn.functional.dropout3d", + "torch.nn.functional.elu", + "torch.nn.functional.embedding_bag", + "torch.nn.functional.embedding", + "torch.nn.functional.feature_alpha_dropout", + "torch.nn.functional.fold", + "torch.nn.functional.fractional_max_pool2d_with_indices", + "torch.nn.functional.fractional_max_pool2d", + "torch.nn.functional.fractional_max_pool3d_with_indices", + "torch.nn.functional.fractional_max_pool3d", + "torch.nn.functional.gaussian_nll_loss", + "torch.nn.functional.glu", + "torch.nn.functional.grid_sample", + "torch.nn.functional.group_norm", + "torch.nn.functional.gumbel_softmax", + "torch.nn.functional.hardsigmoid", + "torch.nn.functional.hardswish", + "torch.nn.functional.hardtanh", + "torch.nn.functional.hinge_embedding_loss", + "torch.nn.functional.huber_loss", + "torch.nn.functional.instance_norm", + "torch.nn.functional.interpolate", + "torch.nn.functional.kl_div", + "torch.nn.functional.l1_loss", + 
"torch.nn.functional.layer_norm", + "torch.nn.functional.leaky_relu", + "torch.nn.functional.local_response_norm", + "torch.nn.functional.log_softmax", + "torch.nn.functional.lp_pool1d", + "torch.nn.functional.lp_pool2d", + "torch.nn.functional.margin_ranking_loss", + "torch.nn.functional.max_pool1d_with_indices", + "torch.nn.functional.max_pool1d", + "torch.nn.functional.max_pool2d_with_indices", + "torch.nn.functional.max_pool2d", + "torch.nn.functional.max_pool3d_with_indices", + "torch.nn.functional.max_pool3d", + "torch.nn.functional.max_unpool1d", + "torch.nn.functional.max_unpool2d", + "torch.nn.functional.max_unpool3d", + "torch.nn.functional.mish", + "torch.nn.functional.mse_loss", + "torch.nn.functional.multi_head_attention_forward", + "torch.nn.functional.multi_margin_loss", + "torch.nn.functional.multilabel_margin_loss", + "torch.nn.functional.multilabel_soft_margin_loss", + "torch.nn.functional.nll_loss", + "torch.nn.functional.normalize", + "torch.nn.functional.poisson_nll_loss", + "torch.nn.functional.relu", + "torch.nn.functional.relu6", + "torch.nn.functional.rrelu", + "torch.nn.functional.selu", + "torch.nn.functional.sigmoid", + "torch.nn.functional.silu", + "torch.nn.functional.smooth_l1_loss", + "torch.nn.functional.soft_margin_loss", + "torch.nn.functional.softmax", + "torch.nn.functional.softmin", + "torch.nn.functional.softsign", + "torch.nn.functional.tanh", + "torch.nn.functional.tanhshrink", + "torch.nn.functional.triplet_margin_loss", + "torch.nn.functional.unfold", + "torch.nn.functional.upsample_bilinear", + "torch.nn.functional.upsample_nearest", + "torch.nn.functional.upsample", + "torch.nn.grad._pair", + "torch.nn.grad._single", + "torch.nn.grad._triple", + "torch.nn.grad.conv1d_input", + "torch.nn.grad.conv1d_weight", + "torch.nn.grad.conv2d_input", + "torch.nn.grad.conv2d_weight", + "torch.nn.grad.conv3d_input", + "torch.nn.grad.conv3d_weight", + "torch.nn.modules.activation._arg_requires_grad", + "torch.nn.modules.activation._check_arg_device", + "torch.nn.modules.activation._is_make_fx_tracing", + "torch.nn.modules.container._addindent", + "torch.nn.modules.transformer._detect_is_causal_mask", + "torch.nn.modules.transformer._generate_square_subsequent_mask", + "torch.nn.modules.transformer._get_activation_fn", + "torch.nn.modules.transformer._get_clones", + "torch.nn.modules.transformer._get_seq_len", + "torch.nn.modules.utils._list_with_default", + "torch.nn.modules.utils._ntuple", + "torch.nn.modules.utils._quadruple", + "torch.nn.modules.utils._reverse_repeat_tuple", + "torch.nn.modules.utils.consume_prefix_in_state_dict_if_present", + "torch.nn.parameter.is_lazy", + "torch.norm", + "torch.quantization.default_eval_fn", + "torch.random._seed_custom_device", + "torch.random.fork_rng", + "torch.random.initial_seed", + "torch.random.seed", + "torch.return_types.pytree_register_structseq", + "torch.set_default_device", + "torch.set_default_dtype", + "torch.set_default_tensor_type", + "torch.set_deterministic_debug_mode", + "torch.set_float32_matmul_precision", + "torch.set_warn_always", + "torch.signal.windows.windows._add_docstr", + "torch.signal.windows.windows._window_function_checks", + "torch.signal.windows.windows.bartlett", + "torch.signal.windows.windows.blackman", + "torch.signal.windows.windows.cosine", + "torch.signal.windows.windows.exponential", + "torch.signal.windows.windows.gaussian", + "torch.signal.windows.windows.general_cosine", + "torch.signal.windows.windows.general_hamming", + "torch.signal.windows.windows.hamming", + 
"torch.signal.windows.windows.hann", + "torch.signal.windows.windows.kaiser", + "torch.signal.windows.windows.merge_dicts", + "torch.signal.windows.windows.nuttall", + "torch.signal.windows.windows.parse_kwargs", + "torch.sparse.semi_structured.to_sparse_semi_structured", + "torch.sparse.sum", + "torch.split", + "torch.stft", + "torch.sym_float", + "torch.sym_int", + "torch.sym_ite", + "torch.sym_max", + "torch.sym_min", + "torch.sym_not", + "torch.tensordot", + "torch.typename", + "torch.unique_consecutive", + "torch.use_deterministic_algorithms", + ], + TorchInGraphFunctionVariable, +) + + +torch_name_rule_map = [ + manual_torch_name_rule_map, + torch_c_binding_in_graph_functions, + torch_non_c_binding_in_graph_functions, +] + + +""" +Generate the torch object - Dynamo tracing rule (the wrapping variable) map. +""" + + +@functools.lru_cache(None) +def get_torch_obj_rule_map(): + d: Dict[Any, VariableTracker] = dict() + for m in torch_name_rule_map: + for k, v in m.items(): # type: ignore[attr-defined] + obj = load_object(k) + if obj is not None: + if obj in d and d[obj] != v: + raise AssertionError( + f"Duplicate torch object {obj} with different rules: {v}, {d[obj]}" + ) + else: + d[obj] = v + return d + + +def _load_obj_from_str(fully_qualified_name): + module, obj_name = fully_qualified_name.rsplit(".", maxsplit=1) + return getattr(importlib.import_module(module), obj_name) + + +""" +Load string represented torch objects. +""" + + +def load_object(name): + try: + x = name.split("#") + if len(x) == 2: + obj = _load_obj_from_str(x[0]) + val = getattr(obj, x[1]) + else: + assert len(x) == 1, f"Invalid obj name {name}" + val = _load_obj_from_str(x[0]) + val = unwrap_if_wrapper(val) + except (AttributeError, ImportError): + val = None + return val + + +""" +Get all torch.Tensor methods which are allowed to be in graph functions. +""" + + +@functools.lru_cache(None) +def get_tensor_method(): + s = set() + for name in dir(torch.Tensor): + method = getattr(torch.Tensor, name) + if isinstance( + method, (types.MethodDescriptorType, types.WrapperDescriptorType) + ): + s.add(method) + return frozenset(s) + + +""" +Return if a torch object is ATen op or torch.Tensor method. +""" + + +def is_aten_op_or_tensor_method(obj): + return obj in get_tensor_method() or isinstance( + obj, + (torch._ops.OpOverloadPacket, torch._ops.OpOverload), + ) + + +class FunctionIdSet: + """ + Track a set of `id()`s of objects which are either allowed or not + allowed to go into the generated FX graph. Use to test for torch.*, + numpy.*, builtins.*, etc. + + Support user modification to permit customization of what can be + added to the graph and what will cause a graph break. 
+ """ + + function_ids: Optional[Set[int]] = None + function_names: Optional[Dict[int, str]] = None + + def __init__(self, lazy_initializer: Callable[[], Union[Dict[int, str], Set[int]]]): + self.lazy_initializer = lazy_initializer + + def __call__(self): + if self.function_ids is None: + value = self.lazy_initializer() + if isinstance(value, dict): + self.function_ids = set(value.keys()) + self.function_names = value + else: + assert isinstance(value, set) + self.function_ids = value + return self.function_ids + + def get_name(self, idx: int, default: str): + self() # lazy init + assert self.function_names is not None + return self.function_names.get(idx, default) + + def add(self, idx: int): + function_ids = self() # lazy init + function_ids.add(idx) + + def remove(self, idx: int): + function_ids = self() + if idx in function_ids: + function_ids.remove(idx) + + def __contains__(self, idx: int): + return idx in self() + + +@FunctionIdSet +def _allowed_callable_ids() -> Dict[int, str]: + rv: Dict[int, str] = {} + return rv + + +@FunctionIdSet +def _disallowed_callable_ids() -> Dict[int, str]: + rv: Dict[int, str] = {} + return rv + + +@FunctionIdSet +def _builtin_function_ids() -> Dict[int, str]: + rv = { + id(v): f"builtins.{k}" + for k, v in builtins.__dict__.items() + if not k.startswith("_") and callable(v) + } + rv.update( + { + id(v): f"operator.{k}" + for k, v in operator.__dict__.items() + if not k.startswith("_") and callable(v) + } + ) + rv.update( + {id(v): f"functools.{v.__name__}" for v in (itertools.chain, itertools.islice)} + ) + rv.update( + { + id(cast): "typing.cast", + id(functools.reduce): "functools.reduce", + id(copy.deepcopy): "copy.deepcopy", + } + ) + return rv + + +@FunctionIdSet +def _numpy_function_ids() -> Dict[int, str]: + rv = dict() + for mod in NP_SUPPORTED_MODULES: + rv.update( + { + id(v): f"{mod.__name__}.{k}" + for k, v in mod.__dict__.items() + if callable(v) + and (getattr(v, "__module__", None) or mod.__name__) == mod.__name__ + } + ) + return rv + + +@FunctionIdSet +def _builtin_constant_ids() -> Dict[int, str]: + """ + Collects constant builtins by eliminating callable items. + """ + rv = { + id(v): f"builtins.{k}" + for k, v in builtins.__dict__.items() + if not k.startswith("_") and not callable(v) + } + return rv + + +_lazy_module_init: Dict[str, List[Callable[[], None]]] = defaultdict(list) + + +def add_module_init_func(name: str, init_func: Callable[[], None]) -> None: + """Register a module without eagerly importing it""" + # If the module is already imported, eagerly run init + assert "." 
not in name, f"Expected a root module name, but got {name}" + if name in sys.modules: + init_func() + + # Module is not yet imported, delay processing until needed + assert name not in _lazy_module_init + _lazy_module_init[name].append(init_func) + + +def _maybe_init_lazy_module(obj: object) -> None: + module = getattr(obj, "__module__", None) + if module is None: + return + + base_module = module.split(".")[0] + init_funcs = _lazy_module_init.pop(base_module, None) + if init_funcs is not None: + for fn in init_funcs: + fn() + + +def is_callable_allowed(obj) -> bool: + _maybe_init_lazy_module(obj) + return id(obj) in _allowed_callable_ids + + +def is_callable_disallowed(obj) -> bool: + _maybe_init_lazy_module(obj) + return id(obj) in _disallowed_callable_ids + + +def is_forbidden(obj) -> bool: + _maybe_init_lazy_module(obj) + return getattr(obj, "_dynamo_forbidden", False) + + +def is_builtin_callable(obj) -> bool: + return id(obj) in _builtin_function_ids + + +def is_builtin_constant(obj) -> bool: + return id(obj) in _builtin_constant_ids + + +def is_numpy(obj) -> bool: + if np is None: + return False + return isinstance(obj, (np.ndarray, np.generic)) or id(obj) in _numpy_function_ids + + +""" +A note on skip/inline rules: + +Dynamo consults this file to determine whether function should be inlined or skipped. + +A skip applies at the frame boundary, meaning dynamo either triggers a graph break +at the beginning of the frame or attempts to trace/inline the whole frame. When skipping +a frame, recursively called frames are still traced by dynamo unless also skipped. + +Skipfiles (skipped at the file level instead of function level) still apply on a +frame-by-frame boundary as dynamo traces, but apply to all functions in that file. + +@skip is a helper decorator that can be applied to your function to cause it to be +included here. + +Dynamo skip/inline rules & priorities are defined as follows: +* Inline is the default behavior and will be used unless explicitly skipped. +* Dynamo has two SKIPLIST: BUILTIN_SKIPLIST and THIRDPARTY_SKIPLIST. + * BUILTIN_SKIPLIST contains builtin python modules, such as abc, collections, etc. + * THIRDPARTY_SKIPLIST contains common third party libraries, such as numpy, pandas, etc. +* Functions in these two SKIPLISTs are always skipped, except: + * They have explicitly defined rule in `manual_torch_name_rule_map`; + * The corresponding python module has been put into MOD_INLINELIST. +* PyTorch(torch) is in the BUILTIN_SKIPLIST by default, but there are many cases + where we want inline the functions under torch namespace. + We should specify inline for the functions in `manual_torch_name_rule_map` or + put the corresponding python module into MOD_INLINELIST to make dynamo inline them. +* If you call functions under skipped modules/files, Dynamo will wrap these functions + as SkipFunctionVariable. There are a few functions(e.g, collections.OrderedDict) that + we have special handling at SkipFunctionVariable.call_function. + +Overall: *_INLINELIST has precedence over *_SKIPLIST has precedence over DEFAULT (inline) + +To figure out what the behavior is, check the following list in order: +* `manual_torch_name_rule_map` (Inline if YES) +* MOD_INLINELIST (Inline if YES) +* BUILTIN_SKIPLIST & THIRDPARTY_SKIPLIST (Skip if YES) +* Inline by default + +In general, if you want to force inline a function or module, please consider adding +the function's python module to MOD_INLINELIST first. 
+Use the `manual_torch_name_rule_map` only when there are other functions under the same module that +you don't want to inline them. +""" + + +BUILTIN_SKIPLIST = ( + abc, + collections, + contextlib, + copy, + copyreg, + dataclasses, + enum, + functools, + importlib, + inspect, + linecache, + logging, + multiprocessing, + operator, + os, + posixpath, + random, + re, + selectors, + signal, + tempfile, + threading, + tokenize, + torch, # torch/* is skipped by default unless specified in FUNC_INLINELIST or MOD_INLINELIST + traceback, + types, + typing, + unittest, + weakref, + _collections_abc, + _weakrefset, +) + +# third party libraries skiplist is defined by str, because users may not use these libraries. +# we should use lazy import & skip in the future. +THIRDPARTY_SKIPLIST = ( + "fx2trt_oss", + "hypothesis", + "networkx", + "numpy", + "omegaconf", + "onnx", + "onnxruntime", + "onnx_tf", + "pandas", + "sklearn", + "tabulate", + "tensorflow", + "tensorrt", + "torch2trt", + "tqdm", + "tree", + "tvm", + "xarray", +) + + +def _strip_init_py(s): + # TODO: Once we require py3.9 use removesuffix instead. + suffix = "__init__.py" + if s.endswith(suffix): + return s[: -len(suffix)] + else: + return s + + +def _module_dir(m: types.ModuleType): + # Protect against a module not exporting __file__ - this can happen for + # frozen modules, for example. + file = getattr(m, "__file__", None) + return file and _strip_init_py(file) + + +# These are legacy workarounds, don't add new modules to this list. +# Please use the MOD_INLINELIST instead to force inline functions under particular modules. +LEGACY_MOD_INLINELIST = { + "torch._dynamo.external_utils", + "torch._export.db.examples", + "torch._export.wrappers", + "torch._functorch.apis", + "torch._functorch.deprecated", + "torch._higher_order_ops.cond", + "torch.ao.quantization.pt2e.export_utils", + "torch.ao.quantization.pt2e.qat_utils", + "torch.ao.quantization.pt2e.representation.rewrite", + "torch.ao.quantization.pt2e.utils", + "torch.ao.quantization.quantizer.xnnpack_quantizer", + "torch.optim", +} + +if torch.distributed.is_available(): + LEGACY_MOD_INLINELIST |= { + "torch.distributed._tensor.api", + "torch.distributed._tensor.device_mesh", + "torch.distributed.device_mesh", + "torch.distributed.algorithms._checkpoint.checkpoint_wrapper", + "torch.distributed.tensor.parallel._data_parallel_utils", + "torch.distributed.tensor.parallel._utils", + "torch.distributed.tensor.parallel.style", + # we have to add replicate to LEGACY_MOD_INLINELIST to ensure + # the forward_hook won't be ignored. + "torch.distributed._composable.replicate", + } + + +# Force inline functions under these modules, even they are in *_SKIPLIST. +# We are using python module name instead of file or directory object to avoid circular dependency. +# Please keep this sorted alphabetically. 
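+# For example (an illustrative note, not an exhaustive rule): because "torch.utils._pytree" is
+# listed below, any file whose path starts with _module_dir(torch) + "utils/_pytree" is inlined
+# rather than skipped, even though `torch` itself appears in BUILTIN_SKIPLIST.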
+MOD_INLINELIST = { + "torch._refs", + "torch._prims", + "torch._decomp", + "torch._dynamo._trace_wrapped_higher_order_op", + "torch._dynamo.comptime", + "torch._dynamo.polyfill", + "torch._functorch.vmap", + "torch._functorch.eager_transforms", + "torch._inductor.test_operators", + "torch.amp.autocast_mode", + "torch.ao.nn", + "torch.autograd.function", + "torch.backends.cuda", + "torch.cuda.amp.autocast_mode", + "torch.distributions", + "torch.fx._pytree", + "torch.fx.passes.shape_prop", + "torch.nn", + "torch.random", + "torch.sparse", + "torch.testing", + "torch.testing._internal.hypothesis_utils", + "torch.utils._content_store", + "torch.utils._contextlib", + "torch.utils._foreach_utils", + "torch.utils._pytree", + "torch.utils.hooks", + "torch._tensor", + "torch._higher_order_ops.strict_mode", + "torch._higher_order_ops.while_loop", +} + + +if torch.distributed.is_available(): + MOD_INLINELIST.add("torch.distributed") + MOD_INLINELIST.add("torch.distributed._functional_collectives") + MOD_INLINELIST.add("torch.distributed._composable.replicate") + + +@functools.lru_cache(None) +def get_legacy_mod_inlinelist(): + inlinelist = set() + for m in LEGACY_MOD_INLINELIST: + inlinelist.add(_module_dir(torch) + m[len("torch.") :].replace(".", "/")) + return inlinelist + + +@functools.lru_cache(None) +def get_mod_inlinelist(): + inlinelist = set() + for m in MOD_INLINELIST: + inlinelist.add(_module_dir(torch) + m[len("torch.") :].replace(".", "/")) + return inlinelist + + +# skip some standard python builtin libs +SKIP_DIRS = [ + "", + _config_module.__file__, +] +SKIP_DIRS.extend(filter(None, (_module_dir(m) for m in BUILTIN_SKIPLIST))) + +SKIP_DIRS_RE = re.compile(r"match nothing^") + +is_fbcode = importlib.import_module("torch._inductor.config").is_fbcode() +# Skip fbcode paths(including torch.package paths) containing +# one of the following strings. 
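+# For example (hypothetical path): a frame from ".../torchrec/distributed/embedding.py" matches
+# FBCODE_SKIP_DIRS_RE below and is therefore skipped when running inside fbcode.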
+FBCODE_SKIP_DIRS = { + "torchrec/distributed", + "torchrec/fb/distributed", + "caffe2/torch/fb/sparsenn/pooled_embeddings_modules.py", +} +FBCODE_SKIP_DIRS_RE = re.compile(f".*({'|'.join(map(re.escape, FBCODE_SKIP_DIRS))})") + + +def _recompile_re(): + global SKIP_DIRS_RE + SKIP_DIRS_RE = re.compile(f"^({'|'.join(map(re.escape, SKIP_DIRS))})") + + +def add(import_name: str): + if isinstance(import_name, types.ModuleType): + return add(import_name.__name__) + assert isinstance(import_name, str) + from importlib.util import find_spec + + module_spec = find_spec(import_name) + if not module_spec: + return + origin = module_spec.origin + if origin is None: + return + global SKIP_DIRS_RE + SKIP_DIRS.append(_strip_init_py(origin)) + _recompile_re() + + +@dataclasses.dataclass +class SkipResult: + skipped: bool + reason: Optional[str] + + +def check_file(filename, is_inlined_call=False): + """Should skip this file?""" + if filename is None: + return SkipResult(True, "filename is None") + if any(filename.startswith(d) for d in get_legacy_mod_inlinelist()): + return SkipResult( + False, + "inlined according trace_rules.LEGACY_MOD_INLINELIST", + ) + if is_inlined_call and is_torch_inline_allowed(filename): + return SkipResult( + False, + "inlined according trace_rules.MOD_INLINELIST", + ) + if is_fbcode and bool(FBCODE_SKIP_DIRS_RE.match(filename)): + return SkipResult( + True, + "skipped according trace_rules.FBCODE_SKIP_DIRS", + ) + if bool(SKIP_DIRS_RE.match(filename)): + return SkipResult(True, "skipped according trace_rules.SKIP_DIRS") + else: + return SkipResult(False, "inlined by default") + + +@dataclasses.dataclass +class FunctionInfo: + py_obj: Optional[object] + name: Optional[str] + filename: str + code: Optional[types.CodeType] + + +""" +This is the main entry point to determine whether an object (function) should be inlined or skipped. +Let's illustrate the logic with an example: + @torch.compile + def f1(x, y): + ...... + f2(x, y) + ...... + + def f2(x, y): + ...... + f3(x, y) + ...... + + def f3(x, y): + ...... + +There are mainly three call sites of check/check_verbose: +* The compile region entrance (like function f1), the corresponding code is located in eval_frame.py. +* When tracing the recursively called functions (like function f2 and f3). + * Dynamo decides inline/skip every time it encounters a new recursively called function, and the call site + is in InliningInstructionTranslator.check_inlineable of symbolic_convert.py. + * If f2 is skipped by Dynamo, when evaluating the frame of f3, Dynamo needs the inline/skip check again + and the call site is in catch_errors_wrapper.catch_errors of convert_frame.py. +* For global variables and function arguments, Dynamo needs to decide if they are wrapped as SkipFunctionVariable in builder.py. + +`is_inlined_call` is used to indicate if the current function call is an inlined call (f2 is an inlined call if it passes the check) +or not (f3 is not an inlined call if f2 is skipped). Inside the `check_verbose` function, there are more rules +to be checked when `is_inlined_call` is True. +The reason to have this flag is that if the upper level function call (e.g., f2) is skipped, +we don't want to inline the lower level function call (e.g., f3) by default.
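+
+A usage sketch (illustrative; it assumes f2 is a plain user function and f3 lives under a SKIP_DIRS entry):
+    check(f2)  # False -> Dynamo will inline f2
+    check(f3)  # True  -> Dynamo will skip f3
+check_verbose returns the full SkipResult, whose `reason` names the rule that matched.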
+""" + + +def check_verbose(obj, is_inlined_call=False): + if isinstance( + obj, (UserFunctionVariable, UserMethodVariable, NestedUserFunctionVariable) + ): + try: + py_obj = obj.get_function() + except NotImplementedError: + py_obj = None + fi = FunctionInfo(py_obj, obj.get_name(), obj.get_filename(), obj.get_code()) + elif isinstance(obj, types.CodeType): + fi = FunctionInfo(None, obj.co_name, obj.co_filename, obj) + elif isinstance(obj, (types.FunctionType, types.MethodType)): + fi = FunctionInfo( + obj, obj.__name__, getfile(obj), obj.__code__ # type: ignore[union-attr] # FIXME Add MethodType.__code__ to typeshed + ) + else: + fi = FunctionInfo(obj, None, getfile(obj), None) + + # Consulte the central trace rules defined in torch._dynamo.trace_rules. + rule = torch._dynamo.trace_rules.lookup_inner( + fi.py_obj, fi.name, fi.filename, is_inlined_call + ) + if rule in [UserFunctionVariable, FunctorchHigherOrderVariable]: + return SkipResult( + False, + "inlined according trace_rules.lookup", + ) + else: + assert rule == SkipFunctionVariable, rule + return SkipResult( + True, + "skipped according trace_rules.lookup", + ) + + +def check(obj, is_inlined_call=False): + return check_verbose(obj, is_inlined_call).skipped + + +# skip common third party libs +for _name in THIRDPARTY_SKIPLIST: + add(_name) + +_recompile_re() + + +def is_torch_inline_allowed(filename): + return any(filename.startswith(d) for d in get_mod_inlinelist()) + + +@functools.lru_cache(None) +def dynamo_dir(): + import torch._dynamo + + return _module_dir(torch._dynamo) + + +def is_torch(filename): + if filename.startswith(dynamo_dir()): + return False + return filename.startswith(_module_dir(torch)) + + +""" +Main entry point for looking up the trace rule (the Dynamo variable) for a given callable object. +""" + + +def lookup_callable(obj): + if not hashable(obj): + return None + # Custom allow/disallow in graph takes precedence over the general lookup. + if is_callable_disallowed(obj): + return SkipFunctionVariable + if is_callable_allowed(obj): + return TorchInGraphFunctionVariable + if is_builtin_callable(obj): + return BuiltinVariable + + +""" +Main entry point for looking up the trace rule (the Dynamo variable) for a given function object. +E.g, the lookup result of `torch.sin` is `TorchInGraphFunctionVariable`. +""" + + +def lookup(obj): + return lookup_inner(obj) + + +def lookup_inner(obj, name=None, filename=None, is_direct_call=True): + # Step 1: lookup obj's tracing rule in `torch_name_rule_map`. + # The rules defined in `torch_name_rule_map` mainly includes two parts: + # - Manually defined rules for any functions. + # - The list of torch in graph functions. + if not hashable(obj): + return None + if obj is not None: + if is_aten_op_or_tensor_method(obj): + return TorchInGraphFunctionVariable + rule = get_torch_obj_rule_map().get(obj, None) + if rule is not None: + return rule + + # Step 2: lookup obj's tracing rule by function name. + if is_direct_call: + if name == "patched_init": + return SkipFunctionVariable + elif name == "__torch_function__": + return UserFunctionVariable + + # Step 3: lookup obj's tracing rule by filename. 
+ if filename is None: + filename = getfile(obj) + + if check_file(filename, is_direct_call).skipped: + return SkipFunctionVariable + else: + return UserFunctionVariable diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f97acb33086b92c2b38d2b71e083a3ab24669866 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_dynamo/utils.py @@ -0,0 +1,2548 @@ +import atexit +import collections +import contextlib +import copy +import cProfile +import dataclasses +import datetime +import dis +import enum +import functools +import gc +import inspect +import itertools +import linecache +import logging +import math +import operator +import os +import pstats +import re +import subprocess +import sys +import textwrap +import threading +import time +import types +import typing +import weakref +from contextlib import contextmanager +from functools import lru_cache, wraps +from pathlib import Path +from types import MethodWrapperType +from typing import ( + Any, + Callable, + cast, + ClassVar, + Counter, + DefaultDict, + Deque, + Dict, + Iterator, + KeysView, + List, + Optional, + Set, + Tuple, + Type, + Union, + ValuesView, +) + +from ..utils.hooks import RemovableHandle + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + +try: + import torch._logging + import torch._numpy as tnp + from torch._guards import detect_fake_mode # noqa: F401n + from torch._logging import LazyString + from . import config + + # NOTE: Make sure `NP_SUPPORTED_MODULES` and `NP_TO_TNP_MODULE` are in sync. + if np: + NP_SUPPORTED_MODULES: Tuple[types.ModuleType, ...] = ( + np, + np.fft, + np.linalg, + np.random, + ) + + NP_TO_TNP_MODULE = { + np: tnp, + np.fft: tnp.fft, + np.linalg: tnp.linalg, + np.random: tnp.random, + } + else: + NP_SUPPORTED_MODULES = tuple() + + NP_TO_TNP_MODULE = {} + from torch._subclasses.fake_tensor import FakeTensor, is_fake, maybe_get_fake_mode +except ImportError: + pass + +import importlib + +import torch +import torch._functorch.config +import torch.fx.experimental.symbolic_shapes +from torch import fx +from torch._dispatch.python import enable_python_dispatcher +from torch._utils_internal import log_compilation_event + +from torch.nn.modules.lazy import LazyModuleMixin +from torch.utils._pytree import tree_map_only + + +counters: DefaultDict[str, Counter[str]] = collections.defaultdict(collections.Counter) +optimus_scuba_log: Dict[str, Any] = {} +troubleshooting_url = "https://pytorch.org/docs/master/compile/troubleshooting.html" +nnmodule_doc_url = "https://pytorch.org/docs/master/compile/nn-module.html" +nnmodule_doc_url_msg = f"See {nnmodule_doc_url} for more information and limitations." 
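+
+# NOTE (illustrative): `counters` above is used as a two-level tally, keyed first by a
+# category and then by a reason string, e.g. counters["graph_break"][msg] += 1.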
+log = logging.getLogger(__name__) + +# profiling compilation time by function +compilation_time_metrics: Dict[str, List[float]] = {} + +# profiling compilation time by frame phase +frame_phase_timing: Dict[str, Dict[str, float]] = {} + +timer_counter = itertools.count() + + +def tabulate(rows, headers): + try: + import tabulate + + return tabulate.tabulate(rows, headers=headers) + except ImportError: + return "\n".join( + ", ".join(map(str, row)) for row in itertools.chain([headers], rows) + ) + + +def maybe_cprofile(func): + if config.cprofile: + return cprofile_wrapper(func) + return func + + +def cprofile_wrapper(func): + @wraps(func) + def profile_wrapper(*args, **kwargs): + global timer_counter + profile_cnt = next(timer_counter) + profile_path = Path(func.__name__ + f"{profile_cnt}.profile") + prof = cProfile.Profile() + prof.enable() + start_ts = time.time() + retval = prof.runcall(func, *args, **kwargs) + profile_latency = time.time() - start_ts + prof.disable() + print( + f"### Cprofile for {func.__name__} iter {profile_cnt} took {profile_latency:.3f} seconds ###" + ) + ps = pstats.Stats(prof) + prof.dump_stats(profile_path) + svg_path = profile_path.with_suffix(".svg") + try: + gprof2dot_process = subprocess.Popen( + [ + "gprof2dot", + "-f", + "pstats", + "--node-label=total-time-percentage", + "--node-label=self-time-percentage", + "--node-label=total-time", + str(profile_path), + ], + stdout=subprocess.PIPE, + ) + subprocess.check_call( + ["dot", "-Tsvg", "-o", str(svg_path)], + stdin=gprof2dot_process.stdout, + ) + print(f"Generated SVG from profile at {str(svg_path)}") + except FileNotFoundError: + print( + "Failed to generate SVG from profile -- dumping stats instead." + "Try installing gprof2dot and dot for a better visualization" + ) + ps.sort_stats(pstats.SortKey.TIME).print_stats(20) + ps.sort_stats(pstats.SortKey.CUMULATIVE).print_stats(20) + return retval + + return profile_wrapper + + +curr_frame = 0 + + +# Note: Called for you by dynamo - you almost never ever want to invoke this yourself. +def increment_frame(): + global curr_frame + curr_frame = curr_frame + 1 + + +# Note: Called for you by dynamo - you almost never ever want to invoke this yourself. +def reset_frame_count(): + global curr_frame + frame_phase_timing.clear() + compilation_time_metrics.clear() + curr_frame = 0 + + +op_count = 0 + + +def increment_op_count(cnt): + global op_count + op_count += cnt + + +# Print a report of time spent so far +# Ex: +# TIMING: +# entire_frame_compile:8.574629999999999 +# backend_compile:5.26806 +def print_time_report(): + total = 0.0 + total_by_key = {} + for timings in frame_phase_timing.values(): + for key, timing in timings.items(): + total += timing + if key not in total_by_key: + total_by_key[key] = timing + else: + total_by_key[key] += timing + + out = "TIMING:" + for key, value in total_by_key.items(): + out = f"{out} {key}:{round(value, 5)}" + + print(out) + + +# dynamo_timed API works as a function decorator +# By wrapping a function in dynamo_timed, we can store a record in compilation_time_metrics +# where the key is the functions name. +# For example: +# +# @dynamo_timed +# def _foo(...): +# +# Would show up as an entry in our timing dict: +# OrderedDict([('bar.._foo', [0.083690, 0.23949, 3.1425e-05])]) +# This is extremely useful for granular debugging. +# +# For a higher-level mode, pass a phase_name into dynamo_timed +# phase_names record an extra record into a separate compilation timing structure, +# one keyed on frame+name rather than function. 
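+#
+# For example (illustrative), a phase-level use might look like:
+#
+#   @dynamo_timed(phase_name="entire_frame_compile")
+#   def compile_inner(...):
+#       ...
+#
+# which would also accumulate the elapsed time into
+# frame_phase_timing[str(curr_frame)]["entire_frame_compile"].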
+# The frame is incremented outside of this function, in def increment_frame() above. + + +def dynamo_timed(original_function=None, phase_name=None): + def dynamo_timed_inner(func): + if config.cprofile: + return func + + @wraps(func) + def time_wrapper(*args, **kwargs): + key = func.__qualname__ + if key not in compilation_time_metrics: + compilation_time_metrics[key] = [] + with torch.profiler.record_function(f"{key} (dynamo_timed)"): + t0 = time.time() + r = func(*args, **kwargs) + time_spent = time.time() - t0 + compilation_time_metrics[key].append(time_spent) + if phase_name: + frame_key = str(curr_frame) + if frame_key not in frame_phase_timing: + frame_phase_timing[frame_key] = {} + if phase_name not in frame_phase_timing[frame_key]: + frame_phase_timing[frame_key][phase_name] = time_spent + else: + frame_phase_timing[frame_key][phase_name] += time_spent + return r + + return time_wrapper + + if original_function: + return dynamo_timed_inner(original_function) + return dynamo_timed_inner + + +def compile_times(repr="str", aggregate=False): + """ + Get metrics about torchdynamo frontend/backend compilation times. + + Accumulates information from functions tagged with `@dynamo_timed`. + + repr='str' returns a printable string for user interaction, and 'csv' + returns headers, rows which can be logged for output + + aggregate causes values from multiple compilations (e.g. split graphs) + to be accumulated into one value. If false, expect more than one value + per metric. + """ + + def fmt_fn(values, item_fn=lambda x: x): + if aggregate: + return item_fn(sum(values)) + return ", ".join(map(item_fn, values)) + + if repr == "str": + rows = [ + (k, fmt_fn(compilation_time_metrics[k], item_fn=lambda x: f"{x:.4f}")) + for k in compilation_time_metrics + ] + out = "TorchDynamo compilation metrics:\n" + out += tabulate(rows, headers=("Function", "Runtimes (s)")) + return out + elif repr == "csv": + values = [ + fmt_fn(v, item_fn=lambda x: f"{x:.6f}") + for v in compilation_time_metrics.values() + ] + headers = list(compilation_time_metrics.keys()) + return headers, values + + +@atexit.register +def dump_compile_times(): + log.info(compile_times(repr="str", aggregate=True)) + + +tensortype_to_dtype = { + torch.FloatTensor: (torch.float32, torch.float), + torch.DoubleTensor: (torch.float64, torch.double), + torch.HalfTensor: (torch.float16, torch.half), + torch.BFloat16Tensor: (torch.bfloat16,), + torch.ByteTensor: (torch.uint8,), + torch.CharTensor: (torch.int8,), + torch.LongTensor: (torch.int64, torch.long), + torch.IntTensor: (torch.int32, torch.int), + torch.ShortTensor: (torch.int16, torch.short), + torch.BoolTensor: (torch.bool,), +} + + +class DuplicateWarningChecker: + def __init__(self, maxsize=4096): + self.maxsize = maxsize + self.reset() + + def reset(self): + self.set = collections.OrderedDict() + + def add(self, key): + if key in self.set: + self.set.move_to_end(key, last=True) + if not config.verbose: + return False + else: + self.set[key] = None + while len(self.set) > self.maxsize: + self.set.popitem(last=False) + return True + + +graph_break_dup_warning_checker = DuplicateWarningChecker() + + +def setup_compile_debug(): + compile_debug = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1" + + if compile_debug: + torch._logging.set_logs( + dynamo=logging.DEBUG, + aot=logging.DEBUG, + inductor=logging.DEBUG, + output_code=True, # this is off by default + ) + return add_file_handler() + + return contextlib.ExitStack() + + +def reset_graph_break_dup_checker(): + 
graph_break_dup_warning_checker.reset() + + +def add_file_handler(): + log_path = os.path.join(get_debug_dir(), "torchdynamo") + os.makedirs(log_path, exist_ok=True) + + log_file_handler = logging.FileHandler(os.path.join(log_path, "debug.log")) + logger = logging.getLogger("torch._dynamo") + logger.addHandler(log_file_handler) + + exitstack = contextlib.ExitStack() + exitstack.callback(lambda: logger.removeHandler(log_file_handler)) + return exitstack + + +def setup_log_file(): + exitstack = contextlib.ExitStack() + if config.log_file_name is not None: + log_file_handler = logging.FileHandler(config.log_file_name) + for logger in torch._logging._internal.get_loggers(): + logger.addHandler(log_file_handler) + exitstack.callback(lambda: logger.removeHandler(log_file_handler)) + return exitstack + + return exitstack + + +def gen_record_file_name(exc, code): + return f"{get_debug_dir()}/error_recordings/\ +{code.co_name}_{type(exc).__name__}_{code.co_firstlineno}.rec" + + +def write_record_to_file(filename, exec_record): + try: + if os.path.exists(filename): + log.warning( + "Unable to write execution record %s; file already exists.", filename + ) + else: + os.makedirs(os.path.dirname(filename), exist_ok=True) + with open(filename, "wb") as f: + exec_record.dump(f) + except Exception: + log.exception("Unable to write execution record %s", filename) + + +def count_calls(g: fx.Graph): + c = 0 + for n in g.nodes: + if "call" in n.op: + c += 1 + return c + + +def identity(x): + return x + + +def hashable(x): + try: + hash(x) + return True + except TypeError: + return False + # cannot hash writable memoryview object + except ValueError: + return False + + +def nothing(*args, **kwargs): + pass + + +class ExactWeakKeyDictionary: + """Similar to weakref.WeakKeyDictionary, but use `is`/`id` rather than `==` to compare equality""" + + def __init__(self): + self.values = dict() + self.refs = dict() + + def __getitem__(self, key): + return self.values[id(key)] + + def get(self, key, default=None): + return self.values.get(id(key), default) + + def __contains__(self, key): + return id(key) in self.values + + def __setitem__(self, key, value): + idx = id(key) + if idx not in self.refs: + self.refs[idx] = weakref.ref(key, lambda ref: self._remove_id(idx)) + self.values[idx] = value + + def _remove_id(self, idx): + if idx in self.values: + del self.values[idx] + if idx in self.refs: + del self.refs[idx] + + def clear(self): + self.refs.clear() + self.values.clear() + + +def istype(obj, allowed_types): + """isinstance() without subclasses""" + if isinstance(allowed_types, (tuple, list, set)): + return type(obj) in allowed_types + return type(obj) is allowed_types + + +def is_typing(value): + # _Final catches most of typing classes: + # - Any + # - Callable + # - Union + # ... + # + # NB: we intentionally ignore classes that inherit from Generic, since they + # can be used as both TypingVariable as well as UserDefinedClassVariable. 
+ return isinstance(value, typing._Final) or value is typing.Generic # type: ignore[attr-defined] + + +def is_numpy_int_type(value): + if not np: + return False + + return istype( + value, + ( + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + ), + ) + + +def is_numpy_float_type(value): + if not np: + return False + + return istype( + value, + ( + np.float16, + np.float32, + np.float64, + ), + ) + + +def is_function_or_wrapper(value): + return ( + is_function(value) + or isinstance(value, functools._lru_cache_wrapper) + and is_function(inspect.getattr_static(value, "__wrapped__")) + or isinstance(value, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)) + ) + + +def is_function(value): + return isinstance( + value, + ( + types.FunctionType, + types.BuiltinFunctionType, + types.MethodDescriptorType, + types.WrapperDescriptorType, + torch.jit.ScriptFunction, + ), + ) + + +def unwrap_if_wrapper(fn): + return unwrap_with_attr_name_if_wrapper(fn)[0] + + +def unwrap_with_attr_name_if_wrapper(fn): + # unpack @functools.lru_cache wrapped function + if isinstance(fn, functools._lru_cache_wrapper): + fn = inspect.getattr_static(fn, "__wrapped__") + attr_name = "__wrapped__" + # unpack @torch._dynamo.optimize()(fn) wrapped function + elif is_function(fn) and inspect.getattr_static(fn, "_torchdynamo_inline", False): + fn = inspect.getattr_static(fn, "_torchdynamo_inline", fn) + attr_name = "_torchdynamo_inline" + # unpack torch.jit.script_if_tracing + elif is_function(fn) and inspect.getattr_static( + fn, "__script_if_tracing_wrapper", False + ): + fn = inspect.getattr_static(fn, "__original_fn", fn) + attr_name = "__original_fn" + else: + attr_name = None + return fn, attr_name + + +def is_numpy_ndarray(value): + if not np: + return False + + return istype(value, np.ndarray) + + +def istensor(obj): + """Check of obj is a tensor""" + tensor_list = ( + torch.Tensor, + torch.nn.Parameter, + *config.traceable_tensor_subclasses, + ) + tensor_list = tensor_list + (torch._subclasses.FakeTensor,) + return istype(obj, tensor_list) + + +def is_lazy_module(mod): + return isinstance(mod, LazyModuleMixin) + + +@functools.lru_cache(4096) +def print_once(*args): + print(*args) + + +def make_cell(val=None): + """Some black magic to create a cell object that usually only exists in a closure""" + x = val + + def f(): + return x + + assert f.__closure__ is not None and len(f.__closure__) == 1 + return f.__closure__[0] + + +def proxy_args_kwargs(args, kwargs): + try: + proxy_args = tuple(arg.as_proxy() for arg in args) + proxy_kwargs = {key: arg.as_proxy() for key, arg in kwargs.items()} + return proxy_args, proxy_kwargs + except NotImplementedError as e: + from .exc import unimplemented + from .variables.base import typestr + + raise unimplemented( + f"call_function args: {typestr(*args)} {typestr(*list(kwargs.values()))}" + ) from e + + +@dataclasses.dataclass +class CompilationMetrics: + frame_key: str + co_name: str + co_filename: str + co_firstlineno: int + cache_size: int + accumulated_cache_size: int + guard_count: Optional[int] + shape_env_guard_count: Optional[int] + graph_op_count: Optional[int] + graph_node_count: Optional[int] + graph_input_count: Optional[int] + start_time: float + entire_frame_compile_time_s: Optional[float] + backend_compile_time_s: Optional[float] + inductor_compile_time_s: Optional[float] + code_gen_time_s: Optional[float] + fail_type: Optional[str] + fail_reason: Optional[str] + fail_user_frame_filename: Optional[str] + 
fail_user_frame_lineno: Optional[int] + non_compliant_ops: Set[str] + compliant_custom_ops: Set[str] + + +DEFAULT_COMPILATION_METRICS_LIMIT = 64 + + +_compilation_metrics: Deque[CompilationMetrics] = collections.deque( + maxlen=DEFAULT_COMPILATION_METRICS_LIMIT +) + + +def record_compilation_metrics(compilation_metrics: CompilationMetrics): + global _compilation_metrics + _compilation_metrics.append(compilation_metrics) + if config.log_compilation_metrics: + log_compilation_event(compilation_metrics) + + +def set_compilation_metrics_limit(new_size: int) -> None: + global _compilation_metrics + while len(_compilation_metrics) > new_size: + _compilation_metrics.popleft() + new_deque = collections.deque(_compilation_metrics, maxlen=new_size) + _compilation_metrics = new_deque + + +def clear_compilation_metrics() -> None: + global _compilation_metrics + _compilation_metrics.clear() + + +def get_compilation_metrics() -> List[CompilationMetrics]: + return list(_compilation_metrics) + + +@dataclasses.dataclass +class CleanupHook: + """Remove a global variable when hook is called""" + + scope: Dict[str, Any] + name: str + + def __call__(self, *args): + CleanupManager.count -= 1 + del self.scope[self.name] + + @staticmethod + def create(scope, name, val): + assert name not in scope + CleanupManager.count += 1 + scope[name] = val + return CleanupHook(scope, name) + + +class CleanupManager(ExactWeakKeyDictionary): + count = 0 + instance: ClassVar["CleanupManager"] + + def _remove_id(self, idx): + for hook in self.values[idx]: + hook() + super()._remove_id(idx) + + +CleanupManager.instance = CleanupManager() + + +def clone_tensor(x): + """Clone the tensor and its gradient""" + y = x.clone().requires_grad_(x.requires_grad) + if x.is_leaf and x.grad is not None: + y.grad = x.grad.clone() + return y + + +def clone_input(x, *, dtype=None): + """copy while preserving strides""" + # TODO: this is questionable + if is_fake(x): + # this func fails on fake tensors in __torch_dispatch__ + return x + + def torch_clone(x): + y = torch.clone(x) + if x.is_leaf: + y.requires_grad_(x.requires_grad) + if x.is_leaf and x.grad is not None: + y.grad = clone_input(x.grad, dtype=dtype) + if hasattr(x, "_dynamo_dynamic_indices"): + y._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy() # type: ignore[attr-defined] + return y + + with torch.no_grad(): + if x.device.type == "xla": + # Access data_ptr() for a xla tensor will cause crash + return torch_clone(x) + + needed_size = sum( + (shape - 1) * stride for shape, stride in zip(x.size(), x.stride()) + ) + if x.is_quantized: + result = torch.empty_quantized((needed_size + 32,), x) + else: + result = torch.empty( + needed_size + 32, dtype=dtype or x.dtype, device=x.device + ) + cache_line_offset = ( + (x.data_ptr() - result.data_ptr()) % 32 + ) // x.element_size() + result.as_strided_(x.size(), x.stride(), cache_line_offset) + try: + result.copy_(x.clone()) + if x.is_leaf: + result.requires_grad_(x.requires_grad) + if x.is_leaf and x.grad is not None: + result.grad = clone_input(x.grad, dtype=dtype) + except RuntimeError: + # RuntimeError: unsupported operation: more than one element of the written-to + # tensor refers to a single memory location. Please clone() the tensor before + # performing the operation. 
+ return torch_clone(x) + if hasattr(x, "_dynamo_dynamic_indices"): + result._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy() # type: ignore[attr-defined] + return result + + +def clone_inputs(example_inputs): + res: Union[Dict[Any, Any], List[Any]] + if type(example_inputs) is dict: + res = dict(example_inputs) + for key, value in res.items(): + if isinstance(value, tuple): + res[key] = clone_inputs(value) + else: + assert isinstance(value, torch.Tensor), type(value) + res[key] = clone_input(value) + return res + + res = list(example_inputs) + for i in range(len(res)): + if isinstance(res[i], torch.Tensor): + res[i] = clone_input(res[i]) + return res + + +def skip_frame_if_in_functorch_mode(val: torch.Tensor): + try: + val.data_ptr() # will throw for functorch tensors + except RuntimeError as e: + from .exc import SkipFrame + + # This will be GradTrackingTensor/BatchedTensor/etc + functorch_subclass_name = re.sub(r"\(.*", "", repr(val)) + raise SkipFrame( + f"torch.compile cannot be run in context: {functorch_subclass_name}" + ) from e + + +@contextmanager +def preserve_rng_state(): + disable_functorch = torch._C._DisableFuncTorch + disable_current_modes = torch.utils._python_dispatch._disable_current_modes + with disable_current_modes(), disable_functorch(): + rng_state = torch.clone(torch.random.get_rng_state()) + skip_frame_if_in_functorch_mode(rng_state) + if torch.cuda.is_available(): + cuda_rng_state = torch.clone(torch.cuda.get_rng_state()) + try: + yield + finally: + with torch.utils._python_dispatch._disable_current_modes(): + torch.random.set_rng_state(rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) # type: ignore[possibly-undefined] + + +def is_jit_model(model0): + return isinstance( + model0, + ( + torch.jit._trace.TopLevelTracedModule, + torch.jit._script.RecursiveScriptModule, + torch.jit.ScriptFunction, + torch.jit.ScriptModule, + ), + ) + + +def torchscript(model, example_inputs, verbose=False): + if is_jit_model(model): + # already done? + return model + + try: + return torch.jit.trace(model, example_inputs) + except Exception: + try: + return torch.jit.script(model) + except Exception: + if verbose: + log.exception("jit error") + else: + log.error("Both torch.jit.trace and torch.jit.script failed") + return None + + +def getfile(obj): + try: + return inspect.getfile(obj) + except (TypeError, OSError): + return None + + +def is_namedtuple(obj): + """Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple""" + return is_namedtuple_cls(type(obj)) + + +def is_namedtuple_cls(cls): + """Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple""" + try: + if issubclass(cls, tuple): + bases = getattr(cls, "__bases__", []) or [None] + module = getattr(cls, "__module__", None) + return module == "torch.return_types" or ( + bases[0] is tuple and hasattr(cls, "_make") and hasattr(cls, "_fields") + ) + except TypeError: + pass + return False + + +@functools.lru_cache(1) +def namedtuple_fields(cls): + """Get the fields of a namedtuple or a torch.return_types.* quasi-namedtuple""" + if cls is slice: + return ["start", "stop", "step"] + + assert issubclass(cls, tuple) + if hasattr(cls, "_fields"): + # normal namedtuples + return cls._fields + + @dataclasses.dataclass + class Marker: + index: int + + # frustrating ones e.g. 
torch.return_types.max + assert cls.__module__ == "torch.return_types" + obj = cls(map(Marker, range(cls.n_fields))) + fields: List[Optional[str]] = [None] * cls.n_fields + for name in dir(obj): + if name[0] != "_" and isinstance(getattr(obj, name), Marker): + fields[getattr(obj, name).index] = name + return fields + + +def checkpoint_params(gm): + with torch.no_grad(): + rng_state = torch.clone(torch.random.get_rng_state()) + if torch.cuda.is_available(): + cuda_rng_state = torch.clone(torch.cuda.get_rng_state()) + saved_state = [] + for param in itertools.chain(gm.parameters(), gm.buffers()): + saved_state.append((param, param._version, torch.clone(param))) + + def restore(): + with torch.no_grad(): + torch.random.set_rng_state(rng_state) + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) + for param, version, original_value in saved_state: + if param._version != version: + param.copy_(original_value) + + return restore + + +def timed(model, example_inputs, times=1): + if torch.cuda.is_available(): + synchronize = torch.cuda.synchronize + else: + synchronize = nothing + + synchronize() + gc.collect() + torch.manual_seed(1337) + t0 = time.perf_counter() + for _ in range(times): + result = model(*example_inputs) + synchronize() + t1 = time.perf_counter() + return result, t1 - t0 # type: ignore[possibly-undefined] + + +def check_is_cuda(gm, example_inputs): + return all(x.is_cuda for x in itertools.chain(example_inputs, gm.parameters(True))) + + +@lru_cache(32) +def rot_n_helper(n): + assert n > 1 + vars = [f"v{i}" for i in range(n)] + rotated = reversed(vars[-1:] + vars[:-1]) + fn = eval(f"lambda {','.join(vars)}: ({','.join(rotated)})") + fn.__name__ = f"rot_{n}_helper" + return fn + + +common_constant_types = { + int, + float, + complex, + bool, + str, + bytes, + type(None), + Ellipsis.__class__, + types.CodeType, + torch.device, + torch.dtype, + torch.memory_format, + torch.layout, +} + + +def is_safe_constant(v): + if istype(v, (tuple, frozenset)): + return all(map(is_safe_constant, v)) + return isinstance(v, (enum.Enum, type)) or istype( + v, + common_constant_types | {slice}, + ) + + +def specialize_symnode(arg): + from .variables import ConstantVariable, SymNodeVariable + + # Guard and specialize + if isinstance(arg, SymNodeVariable): + return ConstantVariable.create(arg.evaluate_expr()) + + return arg + + +def guard_if_dyn(arg): + from .variables import ConstantVariable + + arg = specialize_symnode(arg) + + if isinstance(arg, ConstantVariable): + return arg.as_python_constant() + + return arg + + +def check_constant_args(args, kwargs): + return all(x.is_python_constant() for x in itertools.chain(args, kwargs.values())) + + +def check_unspec_python_args(args, kwargs): + from .variables.constant import ConstantVariable + from .variables.tensor import UnspecializedPythonVariable + + unspec_count = 0 + for x in itertools.chain(args, kwargs.values()): + if isinstance(x, UnspecializedPythonVariable): + unspec_count += 1 + elif not isinstance(x, (UnspecializedPythonVariable, ConstantVariable)): + return False + else: + pass + + return unspec_count > 0 + + +def check_numpy_ndarray_args(args, kwargs): + from .variables.tensor import NumpyNdarrayVariable + + return any( + isinstance(x, NumpyNdarrayVariable) + for x in itertools.chain(args, kwargs.values()) + ) + + +dict_keys: Type[KeysView[Any]] = type(dict().keys()) +dict_values: Type[ValuesView[Any]] = type(dict().values()) +odict_values: Type[ValuesView[Any]] = type(collections.OrderedDict().values()) 
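+# (Illustrative) these aliases capture builtin C view/iterator types that have no
+# public name, e.g. type({}.keys()) is dict_keys and type(iter(())) is the
+# tuple_iterator captured below.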
+tuple_iterator: Type[Iterator[Any]] = type(iter(tuple())) +tuple_iterator_len = tuple_iterator.__length_hint__ # type: ignore[attr-defined] +object_new = object.__new__ + + +def nn_module_new(cls): + obj = object_new(cls) + torch.nn.Module.__init__(obj) + return obj + + +def product(it): + return functools.reduce(operator.mul, it, 1) + + +def tuple_iterator_getitem(it, index): + _, (obj,), start = it.__reduce__() + return obj[start + index] + + +iter_next = next + + +def to_subclass(t, cls): + return t.as_subclass(cls) + + +def dict_keys_getitem(d, n): + return next(itertools.islice(iter(d), n, n + 1)) + + +def enum_repr(value, local): + # enum class can override __str__ method. Use __class__ and name attribute + # to extract the class name and key name. + name = value.__class__.__name__ + val = value.name + scope = "L" if local else "G" + local_name = f'{scope}["{name}"].{val}' + return local_name + + +def _get_fake_tensor(vt): + fake_tensor = vt.as_proxy().node.meta.get("example_value") + if not is_fake(fake_tensor): + from .exc import unimplemented + + unimplemented("Cannot check Tensor object identity without its fake value") + return fake_tensor + + +def iter_contains(items, search, tx, check_tensor_identity=False): + from .variables import ( + BuiltinVariable, + ConstantVariable, + TensorVariable, + VariableTracker, + ) + + if search.is_python_constant(): + found_const = any( + x.is_python_constant() + and x.as_python_constant() == search.as_python_constant() + for x in items + ) + return ConstantVariable.create(found_const) + + must_check_tensor_id = False + if check_tensor_identity and isinstance(search, TensorVariable): + must_check_tensor_id = True + # Match of Tensor means match of FakeTensor + search = _get_fake_tensor(search) + + found: Optional[VariableTracker] = None + for x in items: + if must_check_tensor_id: + if isinstance(x, TensorVariable): + if search is _get_fake_tensor(x): # Object equivalence + return ConstantVariable.create(True) + else: + check = BuiltinVariable(operator.eq).call_function(tx, [x, search], {}) + if found is None: + found = check + else: + found = BuiltinVariable(operator.or_).call_function( + tx, [check, found], {} + ) + if found is None: + found = ConstantVariable.create(False) + return found + + +def key_is_id(k): + """Returns whether it indexes dictionaries using its id""" + return isinstance(k, (torch.Tensor, torch.nn.Module, MethodWrapperType)) + + +def key_to_id(value): + return [id(k) if key_is_id(k) else k for k in value.keys()] + + +def const_repr(x, *, local) -> str: + from .trace_rules import is_builtin_callable + + if isinstance(x, (list, tuple)): + elems_repr = ",".join(const_repr(s, local=local) for s in x) + if isinstance(x, list): + return f"[{elems_repr}]" + else: + assert isinstance(x, tuple) + if len(x) == 1: + return f"({elems_repr},)" + else: + return f"({elems_repr})" + elif isinstance(x, enum.Enum): + # To workaround repr(Enum) returning invalid global reference before python 3.11 + # by calling enum_repr and removing quotes to render enum in guard code. + return enum_repr(x, local=local).replace("'", "") + elif is_builtin_callable(x): + return x.__name__ + elif isinstance(x, type): + + def fullname(o): + klass = o.__class__ + module = klass.__module__ + if module == "builtins": + return klass.__qualname__ # avoid outputs like 'builtins.str' + return module + "." 
+ klass.__qualname__ + + return fullname(x) + else: + return f"{x!r}" + + +def dict_keys_repr(const_keys, *, local) -> str: + keys_str = ",".join(const_repr(s, local=local) for s in const_keys) + return "[" + keys_str + "]" + + +GLOBAL_KEY_PREFIX = "__dict_key" + + +from torch._subclasses import UnsupportedFakeTensorException # noqa: F401 + + +def wrap_fake_exception(fn): + try: + return fn() + except UnsupportedFakeTensorException as e: + from .exc import unimplemented + + msg = f"Unsupported: {e.reason} with fake tensor propagation." + log.warning(msg) + raise unimplemented(msg) from e + + +def deepcopy_to_fake_tensor(obj, fake_mode): + with torch._subclasses.fake_tensor.FakeCopyMode(fake_mode): + return wrap_fake_exception(lambda: copy.deepcopy(obj)) + + +def rmse(ref, res): + """ + Calculate root mean squared error + """ + return torch.sqrt(torch.mean(torch.square(ref - res))) + + +def same( + ref, + res, + fp64_ref=None, + cos_similarity=False, + tol=1e-4, + equal_nan=False, + exact_dtype=True, + relax_numpy_equality=False, + ignore_non_fp=False, + log_error=log.error, +): + """Check correctness to see if ref and res match""" + if fp64_ref is None: + fp64_ref = ref + if isinstance(ref, (list, tuple, torch.nn.ParameterList, torch.Size)): + assert isinstance(res, (list, tuple)), f"type mismatch {type(ref)} {type(res)}" + if len(ref) != len(res): + log_error("Length mismatch") + return False + return len(ref) == len(res) and all( + same( + ai, + bi, + fp64_refi, + cos_similarity, + tol, + equal_nan, + exact_dtype, + relax_numpy_equality, + ignore_non_fp, + log_error=log_error, + ) + for ai, bi, fp64_refi in zip(ref, res, fp64_ref) + ) + elif isinstance(ref, dict): + assert isinstance(res, dict) + assert set(ref.keys()) == set( + res.keys() + ), f"keys mismatch {set(ref.keys())} == {set(res.keys())}" + for k in sorted(ref.keys()): + if not ( + same( + ref[k], + res[k], + fp64_ref[k], + cos_similarity=cos_similarity, + tol=tol, + equal_nan=equal_nan, + exact_dtype=exact_dtype, + relax_numpy_equality=relax_numpy_equality, + ignore_non_fp=ignore_non_fp, + log_error=log_error, + ) + ): + log_error("Accuracy failed for key name %s", k) + return False + return True + elif isinstance(ref, (torch.Tensor, float)): + assert not isinstance(ref, torch._subclasses.FakeTensor) + assert not isinstance(res, torch._subclasses.FakeTensor) + + def to_tensor(t): + return t if isinstance(t, torch.Tensor) else torch.tensor(t) + + ref, res, fp64_ref = (to_tensor(val) for val in (ref, res, fp64_ref)) + + if ref.is_sparse: + assert res.is_sparse + ref = ref.to_dense() + res = res.to_dense() + assert isinstance(res, torch.Tensor), f"type mismatch {type(ref)} {type(res)}" + if exact_dtype: + if ref.dtype != res.dtype: + log_error("dtype mismatch %s, %s", ref.dtype, res.dtype) + return False + if ref.dtype == torch.bool: + if ignore_non_fp: + return True + # triton stores bool as int8, so add this for more accurate checking + r = torch.allclose( + ref.to(dtype=torch.uint8), + res.to(dtype=torch.uint8), + atol=tol, + rtol=tol, + equal_nan=equal_nan, + ) + if not r: + log_error("Accuracy failed: uint8 tensor did not match") + return r + + if cos_similarity: + ref = ref.flatten().to(torch.float32) + res = res.flatten().to(torch.float32) + if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=True): + # early exit that handles zero/nan better + # cosine_similarity(zeros(10), zeros(10), dim=0) is 0 + return True + score = torch.nn.functional.cosine_similarity(ref, res, dim=0, eps=1e-6) + if score < 0.99: + 
log.warning("Similarity score=%s", score.cpu().detach().item()) + return score >= 0.99 + else: + if not exact_dtype: + ref = ref.to(res.dtype) + + # First try usual allclose + if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=equal_nan): + return True + + # Check error from fp64 version + if fp64_ref.dtype == torch.float64: + ref_error = rmse(fp64_ref, ref).item() + # ref unable to produce this with stable numerics in this precision, ignore + if math.isnan(ref_error): + log.warning( + "Found nan in reference. Consider running in higher precision." + ) + + res_error = rmse(fp64_ref, res).item() + + # In the case of using AMP (Automatic Mixed Precision), certain models have + # failed the benchmark's correctness check. However, the end-to-end model's + # accuracy when comparing AMP with FP32 is within a difference of less than 0.1%. + # Thus, it's possible that the correctness check failures for these models are + # false alarms. We use multiplier of 3 instead of 2 to avoid these false alarms. + multiplier = 3.0 if res.dtype == torch.bfloat16 else 2.0 + + if ( + fp64_ref.numel() < 1000 + or (ref.ndim == 4 and ref.shape[-1] == ref.shape[-2] == 1) + # large tol means a benchmark has been specified as REQUIRE_HIGHER_TOLERANCE + or tol >= 2 * 1e-2 + ): + # In the presence of noise, noise might dominate our error + # metric for smaller tensors. + # Similary, for 1x1 kernels, there seems to be high noise with amp. + multiplier = 3.0 + + passes_test = res_error <= (multiplier * ref_error + tol / 10.0) + if not passes_test: + log_error( + "RMSE (res-fp64): %.5f, (ref-fp64): %.5f and shape=%s", + res_error, + ref_error, + res.size(), + ) + # import pdb; pdb.set_trace() + return passes_test + + if ignore_non_fp: + return True + + log_error("Accuracy failed: allclose not within tol=%s", tol) + return False + elif isinstance(ref, (str, int, type(None), bool, torch.device)): + if ignore_non_fp: + return True + r = ref == res + if not r: + log_error("Accuracy failed (%s): %s != %s", type(ref), ref, res) + return r + elif is_numpy_int_type(ref) or is_numpy_float_type(ref): + if relax_numpy_equality and not ( + is_numpy_int_type(res) or is_numpy_float_type(res) + ): + ref = ref.item() + r = (type(ref) is type(res)) and (ref == res) + if not r: + log_error("Accuracy failed (numpy): %s != %s", ref, res) + return r + elif is_numpy_ndarray(ref): + return (type(ref) is type(res)) and same( + torch.as_tensor(ref), + torch.as_tensor(res), + fp64_ref, + cos_similarity=cos_similarity, + tol=tol, + equal_nan=equal_nan, + exact_dtype=exact_dtype, + relax_numpy_equality=relax_numpy_equality, + ignore_non_fp=ignore_non_fp, + log_error=log_error, + ) + elif type(ref).__name__ in ( + "MaskedLMOutput", + "Seq2SeqLMOutput", + "CausalLMOutputWithCrossAttentions", + "LongformerMaskedLMOutput", + "Instances", + "SquashedNormal", + "Boxes", + "Normal", + "TanhTransform", + "Foo", + "Variable", + ): + assert type(ref) is type(res) + return all( + same( + getattr(ref, key), + getattr(res, key), + getattr(fp64_ref, key), + cos_similarity=cos_similarity, + tol=tol, + equal_nan=equal_nan, + exact_dtype=exact_dtype, + relax_numpy_equality=relax_numpy_equality, + ignore_non_fp=ignore_non_fp, + log_error=log_error, + ) + for key in ref.__dict__.keys() + ) + else: + raise RuntimeError(f"unsupported type: {type(ref).__name__}") + + +def format_func_info(code): + short_filename = code.co_filename.split("/")[-1] + return f"'{code.co_name}' ({short_filename}:{code.co_firstlineno})" + + +@contextlib.contextmanager +def 
disable_cache_limit(): + prior = config.cache_size_limit + config.cache_size_limit = sys.maxsize + prior_acc_limit = config.accumulated_cache_size_limit + config.accumulated_cache_size_limit = sys.maxsize + + try: + yield + finally: + config.cache_size_limit = prior + config.accumulated_cache_size_limit = prior_acc_limit + + +# map from transformed code back to original user code +orig_code_map = ExactWeakKeyDictionary() + +# keep a record of code_obj -> list of guard failure reasons for logging +guard_failures: DefaultDict[Any, List[Any]] = collections.defaultdict(list) + +# Keep a record of graph break reasons for logging +graph_break_reasons: List["torch._dynamo.output_graph.GraphCompileReason"] = list() + +# keep record of compiled code, if we are in "error if recompile" +# to track code that dynamo has compiled previously +seen_code_map = ExactWeakKeyDictionary() + + +class CompileProfiler: + """Utility for profiling how and what dynamo would compile. + + Can be used for + * diagnosing recompilation issues + * determining an appropriate compile cache limit + * (TODO)confirming which functions got compiled/skipped + """ + + def __init__(self): + self.frame_count = 0 + self.op_count = 0 + self.backend_ctx_ctor = disable_cache_limit + + def __call__(self, gm: torch.fx.GraphModule, example_inputs): + self.frame_count += 1 + for node in gm.graph.nodes: + if "call" in node.op: + self.op_count += 1 + return gm.forward + + # no-op __enter__ and __exit__ to preserve BC + def __enter__(self): + return self + + def __exit__(self, typ, val, traceback): + pass + + def get_metrics(self): + return {"guard_failures": guard_failures} + + def report(self): + metrics = self.get_metrics() + gf = metrics["guard_failures"] + + def num_recompiles(code): + return len(gf[code]) + + def recompile_reasons(code): + return "\n".join([str(x) for x in gf[code]]) + + summarized_gf = [ + [format_func_info(code), num_recompiles(code), recompile_reasons(code)] + for code in gf + ] + + def graph_break_report(): + if "graph_break" in counters: + graph_breaks = counters["graph_break"] + return tabulate( + [[msg, graph_breaks[msg]] for msg in graph_breaks], + headers=["Graph Break Reason", "Count"], + ) + + def recompilation_report(): + if len(gf): + max_recompiles = max([num_recompiles(code) for code in gf]) + recomp_table = tabulate( + summarized_gf, + headers=["Function", "Recompiles", "Recompile Reasons"], + ) + return recomp_table + textwrap.dedent( + f""" + + Set torch._dynamo.config.cache_size_limit to {max_recompiles} to avoid being cache limited. + """ + ) + + report = textwrap.dedent( + """ + Torchdynamo Profiler Report + =========================== + + Graph Breaks + ------------ + Graph breaks happen when torchdynamo encounters code it can't safely trace. + If you want to find out why breaks are happening, check below for each break reason + You may gain additional insight by passing `fullgraph=True` to torch.compile, + to stop at the first break. + + """ + ) + report += graph_break_report() or "No graph breaks detected." + report += textwrap.dedent( + """ + + Recompilation + ------------- + These subgraphs were recompiled more than once due to guard failures + Guard failures indicate some condition assumed to be static by the tracer changed, + making it unsafe to reuse the compiled program. 
+ + """ + ) + report += recompilation_report() or "No recompilation detected.\n" + return report + + +# return same dir unless user changes config between calls +@functools.lru_cache(None) +def _get_debug_dir(root_dir): + dir_name = ( + "run_" + + datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f") + # use pid to avoid conflicts among ranks + + "-pid_" + + str(os.getpid()) + ) + return os.path.join(root_dir, dir_name) + + +def get_debug_dir(): + debug_root = config.debug_dir_root + return _get_debug_dir(debug_root) + + +def extract_fake_example_value(node, required=True): + if "example_value" in node.meta and is_fake(node.meta["example_value"]): + return node.meta["example_value"] + elif required: + from torch._dynamo.exc import unimplemented + + unimplemented("`FakeTensor` example value was required but not available") + else: + return None + + +def ensure_graph_fake(e, tx): + assert maybe_get_fake_mode(e) is tx.fake_mode + return e + + +def get_fake_values_from_nodes(tx, nodes, allow_non_graph_fake): + def visit(n: torch.fx.Node): + if n.op == "call_function" and "example_value" not in n.meta: + # fake tensor validity is checked inside get_fake_value using + # ensure_graph_fake + return get_fake_value(n, tx, allow_non_graph_fake) + + out = n.meta["example_value"] + if not allow_non_graph_fake and isinstance(out, torch.Tensor): + return ensure_graph_fake(out, tx) + return out + + return torch.fx.node.map_arg(nodes, visit) + + +def get_fake_value(node, tx, allow_non_graph_fake=False): + """ + Run the computation represented by `node` using fake tensors and return the result. + + allow_non_graph_fake: whether to allow the return result to be: + 1. non-fake or 2. fake that is not created by this instance of Dynamo. + If `True`, you must be prepared to deal with such return values, ideally + by further wrapping them as this graph's fakes. + """ + from torch.utils._sympy.value_ranges import ValueRangeError + from .exc import ( + TorchRuntimeError, + unimplemented, + Unsupported, + UserError, + UserErrorType, + ) + + op = node.op + + # FX Node should always return the same fake value + if "example_value" in node.meta and is_fake(node.meta["example_value"]): + return node.meta["example_value"] + + args, kwargs = get_fake_values_from_nodes( + tx, (node.args, node.kwargs), allow_non_graph_fake + ) + + nnmodule = None + if op == "call_method" and len(args) > 0 and isinstance(args[0], torch.nn.Module): + # If the first argument is nn.Module, should copy to fake mode. + args = (deepcopy_to_fake_tensor(args[0], tx.fake_mode),) + tuple(args[1:]) + + if op == "call_module": + nnmodule = tx.output.nn_modules[node.target] + + if is_lazy_module(nnmodule) and hasattr(nnmodule, "_initialize_hook"): + # In the case of a lazy module, we want to run + # the pre-hooks which initialize it. + # Afterwards, lazy module deletes its pre-hooks + # to avoid treating it as lazy on subsequent recompile. + nnmodule._infer_parameters(nnmodule, args) + + # no matter it's lazy module or not, we should copy to fake mode. 
+ nnmodule = deepcopy_to_fake_tensor(nnmodule, tx.fake_mode) + + try: + with tx.fake_mode, enable_python_dispatcher(): + ret_val = wrap_fake_exception( + lambda: run_node(tx.output, node, args, kwargs, nnmodule) + ) + except Unsupported: + raise + except RuntimeError as e: + cause: BaseException = e + if e.__cause__ is not None: + cause = e.__cause__ + + if isinstance( + cause, torch._subclasses.fake_tensor.DataDependentOutputException + ): + unimplemented( + f"data dependent operator: {cause.func}; " + "to enable, set torch._dynamo.config.capture_scalar_outputs = True" + ) + elif isinstance( + cause, torch._subclasses.fake_tensor.DynamicOutputShapeException + ): + unimplemented( + f"dynamic shape operator: {cause.func}; " + "to enable, set torch._dynamo.config.capture_dynamic_output_shape_ops = True" + ) + elif isinstance( + cause, torch._subclasses.fake_tensor.UnsupportedOperatorException + ): + op = cause.func + import_suggestion = "" + if isinstance(op, torch._ops.OpOverload): + maybe_pystub = torch._C._dispatch_pystub( + op._schema.name, op._schema.overload_name + ) + if maybe_pystub is not None: + module, ctx = maybe_pystub + import_suggestion = ( + f"It's possible that the support was implemented in " + f"module `{module}` and you may need to `import {module}`" + f"({ctx}), otherwise " + ) + unimplemented( + f"unsupported operator: {cause.func} ({import_suggestion}see " + "https://docs.google.com/document/d/1GgvOe7C8_NVOMLOCwDaYV1mXXyHMXY7ExoewHqooxrs/edit#heading=h.64r4npvq0w0" + " for how to fix)" + ) + elif isinstance( + cause, torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode + ): + raise UserError( # noqa: TRY200 + UserErrorType.CONSTRAINT_VIOLATION, + "Tried to use data-dependent value in the subsequent computation. " + "This can happen when we encounter unbounded dynamic value that is unknown during tracing time. " + "You will need to explicitly give hint to the compiler. Please take a look at " + f"constrain_as_value OR constrain_as_size APIs. {cause}", + case_name="constrain_as_size_example", + ) + elif isinstance(cause, ValueRangeError): + raise UserError(UserErrorType.CONSTRAINT_VIOLATION, e.args[0]) from e + raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None + + if not allow_non_graph_fake: + _ = tree_map_only( + torch.Tensor, functools.partial(ensure_graph_fake, tx=tx), ret_val + ) + return ret_val + + +_current_node = threading.local() + + +def get_current_node(): + return getattr(_current_node, "value", None) + + +@contextmanager +def set_current_node(node): + old = get_current_node() + _current_node.value = node + try: + yield + finally: + _current_node.value = old + + +def run_node(tracer, node, args, kwargs, nnmodule): + """ + Runs a given node, with the given args and kwargs. + + Behavior is dictated by a node's op. + + run_node is useful for extracting real values out of nodes. + See get_real_value for more info on common usage. + + Note: The tracer arg is only used for 'get_attr' ops + Note: The nnmodule arg is only used for 'call_module' ops + + Nodes that are not call_function, call_method, call_module, or get_attr will + raise an AssertionError. 
+ """ + op = node.op + + with set_current_node(node): + + def make_error_message(e): + return f"Failed running {op} {node.target}(*{args}, **{kwargs}):\n" + str(e) + + try: + if op == "call_function": + return node.target(*args, **kwargs) + elif op == "call_method": + return getattr(args[0], node.target)(*args[1:], **kwargs) + elif op == "call_module": + assert nnmodule is not None + return nnmodule(*args, **kwargs) + elif op == "get_attr": + return tracer.get_submodule(node.target) + elif op == "placeholder": + assert "example_value" in node.meta + return node.meta["example_value"] + + except (NotImplementedError, UnsupportedFakeTensorException) as e: + # NB: mimic how wrap_fake_exception does it + from .exc import unimplemented + + raise unimplemented(make_error_message(e)) from e + except Exception as e: + raise RuntimeError(make_error_message(e)).with_traceback( + e.__traceback__ + ) from e + + raise AssertionError(op) + + +def get_real_value(node, tracer): + """ + Run the actual computation represented by `node` and return the result. + This will execute any dependent nodes in the graph as well. + """ + from .exc import TorchRuntimeError + + cache = tracer.real_value_cache + if node in cache: + return cache[node] + + op = node.op + args, kwargs = torch.fx.node.map_arg( + (node.args, node.kwargs), + lambda n: get_real_value(n, tracer), + ) + + if op == "call_module": + nn_module = tracer.output_graph.nn_modules[node.target] + if not is_lazy_module(nn_module): + nn_module = copy.deepcopy(nn_module) + else: + # In the case of a lazy module, we want to run + # the pre-hooks which initialize it + nn_module(*args, **kwargs) + else: + nn_module = None + + try: + real_value = run_node(tracer, node, args, kwargs, nn_module) + cache[node] = real_value + except RuntimeError as e: + raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None + return real_value + + +def assert_no_fake_params_or_buffers(gm): + from torch._subclasses.fake_tensor import FakeTensorConfig + + def stack_or_hint(t): + if FakeTensorConfig.debug: + import traceback + + return f"FAKE TENSOR CREATION TRACEBACK: \n {traceback.format_list(t._debug_trace)}" + else: + return "Enable TORCH_FAKE_TENSOR_DEBUG=1 to get creation stack traces on fake tensors." + + for name, buffer in gm.named_buffers(): + assert not isinstance( + buffer, torch._subclasses.FakeTensor + ), f"Unexpected fake buffer {name} {stack_or_hint(buffer)}" + for name, param in gm.named_parameters(): + assert not isinstance( + param, torch._subclasses.FakeTensor + ), f"Unexpected fake param {name} {stack_or_hint(param)}" + + +def fqn(obj: Any): + """ + Returns the fully qualified name of the object. 
+ """ + return f"{obj.__module__}.{obj.__qualname__}" + + +def ifdynstaticdefault(count1, count2): + if torch._dynamo.config.assume_static_by_default: + return count1 + else: + return count2 + + +def import_submodule(mod: types.ModuleType): + """ + Ensure all the files in a given submodule are imported + """ + for filename in sorted(os.listdir(os.path.dirname(cast(str, mod.__file__)))): + if filename.endswith(".py") and filename[0] != "_": + importlib.import_module(f"{mod.__name__}.{filename[:-3]}") + + +def object_has_getattribute(value: Any): + try: + if isinstance( + inspect.getattr_static(type(value), "__getattribute__"), + types.FunctionType, + ): + return True + except AttributeError: + pass + return False + + +def get_custom_getattr(value: Any): + try: + getattr_fn = inspect.getattr_static(type(value), "__getattr__") + except AttributeError: + getattr_fn = None + if getattr_fn is torch.nn.Module.__getattr__: + # ignore this case of getattr + getattr_fn = None + return getattr_fn + + +class TensorStaticReason(enum.Enum): + PARAMETER = 2 + NOT_TENSOR = 4 + NN_MODULE_PROPERTY = 5 + + +def tensor_static_reason_to_message(reason: TensorStaticReason): + if reason == TensorStaticReason.PARAMETER: + return "mark_dynamic on parameter, parameters are always static today." + if reason == TensorStaticReason.NOT_TENSOR: + return "mark_dynamic on a non tensor, how did this happen?" + if reason == TensorStaticReason.NN_MODULE_PROPERTY: + return "tensor is static because it is nn module associated." + raise AssertionError(f"Illegal reason {reason}") + + +def tensor_always_has_static_shape( + tensor: Union[torch.Tensor, Any], + is_tensor: bool, + guard_source: "torch._guards.GuardSource", +) -> Tuple[bool, Optional[TensorStaticReason]]: + """ + Given a tensor, source, and is_tensor flag, determine if a shape should be static. + + Args: + tensor - the real tensor to evaluate, parameters force a static shape. + is_tensor - internal dynamo check, essentially "is_tensor": target_cls is TensorVariable, + tensors not in a TensorVariable for whatever reason are forced static. + + Returns a tuple, where the first element is the bool of whether or not this tensor should have a static shape. + The second element is a TensorStaticReason, useful for passing to tensor_static_reason_to_message if needed. 
+ """ + if guard_source.is_nn_module() and config.force_nn_module_property_static_shapes: + return True, TensorStaticReason.NN_MODULE_PROPERTY + if type(tensor) is torch.nn.Parameter and config.force_parameter_static_shapes: + return True, TensorStaticReason.PARAMETER + if not is_tensor: + return True, TensorStaticReason.NOT_TENSOR + return False, None + + +def lazy_format_graph_code(name, gm, maybe_id=None): + def format_name(): + if maybe_id is not None: + return f"{name} {maybe_id}" + else: + return name + + return LazyString( + lambda: _format_graph_code( + f"===== {format_name()} =====\n", + gm.forward.__code__.co_filename, + gm.print_readable(print_output=False), + ) + ) + + +def _format_graph_code(name, filename, graph_str): + return f"TRACED GRAPH\n {name} {filename} {graph_str}\n" + + +def lazy_format_graph_tabular(fn_name, gm): + def inner(): + try: + from tabulate import tabulate # TODO: Check that this is installed + except ImportError: + return ( + "Tabulate module missing, please install tabulate to log the graph in tabular format, logging code instead:\n" + + str(lazy_format_graph_code(fn_name, gm)) + ) + + node_specs = [ + [n.op, n.name, n.target, n.args, n.kwargs] for n in gm.graph.nodes + ] + graph_str = tabulate( + node_specs, headers=["opcode", "name", "target", "args", "kwargs"] + ) + return _format_graph_code(fn_name, gm.forward.__code__.co_filename, graph_str) + + return LazyString(inner) + + +def format_bytecode(prefix, name, filename, line_no, code): + return f"{prefix} {name} {filename} line {line_no} \n{dis.Bytecode(code).dis()}\n" + + +forward_hook_names = ["_forward_pre_hooks", "_forward_hooks"] +backward_hook_names = ["_backward_pre_hooks", "_backward_hooks"] +state_dict_hook_names = [ + "_state_dict_pre_hooks", + "_state_dict_hooks", + "_load_state_dict_pre_hooks", + "_load_state_dict_post_hooks", +] +all_hook_names = forward_hook_names + backward_hook_names + state_dict_hook_names + + +def nn_module_get_all_hooks( + mod, + check_forward_hooks=False, + check_backward_hooks=False, + check_state_dict_hooks=False, +): + reset_code = torch._C._dynamo.eval_frame.reset_code + """ + Sometimes its useful to differentiate between types of hooks such as forward/backward/pre + hooks executed during module.__call__, and state_dict hooks which are executed separately. + """ + hook_dicts_to_check = [] + check_all_hooks = ( + not check_forward_hooks + and not check_backward_hooks + and not check_state_dict_hooks + ) + if check_forward_hooks or check_all_hooks: + hook_dicts_to_check.extend(forward_hook_names) + if check_backward_hooks or check_all_hooks: + hook_dicts_to_check.extend(backward_hook_names) + if check_state_dict_hooks: + hook_dicts_to_check.extend(state_dict_hook_names) + + all_hooks = [] + for hook_dict_name in hook_dicts_to_check: + hooks = getattr(mod, hook_dict_name, []) + for hook_name in hooks: + hook = hooks[hook_name] + + all_hooks.append(hook) + return all_hooks + + +def nnmodule_has_hooks( + mod, + check_forward_hooks=False, + check_backward_hooks=False, + check_state_dict_hooks=False, +): + """ + Helper function to check if a module has any hooks attached to it. 
+    """
+    hooks = nn_module_get_all_hooks(
+        mod,
+        check_forward_hooks=check_forward_hooks,
+        check_backward_hooks=check_backward_hooks,
+        check_state_dict_hooks=check_state_dict_hooks,
+    )
+    return bool(hooks)
+
+
+def to_numpy_helper(value):
+    """Convert tensor and tnp.ndarray to numpy.ndarray."""
+    if is_fake(value):
+        return value
+    if isinstance(value, tnp.ndarray):
+        return to_numpy_helper(value.tensor)
+    elif isinstance(value, torch.Tensor):
+        return value.numpy(force=True)
+    elif isinstance(value, (tuple, list)):
+        return type(value)(to_numpy_helper(obj) for obj in value)
+    else:
+        return value
+
+
+def numpy_to_tensor(value):
+    """Convert tnp.ndarray to tensor, leave other types intact. If a list/tuple, loop through it to convert."""
+    assert np is not None
+    if isinstance(value, np.ndarray):
+        return torch.as_tensor(value)
+    if isinstance(value, tnp.ndarray):
+        return value.tensor
+    elif isinstance(value, (tuple, list)):
+        return type(value)(numpy_to_tensor(obj) for obj in value)
+    else:
+        return value
+
+
+class numpy_to_tensor_wrapper:
+    def __init__(self, f):
+        self.f = f
+        self.__name__ = "wrapped_" + self.f.__name__
+
+    def __repr__(self):
+        return f"<Wrapped function <original {self.f.__name__}>>"
+
+    def __call__(self, *args, **kwargs):
+        out = self.f(*args, **kwargs)
+        return numpy_to_tensor(out)
+
+
+def numpy_attr_wrapper(obj, name):
+    if isinstance(obj, tnp.ndarray):
+        out = getattr(obj, name)
+        return numpy_to_tensor(out)
+    elif isinstance(obj, torch.Tensor):
+        out = getattr(tnp.ndarray(obj), name)
+        return numpy_to_tensor(out)
+
+
+class numpy_method_wrapper:
+    """Convert obj from torch.Tensor to tnp.ndarray and call method. Then convert result back to torch.Tensor."""
+
+    def __init__(self, method: str):
+        self.method = method
+        self.__name__ = "wrapped_" + self.method
+
+    def __repr__(self):
+        return f"<Wrapped method <original {self.method}>>"
+
+    def __call__(self, *args, **kwargs):
+        obj = args[0]
+        if isinstance(obj, torch.Tensor):
+            obj = tnp.ndarray(obj)
+        method_callable = getattr(obj, self.method)
+        out = method_callable(*args[1:], **kwargs)
+        return numpy_to_tensor(out)
+
+
+class numpy_operator_wrapper:
+    """Implements dunder methods for tnp.ndarray via functions from the operator library"""
+
+    def __init__(self, op: Callable[..., Any]):
+        self.op = op
+        self.__name__ = f"wrapped_{op.__name__}"
+
+    def __repr__(self):
+        return f"<Wrapped operator <original {self.op.__name__}>>"
+
+    def __call__(self, *args, **kwargs):
+        assert not kwargs
+
+        args = (
+            tnp.ndarray(arg) if isinstance(arg, torch.Tensor) else arg for arg in args
+        )
+        out = self.op(*args)
+        return numpy_to_tensor(out)
+
+
+def defake(x):
+    if not isinstance(x, FakeTensor):
+        return x
+    size: "torch._prims_common.ShapeType"
+    stride: "torch._prims_common.StrideType"
+    if x._has_symbolic_sizes_strides:
+        size = []
+        for s in x.size():
+            if isinstance(s, torch.SymInt):
+                size.append(s.node.shape_env.size_hint(s.node.expr))
+            else:
+                size.append(s)
+        stride = []
+        for s in x.stride():
+            if isinstance(s, torch.SymInt):
+                stride.append(s.node.shape_env.size_hint(s.node.expr))
+            else:
+                stride.append(s)
+    else:
+        size = x.size()
+        stride = x.stride()
+    y = torch.empty_strided(
+        size,
+        stride,
+        dtype=x.dtype,
+        device=x.device,
+        requires_grad=x.requires_grad,
+    )
+    y.zero_()
+    return y
+
+
+def is_utils_checkpoint(obj):
+    # Lazy import to avoid circular dependencies
+    import torch.utils.checkpoint
+
+    return obj is torch.utils.checkpoint.checkpoint
+
+
+def build_checkpoint_variable(**options):
+    import torch._higher_order_ops.wrap as higher_order_ops
+    from .variables.higher_order_ops import TorchHigherOrderOperatorVariable
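A rough sketch of how the numpy interop helpers above compose (illustrative only; it assumes NumPy and torch._numpy are available, that the helpers are imported from torch._dynamo.utils, and the tensor values are made up):

import torch
import torch._numpy as tnp
from torch._dynamo.utils import numpy_to_tensor, numpy_method_wrapper

t = torch.arange(4.0)

# numpy_to_tensor unwraps a tnp.ndarray back to its underlying torch.Tensor.
assert isinstance(numpy_to_tensor(tnp.ndarray(t)), torch.Tensor)

# numpy_method_wrapper("mean") wraps the tensor as a tnp.ndarray, calls .mean(),
# and converts the tnp result back to a torch.Tensor.
wrapped_mean = numpy_method_wrapper("mean")
assert isinstance(wrapped_mean(t), torch.Tensor)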
+ + # TODO - This is a temporary situation where we have two versions of + # checkpointing implementation. We will converge on one and remove the other. + activation_checkpoint_op: "torch._ops.HigherOrderOperator" = ( + higher_order_ops.tag_activation_checkpoint + ) + if torch._functorch.config.functionalize_rng_ops: + activation_checkpoint_op = higher_order_ops.wrap_activation_checkpoint + + return TorchHigherOrderOperatorVariable.make( + activation_checkpoint_op, + **options, + ) + + +def is_compile_supported(device_type): + from .eval_frame import is_dynamo_supported + + compile_supported = is_dynamo_supported() + if device_type == "cpu": + pass + elif device_type == "cuda" and compile_supported: + from torch.utils._triton import has_triton + + compile_supported = has_triton() + else: + compile_supported = False + return compile_supported + + +# The following 3.11 source code functions are adapted from +# https://github.com/python/cpython/blob/v3.11.4/Lib/traceback.py +# in order to output source code corresponding to bytecode in 3.11+. +# We need our own versions since we want to support multiline expressions. +def _fix_offset(str: str, offset: int) -> int: + """ + Convert byte offset `offset` of `str` into character offset. + Byte offset is used for 3.11+ instruction column data. + Takes things like unicode characters into consideration. + + Unchanged from CPython implementation. + """ + as_utf8 = str.encode("utf-8") + return len(as_utf8[:offset].decode("utf-8", errors="replace")) + + +@dataclasses.dataclass +class _Anchors: + # inclusive + left_end_lineno: int + left_end_offset: int + right_start_lineno: int + # exclusive + right_start_offset: int + + +def _extract_anchors_from_expr(segment: str) -> Optional[_Anchors]: + """ + Given source code `segment` corresponding to a bytecode + instruction, determine: + - for binary ops, the location of the binary op + - for indexing, the location of the brackets. + `segment` is expected to be a valid Python expression + """ + assert sys.version_info >= (3, 11) + + import ast + + try: + # Without brackets, `segment` is parsed as a statement. + # We expect an expression, so wrap `segment` in + # brackets to handle multi-line expressions. + tree = ast.parse("(\n" + segment + "\n)") + except SyntaxError: + return None + + if len(tree.body) != 1: + return None + + lines = segment.split("\n") + + # get character index given byte offset + def normalize(lineno, offset): + return _fix_offset(lines[lineno], offset) + + # Gets the next valid character index in `lines`, if + # the current location is not valid. Handles empty lines. + def next_valid_char(lineno, col): + while lineno < len(lines) and col >= len(lines[lineno]): + col = 0 + lineno += 1 + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + # Get the next valid character index in `lines`. + def increment(lineno, col): + col += 1 + lineno, col = next_valid_char(lineno, col) + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + # Get the next valid character at least on the next line + def nextline(lineno, col): + col = 0 + lineno += 1 + lineno, col = next_valid_char(lineno, col) + assert lineno < len(lines) and col < len(lines[lineno]) + return lineno, col + + statement = tree.body[0] + if isinstance(statement, ast.Expr): + expr = statement.value + if isinstance(expr, ast.BinOp): + # ast gives locations for BinOp subexpressions, e.g. 
+ # ( left_expr ) + ( right_expr ) + # left^^^^^ right^^^^^ + # -2 since end_lineno is 1-indexed and because we added an extra + # bracket to `segment` when calling ast.parse + cur_lineno = cast(int, expr.left.end_lineno) - 2 + cur_col = normalize(cur_lineno, expr.left.end_col_offset) + cur_lineno, cur_col = next_valid_char(cur_lineno, cur_col) + + # Heuristic to find the operator character. + # The original CPython implementation did not look for ), \, or #, + # leading to incorrect anchor location, e.g. + # (x) + (y) + # ~~^~~~~~~ + while (ch := lines[cur_lineno][cur_col]).isspace() or ch in ")\\#": + if ch in "\\#": + cur_lineno, cur_col = nextline(cur_lineno, cur_col) + else: + cur_lineno, cur_col = increment(cur_lineno, cur_col) + + # binary op is 1 or 2 characters long, on the same line + right_col = cur_col + 1 + if ( + right_col < len(lines[cur_lineno]) + and not (ch := lines[cur_lineno][right_col]).isspace() + and ch not in "\\#" + ): + right_col += 1 + # right_col can be invalid since it is exclusive + + return _Anchors(cur_lineno, cur_col, cur_lineno, right_col) + elif isinstance(expr, ast.Subscript): + # ast gives locations for value and slice subexpressions, e.g. + # ( value_expr ) [ slice_expr ] + # value^^^^^ slice^^^^^ + # subscript^^^^^^^^^^^^^^^^^^^^ + # find left bracket (first '[' after value) + left_lineno = cast(int, expr.value.end_lineno) - 2 + left_col = normalize(left_lineno, expr.value.end_col_offset) + left_lineno, left_col = next_valid_char(left_lineno, left_col) + while lines[left_lineno][left_col] != "[": + left_lineno, left_col = increment(left_lineno, left_col) + # find right bracket (final character of expression) + right_lineno = cast(int, expr.end_lineno) - 2 + right_col = normalize(right_lineno, expr.end_col_offset) + return _Anchors(left_lineno, left_col, right_lineno, right_col) + elif isinstance(expr, ast.Call): + # ( func_expr ) (args, kwargs) + # func^^^^^ + # call^^^^^^^^^^^^^^^^^^^^^^^^ + # find left bracket (first '(' after func) + left_lineno = cast(int, expr.func.end_lineno) - 2 + left_col = normalize(left_lineno, expr.func.end_col_offset) + left_lineno, left_col = next_valid_char(left_lineno, left_col) + while lines[left_lineno][left_col] != "(": + left_lineno, left_col = increment(left_lineno, left_col) + # find right bracket (final character of expression) + right_lineno = cast(int, expr.end_lineno) - 2 + right_col = normalize(right_lineno, expr.end_col_offset) + return _Anchors(left_lineno, left_col, right_lineno, right_col) + + return None + + +def get_instruction_source_311(code: types.CodeType, inst: dis.Instruction) -> str: + """ + Python 3.11+ only. Returns lines of source code (from code object `code`) + corresponding to `inst`'s location data, and underlines relevant code to `inst`. + + Example: CALL on `g`: + f(g( + ^^ + h(x))) + ^^^^^ + + We need our own implementation since `format_frame_summary` in + Python's `traceback` module doesn't handle multi-line expressions + (and their anchor extraction code is not completely correct). + """ + assert inst.positions is not None + if inst.positions.lineno is None: + return "" + # The rstrip + "\n" pattern is used throughout this function to handle + # linecache.getline errors. Error lines are treated as empty strings "", but we want + # to treat them as blank lines "\n". 
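To make the offset handling concrete, a small sketch (illustrative, Python 3.11+ only, using the private helpers from this module): CPython's column data counts UTF-8 bytes, so _fix_offset converts a byte offset into a character offset, and _extract_anchors_from_expr then locates the operator inside a simple binary expression.

from torch._dynamo.utils import _extract_anchors_from_expr, _fix_offset

line = "héllo + x"                  # "é" is two bytes in UTF-8
assert _fix_offset(line, 7) == 6    # byte offset 7 is the "+", character offset 6

anchors = _extract_anchors_from_expr("a + b")
# the anchors bracket the "+" operator: columns [2, 3) on (relative) line 0
assert (anchors.left_end_lineno, anchors.left_end_offset) == (0, 2)
assert (anchors.right_start_lineno, anchors.right_start_offset) == (0, 3)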
+ first_line = linecache.getline(code.co_filename, inst.positions.lineno).rstrip() + if inst.positions.end_lineno is None: + return first_line + if inst.positions.col_offset is None or inst.positions.end_col_offset is None: + return first_line + + # character index of the start of the instruction + start_offset = _fix_offset(first_line, inst.positions.col_offset) + # character index of the end of the instruction + # compute later since end may be a different line + end_offset = None + # expression corresponding to the instruction so we can get anchors + segment = "" + # underline markers to be printed - start with `~` marker and replace with `^` later + markers = [] + + # Compute segment and initial markers + if inst.positions.end_lineno == inst.positions.lineno: + end_offset = _fix_offset(first_line, inst.positions.end_col_offset) + segment = first_line[start_offset:end_offset] + markers.append(" " * start_offset + "~" * (end_offset - start_offset)) + else: + segment = first_line[start_offset:] + "\n" + markers.append(" " * start_offset + "~" * (len(first_line) - start_offset)) + last_line = linecache.getline( + code.co_filename, inst.positions.end_lineno + ).rstrip() + end_offset = _fix_offset(last_line, inst.positions.end_col_offset) + for lineno in range(inst.positions.lineno + 1, inst.positions.end_lineno): + line = linecache.getline(code.co_filename, lineno).rstrip() + segment += line + "\n" + # don't underline leading spaces + num_spaces = len(line) - len(line.lstrip()) + markers.append(" " * num_spaces + "~" * (len(line) - num_spaces)) + segment += last_line[:end_offset] + num_spaces = len(last_line) - len(last_line.lstrip()) + markers.append(" " * num_spaces + "~" * (end_offset - num_spaces)) + + anchors: Optional[_Anchors] = None + try: + anchors = _extract_anchors_from_expr(segment) + except AssertionError: + pass + + # replace `~` markers with `^` where necessary + if anchors is None: + markers = [marker.replace("~", "^") for marker in markers] + else: + # make markers mutable + mutable_markers: List[List[str]] = [list(marker) for marker in markers] + + # anchor positions do not take start_offset into account + if anchors.left_end_lineno == 0: + anchors.left_end_offset += start_offset + if anchors.right_start_lineno == 0: + anchors.right_start_offset += start_offset + + # Turn `~`` markers between anchors to `^` + for lineno in range(len(markers)): + for col in range(len(mutable_markers[lineno])): + if lineno < anchors.left_end_lineno: + continue + if lineno == anchors.left_end_lineno and col < anchors.left_end_offset: + continue + if ( + lineno == anchors.right_start_lineno + and col >= anchors.right_start_offset + ): + continue + if lineno > anchors.right_start_lineno: + continue + if mutable_markers[lineno][col] == "~": + mutable_markers[lineno][col] = "^" + + # make markers into strings again + markers = ["".join(marker) for marker in mutable_markers] + + result = "" + for i in range(len(markers)): + result += ( + linecache.getline(code.co_filename, inst.positions.lineno + i).rstrip() + + "\n" + ) + result += markers[i] + "\n" + return result + + +def get_static_address_type(t): + if isinstance(t, torch.Tensor): + return getattr(t, "_dynamo_static_input_type", None) + + return None + + +def is_rng_state_getter_or_setter(value): + getters = ( + # The following two functions are not identical, so don't remove anyone! 
+ torch._C.Generator.get_state, + torch.default_generator.get_state, + torch.get_rng_state, + torch.cuda.get_rng_state, + ) + setters = ( + torch._C.Generator.set_state, + torch.default_generator.set_state, + torch.set_rng_state, + torch.cuda.set_rng_state, + ) + return value in (*setters, *getters) + + +def is_tensor_base_attr_getter(value): + return ( + isinstance(value, types.MethodWrapperType) + and value.__name__ == "__get__" + and value.__self__.__objclass__ is torch._C._TensorBase # type: ignore[attr-defined] + ) + + +def is_torch_function_object(value): + return hasattr(value, "__torch_function__") + + +def has_torch_function(vt: "torch._dynamo.variables.base.VariableTracker") -> bool: + from torch._dynamo.variables import UserDefinedObjectVariable + from torch._dynamo.variables.torch_function import TensorWithTFOverrideVariable + + return isinstance(vt, TensorWithTFOverrideVariable) or ( + isinstance(vt, UserDefinedObjectVariable) + and hasattr(vt.value, "__torch_function__") + ) + + +# see note [Tensor Fakification and Symbol Caching] +def to_fake_tensor(t, fake_mode): + symbolic_context = None + source = None + if tracing_context := torch._guards.TracingContext.try_get(): + if t in tracing_context.tensor_to_context: + symbolic_context = tracing_context.tensor_to_context[t] + source = symbolic_context.tensor_source + + return fake_mode.from_tensor( + t, static_shapes=False, symbolic_context=symbolic_context, source=source + ) + + +def get_first_attr(obj, *attrs): + """ + Return the first available attribute or throw an exception if none is present. + """ + for attr in attrs: + if hasattr(obj, attr): + return getattr(obj, attr) + + raise AssertionError(f"{obj} does not has any of the attributes: {attrs}") + + +@contextlib.contextmanager +def maybe_enable_compiled_autograd(should_enable): + def compiler_fn(gm): + def inner_compiler(gm_, example_inputs_): + torch._dynamo.utils.counters["compiled_autograd"]["compiles"] += 1 + return torch._inductor.compile(gm_, example_inputs_) + + return torch.compile(gm, backend=inner_compiler, fullgraph=True, dynamic=True) + + if should_enable: + with torch._dynamo.compiled_autograd.enable(compiler_fn) as ctx: + yield ctx + else: + yield + + +def invalid_removeable_handle(): + # need a subclass so weakref works + class Invalid(dict): # type: ignore[type-arg] + pass + + return RemovableHandle(Invalid()) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db4416d53f486acf2280193c0aed6060974294e1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_config_module.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_config_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca76f73f396ff95946219601e8ef9dfeed59ddbc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_config_module.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_contextlib.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_contextlib.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d94e1c995c4c497675431bf5680f2d27657a3ba6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_contextlib.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_cxx_pytree.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_cxx_pytree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a231adc80094cca8e3918ceacb3d196b0e5346ce Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_cxx_pytree.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_freeze.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_freeze.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed841550ac751c9b8da4171dd5964c43ec6a7a58 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_freeze.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_import_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_import_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35fe3b2d1fb6baf2f92dee4b1a3dbc3458c9f8f9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_import_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_stats.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_stats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3e8fb0e3af00579849a1ca0d496388076531be9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_stats.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_traceback.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_traceback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1034824222831bd4c676d292662c6fc63bfdb7fe Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_traceback.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_triton.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_triton.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c4bfe965fee896f88d27ffdb3b9234c814e6a52 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_triton.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_typing_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_typing_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..059350ebcbea3728fbab78a213a44cde18f3726b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_typing_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/bundled_inputs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/bundled_inputs.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0edcd406166687eda1327a908f65da356528d371 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/bundled_inputs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/collect_env.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/collect_env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81dc2f1fa0432fd308a461c1ae9deebc42464a66 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/collect_env.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_backtrace.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_backtrace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f90c2cb2a072d5371f4c6de97c0039a7c070a0ec Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_backtrace.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_extension.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_extension.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b661002feefe449e5f08781460fe6a7abf38586 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/cpp_extension.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/dlpack.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/dlpack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c425a6a8fe85dfa204ec8680fa4bfb6a30d69753 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/dlpack.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..016fcbbdf1924a4cda60277a3fd21f20924e5a0b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/weak.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ee6c0d39c821b8ee2465a26e7df916a93e29603 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/functions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45331543a204207032db9f075606a70ff2922991 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/functions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/interp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/interp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8ad4fbf9f270e59dfcaa573a53b367c09281562 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/interp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..791f15baaac5761903790f7705efa965952097ef Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/reference.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..302cba6c6d2f8de40c89ca33f498989d4f660bba Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/singleton_int.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/solve.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/solve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..040ffd6417436858343a72b11ecbb1a67b890e09 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/solve.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/value_ranges.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/value_ranges.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c12891be07c39deb48e71f74902d47ac06bdb9d1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/__pycache__/value_ranges.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/functions.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..ab58107524a95b5eb11785debe2ee3251f01cc85 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/functions.py @@ -0,0 +1,353 @@ +import sympy +from sympy import S +from sympy.core.logic import fuzzy_and, fuzzy_not, fuzzy_or + +__all__ = [ + "FloorDiv", "ModularIndexing", "CleanDiv", "CeilDiv", "Pow", "TrueDiv", + "LShift", "RShift", "IsNonOverlappingAndDenseIndicator", "Round", "RoundDecimal", +] + + +def fuzzy_eq(x, y): + if None in (x, y): + return None + return x == y + + +class FloorDiv(sympy.Function): + """ + We maintain this so that: + 1. We can use divisibility guards to simplify FloorDiv(a, b) to a / b. + 2. Printing out the expression is nicer (compared to say, representing a//b as (a - a % b) / b) + """ + nargs = (2,) + precedence = 50 # precedence of mul # noqa: F811 + + # Default return type for SymPy assumptions. 
+ # https://docs.sympy.org/latest/guides/assumptions.html#implementing-assumptions-handlers + is_real = True + + @property + def base(self): + return self.args[0] + + @property + def divisor(self): + return self.args[1] + + def _sympystr(self, printer): + base = printer.parenthesize(self.base, self.precedence) + divisor = printer.parenthesize(self.divisor, self.precedence) + return f"({base}//{divisor})" + + # SymPy assumptions based on argument types. + def _eval_is_real(self): + return fuzzy_or([self.base.is_real, self.divisor.is_real]) + + def _eval_is_integer(self): + return fuzzy_and([self.base.is_integer, self.divisor.is_integer]) + + # Automatic evaluation. + # https://docs.sympy.org/latest/guides/custom-functions.html#best-practices-for-eval + @classmethod + def eval(cls, base, divisor): + def check_supported_type(x): + if (x.is_integer is False and x.is_real is False and x.is_complex) or x.is_Boolean: + raise TypeError( + f"unsupported operand type(s) for //: " + f"'{type(base).__name__}' and '{type(divisor).__name__}'" + f", expected integer or real") + + check_supported_type(base) + check_supported_type(divisor) + + # We don't provide the same error message as in Python because SymPy + # makes it difficult to check the types. + if divisor.is_zero: + raise ZeroDivisionError("division by zero") + + if base.is_zero: + return sympy.S.Zero + if base.is_integer and divisor == 1: + return base + if base.is_real and divisor == 1: + return sympy.floor(base) + if base.is_integer and divisor == -1: + return sympy.Mul(base, -1) + if isinstance(base, sympy.Integer) and isinstance(divisor, sympy.Integer): + return base // divisor + if isinstance(base, (sympy.Integer, sympy.Float)) and isinstance(divisor, (sympy.Integer, sympy.Float)): + return sympy.floor(base / divisor) + if isinstance(base, FloorDiv): + return FloorDiv(base.args[0], base.args[1] * divisor) + if isinstance(divisor, sympy.Rational) and divisor.p == 1: + return sympy.floor(base * divisor.q) + + if isinstance(base, sympy.Add): + for a in base.args: + gcd = sympy.gcd(a, divisor) + if gcd == divisor: + return FloorDiv(base - a, divisor) + a / gcd + + try: + gcd = sympy.gcd(base, divisor) + if gcd != 1: + return FloorDiv( + sympy.simplify(base / gcd), sympy.simplify(divisor / gcd) + ) + except sympy.PolynomialError: + pass # https://github.com/pytorch/pytorch/issues/108276 + + +class ModularIndexing(sympy.Function): + """ + ModularIndexing(a, b, c) => (a // b) % c where % is the C modulus + """ + + nargs = (3,) + is_integer = True + + @classmethod + def eval(cls, base, divisor, modulus): + if base == 0 or modulus == 1: + return sympy.Integer(0) + + if ( + isinstance(base, sympy.Integer) + and isinstance(divisor, sympy.Integer) + and isinstance(modulus, sympy.Integer) + ): + return (base // divisor) % modulus + + try: + if divisor != 1: + gcd = sympy.gcd(base, divisor) + if gcd != 1: + return ModularIndexing( + sympy.simplify(base / gcd), sympy.simplify(divisor / gcd), modulus + ) + except sympy.PolynomialError: + pass # https://github.com/pytorch/pytorch/issues/108276 + + if isinstance(base, sympy.Add): + new_terms = [] + all_positive = True + for term in base.args: + if sympy.gcd(term, modulus * divisor) != modulus * divisor: + if (isinstance(term, sympy.Integer) and term < 0) or ( + isinstance(term, sympy.Mul) + and isinstance(term.args[0], sympy.Integer) + and term.args[0] < 0 + ): + # workaround for https://github.com/openai/triton/issues/619, + # if there are negative terms, // produces wrong result + # TODO if 
https://github.com/openai/triton/issues/619 is fixed + # this optimization would become valid + all_positive = False + break + else: + new_terms.append(term) + + if len(new_terms) != len(base.args) and all_positive: + return ModularIndexing(sum(new_terms), divisor, modulus) + + if isinstance(base, FloorDiv): + return ModularIndexing(base.args[0], base.args[1] * divisor, modulus) + + def _eval_is_nonnegative(self): + p, q = self.args[:2] + return fuzzy_eq(p.is_nonnegative, q.is_nonnegative) # type: ignore[attr-defined] + + def _eval_is_positive(self): + p, q = self.args[:2] + return fuzzy_eq(p.is_positive, q.is_positive) # type: ignore[attr-defined] + + +class Where(sympy.Function): + """ + Good ol' ternary operator + """ + + nargs = (3,) + + @classmethod + def eval(cls, c, p, q): + if c == sympy.true: + return p + elif c == sympy.false: + return q + +class Mod(sympy.Function): + """ + We maintain this so that we avoid SymPy correctness issues, such as: + https://github.com/sympy/sympy/issues/25146 + """ + + nargs = (2,) + + @classmethod + def eval(cls, p, q): + # This was adapted from: sympy/core/mod.py + + if q.is_zero: + raise ZeroDivisionError("Modulo by zero") + # If either of them is NaN or infinite. + if p is S.NaN or q is S.NaN or p.is_finite is False or q.is_finite is False: + return S.NaN + # Three cases: + # 1. p == 0 + # 2. p is either q or -q + # 3. p is integer and q == 1 + if p is S.Zero or p in (q, -q) or (p.is_integer and q == 1): + return S.Zero + + # Evaluate if they are both literals. + if q.is_Number and p.is_Number: + return p % q + + # If q == 2, it's a matter of whether p is odd or even. + if q.is_Number and q == 2: + if p.is_even: + return S.Zero + if p.is_odd: + return S.One + + # If p is a multiple of q. + r = p / q + if r.is_integer: + return S.Zero + + # If p < q and its ratio is positive, then: + # - floor(p / q) = 0 + # - p % q = p - floor(p / q) * q = p + less = p < q + if less.is_Boolean and bool(less) and r.is_positive: + return p + + def _eval_is_integer(self): + p, q = self.args + return fuzzy_and([p.is_integer, q.is_integer, fuzzy_not(q.is_zero)]) # type: ignore[attr-defined] + + def _eval_is_nonnegative(self): + return True if self.args[1].is_positive else None # type: ignore[attr-defined] + + def _eval_is_nonpositive(self): + return True if self.args[1].is_negative else None # type: ignore[attr-defined] + + +class CleanDiv(FloorDiv): + """ + Div where we can assume no rounding. + This is to enable future optimizations. + """ + + pass + + +class CeilDiv(sympy.Function): + """ + Div used in indexing that rounds up. + """ + + is_integer = True + + def __new__(cls, base, divisor): + if sympy.gcd(base, divisor) == divisor: + return CleanDiv(base, divisor) + else: + return FloorDiv(base + (divisor - 1), divisor) + + +class LShift(sympy.Function): + @classmethod + def eval(cls, base, shift): + if shift < 0: + raise ValueError('negative shift count') + return base * 2 ** shift + + +class RShift(sympy.Function): + @classmethod + def eval(cls, base, shift): + if shift < 0: + raise ValueError('negative shift count') + return base // 2 ** shift + +# Overloaded to be compatible with regular Python. +# https://github.com/pytorch/pytorch/issues/90900 +class Pow(sympy.Function): + @classmethod + def eval(cls, base, exp): + if exp.is_zero: + return sympy.Integer(1) + elif base.is_zero and exp < 0: + raise ZeroDivisionError(f"{base} cannot be raised to a negative power") + else: + return base ** exp + +# Overloaded to be compatible with regular Python. 
+# https://github.com/pytorch/pytorch/issues/90900 +class TrueDiv(sympy.Function): + @classmethod + def eval(cls, base, divisor): + if divisor.is_zero: + raise ZeroDivisionError("division by zero") + else: + return base / divisor + + +# TODO: As an indicator, this != 0 implies == 1 (and vice versa). +# Because we do not have the ability to guard on the stride permutation +# at the moment, it is hard to make further inferences when this is true, +# as although we know the tensor is contiguous in *some* layout, we don't +# know which one (however, you could, for example, make the inference that +# reshaping this to a 1D tensor can be guard-free.) +class IsNonOverlappingAndDenseIndicator(sympy.Function): + is_integer = True + + @classmethod + def eval(cls, *args): + assert len(args) % 2 == 0 + dim = len(args) // 2 + # TODO: it is possible to make progress evaluating this guard + # even if not all of the inputs are known. For example, a 2D + # tensor with non-0/1 sizes but strides (0, 1) is definitely + # false, because we know its numel > 1 but it's broadcasted + # in dim 0. + if all(isinstance(a, sympy.Integer) for a in args): + # sym_node imported in torch.__init__. Local import to avoid an import cycle + from torch.fx.experimental.symbolic_shapes import eval_is_non_overlapping_and_dense + + size_args = args[0:dim] + stride_args = args[dim:] + return eval_is_non_overlapping_and_dense( + [int(a) for a in size_args], + [int(a) for a in stride_args] + ) + return None + + +class Round(sympy.Function): + is_integer = True + + @classmethod + def eval(cls, number): + if number.is_integer: + return number + elif isinstance(number, sympy.Number): + return sympy.Integer(round(float(number))) + + def __int__(self): + # This will only ever be called when computing size hints. At that point, self.args[0] should be a number and + # no longer an expression. If it were, the float call would fail and the caller would handle this further. + return round(float(self.args[0])) # type: ignore[arg-type] + + +class RoundDecimal(sympy.Function): + @classmethod + def eval(cls, number, ndigits): + if number.is_integer and ndigits >= 0: + return number + elif isinstance(number, sympy.Number) and isinstance(ndigits, sympy.Integer): + value_type, output_type = (int, sympy.Integer) if isinstance(number, sympy.Integer) else (float, sympy.Float) + return output_type(round(value_type(number), int(ndigits))) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/interp.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/interp.py new file mode 100644 index 0000000000000000000000000000000000000000..86515b6b1aa77ed3b075e2a4ab48c1bd2570957a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/interp.py @@ -0,0 +1,118 @@ +""" +This is a simple interpreter for Sympy expressions that dispatches to +classes following the torch._inductor.virtualized calling convention. +For directness, the interpreter takes the handler directly rather than +consulting the TLS. It does not use most of the methods on the full +handler; only those with corresponding Sympy expressions. To see an example +of a full handler, see torch.utils._sympy.value_ranges.ValueRangeAnalysis. 
+""" + +import functools +from typing import Any, Dict, Union + +import sympy +from sympy.logic.boolalg import Boolean as SympyBoolean, BooleanAtom + +import torch +from .functions import ( + CleanDiv, + FloorDiv, + IsNonOverlappingAndDenseIndicator, + Mod, + ModularIndexing, + Pow, + Round, + RoundDecimal, + TrueDiv, + Where, +) + + +# TODO: Dedupe this with SYMPY_INTERP + + +@functools.lru_cache(None) +def handlers(): + # TODO add CeilDiv (it doesn't appear in the index_expr) + + # TODO default to some decompositions if the interpreter doesn't have them + # like decomposing ModularIndexing or implementing Le(a,b) as Ge(b, a) + + HANDLERS = { + sympy.Or: "or_", + sympy.And: "and_", + sympy.Eq: "eq", + sympy.Ne: "ne", + sympy.Lt: "lt", + sympy.Gt: "gt", + sympy.Le: "le", + sympy.Ge: "ge", + sympy.Not: "not_", + TrueDiv: "truediv", + FloorDiv: "floordiv", + CleanDiv: "div", + Where: "where", + sympy.Add: "add", + sympy.Mul: "mul", + Pow: "pow", + sympy.Pow: "pow", + Mod: "mod", + sympy.Mod: "mod", + sympy.Abs: "abs", + sympy.log: "log", + sympy.exp: "exp", + sympy.floor: "floor", + sympy.ceiling: "ceil", + sympy.Min: "minimum", + sympy.Max: "maximum", + ModularIndexing: "modular_indexing", + sympy.functions.elementary.piecewise.ExprCondPair: "expr_cond_pair", + sympy.Piecewise: "piecewise", + IsNonOverlappingAndDenseIndicator: "is_non_overlapping_and_dense_indicator", + Round: "round", + RoundDecimal: "round", + } + for name in ["cos", "sin", "tan", "sinh", "cosh", "tanh", "asin", "acos", "atan"]: + HANDLERS[getattr(sympy, name)] = name + + return HANDLERS + + +ASSOCIATIVE_OPS = {"minimum", "maximum", "mul", "add", "and_", "or_"} + + +def sympy_interp( + analysis, env: Dict[sympy.Symbol, Any], expr: Union[sympy.Expr, SympyBoolean] +): + # Handle base cases + dtype = None + if isinstance(expr, BooleanAtom): + dtype = torch.bool + elif isinstance(expr, sympy.Integer): + dtype = torch.int64 + elif isinstance(expr, sympy.Number): + dtype = torch.double + + if dtype is not None: + return analysis.constant(expr, dtype) + elif isinstance(expr, sympy.Symbol): + return env[expr] + + # Special cases + if isinstance(expr, sympy.Pow) and isinstance( + expr.args[1], sympy.core.numbers.Half + ): + return analysis.sqrt(sympy_interp(analysis, env, expr.args[0])) + + # Recursive case + args = [sympy_interp(analysis, env, arg) for arg in expr.args] # type: ignore[arg-type] + handler_name = handlers()[expr.func] + handler = getattr(analysis, handler_name) + if handler_name in ASSOCIATIVE_OPS: + assert len(args) > 1 + acc = handler(args[0], args[1]) + for i in range(2, len(args)): + acc = handler(acc, args[i]) + return acc + else: + return handler(*args) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/reference.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/reference.py new file mode 100644 index 0000000000000000000000000000000000000000..adb25c7ffb0fe60b3a752e52654052d7fa74dc3c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/reference.py @@ -0,0 +1,214 @@ +import math + +import sympy + +import torch + + +# The sympy interpretation of operators. It will also sometimes work with +# plain int/float, but if you do certain operations you will get out a +# sympy.Basic in the end. If you want the Python/FX traceable interpretation, +# check PythonReferenceAnalysis. 
+# NB: For magic methods this needs to use normal magic methods +# so that test_magic_methods works +class ReferenceAnalysis: + @staticmethod + def constant(c, dtype): + return sympy.sympify(c) + + @staticmethod + def or_(a, b): + return a | b + + @staticmethod + def and_(a, b): + return a & b + + @staticmethod + def eq(a, b): + if isinstance(a, sympy.Expr) or isinstance(b, sympy.Expr): + return sympy.Eq(a, b) + return a == b + + @classmethod + def ne(cls, a, b): + return cls.not_(cls.eq(a, b)) + + @staticmethod + def lt(a, b): + return a < b + + @staticmethod + def gt(a, b): + return a > b + + @staticmethod + def le(a, b): + return a <= b + + @staticmethod + def ge(a, b): + return a >= b + + @staticmethod + def not_(a): + assert not isinstance(a, bool) + return ~a + + @staticmethod + def reciprocal(x): + return 1 / x + + @staticmethod + def square(x): + return x * x + + @staticmethod + def mod(x, y): + return x % y + + @staticmethod + def abs(x): + return abs(x) + + @staticmethod + def neg(x): + return -x + + @staticmethod + def truediv(a, b): + return a / b + + @staticmethod + def div(a, b): + return ReferenceAnalysis.truediv(a, b) + + @staticmethod + def floordiv(a, b): + if b == 0: + return sympy.nan if a == 0 else sympy.zoo + return a // b + + @staticmethod + def truncdiv(a, b): + result = a / b + if result.is_finite: + result = sympy.Integer(result) + + return result + + @staticmethod + def add(a, b): + return a + b + + @staticmethod + def mul(a, b): + return a * b + + @staticmethod + def sub(a, b): + return a - b + + @staticmethod + def exp(x): + return sympy.exp(x) + + @staticmethod + def log(x): + return sympy.log(x) + + @staticmethod + def sqrt(x): + return sympy.sqrt(x) + + @staticmethod + def pow(a, b): + return a**b + + @staticmethod + def minimum(a, b): + # Poorman's version of upcasting in Sympy + # This won't do for sympy.Expr as the casting does nothing for those + if a.is_Float or not a.is_finite or b.is_Float or not b.is_finite: + result_type = sympy.Float + else: + assert a.is_Integer + assert b.is_Integer + result_type = sympy.Integer + return sympy.Min(result_type(a), result_type(b)) + + @staticmethod + def maximum(a, b): + # Poorman's version of upcasting in Sympy + # This won't do for sympy.Expr as the casting does nothing for those + if a.is_Float or not a.is_finite or b.is_Float or not b.is_finite: + result_type = sympy.Float + else: + assert a.is_Integer + assert b.is_Integer + result_type = sympy.Integer + return sympy.Max(result_type(a), result_type(b)) + + @staticmethod + def floor(x): + return sympy.floor(x) + + @staticmethod + def ceil(x): + return sympy.ceiling(x) + + +# Unlike ReferenceAnalysis, does NOT sympyify, instead, works with plain +# Python types and is FX traceable. Inheritance here is purely for code +# sharing (TODO: considering splitting out a BaseReferenceAnalysis). 
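Putting the sympy pieces together, a minimal sketch (not part of the diff; import paths assume the files land under torch.utils._sympy as shown above) of sympy_interp driving ReferenceAnalysis over an expression built with the custom FloorDiv function:

import sympy
from torch.utils._sympy.functions import FloorDiv
from torch.utils._sympy.interp import sympy_interp
from torch.utils._sympy.reference import ReferenceAnalysis

x = sympy.Symbol("x", integer=True)
expr = FloorDiv(x + 3, 2)   # stays symbolic: (x + 3) // 2

# sympy_interp recurses over expr.args, maps FloorDiv -> "floordiv" via handlers(),
# and calls ReferenceAnalysis.floordiv on the evaluated children.
result = sympy_interp(ReferenceAnalysis, {x: sympy.Integer(7)}, expr)
assert result == 5          # (7 + 3) // 2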
+class PythonReferenceAnalysis(ReferenceAnalysis): + @staticmethod + def constant(c, dtype): + if dtype is torch.int64: + return int(c) + elif dtype is torch.double: + return float(c) + elif dtype is torch.bool: + return bool(c) + else: + raise AssertionError(f"unrecognized dtype {dtype}") + + @staticmethod + def not_(a): + return torch.sym_not(a) + + @staticmethod + def floordiv(a, b): + return a // b + + @staticmethod + def truncdiv(a, b): + return a / b + + @staticmethod + def exp(x): + raise AssertionError("exp is not valid shape sympy expr") + + @staticmethod + def log(x): + raise AssertionError("log is not valid shape sympy expr") + + @staticmethod + def sqrt(x): + return torch._sym_sqrt(x) # type: ignore[attr-defined] + + @staticmethod + def minimum(a, b): + return torch.sym_min(a, b) + + @staticmethod + def maximum(a, b): + return torch.sym_max(a, b) + + @staticmethod + def floor(x): + return math.floor(x) + + @staticmethod + def ceil(x): + return math.ceil(x) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py new file mode 100644 index 0000000000000000000000000000000000000000..870bda554e74808d9423e1d79ebf2dfbdee93f91 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/singleton_int.py @@ -0,0 +1,94 @@ +import sympy +from sympy.multipledispatch import dispatch + +__all__ = ["SingletonInt"] + + +class SingletonInt(sympy.AtomicExpr): + # This is probably not super important unless we are in multiple dispatch + # situations with other more exotic Expr types. + _op_priority = 99999 + + def __new__(cls, *args, coeff=None, **kwargs): + instance = super().__new__(cls, *args, **kwargs) + return instance + + # The semantics of this class should match that of NestedIntSymNodeImpl in + # c10/core/NestedIntSymNodeImpl.h + def __init__(self, val, *, coeff=1): + self._val = val + self._coeff = coeff + super().__init__() + + # See NOTE [ Inequalities with nested int ] + def _eval_Eq(self, other): + if ( + isinstance(other, SingletonInt) + and other._val == self._val + and self._coeff == other._coeff + ): + return sympy.true + else: + return sympy.false + + # This is necessary so that calling expr.free_symbols on exprs that contain + # this Singleton does not error + @property + def free_symbols(self): + return set() + + def __mul__(self, other): + if isinstance(other, SingletonInt): + raise ValueError( + "SingletonInt cannot be multiplied by another SingletonInt" + ) + return SingletonInt(self._val, coeff=self._coeff * other) + + def __rmul__(self, other): + if isinstance(other, SingletonInt): + raise ValueError( + "SingletonInt cannot be multiplied by another SingletonInt" + ) + return SingletonInt(self._val, coeff=self._coeff * other) + + # Make sure we promptly raise an error instead of falling back to building + # an expression tree. There are probably more ops, how can we be exhaustive? 
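For intuition, a small sketch of the intended behavior (illustrative only): multiplying a SingletonInt by a plain integer just scales its coefficient, while arithmetic that would need a real expression tree raises immediately via the stubs below.

from torch.utils._sympy.singleton_int import SingletonInt

j = SingletonInt(1, coeff=1)
k = 3 * j                         # __rmul__: same val, coefficient becomes 3
assert k._val == 1 and k._coeff == 3

try:
    j + 1                         # hits __add__ below, which is intentionally NYI
except NotImplementedError:
    pass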
+ def __add__(self, other): + raise NotImplementedError("NYI") + + def __sub__(self, other): + raise NotImplementedError("NYI") + + def __truediv__(self, other): + raise NotImplementedError("NYI") + + def __floordiv__(self, other): + raise NotImplementedError("NYI") + + def __mod__(self, other): + raise NotImplementedError("NYI") + + +# See NOTE [ Inequalities with nested int ] +@dispatch(sympy.Integer, SingletonInt) +def _eval_is_ge(a, b): + if a < 2: + return sympy.false + raise ValueError("Symbolic SingletonInt: Relation is indeterminate") + + +@dispatch(SingletonInt, sympy.Integer) # type: ignore[no-redef] +def _eval_is_ge(a, b): # noqa: F811 + if b <= 2: + return sympy.true + raise ValueError("Symbolic SingletonInt: Relation is indeterminate") + + +@dispatch(SingletonInt, SingletonInt) # type: ignore[no-redef] +def _eval_is_ge(a, b): # noqa: F811 + if a._val == b._val: + if a._coeff >= b._coeff: + return sympy.true + else: + return sympy.false + raise ValueError("Symbolic SingletonInt: Relation is indeterminate") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/solve.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/solve.py new file mode 100644 index 0000000000000000000000000000000000000000..4d1113bea891df2be89e7bb324a558a73afc0425 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/solve.py @@ -0,0 +1,175 @@ +import logging + +from typing import Dict, Optional, Tuple, Type + +import sympy + +from torch.utils._sympy.functions import FloorDiv + +log = logging.getLogger(__name__) + +_MIRROR_REL_OP: Dict[Type[sympy.Basic], Type[sympy.Rel]] = { + sympy.Eq: sympy.Eq, + sympy.Ne: sympy.Ne, + sympy.Ge: sympy.Le, + sympy.Gt: sympy.Lt, + sympy.Le: sympy.Ge, + sympy.Lt: sympy.Gt, +} + +INEQUALITY_TYPES = (sympy.Gt, sympy.Ge, sympy.Lt, sympy.Le) + + +def mirror_rel_op(type: Type) -> Optional[Type[sympy.Rel]]: + return _MIRROR_REL_OP.get(type, None) + + +# Tries to simplify 'expr', so as to leave only 'thing' in the left-hand side. +# +# Returns a tuple of: +# 1. The simplified expression +# 2. The expression on the right-hand side +# +# Returns 'None' if it can't reach a state where the only thing in the left +# hand side is 'thing'. +# +# 'trials': number of times 'try_solve' will try to isolate 'thing' to the +# left-hand side. +# +# 'floordiv_inequality': flag to enable conversion of 'FloorDiv' into +# inequalities. +def try_solve( + expr: sympy.Basic, + thing: sympy.Basic, + trials: int = 5, + floordiv_inequality: bool = True, +) -> Optional[Tuple[sympy.Rel, sympy.Basic]]: + mirror = mirror_rel_op(type(expr)) + + # Ignore unsupported expressions: + # - Those that are not relational operations + # - Those that don't have a mirror (just avoiding unexpected classes) + if not isinstance(expr, sympy.Rel) or mirror is None: + log.debug("expression with unsupported type: %s", type(expr)) + return None + + lhs_has_thing = expr.lhs.has(thing) + rhs_has_thing = expr.rhs.has(thing) + + # Give up when 'thing' appears on both sides of the relational expression. + # That is because, as is, we assume the thing we are trying to isolate is + # only on the right-hand side. + if lhs_has_thing and rhs_has_thing: + log.debug("thing (%s) found in both sides of expression: %s", thing, expr) + return None + + # Try considering both LHS and RHS by mirroring the original expression: + # a < b ==> b > a + expressions = [] + + # Add each version of 'expr' if 'thing' is in its left-hand side. 
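As a rough illustration of the overall goal (a sketch, not part of the file): constants on the left-hand side are moved to the right, and dividing by a negative factor mirrors the relation.

import sympy
from torch.utils._sympy.solve import try_solve

x = sympy.Symbol("x")

# x + 2 == 10  ->  (Eq(x, 8), 8)
r = try_solve(sympy.Eq(x + 2, 10), x)
assert r is not None and r[1] == 8

# -2*x < 8  ->  (x > -4, -4): dividing by -2 flips Lt into Gt
r = try_solve(sympy.Lt(-2 * x, 8), x)
assert r is not None and r[0] == sympy.Gt(x, -4)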
+ if lhs_has_thing: + expressions.append(expr) + if rhs_has_thing: + expressions.append(mirror(expr.rhs, expr.lhs)) + + for e in expressions: + if e is None: + continue + + assert isinstance(e, sympy.Rel) + + for _ in range(trials): + trial = _try_isolate_lhs(e, thing, floordiv_inequality=floordiv_inequality) + # Stop if there was no change in this trial. + if trial == e: + break + e = trial # type: ignore[assignment] + + # Return if we were able to isolate 'thing' on the left-hand side. + if isinstance(e, sympy.Rel) and e.lhs == thing: + return e, e.rhs + + return None + + +def _try_isolate_lhs( + expr: sympy.Basic, thing: sympy.Basic, floordiv_inequality: bool +) -> sympy.Basic: + e = expr + op = type(expr) + + if isinstance(e, sympy.Rel): + # Move any constants in the left-hand side to the right-hand side. + lhs_not_thing = ( + sum([a for a in e.lhs.args if not a.has(thing)]) + if isinstance(e.lhs, sympy.Add) + else 0 + ) + e = op(expr.lhs - lhs_not_thing, expr.rhs - lhs_not_thing) # type: ignore[attr-defined] + + # Divide both sides by the factors that don't contain thing. + if isinstance(e, sympy.Rel) and isinstance(e.lhs, sympy.Mul): + lhs, rhs = e.args + other = sympy.Mul(*[a for a in lhs.args if not a.has(thing)]) + + # If we can't tell whether 'other' is negative or positive, we do nothing. + # That is because we don't know whether we have mirror the operation or not. + if not (isinstance(e, INEQUALITY_TYPES) and other.is_negative is None): + # Divide both sides by 'other'. + lhs = lhs / other + rhs = rhs / other + + # If 'e' is an inequality and 'other' is negative, we have to + # mirror the expression. + if isinstance(e, INEQUALITY_TYPES) and other.is_negative: + op = mirror_rel_op(op) # type: ignore[assignment] + + assert op is not None + e = op(lhs, rhs) + + ################################################################################ + # left-hand side is FloorDiv + ################################################################################ + # + # Given the expression: a // b op c + # where 'op' is a relational operation, these rules only work if: + # - b > 0 + # - c is an integer + if ( + floordiv_inequality + and isinstance(e, sympy.Rel) + and isinstance(e.lhs, FloorDiv) + and e.lhs.divisor.is_positive + and e.rhs.is_integer + ): + # a // b == expr + # => a >= (b * expr) and a < (b * (expr + 1)) + if isinstance(expr, sympy.Eq): + numerator, denominator = e.lhs.args + return sympy.And( + sympy.Ge(numerator, (e.rhs * denominator)), # type: ignore[arg-type] + sympy.Lt(numerator, ((e.rhs + 1) * denominator)), # type: ignore[arg-type] + ) + # a // b != expr + # => a < (b * expr) or a >= (b * (expr + 1)) + if isinstance(expr, sympy.Ne): + numerator, denominator = e.lhs.args + return sympy.Or( + sympy.Lt(numerator, (e.rhs * denominator)), # type: ignore[arg-type] + sympy.Ge(numerator, ((e.rhs + 1) * denominator)), # type: ignore[arg-type] + ) + # The transformations below only work if b is positive. + # Note: we only have this information for constants. 
+ # a // b > expr => a >= b * (expr + 1) + # a // b >= expr => a >= b * expr + if isinstance(expr, (sympy.Gt, sympy.Ge)): + quotient = e.rhs if isinstance(expr, sympy.Ge) else (e.rhs + 1) # type: ignore[arg-type] + return sympy.Ge(e.lhs.args[0], (quotient * e.lhs.args[1])) # type: ignore[arg-type] + # a // b < expr => a < b * expr + # a // b <= expr => a < b * (expr + 1) + if isinstance(expr, (sympy.Lt, sympy.Le)): + quotient = e.rhs if isinstance(expr, sympy.Lt) else (e.rhs + 1) # type: ignore[arg-type] + return sympy.Lt(e.lhs.args[0], (quotient * e.lhs.args[1])) # type: ignore[arg-type] + + return e diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f4ca9a6b77d7f5f9178fa3e05f842ee324edf2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_sympy/value_ranges.py @@ -0,0 +1,782 @@ +from __future__ import annotations + +import dataclasses +import itertools +import sympy +from sympy.logic.boolalg import BooleanAtom, Boolean as SympyBoolean +import operator +import math +import logging +import torch +from typing import Dict, Optional, SupportsFloat, TypeVar, Generic, Union, overload, Callable, TYPE_CHECKING +from typing_extensions import TypeGuard + +from torch._prims_common import dtype_to_type +from .interp import sympy_interp +from .functions import Round, RoundDecimal + +log = logging.getLogger(__name__) + +__all__ = ["ValueRanges", "ValueRangeAnalysis", "bound_sympy"] + +_T = TypeVar('_T', sympy.Expr, SympyBoolean) + +class ValueRangeError(RuntimeError): + pass + + +# Like sympify, but supports less stuff, and also ensures that direct +# sympy expressions don't have free variables +def simple_sympify(e): + if isinstance(e, bool): + return sympy.true if e else sympy.false + elif isinstance(e, int): + return sympy.Integer(e) + elif isinstance(e, float): + # infinity is special; we use it to bracket integers as well + if math.isinf(e): + return sympy.oo if e > 0 else -sympy.oo + return sympy.Float(e) + elif isinstance(e, sympy.Expr): + assert e.is_number, e + # NaNs can occur when doing things like 0 * sympy.oo, but it is better + # if the operator notices this and takes care of it, because sometimes + # the NaN is inappropriate (for example, for ints, the [-oo, oo] range + # should go to zero when multiplied with [0, 0]) + assert e != sympy.nan + return e + elif isinstance(e, BooleanAtom): + return e + else: + raise AssertionError(f"not simple sympy type {type(e)}: {e}") + + +# Sympy atomics only. Unlike <=, it also works on Sympy bools. 
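Two behaviors worth seeing concretely (an illustrative sketch; simple_sympify is the module-level helper above, and sympy_generic_le is defined just below):

import math
import sympy
from torch.utils._sympy.value_ranges import simple_sympify

assert simple_sympify(3) == sympy.Integer(3)
assert simple_sympify(math.inf) == sympy.oo   # floats use oo to bracket integer ranges too
assert simple_sympify(True) is sympy.true

# For booleans, sympy_generic_le orders False <= True, so the only violation is True > False:
#   sympy_generic_le(sympy.false, sympy.true)  -> True
#   sympy_generic_le(sympy.true, sympy.false)  -> False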
+def sympy_generic_le(lower, upper): + if isinstance(lower, sympy.Expr): + assert isinstance(upper, sympy.Expr) + return lower <= upper + else: + # only negative condition is True > False + assert isinstance(lower, SympyBoolean) and isinstance(upper, SympyBoolean) + return not (lower and not upper) + + +def vr_is_bool(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[SympyBoolean]]: + return vr.is_bool + + +def vr_is_expr(vr: ValueRanges[_T]) -> TypeGuard[ValueRanges[sympy.Expr]]: + return not vr.is_bool + + +ExprIn = Union[int, float, sympy.Expr] +BoolIn = Union[bool, SympyBoolean] +AllIn = Union[ExprIn, BoolIn] +ExprFn = Callable[[sympy.Expr], sympy.Expr] +ExprFn2 = Callable[[sympy.Expr, sympy.Expr], sympy.Expr] +BoolFn = Callable[[SympyBoolean], SympyBoolean] +BoolFn2 = Callable[[SympyBoolean, SympyBoolean], SympyBoolean] +AllFn = Union[ExprFn, BoolFn] +AllFn2 = Union[ExprFn2, BoolFn2] + + +@dataclasses.dataclass(frozen=True) +class ValueRanges(Generic[_T]): + if TYPE_CHECKING: + # ruff doesn't understand circular references but mypy does + ExprVR = ValueRanges[sympy.Expr] # noqa: F821 + BoolVR = ValueRanges[SympyBoolean] # noqa: F821 + AllVR = Union[ExprVR, BoolVR] + + # Although the type signature here suggests you can pass any + # sympy expression, in practice the analysis here only works + # with constant sympy expressions + lower: _T + upper: _T + is_bool: bool + + @overload + def __init__(self: ValueRanges[sympy.Expr], lower: ExprIn, upper: ExprIn) -> None: + ... + + @overload + def __init__(self: ValueRanges[SympyBoolean], lower: BoolIn, upper: BoolIn) -> None: + ... + + def __init__(self, lower: AllIn, upper: AllIn) -> None: + lower = simple_sympify(lower) + upper = simple_sympify(upper) + # TODO: when the bounds have free variables, this may be + # nontrivial to actually verify + if not sympy_generic_le(lower, upper): + raise ValueRangeError(f"Invalid ranges [{lower}:{upper}]") + # Because this is a frozen class + object.__setattr__(self, "lower", lower) + object.__setattr__(self, "upper", upper) + object.__setattr__(self, "is_bool", isinstance(lower, SympyBoolean)) + assert isinstance(upper, SympyBoolean) == self.is_bool + + def boolify(self) -> ValueRanges[SympyBoolean]: + if vr_is_bool(self): + return self + elif self == ValueRanges.unknown(): + return ValueRanges.unknown_bool() + else: + raise AssertionError(f"not bool like {self}") + + def __contains__(self, x: AllIn) -> bool: + x = simple_sympify(x) + return sympy_generic_le(self.lower, x) and sympy_generic_le(x, self.upper) + + def issubset(self, other): + return sympy_generic_le(other.lower, self.lower) and sympy_generic_le(self.upper, other.upper) + + def tighten(self, other) -> ValueRanges: + """Given two ValueRanges, returns their intersection""" + return self & other + + # Intersection + @overload + def __and__(self: ValueRanges[sympy.Expr], other: ValueRanges[sympy.Expr]) -> ValueRanges[sympy.Expr]: + ... + + @overload + def __and__(self: ValueRanges[SympyBoolean], other: ValueRanges[SympyBoolean]) -> ValueRanges[SympyBoolean]: + ... 
+ + def __and__(self: AllVR, other: AllVR) -> AllVR: + if other == ValueRanges.unknown(): + return self + if self == ValueRanges.unknown(): + return other + assert self.is_bool == other.is_bool, (self, other) + if self.is_bool: + return ValueRanges(sympy.Or(self.lower, other.lower), sympy.And(self.upper, other.upper)) + else: + return ValueRanges(sympy.Max(self.lower, other.lower), sympy.Min(self.upper, other.upper)) + + # Union + @overload + def __or__(self: ValueRanges[sympy.Expr], other: ValueRanges[sympy.Expr]) -> ValueRanges[sympy.Expr]: + ... + + @overload + def __or__(self: ValueRanges[SympyBoolean], other: ValueRanges[SympyBoolean]) -> ValueRanges[SympyBoolean]: + ... + + def __or__(self: AllVR, other: AllVR) -> AllVR: + if ValueRanges.unknown() in (self, other): + return ValueRanges.unknown() + assert self.is_bool == other.is_bool, (self, other) + if self.is_bool: + return ValueRanges(sympy.And(self.lower, other.lower), sympy.Or(self.upper, other.upper)) + else: + return ValueRanges(sympy.Min(self.lower, other.lower), sympy.Max(self.upper, other.upper)) + + def is_singleton(self) -> bool: + return self.lower == self.upper + + # TODO: this doesn't work with bools but arguably it should + @staticmethod + def unknown() -> ValueRanges[sympy.Expr]: + return ValueRanges(-sympy.oo, sympy.oo) + + @staticmethod + def unknown_bool() -> ValueRanges[SympyBoolean]: + return ValueRanges(sympy.false, sympy.true) + + @overload + @staticmethod + # work around the fact that bool and int overlap + def wrap(arg: Union[ExprIn, ExprVR]) -> ExprVR: # type: ignore[overload-overlap] + ... + + @overload + @staticmethod + def wrap(arg: Union[BoolIn, BoolVR]) -> BoolVR: + ... + + @staticmethod + def wrap(arg: Union[AllIn, AllVR]) -> AllVR: + if isinstance(arg, ValueRanges): + return arg + # arg is either ExprIn or BoolIn, but we don't know it here + return ValueRanges(arg, arg) # type: ignore[arg-type] + + @staticmethod + def increasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + """Increasing: x <= y => f(x) <= f(y).""" + x = ValueRanges.wrap(x) + return ValueRanges(fn(x.lower), fn(x.upper)) + + @overload + @staticmethod + def decreasing_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + ... + + @overload + @staticmethod + def decreasing_map(x: Union[BoolIn, BoolVR], fn: BoolFn) -> BoolVR: + ... + + @staticmethod + def decreasing_map(x: Union[AllIn, AllVR], fn: AllFn) -> AllVR: + """Decreasing: x <= y => f(x) >= f(y).""" + x = ValueRanges.wrap(x) + # consistently either Expr or Bool, but we don't know it here + return ValueRanges(fn(x.upper), fn(x.lower)) # type: ignore[arg-type] + + @staticmethod + def monotone_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + """It's increasing or decreasing.""" + x = ValueRanges.wrap(x) + l = fn(x.lower) + u = fn(x.upper) + return ValueRanges(min(l, u), max(l, u)) + + @staticmethod + def convex_min_zero_map(x: Union[ExprIn, ExprVR], fn: ExprFn) -> ExprVR: + """Fn is convex and has a minimum at 0.""" + x = ValueRanges.wrap(x) + if 0 in x: + return ValueRanges(0, max(fn(x.lower), fn(x.upper))) + else: + return ValueRanges.monotone_map(x, fn) + + @overload + @staticmethod + def coordinatewise_increasing_map(x: Union[ExprIn, ExprVR], y: Union[ExprIn, ExprVR], fn: ExprFn2) -> ExprVR: + ... + + @overload + @staticmethod + def coordinatewise_increasing_map(x: Union[BoolIn, BoolVR], y: Union[BoolIn, BoolVR], fn: BoolFn2) -> BoolVR: + ... 
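+    # e.g. coordinatewise_increasing_map(ValueRanges(0, 2), ValueRanges(3, 5), operator.add)
+    # gives ValueRanges(3, 7): lower bounds and upper bounds are combined pointwise.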
+ + @staticmethod + def coordinatewise_increasing_map(x: Union[AllIn, AllVR], y: Union[AllIn, AllVR], fn: AllFn2) -> AllVR: + """ + It's increasing on each coordinate. + + Mathematically: + For every 1 <= i <= n and x_i <= y_i we have that + f(x1, .., xn) <= f(x1, , yi, ..., xn) + """ + x, y = ValueRanges.wrap(x), ValueRanges.wrap(y) + return ValueRanges( + fn(x.lower, y.lower), # type: ignore[arg-type] + fn(x.upper, y.upper), # type: ignore[arg-type] + ) + + @classmethod + def coordinatewise_monotone_map(cls, x, y, fn): + """It's increasing or decreasing on each coordinate.""" + x, y = cls.wrap(x), cls.wrap(y) + products = [ + fn(a, b) + for a, b in itertools.product([x.lower, x.upper], [y.lower, y.upper]) + ] + return ValueRanges(min(products), max(products)) + +class SymPyValueRangeAnalysis: + """ + It gives bounds on a SymPy operator given bounds on its arguments + See the function `bound_sympy` for a function that applies this logic to a full SymPy expression + """ + + @staticmethod + def constant(value, dtype): + # NB: value is NOT a sympy expression, it's a constant! + is_python = isinstance(value, (int, float, bool)) + assert is_python or isinstance(value, (BooleanAtom, sympy.Integer, sympy.Number)) + + # using nan makes subsequent computation throw, and for the purposes of optimization + # returning -math.inf - math.inf is equivalent to giving up + if isinstance(value, SupportsFloat) and math.isnan(value): + return ValueRanges.unknown() + + if is_python: + type_ = dtype_to_type(dtype) + value = type_(value) + else: + # We do a type check on a best-effort basis + # We don't want to force a cast to sympy.Float if the value is Rational to avoid losing precision + if dtype == torch.bool: + assert isinstance(value, BooleanAtom) + elif dtype.is_floating_point: + assert not value.is_finite or value.is_real + else: + # dtype is intXX + assert value.is_integer + + return ValueRanges.wrap(value) + + @staticmethod + def not_(a): + a = ValueRanges.wrap(a) + a = a.boolify() + assert a.is_bool + return ValueRanges.decreasing_map(a, sympy.Not) + + @staticmethod + def or_(a, b): + return ValueRanges.coordinatewise_increasing_map(a, b, sympy.Or) + + @staticmethod + def and_(a, b): + return ValueRanges.coordinatewise_increasing_map(a, b, sympy.And) + + @staticmethod + def eq(a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + if a.is_singleton() and b.is_singleton() and a.lower == b.lower: + return ValueRanges.wrap(sympy.true) + elif a.lower > b.upper or b.lower > a.upper: # ranges disjoint + return ValueRanges.wrap(sympy.false) + return ValueRanges(sympy.false, sympy.true) + + @classmethod + def ne(cls, a, b): + return cls.not_(cls.eq(a, b)) + + @classmethod + def lt(cls, a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + assert a.is_bool == b.is_bool + if a.is_bool: + return cls.and_(cls.not_(a), b) + else: + if a.upper < b.lower: + return ValueRanges.wrap(sympy.true) + elif a.lower >= b.upper: + return ValueRanges.wrap(sympy.false) + return ValueRanges(sympy.false, sympy.true) + + @classmethod + def gt(cls, a, b): + return cls.lt(b, a) + + @classmethod + def le(cls, a, b): + return cls.not_(cls.gt(a, b)) + + @classmethod + def ge(cls, a, b): + return cls.not_(cls.lt(a, b)) + + @staticmethod + def add(a, b): + return ValueRanges.coordinatewise_increasing_map(a, b, operator.add) + + @classmethod + def mul(cls, a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + + assert a.is_bool == b.is_bool + if a.is_bool: + return cls.and_(a, b) + + def safe_mul(a, b): + # Make 
unknown() * wrap(0) == wrap(0) + if a == 0: + return a + elif b == 0: + return b + else: + return a * b + + return ValueRanges.coordinatewise_monotone_map(a, b, safe_mul) + + @classmethod + def div(cls, a, b): + return cls.truediv(a, b) + + @staticmethod + def truediv(a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + if 0 in b or ((-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)): + return ValueRanges.unknown() + else: + return ValueRanges.coordinatewise_monotone_map(a, b, operator.truediv) + + @staticmethod + def floordiv(a, b): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + if 0 in b or ((-sympy.oo in a or sympy.oo in a) and (-sympy.oo in b or sympy.oo in b)): + return ValueRanges.unknown() + else: + return ValueRanges.coordinatewise_monotone_map(a, b, operator.floordiv) + + @staticmethod + def mod(x, y): + x = ValueRanges.wrap(x) + y = ValueRanges.wrap(y) + if x.is_singleton() and y.is_singleton() and y.lower != 0: + return ValueRanges.wrap(x.lower % y.lower) + if y.lower <= 0: + return ValueRanges.unknown() + return ValueRanges(0, y.upper) + + @classmethod + def modular_indexing(cls, a, b, c): + return cls.mod(cls.floordiv(a, b), c) + + @classmethod + def is_non_overlapping_and_dense_indicator(cls, *args): + return ValueRanges.unknown() + + @classmethod + def pow(cls, a, b): + def is_integer(val): + return isinstance(val, int) or ( + hasattr(val, "is_integer") and val.is_integer + ) + + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + # Not implemented yet. It's a bit tricky + # If you want to implement it, compute the partial derivatives of a ** b + # and check the ranges where the function is increasing / decreasing + # Another non-tight way of doing this is defaulting to doing noting that for a > 0, a ** b == exp(b * log(a)) + # If this second option is implemented, by carefult about the types and possible infinities here and there. 
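+        # What is implemented below only covers a singleton exponent, e.g.
+        # pow(ValueRanges(2, 3), ValueRanges(2, 2)) -> ValueRanges(4, 9); a non-singleton
+        # exponent range conservatively returns ValueRanges.unknown().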
+ if not b.is_singleton(): + return ValueRanges.unknown() + + b = b.lower + if a.is_singleton(): + a = a.lower + r = a ** b + if not r.is_finite: + return ValueRanges.unknown() + return ValueRanges.wrap(r) + + if b == 0: + if not a.lower.is_finite: + return ValueRanges.unknown() + type_ = sympy.Float if a.lower.is_real else sympy.Integer + return ValueRanges.wrap(type_(1)) + + if b < 0: + a = cls.reciprocal(a) + b = -b + + if a == ValueRanges.unknown(): + return ValueRanges.unknown() + + # Here b > 0 + if not is_integer(b): + # If the base is positive, then we're good, otherwise nothing's defined + if a.lower >= 0: + return ValueRanges.increasing_map(a, lambda x: x ** b) + else: + return ValueRanges.unknown() + else: + # b > 0 integer + if b % 2 == 0: + # x^n where n is even + return ValueRanges.convex_min_zero_map(a, lambda x: x ** b) + else: + # x^n where n is odd + return ValueRanges.increasing_map(a, lambda x: x ** b) + + @staticmethod + def reciprocal(x): + """ Needed as it's used in pow, but it won't appear on a SymPy expression """ + x = ValueRanges.wrap(x) + if 0 in x: + return ValueRanges.unknown() + else: + return ValueRanges.decreasing_map(x, lambda y: 1 / y) + + @staticmethod + def abs(x): + return ValueRanges.convex_min_zero_map(x, abs) + + @staticmethod + def exp(x): + return ValueRanges.increasing_map(x, sympy.functions.elementary.exponential.exp) + + @staticmethod + def log(x): + x = ValueRanges.wrap(x) + if x.lower <= 0: + return ValueRanges.unknown() + return ValueRanges.increasing_map(x, sympy.log) + + @classmethod + def minimum(cls, a, b): + return cls.min_or_max(a, b, sympy.Min) + + @classmethod + def maximum(cls, a, b): + return cls.min_or_max(a, b, sympy.Max) + + @staticmethod + def min_or_max(a, b, fn): + a = ValueRanges.wrap(a) + b = ValueRanges.wrap(b) + + # Performs upcasting first + def fn_(x: sympy.Expr, y: sympy.Expr) -> sympy.Expr: + # Poorman's version of upcasting in Sympy + # Inf is not a float... + if x.is_Integer and y.is_Integer: + result_type = sympy.Integer + elif x.is_rational and y.is_rational: + result_type = sympy.Rational + else: + assert x.is_real or not x.is_finite or y.is_real or not y.is_finite + result_type = sympy.Float + return fn(result_type(x), result_type(y)) + + return ValueRanges.coordinatewise_increasing_map(a, b, fn_) + + @classmethod + def floor(cls, x): + return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.floor) + + @classmethod + def ceil(cls, x): + return ValueRanges.increasing_map(x, sympy.functions.elementary.integers.ceiling) + + @classmethod + def round(cls, number, ndigits=None): + if ndigits is None: + fn = Round + else: + assert ndigits.is_singleton() + ndigits = ndigits.lower + # We can't use functools.partial here since sympy doesn't support keyword arguments, but we have to bind + # the second parameter. 
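+            # The lambda binds ndigits as RoundDecimal's second positional argument;
+            # ValueRanges.increasing_map below then applies it to both endpoints of `number`.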
+ fn = lambda number: RoundDecimal(number, ndigits) # type: ignore[misc, assignment] # noqa: E731 + + return ValueRanges.increasing_map(number, fn) + + # It's used in some models on symints + @staticmethod + def sqrt(x): + x = ValueRanges.wrap(x) + if x.lower < 0: + return ValueRanges.unknown() + return ValueRanges.increasing_map(x, sympy.sqrt) + + @staticmethod + def where(a, b, c): + b = ValueRanges.wrap(b) + c = ValueRanges.wrap(c) + a = a.boolify() + assert b.is_bool == c.is_bool + if b.is_bool: + return ValueRanges(sympy.And(b.lower, c.lower), sympy.Or(b.upper, c.upper)) + else: + return ValueRanges(sympy.Min(b.lower, c.lower), sympy.Max(b.upper, c.upper)) + + # expr_cond_pair is used to represent a single (expr, condition) pair in piecewise. + # We just return the value range of the expression and its corresponding condition as a tuple + # and defer the analysis to piecewise + @staticmethod + def expr_cond_pair(a, b): + b = b.boolify() + return (a, b) + + # piecewise function can be used to convert a SymBool to SymInt: + # int_expr = Piecewise((1, bool_expr), (0, True)), it evalutes to 1 when sym_bool is True and 0 otherwise. + # + # ranges is a sequence of (expr_range, condition_range) pairs. The range pair is constructed in expr_cond_pair. + # The ValueRange of Piecewise is just the union of all expr ranges whose condition expr can be True. + @staticmethod + def piecewise(*ranges): + init_range = None + for expr_range, cond_range in ranges: + if sympy.true in cond_range: + if init_range is None: + init_range = expr_range + else: + init_range = init_range | expr_range + return init_range + + @staticmethod + def cos(x): + # TODO: We should tighten value ranges + # If input range span is pi + 2*pi*k, then output range is (-1, 1) + # otherwise the minimum of the value of the function on the extremes + return ValueRanges(-1.0, 1.0) + + @staticmethod + def cosh(x): + x = ValueRanges.wrap(x) + if x.lower > 0: + return ValueRanges.increasing_map(x, sympy.cosh) + elif x.upper < 0: + return ValueRanges.decreasing_map(x, sympy.cosh) + return ValueRanges(0.0, sympy.oo) + + @staticmethod + def sin(x): + # TODO: We should tighten value ranges + # See details on cos + return ValueRanges(-1.0, 1.0) + + @staticmethod + def sinh(x): + return ValueRanges.increasing_map(x, sympy.sinh) + + @staticmethod + def tan(x): + return ValueRanges(-sympy.oo, sympy.oo) + + @staticmethod + def tanh(x): + return ValueRanges.increasing_map(x, sympy.tanh) + + @staticmethod + def asin(x): + x = ValueRanges.wrap(x) + if -1 <= x.lower and x.upper <= 1: + return ValueRanges.increasing_map(x, sympy.asin) + return ValueRanges.unknown() + + @staticmethod + def acos(x): + x = ValueRanges.wrap(x) + if -1 <= x.lower and x.upper <= 1: + return ValueRanges.decreasing_map(x, sympy.acos) + return ValueRanges.unknown() + + @staticmethod + def atan(x): + return ValueRanges.increasing_map(x, sympy.atan) + + +class ValueRangeAnalysis(SymPyValueRangeAnalysis): + def __init__(self): + self.name = "ValueRangeAnalysis" + boolean_operators = ( + "xor", + "logical_and", + "logical_or", + "logical_not", + ) + for op in boolean_operators: + setattr(self, op, self.bool_handler) + + @staticmethod + def bool_handler(*args, **kwargs): + # just assuming bools can have both values + return ValueRanges(sympy.false, sympy.true) # type: ignore[arg-type] + + @staticmethod + def default_handler(*args, **kwargs): + # many ops are unlikely to show up in optimizable indexing compute, + # so we dont have full coverage + return ValueRanges.unknown() + + def 
load(self, name: str, index: sympy.Expr): + return ValueRanges.unknown() + + def store(self, name, index, value, mode=None): + return + + def reduction(self, name, dtype, src_dtype, reduction_type, index, value): + return ValueRanges.unknown() + + def index_expr(self, index, dtype): + assert isinstance(index, ValueRanges) + return index + + @staticmethod + def to_dtype(x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None): + x = ValueRanges.wrap(x) + + if dtype == torch.bool: + if x.is_singleton(): + return ValueRanges.wrap(x.lower != 0) + elif 0 not in x: + return ValueRanges.wrap(sympy.true) + else: + return ValueRanges(sympy.false, sympy.true) + + def cast(x, dtype): + # dtype is int or float + if dtype.is_floating_point: + return sympy.Float(x) + else: + try: + return sympy.Integer(x) + except TypeError: + # inf cannot be cast to Integer + return x + + if x.is_bool: + if x.is_singleton(): + val = 1 if x.lower else 0 + return ValueRanges.wrap(cast(val, dtype)) + else: + return ValueRanges(cast(0, dtype), cast(1, dtype)) + else: + # int to float or float to int + return ValueRanges(cast(x.lower, dtype), cast(x.upper, dtype)) + + @staticmethod + def square(x): + return ValueRanges.convex_min_zero_map(x, lambda y: y * y) + + @staticmethod + def neg(x): + return ValueRanges.decreasing_map(x, operator.neg) + + @classmethod + def truncdiv(cls, a, b): + x = cls.truediv(a, b) + if x == ValueRanges.unknown(): + return x + + def trunc(x): + return sympy.Integer(x) if x.is_finite else x + + return ValueRanges.increasing_map(x, trunc) + + @classmethod + def sub(cls, a, b): + return cls.add(a, cls.neg(b)) + + def __getattr__(self, name): + log.debug("unhandled ValueRange op %s", name) + return self.default_handler + + +def bound_sympy(expr: sympy.Expr, ranges: Optional[Dict[sympy.Symbol, ValueRanges]] = None) -> ValueRanges: + if isinstance(expr, sympy.Number): + return ValueRanges.wrap(expr) + + ranges = ranges or {} + + # If there's a tracing context, augment available constrained ranges. 
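+    # Caller-supplied entries in `ranges` take precedence over the shape env's
+    # var_to_range, as they are merged in last below.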
+ context = torch._guards.TracingContext.try_get() + if context and context.fake_mode.shape_env: + ranges = {**context.fake_mode.shape_env.var_to_range, **ranges} + + unbounded_vars = expr.free_symbols - ranges.keys() + if unbounded_vars: + # Give some bounds to the free variables via their SymPy assumptions + # TODO A better way of doing this would be to assign them a range upon creation, as + # size variables can come with a lower bound of 2, as we specialise on 0 and 1 + unbounded_ranges: Dict[sympy.Symbol, ValueRanges] = {} + for s in unbounded_vars: + assert s.is_integer # type: ignore[attr-defined] + if s.is_positive: # type: ignore[attr-defined] + lower = 1 + elif s.is_nonnegative: # type: ignore[attr-defined] + lower = 0 + else: + lower = -math.inf # type: ignore[assignment] + unbounded_ranges[s] = ValueRanges(lower, math.inf) # type: ignore[index] + ranges = {**ranges, **unbounded_ranges} + + return sympy_interp(SymPyValueRangeAnalysis, ranges, expr) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58f3ace6c03d093337c9fa417ccbe8bc267b6c69 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__init__.py @@ -0,0 +1 @@ +from .version import __version__ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23345ff03e2278bd9ad8eead292bc85aabfa5a9a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/constants.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f60cb80affa3ce1b907424d5f97f7b9eff600a8c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/constants.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bba815a53b55e1979ce693fae8502c57e476f72e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/cuda_to_hip_mappings.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/hipify_python.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/hipify_python.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dee18ead501e0088e85ec1d4da5106b12a2edd35 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/hipify_python.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/version.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/version.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3ae067e0e1c8e5be35283d1c64a0135aef8ff2c4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/__pycache__/version.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/constants.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..fb56e7a77a3ed264905c92b7ae55009be738267b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/constants.py @@ -0,0 +1,62 @@ +"""Constants for annotations in the mapping. + +The constants defined here are used to annotate the mapping tuples in cuda_to_hip_mappings.py. +They are based on +https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/Statistics.h +and fall in three categories: 1) type of mapping, 2) API of mapping, 3) unsupported +mapping. +""" + +CONV_VERSION = 0, +CONV_INIT = 1 +CONV_DEVICE = 2 +CONV_MEM = 3 +CONV_KERN = 4 +CONV_COORD_FUNC = 5 +CONV_MATH_FUNC = 6 +CONV_DEVICE_FUNC = 7 +CONV_SPECIAL_FUNC = 8 +CONV_STREAM = 9 +CONV_EVENT = 10 +CONV_OCCUPANCY = 11 +CONV_CONTEXT = 12 +CONV_PEER = 13 +CONV_MODULE = 14 +CONV_CACHE = 15 +CONV_EXEC = 16 +CONV_ERROR = 17 +CONV_DEF = 18 +CONV_TEX = 19 +CONV_GL = 20 +CONV_GRAPHICS = 21 +CONV_SURFACE = 22 +CONV_JIT = 23 +CONV_D3D9 = 24 +CONV_D3D10 = 25 +CONV_D3D11 = 26 +CONV_VDPAU = 27 +CONV_EGL = 28 +CONV_THREAD = 29 +CONV_OTHER = 30 +CONV_INCLUDE = 31 +CONV_INCLUDE_CUDA_MAIN_H = 32 +CONV_TYPE = 33 +CONV_LITERAL = 34 +CONV_NUMERIC_LITERAL = 35 +CONV_LAST = 36 + +API_DRIVER = 37 +API_RUNTIME = 38 +API_BLAS = 39 +API_SPECIAL = 40 +API_RAND = 41 +API_LAST = 42 +API_FFT = 43 +API_RTC = 44 +API_ROCTX = 45 + +HIP_UNSUPPORTED = 46 +API_PYTORCH = 1337 +API_CAFFE2 = 1338 +API_C10 = 1339 +API_ROCMSMI = 1340 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/cuda_to_hip_mappings.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/cuda_to_hip_mappings.py new file mode 100644 index 0000000000000000000000000000000000000000..3dd5a8caf32998df18ed51ea4879b5f294c967ef --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/cuda_to_hip_mappings.py @@ -0,0 +1,8713 @@ +import collections +import os +import re +import subprocess + +from .constants import (API_BLAS, API_C10, API_CAFFE2, API_DRIVER, API_FFT, + API_PYTORCH, API_RAND, API_ROCTX, API_RTC, API_RUNTIME, + API_SPECIAL, API_ROCMSMI, CONV_CACHE, CONV_CONTEXT, CONV_D3D9, + CONV_D3D10, CONV_D3D11, CONV_DEF, CONV_DEVICE, + CONV_DEVICE_FUNC, CONV_EGL, CONV_ERROR, CONV_EVENT, + CONV_EXEC, CONV_GL, CONV_GRAPHICS, CONV_INCLUDE, + CONV_INCLUDE_CUDA_MAIN_H, CONV_INIT, CONV_JIT, + CONV_MATH_FUNC, CONV_MEM, CONV_MODULE, + CONV_NUMERIC_LITERAL, CONV_OCCUPANCY, CONV_OTHER, + CONV_PEER, CONV_SPECIAL_FUNC, CONV_STREAM, + CONV_SURFACE, CONV_TEX, CONV_THREAD, CONV_TYPE, + CONV_VDPAU, CONV_VERSION, HIP_UNSUPPORTED) + +""" Mapping of CUDA functions, include files, constants, and types to ROCm/HIP equivalents +This closely follows the implementation in hipify-clang +https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/CUDA2HipMap.cpp +and its structure. +There are different maps for fundamental names, include files, identifies, sparse, and +PyTorch specific translations. 
+Each of the entries in these maps translates a CUDA string to a tuple containing the +ROCm/HIP string, a type and API annotation and - optionally - an annotation if it is not +supported in ROCm/HIP yet. +""" + +# We need to know the ROCm version so we can conditionalize some of the mappings later. +# As of ROCm 5.0, the version is found in rocm_version.h header file under /opt/rocm/include. +rocm_path = os.environ.get('ROCM_HOME') or os.environ.get('ROCM_PATH') or "/opt/rocm" +try: + rocm_path = subprocess.check_output(["hipconfig", "--rocmpath"]).decode("utf-8") +except subprocess.CalledProcessError: + print(f"Warning: hipconfig --rocmpath failed, assuming {rocm_path}") +except (FileNotFoundError, PermissionError, NotADirectoryError): + # Do not print warning. This is okay. This file can also be imported for non-ROCm builds. + pass + +rocm_version = (0, 0, 0) +rocm_version_h = f"{rocm_path}/include/rocm-core/rocm_version.h" +if not os.path.isfile(rocm_version_h): + rocm_version_h = f"{rocm_path}/include/rocm_version.h" + +# The file could be missing due to 1) ROCm version < 5.2, or 2) no ROCm install. +if os.path.isfile(rocm_version_h): + RE_MAJOR = re.compile(r"#define\s+ROCM_VERSION_MAJOR\s+(\d+)") + RE_MINOR = re.compile(r"#define\s+ROCM_VERSION_MINOR\s+(\d+)") + RE_PATCH = re.compile(r"#define\s+ROCM_VERSION_PATCH\s+(\d+)") + major, minor, patch = 0, 0, 0 + for line in open(rocm_version_h): + match = RE_MAJOR.search(line) + if match: + major = int(match.group(1)) + match = RE_MINOR.search(line) + if match: + minor = int(match.group(1)) + match = RE_PATCH.search(line) + if match: + patch = int(match.group(1)) + rocm_version = (major, minor, patch) + +# List of math functions that should be replaced inside device code only. +MATH_TRANSPILATIONS = collections.OrderedDict( + [ + ("std::max", ("::max")), + ("std::min", ("::min")), + ("std::ceil", ("::ceil")), + ("std::floor", ("::floor")), + ("std::exp", ("::exp")), + ("std::log", ("::log")), + ("std::pow", ("::pow")), + ("std::fabs", ("::fabs")), + ("std::fmod", ("::fmod")), + ("std::remainder", ("::remainder")), + ("std::frexp", ("::frexp")), + ] +) + +CUDA_TYPE_NAME_MAP = collections.OrderedDict( + [ + ("CUresult", ("hipError_t", CONV_TYPE, API_DRIVER)), + ("cudaError_t", ("hipError_t", CONV_TYPE, API_RUNTIME)), + ("cudaError", ("hipError_t", CONV_TYPE, API_RUNTIME)), + ( + "CUDA_ARRAY3D_DESCRIPTOR", + ("HIP_ARRAY3D_DESCRIPTOR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUDA_ARRAY_DESCRIPTOR", ("HIP_ARRAY_DESCRIPTOR", CONV_TYPE, API_DRIVER)), + ("CUDA_MEMCPY2D", ("hip_Memcpy2D", CONV_TYPE, API_DRIVER)), + ("CUDA_MEMCPY3D", ("HIP_MEMCPY3D", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUDA_MEMCPY3D_PEER", + ("HIP_MEMCPY3D_PEER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_POINTER_ATTRIBUTE_P2P_TOKENS", + ( + "HIP_POINTER_ATTRIBUTE_P2P_TOKENS", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CUDA_RESOURCE_DESC", + ("HIP_RESOURCE_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_RESOURCE_VIEW_DESC", + ("HIP_RESOURCE_VIEW_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUipcEventHandle", + ("hipIpcEventHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUipcMemHandle", ("hipIpcMemHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ("CUaddress_mode", ("hipAddress_mode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUarray_cubemap_face", + ("hipArray_cubemap_face", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUarray_format", ("hipArray_format", CONV_TYPE, 
API_DRIVER)), + ("CUcomputemode", ("hipComputemode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ("CUmem_advise", ("hipMemAdvise", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUmem_range_attribute", + ("hipMemRangeAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUctx_flags", ("hipCctx_flags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ("CUdevice", ("hipDevice_t", CONV_TYPE, API_DRIVER)), + ("CUdevice_attribute_enum", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)), + ("CUdevice_attribute", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)), + ("CUpointer_attribute", ("hipPointer_attribute", CONV_TYPE, API_DRIVER)), + ("CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL", ("HIP_POINTER_ATTRIBUTE_DEVICE_ORDINAL", CONV_TYPE, API_DRIVER)), + ("CU_POINTER_ATTRIBUTE_BUFFER_ID", ("HIP_POINTER_ATTRIBUTE_BUFFER_ID", CONV_TYPE, API_DRIVER)), + ("CUdeviceptr", ("hipDeviceptr_t", CONV_TYPE, API_DRIVER)), + ("CUarray_st", ("hipArray", CONV_TYPE, API_DRIVER)), + ("CUarray", ("hipArray *", CONV_TYPE, API_DRIVER)), + ("CUdevprop_st", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)), + ("CUdevprop", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)), + ("CUfunction", ("hipFunction_t", CONV_TYPE, API_DRIVER)), + ( + "CUgraphicsResource", + ("hipGraphicsResource_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUmipmappedArray", + ("hipMipmappedArray_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUfunction_attribute", + ("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUfunction_attribute_enum", + ("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUgraphicsMapResourceFlags", + ("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUgraphicsMapResourceFlags_enum", + ("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUgraphicsRegisterFlags", + ("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUgraphicsRegisterFlags_enum", + ("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUoccupancy_flags", + ("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUoccupancy_flags_enum", + ("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUfunc_cache_enum", ("hipFuncCache", CONV_TYPE, API_DRIVER)), + ("CUfunc_cache", ("hipFuncCache", CONV_TYPE, API_DRIVER)), + ("CUipcMem_flags", ("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUipcMem_flags_enum", + ("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUjit_cacheMode", ("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUjit_cacheMode_enum", + ("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUjit_fallback", ("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUjit_fallback_enum", + ("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUjit_option", ("hipJitOption", CONV_JIT, API_DRIVER)), + ("CUjit_option_enum", ("hipJitOption", CONV_JIT, API_DRIVER)), + ("CUjit_target", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), + ("CUjit_target_enum", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), + ("CUjitInputType", ("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUjitInputType_enum", + ("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUlimit", ("hipLimit_t", CONV_TYPE, API_DRIVER)), + ("CUlimit_enum", ("hipLimit_t", CONV_TYPE, API_DRIVER)), + ( + "CUmemAttach_flags", + ("hipMemAttachFlags_t", 
CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUmemAttach_flags_enum", + ("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUmemorytype", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ("CUmemorytype_enum", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ("CUresourcetype", ("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUresourcetype_enum", + ("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUresourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)), + ("CUresourceViewFormat_enum", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)), + ("CUsharedconfig", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)), + ("CUsharedconfig_enum", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)), + ("CUcontext", ("hipCtx_t", CONV_TYPE, API_DRIVER)), + ("CUmodule", ("hipModule_t", CONV_TYPE, API_DRIVER)), + ("CUstream", ("hipStream_t", CONV_TYPE, API_DRIVER)), + ("CUstream_st", ("ihipStream_t", CONV_TYPE, API_DRIVER)), + ("CUstreamCallback", ("hipStreamCallback_t", CONV_TYPE, API_DRIVER)), + ("CUsurfObject", ("hipSurfaceObject", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUsurfref", + ("hipSurfaceReference_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUtexObject", ("hipTextureObject_t", CONV_TYPE, API_DRIVER)), + ("CUtexref", ("textureReference", CONV_TYPE, API_DRIVER)), + ("CUstream_flags", ("hipStreamFlags", CONV_TYPE, API_DRIVER)), + ( + "CUstreamWaitValue_flags", + ("hipStreamWaitValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUstreamWriteValue_flags", + ("hipStreamWriteValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUstreamBatchMemOpType", + ("hipStreamBatchMemOpType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUdevice_P2PAttribute", + ("hipDeviceP2PAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUevent", ("hipEvent_t", CONV_TYPE, API_DRIVER)), + ("CUevent_st", ("ihipEvent_t", CONV_TYPE, API_DRIVER)), + ("CUevent_flags", ("hipEventFlags", CONV_EVENT, API_DRIVER, HIP_UNSUPPORTED)), + ("CUfilter_mode", ("hipTextureFilterMode", CONV_TEX, API_DRIVER)), + ("CUGLDeviceList", ("hipGLDeviceList", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ("CUGLmap_flags", ("hipGLMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUd3d9DeviceList", + ("hipD3D9DeviceList", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d9map_flags", + ("hipD3D9MapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d9register_flags", + ("hipD3D9RegisterFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d10DeviceList", + ("hipd3d10DeviceList", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d10map_flags", + ("hipD3D10MapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d10register_flags", + ("hipD3D10RegisterFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUd3d11DeviceList", + ("hipd3d11DeviceList", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUeglStreamConnection_st", + ("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUeglStreamConnection", + ("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "libraryPropertyType_t", + ("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "libraryPropertyType", + ("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaStreamCallback_t", ("hipStreamCallback_t", CONV_TYPE, API_RUNTIME)), + ("cudaArray", ("hipArray", CONV_MEM, 
API_RUNTIME)), + ("cudaArray_t", ("hipArray_t", CONV_MEM, API_RUNTIME)), + ("cudaArray_const_t", ("hipArray_const_t", CONV_MEM, API_RUNTIME)), + ("cudaMipmappedArray_t", ("hipMipmappedArray_t", CONV_MEM, API_RUNTIME)), + ( + "cudaMipmappedArray_const_t", + ("hipMipmappedArray_const_t", CONV_MEM, API_RUNTIME), + ), + ("cudaArrayDefault", ("hipArrayDefault", CONV_MEM, API_RUNTIME)), + ("cudaArrayLayered", ("hipArrayLayered", CONV_MEM, API_RUNTIME)), + ( + "cudaArraySurfaceLoadStore", + ("hipArraySurfaceLoadStore", CONV_MEM, API_RUNTIME), + ), + ("cudaArrayCubemap", ("hipArrayCubemap", CONV_MEM, API_RUNTIME)), + ("cudaArrayTextureGather", ("hipArrayTextureGather", CONV_MEM, API_RUNTIME)), + ("cudaMemoryAdvise", ("hipMemoryAdvise", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaMemRangeAttribute", + ("hipMemRangeAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemcpyKind", ("hipMemcpyKind", CONV_MEM, API_RUNTIME)), + ("cudaMemoryType", ("hipMemoryType", CONV_MEM, API_RUNTIME)), + ("cudaExtent", ("hipExtent", CONV_MEM, API_RUNTIME)), + ("cudaPitchedPtr", ("hipPitchedPtr", CONV_MEM, API_RUNTIME)), + ("cudaPos", ("hipPos", CONV_MEM, API_RUNTIME)), + ("cudaEvent_t", ("hipEvent_t", CONV_TYPE, API_RUNTIME)), + ("cudaStream_t", ("hipStream_t", CONV_TYPE, API_RUNTIME)), + ("cudaPointerAttributes", ("hipPointerAttribute_t", CONV_TYPE, API_RUNTIME)), + ("cudaDeviceAttr", ("hipDeviceAttribute_t", CONV_TYPE, API_RUNTIME)), + ("cudaDeviceProp", ("hipDeviceProp_t", CONV_TYPE, API_RUNTIME)), + ( + "cudaDeviceP2PAttr", + ("hipDeviceP2PAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaComputeMode", + ("hipComputeMode", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaFuncCache", ("hipFuncCache_t", CONV_CACHE, API_RUNTIME)), + ( + "cudaFuncAttributes", + ("hipFuncAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaSharedMemConfig", ("hipSharedMemConfig", CONV_TYPE, API_RUNTIME)), + ("cudaLimit", ("hipLimit_t", CONV_TYPE, API_RUNTIME)), + ("cudaOutputMode", ("hipOutputMode", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)), + ("cudaTextureReadMode", ("hipTextureReadMode", CONV_TEX, API_RUNTIME)), + ("cudaTextureFilterMode", ("hipTextureFilterMode", CONV_TEX, API_RUNTIME)), + ("cudaChannelFormatKind", ("hipChannelFormatKind", CONV_TEX, API_RUNTIME)), + ("cudaChannelFormatDesc", ("hipChannelFormatDesc", CONV_TEX, API_RUNTIME)), + ("cudaResourceDesc", ("hipResourceDesc", CONV_TEX, API_RUNTIME)), + ("cudaResourceViewDesc", ("hipResourceViewDesc", CONV_TEX, API_RUNTIME)), + ("cudaTextureDesc", ("hipTextureDesc", CONV_TEX, API_RUNTIME)), + ( + "surfaceReference", + ("hipSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaTextureObject_t", ("hipTextureObject_t", CONV_TEX, API_RUNTIME)), + ("cudaResourceType", ("hipResourceType", CONV_TEX, API_RUNTIME)), + ("cudaResourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_RUNTIME)), + ("cudaTextureAddressMode", ("hipTextureAddressMode", CONV_TEX, API_RUNTIME)), + ( + "cudaSurfaceBoundaryMode", + ("hipSurfaceBoundaryMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaSurfaceFormatMode", + ("hipSurfaceFormatMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaTextureType1D", ("hipTextureType1D", CONV_TEX, API_RUNTIME)), + ("cudaTextureType2D", ("hipTextureType2D", CONV_TEX, API_RUNTIME)), + ("cudaTextureType3D", ("hipTextureType3D", CONV_TEX, API_RUNTIME)), + ("cudaTextureTypeCubemap", ("hipTextureTypeCubemap", CONV_TEX, API_RUNTIME)), + ( + 
"cudaTextureType1DLayered", + ("hipTextureType1DLayered", CONV_TEX, API_RUNTIME), + ), + ( + "cudaTextureType2DLayered", + ("hipTextureType2DLayered", CONV_TEX, API_RUNTIME), + ), + ( + "cudaTextureTypeCubemapLayered", + ("hipTextureTypeCubemapLayered", CONV_TEX, API_RUNTIME), + ), + ("cudaIpcEventHandle_t", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)), + ("cudaIpcEventHandle_st", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)), + ("cudaIpcMemHandle_t", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)), + ("cudaIpcMemHandle_st", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)), + ( + "cudaGraphicsCubeFace", + ("hipGraphicsCubeFace", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsMapFlags", + ("hipGraphicsMapFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsRegisterFlags", + ("hipGraphicsRegisterFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLDeviceList", + ("hipGLDeviceList", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaGLMapFlags", ("hipGLMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaD3D9DeviceList", + ("hipD3D9DeviceList", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9MapFlags", + ("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9RegisterFlags", + ("hipD3D9RegisterFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10DeviceList", + ("hipd3d10DeviceList", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10MapFlags", + ("hipD3D10MapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10RegisterFlags", + ("hipD3D10RegisterFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D11DeviceList", + ("hipd3d11DeviceList", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaEglStreamConnection", + ("hipEglStreamConnection", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cublasHandle_t", ("hipblasHandle_t", CONV_TYPE, API_BLAS)), + ("cublasOperation_t", ("hipblasOperation_t", CONV_TYPE, API_BLAS)), + ("cublasStatus_t", ("hipblasStatus_t", CONV_TYPE, API_BLAS)), + ("cublasFillMode_t", ("hipblasFillMode_t", CONV_TYPE, API_BLAS)), + ("cublasDiagType_t", ("hipblasDiagType_t", CONV_TYPE, API_BLAS)), + ("cublasSideMode_t", ("hipblasSideMode_t", CONV_TYPE, API_BLAS)), + ("cublasPointerMode_t", ("hipblasPointerMode_t", CONV_TYPE, API_BLAS)), + ( + "cublasAtomicsMode_t", + ("hipblasAtomicsMode_t", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDataType_t", + ("hipblasDatatype_t", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED), + ), + ("curandStatus", ("hiprandStatus_t", CONV_TYPE, API_RAND)), + ("curandStatus_t", ("hiprandStatus_t", CONV_TYPE, API_RAND)), + ("curandRngType", ("hiprandRngType_t", CONV_TYPE, API_RAND)), + ("curandRngType_t", ("hiprandRngType_t", CONV_TYPE, API_RAND)), + ("curandGenerator_st", ("hiprandGenerator_st", CONV_TYPE, API_RAND)), + ("curandGenerator_t", ("hiprandGenerator_t", CONV_TYPE, API_RAND)), + ( + "curandDirectionVectorSet", + ("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDirectionVectorSet_t", + ("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ("curandOrdering", ("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)), + ( + "curandOrdering_t", + ("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistribution_st", + ("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2V_st", + ("hiprandDistribution_st", CONV_TYPE, 
API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistribution_t", + ("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2V_t", + ("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistributionShift_st", + ("hiprandDistributionShift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistributionShift_t", + ("hiprandDistributionShift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistributionM2Shift_st", + ("hiprandDistributionM2Shift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDistributionM2Shift_t", + ("hiprandDistributionM2Shift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2_st", + ("hiprandHistogramM2_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2_t", + ("hiprandHistogramM2_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2K_st", + ("hiprandHistogramM2K_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandHistogramM2K_t", + ("hiprandHistogramM2K_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandDiscreteDistribution_st", + ("hiprandDiscreteDistribution_st", CONV_TYPE, API_RAND), + ), + ( + "curandDiscreteDistribution_t", + ("hiprandDiscreteDistribution_t", CONV_TYPE, API_RAND), + ), + ("curandMethod", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)), + ("curandMethod_t", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)), + ( + "curandDirectionVectors32_t", + ("hiprandDirectionVectors32_t", CONV_TYPE, API_RAND), + ), + ( + "curandDirectionVectors64_t", + ("hiprandDirectionVectors64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ("curandStateMtgp32_t", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)), + ("curandStateMtgp32", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)), + ( + "curandStateScrambledSobol64_t", + ("hiprandStateScrambledSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandStateSobol64_t", + ("hiprandStateSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandStateScrambledSobol32_t", + ("hiprandStateScrambledSobol32_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), + ), + ("curandStateSobol32_t", ("hiprandStateSobol32_t", CONV_TYPE, API_RAND)), + ("curandStateMRG32k3a_t", ("hiprandStateMRG32k3a_t", CONV_TYPE, API_RAND)), + ( + "curandStatePhilox4_32_10_t", + ("hiprandStatePhilox4_32_10_t", CONV_TYPE, API_RAND), + ), + ("curandStateXORWOW_t", ("hiprandStateXORWOW_t", CONV_TYPE, API_RAND)), + ("curandState_t", ("hiprandState_t", CONV_TYPE, API_RAND)), + ("curandState", ("hiprandState_t", CONV_TYPE, API_RAND)), + ("CUuuid", ("hipUUID", CONV_TYPE, API_RUNTIME)), + ("cudaGraph_t", ("hipGraph_t", CONV_TYPE, API_RAND)), + ("cudaGraphExec_t", ("hipGraphExec_t", CONV_TYPE, API_RAND)), + ] +) + +CUDA_INCLUDE_MAP = collections.OrderedDict( + [ + # since pytorch uses "\b{pattern}\b" as the actual re pattern, + # patterns listed here have to begin and end with alnum chars + ( + "include " to differentiate + ("", ("", CONV_INCLUDE, API_RUNTIME)), + ("nvrtc.h", ("hip/hiprtc.h", CONV_INCLUDE, API_RTC)), + ("thrust/system/cuda", ("thrust/system/hip", CONV_INCLUDE, API_BLAS)), + ("cub/util_allocator.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/block/block_reduce.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/cub.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/device/device_run_length_encode.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/block/block_load.cuh", ("hipcub/hipcub.hpp", 
CONV_INCLUDE, API_BLAS)), + ("cub/device/device_radix_sort.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/device/device_reduce.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/device/device_scan.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("cub/device/device_select.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), + ("nvToolsExt.h", ("roctracer/roctx.h", CONV_INCLUDE, API_ROCTX)), + ("nvml.h", ("rocm_smi/rocm_smi.h", CONV_INCLUDE, API_ROCMSMI)), + ] +) + +CUDA_IDENTIFIER_MAP = collections.OrderedDict( + [ + ("__CUDACC__", ("__HIPCC__", CONV_DEF, API_RUNTIME)), + ( + "CUDA_ERROR_INVALID_CONTEXT", + ("hipErrorInvalidContext", CONV_TYPE, API_DRIVER), + ), + ( + "CUDA_ERROR_CONTEXT_ALREADY_CURRENT", + ("hipErrorContextAlreadyCurrent", CONV_TYPE, API_DRIVER), + ), + ( + "CUDA_ERROR_ARRAY_IS_MAPPED", + ("hipErrorArrayIsMapped", CONV_TYPE, API_DRIVER), + ), + ("CUDA_ERROR_ALREADY_MAPPED", ("hipErrorAlreadyMapped", CONV_TYPE, API_DRIVER)), + ( + "CUDA_ERROR_ALREADY_ACQUIRED", + ("hipErrorAlreadyAcquired", CONV_TYPE, API_DRIVER), + ), + ("CUDA_ERROR_NOT_MAPPED", ("hipErrorNotMapped", CONV_TYPE, API_DRIVER)), + ( + "CUDA_ERROR_NOT_MAPPED_AS_ARRAY", + ("hipErrorNotMappedAsArray", CONV_TYPE, API_DRIVER), + ), + ( + "CUDA_ERROR_NOT_MAPPED_AS_POINTER", + ("hipErrorNotMappedAsPointer", CONV_TYPE, API_DRIVER), + ), + ( + "CUDA_ERROR_CONTEXT_ALREADY_IN_USE", + ("hipErrorContextAlreadyInUse", CONV_TYPE, API_DRIVER), + ), + ("CUDA_ERROR_INVALID_SOURCE", ("hipErrorInvalidSource", CONV_TYPE, API_DRIVER)), + ("CUDA_ERROR_FILE_NOT_FOUND", ("hipErrorFileNotFound", CONV_TYPE, API_DRIVER)), + ("CUDA_ERROR_NOT_FOUND", ("hipErrorNotFound", CONV_TYPE, API_DRIVER)), + ( + "CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING", + ( + "hipErrorLaunchIncompatibleTexturing", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE", + ("hipErrorPrimaryContextActive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_CONTEXT_IS_DESTROYED", + ("hipErrorContextIsDestroyed", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_NOT_PERMITTED", + ("hipErrorNotPermitted", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_NOT_SUPPORTED", + ("hipErrorNotSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorMissingConfiguration", + ("hipErrorMissingConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorPriorLaunchFailure", + ("hipErrorPriorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidDeviceFunction", + ("hipErrorInvalidDeviceFunction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidConfiguration", + ("hipErrorInvalidConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidPitchValue", + ("hipErrorInvalidPitchValue", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidSymbol", + ("hipErrorInvalidSymbol", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidHostPointer", + ("hipErrorInvalidHostPointer", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidDevicePointer", + ("hipErrorInvalidDevicePointer", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaErrorInvalidTexture", + ("hipErrorInvalidTexture", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidTextureBinding", + ("hipErrorInvalidTextureBinding", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidChannelDescriptor", + ( + 
"hipErrorInvalidChannelDescriptor", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaErrorInvalidMemcpyDirection", + ("hipErrorInvalidMemcpyDirection", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorAddressOfConstant", + ("hipErrorAddressOfConstant", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorTextureFetchFailed", + ("hipErrorTextureFetchFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorTextureNotBound", + ("hipErrorTextureNotBound", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorSynchronizationError", + ("hipErrorSynchronizationError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidFilterSetting", + ("hipErrorInvalidFilterSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidNormSetting", + ("hipErrorInvalidNormSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorMixedDeviceExecution", + ("hipErrorMixedDeviceExecution", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorNotYetImplemented", + ("hipErrorNotYetImplemented", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorMemoryValueTooLarge", + ("hipErrorMemoryValueTooLarge", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInsufficientDriver", + ("hipErrorInsufficientDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorSetOnActiveProcess", + ("hipErrorSetOnActiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidSurface", + ("hipErrorInvalidSurface", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorDuplicateVariableName", + ("hipErrorDuplicateVariableName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorDuplicateTextureName", + ("hipErrorDuplicateTextureName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorDuplicateSurfaceName", + ("hipErrorDuplicateSurfaceName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorDevicesUnavailable", + ("hipErrorDevicesUnavailable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorIncompatibleDriverContext", + ( + "hipErrorIncompatibleDriverContext", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaErrorDeviceAlreadyInUse", + ("hipErrorDeviceAlreadyInUse", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorLaunchMaxDepthExceeded", + ("hipErrorLaunchMaxDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorLaunchFileScopedTex", + ("hipErrorLaunchFileScopedTex", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorLaunchFileScopedSurf", + ("hipErrorLaunchFileScopedSurf", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorSyncDepthExceeded", + ("hipErrorSyncDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorLaunchPendingCountExceeded", + ( + "hipErrorLaunchPendingCountExceeded", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaErrorNotPermitted", + ("hipErrorNotPermitted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorNotSupported", + ("hipErrorNotSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorStartupFailure", + ("hipErrorStartupFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaErrorApiFailureBase", + ("hipErrorApiFailureBase", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("CUDA_SUCCESS", ("hipSuccess", CONV_TYPE, API_DRIVER)), + ("cudaSuccess", ("hipSuccess", CONV_TYPE, API_RUNTIME)), + 
("CUDA_ERROR_INVALID_VALUE", ("hipErrorInvalidValue", CONV_TYPE, API_DRIVER)), + ("cudaErrorInvalidValue", ("hipErrorInvalidValue", CONV_TYPE, API_RUNTIME)), + ( + "CUDA_ERROR_OUT_OF_MEMORY", + ("hipErrorMemoryAllocation", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorMemoryAllocation", + ("hipErrorMemoryAllocation", CONV_TYPE, API_RUNTIME), + ), + ( + "CUDA_ERROR_NOT_INITIALIZED", + ("hipErrorNotInitialized", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorInitializationError", + ("hipErrorInitializationError", CONV_TYPE, API_RUNTIME), + ), + ("CUDA_ERROR_DEINITIALIZED", ("hipErrorDeinitialized", CONV_TYPE, API_DRIVER)), + ( + "cudaErrorCudartUnloading", + ("hipErrorDeinitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PROFILER_DISABLED", + ("hipErrorProfilerDisabled", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorProfilerDisabled", + ("hipErrorProfilerDisabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PROFILER_NOT_INITIALIZED", + ("hipErrorProfilerNotInitialized", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorProfilerNotInitialized", + ("hipErrorProfilerNotInitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PROFILER_ALREADY_STARTED", + ("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorProfilerAlreadyStarted", + ("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PROFILER_ALREADY_STOPPED", + ("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorProfilerAlreadyStopped", + ("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("CUDA_ERROR_NO_DEVICE", ("hipErrorNoDevice", CONV_TYPE, API_DRIVER)), + ("cudaErrorNoDevice", ("hipErrorNoDevice", CONV_TYPE, API_RUNTIME)), + ("CUDA_ERROR_INVALID_DEVICE", ("hipErrorInvalidDevice", CONV_TYPE, API_DRIVER)), + ("cudaErrorInvalidDevice", ("hipErrorInvalidDevice", CONV_TYPE, API_RUNTIME)), + ("CUDA_ERROR_INVALID_IMAGE", ("hipErrorInvalidImage", CONV_TYPE, API_DRIVER)), + ( + "cudaErrorInvalidKernelImage", + ("hipErrorInvalidImage", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("CUDA_ERROR_MAP_FAILED", ("hipErrorMapFailed", CONV_TYPE, API_DRIVER)), + ( + "cudaErrorMapBufferObjectFailed", + ("hipErrorMapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("CUDA_ERROR_UNMAP_FAILED", ("hipErrorUnmapFailed", CONV_TYPE, API_DRIVER)), + ( + "cudaErrorUnmapBufferObjectFailed", + ("hipErrorUnmapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_NO_BINARY_FOR_GPU", + ("hipErrorNoBinaryForGpu", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorNoKernelImageForDevice", + ("hipErrorNoBinaryForGpu", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_ECC_UNCORRECTABLE", + ("hipErrorECCNotCorrectable", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorECCUncorrectable", + ("hipErrorECCNotCorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_UNSUPPORTED_LIMIT", + ("hipErrorUnsupportedLimit", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorUnsupportedLimit", + ("hipErrorUnsupportedLimit", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PEER_ACCESS_UNSUPPORTED", + ("hipErrorPeerAccessUnsupported", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorPeerAccessUnsupported", + ("hipErrorPeerAccessUnsupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_INVALID_PTX", + ("hipErrorInvalidKernelFile", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorInvalidPtx", + 
("hipErrorInvalidKernelFile", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_INVALID_GRAPHICS_CONTEXT", + ("hipErrorInvalidGraphicsContext", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorInvalidGraphicsContext", + ("hipErrorInvalidGraphicsContext", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_NVLINK_UNCORRECTABLE", + ("hipErrorNvlinkUncorrectable", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorNvlinkUncorrectable", + ("hipErrorNvlinkUncorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND", + ("hipErrorSharedObjectSymbolNotFound", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorSharedObjectSymbolNotFound", + ( + "hipErrorSharedObjectSymbolNotFound", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "CUDA_ERROR_SHARED_OBJECT_INIT_FAILED", + ("hipErrorSharedObjectInitFailed", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorSharedObjectInitFailed", + ("hipErrorSharedObjectInitFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_OPERATING_SYSTEM", + ("hipErrorOperatingSystem", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorOperatingSystem", + ("hipErrorOperatingSystem", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_INVALID_HANDLE", + ("hipErrorInvalidResourceHandle", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorInvalidResourceHandle", + ("hipErrorInvalidResourceHandle", CONV_TYPE, API_RUNTIME), + ), + ("CUDA_ERROR_NOT_READY", ("hipErrorNotReady", CONV_TYPE, API_DRIVER)), + ("cudaErrorNotReady", ("hipErrorNotReady", CONV_TYPE, API_RUNTIME)), + ( + "CUDA_ERROR_ILLEGAL_ADDRESS", + ("hipErrorIllegalAddress", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorIllegalAddress", + ("hipErrorIllegalAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES", + ("hipErrorLaunchOutOfResources", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorLaunchOutOfResources", + ("hipErrorLaunchOutOfResources", CONV_TYPE, API_RUNTIME), + ), + ("CUDA_ERROR_LAUNCH_TIMEOUT", ("hipErrorLaunchTimeOut", CONV_TYPE, API_DRIVER)), + ( + "cudaErrorLaunchTimeout", + ("hipErrorLaunchTimeOut", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED", + ("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorPeerAccessAlreadyEnabled", + ("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_RUNTIME), + ), + ( + "CUDA_ERROR_PEER_ACCESS_NOT_ENABLED", + ("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorPeerAccessNotEnabled", + ("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_RUNTIME), + ), + ( + "CUDA_ERROR_ASSERT", + ("hipErrorAssert", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorAssert", + ("hipErrorAssert", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_TOO_MANY_PEERS", + ("hipErrorTooManyPeers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorTooManyPeers", + ("hipErrorTooManyPeers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED", + ("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorHostMemoryAlreadyRegistered", + ("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_RUNTIME), + ), + ( + "CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED", + ("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_DRIVER), + ), + ( + "cudaErrorHostMemoryNotRegistered", + ("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_RUNTIME), + ), + ( + 
"CUDA_ERROR_HARDWARE_STACK_ERROR", + ("hipErrorHardwareStackError", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorHardwareStackError", + ("hipErrorHardwareStackError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_ILLEGAL_INSTRUCTION", + ("hipErrorIllegalInstruction", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorIllegalInstruction", + ("hipErrorIllegalInstruction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_MISALIGNED_ADDRESS", + ("hipErrorMisalignedAddress", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorMisalignedAddress", + ("hipErrorMisalignedAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_INVALID_ADDRESS_SPACE", + ("hipErrorInvalidAddressSpace", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidAddressSpace", + ("hipErrorInvalidAddressSpace", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_INVALID_PC", + ("hipErrorInvalidPc", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorInvalidPc", + ("hipErrorInvalidPc", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_LAUNCH_FAILED", + ("hipErrorLaunchFailure", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cudaErrorLaunchFailure", + ("hipErrorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "CUDA_ERROR_UNKNOWN", + ("hipErrorUnknown", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cudaErrorUnknown", ("hipErrorUnknown", CONV_TYPE, API_RUNTIME)), + ( + "CU_TR_ADDRESS_MODE_WRAP", + ("HIP_TR_ADDRESS_MODE_WRAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TR_ADDRESS_MODE_CLAMP", + ("HIP_TR_ADDRESS_MODE_CLAMP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TR_ADDRESS_MODE_MIRROR", + ("HIP_TR_ADDRESS_MODE_MIRROR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TR_ADDRESS_MODE_BORDER", + ("HIP_TR_ADDRESS_MODE_BORDER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_POSITIVE_X", + ("HIP_CUBEMAP_FACE_POSITIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_NEGATIVE_X", + ("HIP_CUBEMAP_FACE_NEGATIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_POSITIVE_Y", + ("HIP_CUBEMAP_FACE_POSITIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_NEGATIVE_Y", + ("HIP_CUBEMAP_FACE_NEGATIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_POSITIVE_Z", + ("HIP_CUBEMAP_FACE_POSITIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CUBEMAP_FACE_NEGATIVE_Z", + ("HIP_CUBEMAP_FACE_NEGATIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_AD_FORMAT_UNSIGNED_INT8", + ("HIP_AD_FORMAT_UNSIGNED_INT8", CONV_TYPE, API_DRIVER), + ), + ( + "CU_AD_FORMAT_UNSIGNED_INT16", + ("HIP_AD_FORMAT_UNSIGNED_INT16", CONV_TYPE, API_DRIVER), + ), + ( + "CU_AD_FORMAT_UNSIGNED_INT32", + ("HIP_AD_FORMAT_UNSIGNED_INT32", CONV_TYPE, API_DRIVER), + ), + ( + "CU_AD_FORMAT_SIGNED_INT8", + ("HIP_AD_FORMAT_SIGNED_INT8", CONV_TYPE, API_DRIVER), + ), + ( + "CU_AD_FORMAT_SIGNED_INT16", + ("HIP_AD_FORMAT_SIGNED_INT16", CONV_TYPE, API_DRIVER), + ), + ( + "CU_AD_FORMAT_SIGNED_INT32", + ("HIP_AD_FORMAT_SIGNED_INT32", CONV_TYPE, API_DRIVER), + ), + ("CU_AD_FORMAT_HALF", ("HIP_AD_FORMAT_HALF", CONV_TYPE, API_DRIVER)), + ("CU_AD_FORMAT_FLOAT", ("HIP_AD_FORMAT_FLOAT", CONV_TYPE, API_DRIVER)), + ( + "CU_COMPUTEMODE_DEFAULT", + ("hipComputeModeDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_COMPUTEMODE_EXCLUSIVE", + 
("hipComputeModeExclusive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_COMPUTEMODE_PROHIBITED", + ("hipComputeModeProhibited", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_COMPUTEMODE_EXCLUSIVE_PROCESS", + ("hipComputeModeExclusiveProcess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ADVISE_SET_READ_MOSTLY", + ("hipMemAdviseSetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ADVISE_UNSET_READ_MOSTLY", + ("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ADVISE_SET_PREFERRED_LOCATION", + ( + "hipMemAdviseSetPreferredLocation", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION", + ( + "hipMemAdviseUnsetPreferredLocation", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_MEM_ADVISE_SET_ACCESSED_BY", + ("hipMemAdviseSetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ADVISE_UNSET_ACCESSED_BY", + ("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY", + ("hipMemRangeAttributeReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION", + ( + "hipMemRangeAttributePreferredLocation", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY", + ("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION", + ( + "hipMemRangeAttributeLastPrefetchLocation", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_CTX_SCHED_AUTO", + ("HIP_CTX_SCHED_AUTO", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_SCHED_SPIN", + ("HIP_CTX_SCHED_SPIN", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_SCHED_YIELD", + ("HIP_CTX_SCHED_YIELD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_SCHED_BLOCKING_SYNC", + ("HIP_CTX_SCHED_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_BLOCKING_SYNC", + ("HIP_CTX_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_SCHED_MASK", + ("HIP_CTX_SCHED_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_MAP_HOST", + ("HIP_CTX_MAP_HOST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_LMEM_RESIZE_TO_MAX", + ("HIP_CTX_LMEM_RESIZE_TO_MAX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_CTX_FLAGS_MASK", + ("HIP_CTX_FLAGS_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_LAUNCH_PARAM_BUFFER_POINTER", + ("HIP_LAUNCH_PARAM_BUFFER_POINTER", CONV_TYPE, API_DRIVER), + ), + ( + "CU_LAUNCH_PARAM_BUFFER_SIZE", + ("HIP_LAUNCH_PARAM_BUFFER_SIZE", CONV_TYPE, API_DRIVER), + ), + ("CU_LAUNCH_PARAM_END", ("HIP_LAUNCH_PARAM_END", CONV_TYPE, API_DRIVER)), + ( + "CU_IPC_HANDLE_SIZE", + ("HIP_IPC_HANDLE_SIZE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTALLOC_DEVICEMAP", + ("HIP_MEMHOSTALLOC_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTALLOC_PORTABLE", + ("HIP_MEMHOSTALLOC_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTALLOC_WRITECOMBINED", + ("HIP_MEMHOSTALLOC_WRITECOMBINED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTREGISTER_DEVICEMAP", + ("HIP_MEMHOSTREGISTER_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMHOSTREGISTER_IOMEMORY", + ("HIP_MEMHOSTREGISTER_IOMEMORY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + 
"CU_MEMHOSTREGISTER_PORTABLE", + ("HIP_MEMHOSTREGISTER_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_PARAM_TR_DEFAULT", + ("HIP_PARAM_TR_DEFAULT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_LEGACY", + ("HIP_STREAM_LEGACY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_PER_THREAD", + ("HIP_STREAM_PER_THREAD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TRSA_OVERRIDE_FORMAT", + ("HIP_TRSA_OVERRIDE_FORMAT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TRSF_NORMALIZED_COORDINATES", + ("HIP_TRSF_NORMALIZED_COORDINATES", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TRSF_READ_AS_INTEGER", + ("HIP_TRSF_READ_AS_INTEGER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CU_TRSF_SRGB", ("HIP_TRSF_SRGB", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CUDA_ARRAY3D_2DARRAY", + ("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ARRAY3D_CUBEMAP", + ("HIP_ARRAY3D_CUBEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ARRAY3D_DEPTH_TEXTURE", + ("HIP_ARRAY3D_DEPTH_TEXTURE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ARRAY3D_LAYERED", + ("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ARRAY3D_SURFACE_LDST", + ("HIP_ARRAY3D_SURFACE_LDST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CUDA_ARRAY3D_TEXTURE_GATHER", + ("HIP_ARRAY3D_TEXTURE_GATHER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK", + ( + "hipDeviceAttributeMaxThreadsPerBlock", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X", + ("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y", + ("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z", + ("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X", + ("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y", + ("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z", + ("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK", + ( + "hipDeviceAttributeMaxSharedMemoryPerBlock", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK", + ( + "hipDeviceAttributeMaxSharedMemoryPerBlock", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY", + ( + "hipDeviceAttributeTotalConstantMemory", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_WARP_SIZE", + ("hipDeviceAttributeWarpSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_PITCH", + ("hipDeviceAttributeMaxPitch", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK", + ( + "hipDeviceAttributeMaxRegistersPerBlock", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK", + ( + "hipDeviceAttributeMaxRegistersPerBlock", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_CLOCK_RATE", + ("hipDeviceAttributeClockRate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + 
"CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT", + ( + "hipDeviceAttributeTextureAlignment", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_GPU_OVERLAP", + ( + "hipDeviceAttributeAsyncEngineCount", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT", + ( + "hipDeviceAttributeMultiprocessorCount", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT", + ( + "hipDeviceAttributeKernelExecTimeout", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_INTEGRATED", + ("hipDeviceAttributeIntegrated", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY", + ( + "hipDeviceAttributeCanMapHostMemory", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_COMPUTE_MODE", + ("hipDeviceAttributeComputeMode", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH", + ( + "hipDeviceAttributeMaxTexture1DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH", + ( + "hipDeviceAttributeMaxTexture3DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT", + ( + "hipDeviceAttributeMaxTexture3DHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH", + ( + "hipDeviceAttributeMaxTexture3DDepth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DLayeredHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxTexture2DLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DLayeredHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES", + ( + "hipDeviceAttributeMaxTexture2DLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT", + ( + "hipDeviceAttributeSurfaceAlignment", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS", + ("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_ECC_ENABLED", + ("hipDeviceAttributeEccEnabled", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_PCI_BUS_ID", + ("hipDeviceAttributePciBusId", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID", + ("hipDeviceAttributePciDeviceId", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_TCC_DRIVER", + 
("hipDeviceAttributeTccDriver", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE", + ( + "hipDeviceAttributeMemoryClockRate", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH", + ("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE", + ("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR", + ("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT", + ( + "hipDeviceAttributeAsyncEngineCount", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING", + ( + "hipDeviceAttributeUnifiedAddressing", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxTexture1DLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxTexture1DLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER", + ( + "hipDeviceAttributeCanTex2DGather", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DGatherWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DGatherHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE", + ( + "hipDeviceAttributeMaxTexture3DWidthAlternate", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE", + ( + "hipDeviceAttributeMaxTexture3DHeightAlternate", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE", + ( + "hipDeviceAttributeMaxTexture3DDepthAlternate", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID", + ("hipDeviceAttributePciDomainId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT", + ( + "hipDeviceAttributeTexturePitchAlignment", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH", + ( + "hipDeviceAttributeMaxTextureCubemapWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxTextureCubemapLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxTextureCubemapLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH", + ( + "hipDeviceAttributeMaxSurface1DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH", + ( + "hipDeviceAttributeMaxSurface2DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT", + ( + "hipDeviceAttributeMaxSurface2DHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + 
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH", + ( + "hipDeviceAttributeMaxSurface3DWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT", + ( + "hipDeviceAttributeMaxSurface3DHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH", + ( + "hipDeviceAttributeMaxSurface3DDepth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxSurface1DLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxSurface1DLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxSurface2DLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT", + ( + "hipDeviceAttributeMaxSurface2DLayeredHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxSurface2DLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH", + ( + "hipDeviceAttributeMaxSurfaceCubemapWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH", + ( + "hipDeviceAttributeMaxSurfaceCubemapLayeredWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS", + ( + "hipDeviceAttributeMaxSurfaceCubemapLayeredLayers", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH", + ( + "hipDeviceAttributeMaxTexture1DLinearWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DLinearWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DLinearHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH", + ( + "hipDeviceAttributeMaxTexture2DLinearPitch", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH", + ( + "hipDeviceAttributeMaxTexture2DMipmappedWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT", + ( + "hipDeviceAttributeMaxTexture2DMipmappedHeight", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR", + ("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR", + ("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH", + ( + "hipDeviceAttributeMaxTexture1DMipmappedWidth", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED", + ( + "hipDeviceAttributeStreamPrioritiesSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED", + ( + "hipDeviceAttributeGlobalL1CacheSupported", 
+ CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED", + ( + "hipDeviceAttributeLocalL1CacheSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR", + ( + "hipDeviceAttributeMaxSharedMemoryPerMultiprocessor", + CONV_TYPE, + API_DRIVER, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR", + ( + "hipDeviceAttributeMaxRegistersPerMultiprocessor", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY", + ("hipDeviceAttributeManagedMemory", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD", + ("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_DRIVER), + ), + ( + "CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID", + ( + "hipDeviceAttributeMultiGpuBoardGroupId", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED", + ( + "hipDeviceAttributeHostNativeAtomicSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO", + ( + "hipDeviceAttributeSingleToDoublePrecisionPerfRatio", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS", + ( + "hipDeviceAttributePageableMemoryAccess", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS", + ( + "hipDeviceAttributeConcurrentManagedAccess", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED", + ( + "hipDeviceAttributeComputePreemptionSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM", + ( + "hipDeviceAttributeCanUseHostPointerForRegisteredMem", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_ATTRIBUTE_MAX", + ("hipDeviceAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_CONTEXT", + ("hipPointerAttributeContext", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_MEMORY_TYPE", + ("hipPointerAttributeMemoryType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_DEVICE_POINTER", + ( + "hipPointerAttributeDevicePointer", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_POINTER_ATTRIBUTE_HOST_POINTER", + ("hipPointerAttributeHostPointer", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_P2P_TOKENS", + ("hipPointerAttributeP2pTokens", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_SYNC_MEMOPS", + ("hipPointerAttributeSyncMemops", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_BUFFER_ID", + ("hipPointerAttributeBufferId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_POINTER_ATTRIBUTE_IS_MANAGED", + ("hipPointerAttributeIsManaged", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK", + ( + "hipFuncAttributeMaxThreadsPerBlocks", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES", + ("hipFuncAttributeSharedSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES", + ("hipFuncAttributeMaxDynamicSharedMemorySize", CONV_TYPE, API_RUNTIME), + ), + ( + "CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES", + 
("hipFuncAttributeConstSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES", + ("hipFuncAttributeLocalSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_NUM_REGS", + ("hipFuncAttributeNumRegs", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_PTX_VERSION", + ("hipFuncAttributePtxVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_BINARY_VERSION", + ("hipFuncAttributeBinaryVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_CACHE_MODE_CA", + ("hipFuncAttributeCacheModeCA", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_FUNC_ATTRIBUTE_MAX", + ("hipFuncAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE", + ("hipGraphicsMapFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY", + ("hipGraphicsMapFlagsReadOnly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD", + ("hipGraphicsMapFlagsWriteDiscard", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GRAPHICS_REGISTER_FLAGS_NONE", + ("hipGraphicsRegisterFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY", + ( + "hipGraphicsRegisterFlagsReadOnly", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD", + ( + "hipGraphicsRegisterFlagsWriteDiscard", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST", + ( + "hipGraphicsRegisterFlagsSurfaceLoadStore", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER", + ( + "hipGraphicsRegisterFlagsTextureGather", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_OCCUPANCY_DEFAULT", + ("hipOccupancyDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE", + ( + "hipOccupancyDisableCachingOverride", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_FUNC_CACHE_PREFER_NONE", + ("hipFuncCachePreferNone", CONV_CACHE, API_DRIVER), + ), + ( + "CU_FUNC_CACHE_PREFER_SHARED", + ("hipFuncCachePreferShared", CONV_CACHE, API_DRIVER), + ), + ("CU_FUNC_CACHE_PREFER_L1", ("hipFuncCachePreferL1", CONV_CACHE, API_DRIVER)), + ( + "CU_FUNC_CACHE_PREFER_EQUAL", + ("hipFuncCachePreferEqual", CONV_CACHE, API_DRIVER), + ), + ( + "CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS", + ("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CUDA_IPC_HANDLE_SIZE", ("HIP_IPC_HANDLE_SIZE", CONV_TYPE, API_DRIVER)), + ( + "CU_JIT_CACHE_OPTION_NONE", + ("hipJitCacheModeOptionNone", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_CACHE_OPTION_CG", + ("hipJitCacheModeOptionCG", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_CACHE_OPTION_CA", + ("hipJitCacheModeOptionCA", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_PREFER_PTX", + ("hipJitFallbackPreferPtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_PREFER_BINARY", + ("hipJitFallbackPreferBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CU_JIT_MAX_REGISTERS", ("hipJitOptionMaxRegisters", CONV_JIT, API_DRIVER)), + ( + "CU_JIT_THREADS_PER_BLOCK", + ("hipJitOptionThreadsPerBlock", CONV_JIT, API_DRIVER), + ), + ("CU_JIT_WALL_TIME", ("hipJitOptionWallTime", CONV_JIT, API_DRIVER)), + ("CU_JIT_INFO_LOG_BUFFER", 
("hipJitOptionInfoLogBuffer", CONV_JIT, API_DRIVER)), + ( + "CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES", + ("hipJitOptionInfoLogBufferSizeBytes", CONV_JIT, API_DRIVER), + ), + ( + "CU_JIT_ERROR_LOG_BUFFER", + ("hipJitOptionErrorLogBuffer", CONV_JIT, API_DRIVER), + ), + ( + "CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES", + ("hipJitOptionErrorLogBufferSizeBytes", CONV_JIT, API_DRIVER), + ), + ( + "CU_JIT_OPTIMIZATION_LEVEL", + ("hipJitOptionOptimizationLevel", CONV_JIT, API_DRIVER), + ), + ( + "CU_JIT_TARGET_FROM_CUCONTEXT", + ("hipJitOptionTargetFromContext", CONV_JIT, API_DRIVER), + ), + ("CU_JIT_TARGET", ("hipJitOptionTarget", CONV_JIT, API_DRIVER)), + ( + "CU_JIT_FALLBACK_STRATEGY", + ("hipJitOptionFallbackStrategy", CONV_JIT, API_DRIVER), + ), + ( + "CU_JIT_GENERATE_DEBUG_INFO", + ("hipJitOptionGenerateDebugInfo", CONV_JIT, API_DRIVER), + ), + ("CU_JIT_LOG_VERBOSE", ("hipJitOptionLogVerbose", CONV_JIT, API_DRIVER)), + ( + "CU_JIT_GENERATE_LINE_INFO", + ("hipJitOptionGenerateLineInfo", CONV_JIT, API_DRIVER), + ), + ("CU_JIT_CACHE_MODE", ("hipJitOptionCacheMode", CONV_JIT, API_DRIVER)), + ("CU_JIT_NEW_SM3X_OPT", ("hipJitOptionSm3xOpt", CONV_JIT, API_DRIVER)), + ("CU_JIT_FAST_COMPILE", ("hipJitOptionFastCompile", CONV_JIT, API_DRIVER)), + ("CU_JIT_NUM_OPTIONS", ("hipJitOptionNumOptions", CONV_JIT, API_DRIVER)), + ( + "CU_TARGET_COMPUTE_10", + ("hipJitTargetCompute10", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_11", + ("hipJitTargetCompute11", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_12", + ("hipJitTargetCompute12", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_13", + ("hipJitTargetCompute13", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_20", + ("hipJitTargetCompute20", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_21", + ("hipJitTargetCompute21", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_30", + ("hipJitTargetCompute30", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_32", + ("hipJitTargetCompute32", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_35", + ("hipJitTargetCompute35", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_37", + ("hipJitTargetCompute37", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_50", + ("hipJitTargetCompute50", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_52", + ("hipJitTargetCompute52", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_53", + ("hipJitTargetCompute53", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_60", + ("hipJitTargetCompute60", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_61", + ("hipJitTargetCompute61", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_TARGET_COMPUTE_62", + ("hipJitTargetCompute62", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_INPUT_CUBIN", + ("hipJitInputTypeBin", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_INPUT_PTX", + ("hipJitInputTypePtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_INPUT_FATBINARY", + ("hipJitInputTypeFatBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_INPUT_OBJECT", + ("hipJitInputTypeObject", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_INPUT_LIBRARY", + ("hipJitInputTypeLibrary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_JIT_NUM_INPUT_TYPES", + ("hipJitInputTypeNumInputTypes", CONV_JIT, 
API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_LIMIT_STACK_SIZE", + ("hipLimitStackSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_LIMIT_PRINTF_FIFO_SIZE", + ("hipLimitPrintfFifoSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_LIMIT_MALLOC_HEAP_SIZE", + ("hipLimitMallocHeapSize", CONV_TYPE, API_DRIVER), + ), + ( + "CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH", + ("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT", + ( + "hipLimitDevRuntimePendingLaunchCount", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_LIMIT_STACK_SIZE", + ("hipLimitStackSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ATTACH_GLOBAL", + ("hipMemAttachGlobal", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ATTACH_HOST", + ("hipMemAttachHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEM_ATTACH_SINGLE", + ("hipMemAttachSingle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMORYTYPE_HOST", + ("hipMemTypeHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMORYTYPE_DEVICE", + ("hipMemTypeDevice", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMORYTYPE_ARRAY", + ("hipMemTypeArray", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_MEMORYTYPE_UNIFIED", + ("hipMemTypeUnified", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_RESOURCE_TYPE_ARRAY", + ("hipResourceTypeArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_RESOURCE_TYPE_MIPMAPPED_ARRAY", + ("hipResourceTypeMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_RESOURCE_TYPE_LINEAR", + ("hipResourceTypeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_RESOURCE_TYPE_PITCH2D", + ("hipResourceTypePitch2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CU_RES_VIEW_FORMAT_NONE", ("hipResViewFormatNone", CONV_TEX, API_DRIVER)), + ( + "CU_RES_VIEW_FORMAT_UINT_1X8", + ("hipResViewFormatUnsignedChar1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_2X8", + ("hipResViewFormatUnsignedChar2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_4X8", + ("hipResViewFormatUnsignedChar4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_1X8", + ("hipResViewFormatSignedChar1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_2X8", + ("hipResViewFormatSignedChar2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_4X8", + ("hipResViewFormatSignedChar4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_1X16", + ("hipResViewFormatUnsignedShort1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_2X16", + ("hipResViewFormatUnsignedShort2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_4X16", + ("hipResViewFormatUnsignedShort4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_1X16", + ("hipResViewFormatSignedShort1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_2X16", + ("hipResViewFormatSignedShort2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_4X16", + ("hipResViewFormatSignedShort4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_1X32", + ("hipResViewFormatUnsignedInt1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_2X32", + ("hipResViewFormatUnsignedInt2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UINT_4X32", + ("hipResViewFormatUnsignedInt4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_1X32", + ("hipResViewFormatSignedInt1", CONV_TEX, API_DRIVER), + ), + ( + 
"CU_RES_VIEW_FORMAT_SINT_2X32", + ("hipResViewFormatSignedInt2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SINT_4X32", + ("hipResViewFormatSignedInt4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_1X16", + ("hipResViewFormatHalf1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_2X16", + ("hipResViewFormatHalf2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_4X16", + ("hipResViewFormatHalf4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_1X32", + ("hipResViewFormatFloat1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_2X32", + ("hipResViewFormatFloat2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_FLOAT_4X32", + ("hipResViewFormatFloat4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC1", + ("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC2", + ("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC3", + ("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC4", + ("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SIGNED_BC4", + ("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC5", + ("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SIGNED_BC5", + ("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC6H", + ("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_SIGNED_BC6H", + ("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_DRIVER), + ), + ( + "CU_RES_VIEW_FORMAT_UNSIGNED_BC7", + ("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_DRIVER), + ), + ( + "CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE", + ("hipSharedMemBankSizeDefault", CONV_TYPE, API_DRIVER), + ), + ( + "CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE", + ("hipSharedMemBankSizeFourByte", CONV_TYPE, API_DRIVER), + ), + ( + "CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE", + ("hipSharedMemBankSizeEightByte", CONV_TYPE, API_DRIVER), + ), + ("CU_STREAM_DEFAULT", ("hipStreamDefault", CONV_TYPE, API_DRIVER)), + ("CU_STREAM_NON_BLOCKING", ("hipStreamNonBlocking", CONV_TYPE, API_DRIVER)), + ( + "CU_STREAM_WAIT_VALUE_GEQ", + ("hipStreamWaitValueGeq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_WAIT_VALUE_EQ", + ("hipStreamWaitValueEq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_WAIT_VALUE_AND", + ("hipStreamWaitValueAnd", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_WAIT_VALUE_FLUSH", + ("hipStreamWaitValueFlush", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_WRITE_VALUE_DEFAULT", + ("hipStreamWriteValueDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER", + ( + "hipStreamWriteValueNoMemoryBarrier", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_STREAM_MEM_OP_WAIT_VALUE_32", + ("hipStreamBatchMemOpWaitValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_MEM_OP_WRITE_VALUE_32", + ("hipStreamBatchMemOpWriteValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES", + ( + "hipStreamBatchMemOpFlushRemoteWrites", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGetErrorName", + ("hipGetErrorName", 
CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGetErrorString", + ("hipDrvGetErrorString", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuInit", ("hipInit", CONV_INIT, API_DRIVER)), + ("cuDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_DRIVER)), + ("cuCtxCreate", ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)), + ("cuCtxCreate_v2", ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)), + ("cuCtxDestroy", ("hipCtxDestroy", CONV_CONTEXT, API_DRIVER)), + ("cuCtxDestroy_v2", ("hipCtxDestroy", CONV_CONTEXT, API_DRIVER)), + ("cuCtxGetApiVersion", ("hipCtxGetApiVersion", CONV_CONTEXT, API_DRIVER)), + ("cuCtxGetCacheConfig", ("hipCtxGetCacheConfig", CONV_CONTEXT, API_DRIVER)), + ("cuCtxGetCurrent", ("hipCtxGetCurrent", CONV_CONTEXT, API_DRIVER)), + ("cuCtxGetDevice", ("hipCtxGetDevice", CONV_CONTEXT, API_DRIVER)), + ("cuCtxGetFlags", ("hipCtxGetFlags", CONV_CONTEXT, API_DRIVER)), + ("cuDeviceGetUuid", ("hipDeviceGetUuid", CONV_CONTEXT, API_DRIVER)), + ( + "cuCtxGetLimit", + ("hipCtxGetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuCtxGetSharedMemConfig", + ("hipCtxGetSharedMemConfig", CONV_CONTEXT, API_DRIVER), + ), + ( + "cuCtxGetStreamPriorityRange", + ("hipCtxGetStreamPriorityRange", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuCtxPopCurrent_v2", ("hipCtxPopCurrent", CONV_CONTEXT, API_DRIVER)), + ("cuCtxPushCurrent_v2", ("hipCtxPushCurrent", CONV_CONTEXT, API_DRIVER)), + ("cuCtxSetCacheConfig", ("hipCtxSetCacheConfig", CONV_CONTEXT, API_DRIVER)), + ("cuCtxSetCurrent", ("hipCtxSetCurrent", CONV_CONTEXT, API_DRIVER)), + ( + "cuCtxSetLimit", + ("hipCtxSetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuCtxSetSharedMemConfig", + ("hipCtxSetSharedMemConfig", CONV_CONTEXT, API_DRIVER), + ), + ("cuCtxSynchronize", ("hipCtxSynchronize", CONV_CONTEXT, API_DRIVER)), + ("cuCtxAttach", ("hipCtxAttach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)), + ("cuCtxDetach", ("hipCtxDetach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)), + ("cuCtxEnablePeerAccess", ("hipCtxEnablePeerAccess", CONV_PEER, API_DRIVER)), + ("cuCtxDisablePeerAccess", ("hipCtxDisablePeerAccess", CONV_PEER, API_DRIVER)), + ("cuDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_DRIVER)), + ( + "cuDeviceGetP2PAttribute", + ("hipDeviceGetP2PAttribute", CONV_PEER, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuDevicePrimaryCtxGetState", + ("hipDevicePrimaryCtxGetState", CONV_CONTEXT, API_DRIVER), + ), + ( + "cuDevicePrimaryCtxRelease", + ("hipDevicePrimaryCtxRelease", CONV_CONTEXT, API_DRIVER), + ), + ( + "cuDevicePrimaryCtxReset", + ("hipDevicePrimaryCtxReset", CONV_CONTEXT, API_DRIVER), + ), + ( + "cuDevicePrimaryCtxRetain", + ("hipDevicePrimaryCtxRetain", CONV_CONTEXT, API_DRIVER), + ), + ( + "cuDevicePrimaryCtxSetFlags", + ("hipDevicePrimaryCtxSetFlags", CONV_CONTEXT, API_DRIVER), + ), + ("cuDeviceGet", ("hipDeviceGet", CONV_DEVICE, API_DRIVER)), + ("cuDeviceGetName", ("hipDeviceGetName", CONV_DEVICE, API_DRIVER)), + ("cuDeviceGetCount", ("hipGetDeviceCount", CONV_DEVICE, API_DRIVER)), + ("cuDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_DRIVER)), + ("cuDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_DRIVER)), + ("cuDeviceGetByPCIBusId", ("hipDeviceGetByPCIBusId", CONV_DEVICE, API_DRIVER)), + ("cuDeviceTotalMem_v2", ("hipDeviceTotalMem", CONV_DEVICE, API_DRIVER)), + ( + "cuDeviceComputeCapability", + ("hipDeviceComputeCapability", CONV_DEVICE, API_DRIVER), + ), + ("cuDeviceGetProperties", ("hipGetDeviceProperties", CONV_DEVICE, API_DRIVER)), + 
("cuLinkAddData", ("hipLinkAddData", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ("cuLinkAddFile", ("hipLinkAddFile", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuLinkComplete", + ("hipLinkComplete", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuLinkCreate", ("hipLinkCreate", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ("cuLinkDestroy", ("hipLinkDestroy", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ("cuModuleGetFunction", ("hipModuleGetFunction", CONV_MODULE, API_DRIVER)), + ("cuModuleGetGlobal_v2", ("hipModuleGetGlobal", CONV_MODULE, API_DRIVER)), + ( + "cuModuleGetSurfRef", + ("hipModuleGetSurfRef", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuModuleGetTexRef", ("hipModuleGetTexRef", CONV_MODULE, API_DRIVER)), + ("cuModuleLoad", ("hipModuleLoad", CONV_MODULE, API_DRIVER)), + ("cuModuleLoadData", ("hipModuleLoadData", CONV_MODULE, API_DRIVER)), + ("cuModuleLoadDataEx", ("hipModuleLoadDataEx", CONV_MODULE, API_DRIVER)), + ( + "cuModuleLoadFatBinary", + ("hipModuleLoadFatBinary", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuModuleUnload", ("hipModuleUnload", CONV_MODULE, API_DRIVER)), + ( + "CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK", + ( + "hipDeviceP2PAttributePerformanceRank", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED", + ( + "hipDeviceP2PAttributeAccessSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED", + ( + "hipDeviceP2PAttributeNativeAtomicSupported", + CONV_TYPE, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ("CU_EVENT_DEFAULT", ("hipEventDefault", CONV_EVENT, API_DRIVER)), + ("CU_EVENT_BLOCKING_SYNC", ("hipEventBlockingSync", CONV_EVENT, API_DRIVER)), + ("CU_EVENT_DISABLE_TIMING", ("hipEventDisableTiming", CONV_EVENT, API_DRIVER)), + ("CU_EVENT_INTERPROCESS", ("hipEventInterprocess", CONV_EVENT, API_DRIVER)), + ("cuEventCreate", ("hipEventCreate", CONV_EVENT, API_DRIVER)), + ("cuEventDestroy", ("hipEventDestroy", CONV_EVENT, API_DRIVER)), + ("cuEventDestroy_v2", ("hipEventDestroy", CONV_EVENT, API_DRIVER)), + ("cuEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_DRIVER)), + ("cuEventQuery", ("hipEventQuery", CONV_EVENT, API_DRIVER)), + ("cuEventRecord", ("hipEventRecord", CONV_EVENT, API_DRIVER)), + ("cuEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_DRIVER)), + ("cuFuncSetAttribute", ("hipFuncSetAttribute", CONV_EVENT, API_DRIVER)), + ( + "cuFuncGetAttribute", + ("hipFuncGetAttribute", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_MODULE, API_DRIVER)), + ( + "cuFuncSetSharedMemConfig", + ("hipFuncSetSharedMemConfig", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuLaunchKernel", ("hipModuleLaunchKernel", CONV_MODULE, API_DRIVER)), + ( + "cuFuncSetBlockShape", + ("hipFuncSetBlockShape", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuFuncSetSharedSize", + ("hipFuncSetSharedSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuLaunch", ("hipLaunch", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ("cuLaunchGrid", ("hipLaunchGrid", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuLaunchGridAsync", + ("hipLaunchGridAsync", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuParamSetf", ("hipParamSetf", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ("cuParamSeti", ("hipParamSeti", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuParamSetSize", + ("hipParamSetSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), 
+ ), + ( + "cuParamSetSize", + ("hipParamSetSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuParamSetv", ("hipParamSetv", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuOccupancyMaxActiveBlocksPerMultiprocessor", + ( + "hipModuleOccupancyMaxActiveBlocksPerMultiprocessor", + CONV_OCCUPANCY, + API_DRIVER, + ), + ), + ( + "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + ( + "hipModuleOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + CONV_OCCUPANCY, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuOccupancyMaxPotentialBlockSize", + ("hipModuleOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_DRIVER), + ), + ( + "cuOccupancyMaxPotentialBlockSizeWithFlags", + ( + "hipModuleOccupancyMaxPotentialBlockSizeWithFlags", + CONV_OCCUPANCY, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ("cuStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_DRIVER)), + ( + "cuStreamAttachMemAsync", + ("hipStreamAttachMemAsync", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuStreamCreate", + ("hipStreamCreate__", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuStreamCreateWithPriority", + ("hipStreamCreateWithPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuStreamDestroy", ("hipStreamDestroy", CONV_STREAM, API_DRIVER)), + ("cuStreamDestroy_v2", ("hipStreamDestroy", CONV_STREAM, API_DRIVER)), + ("cuStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_DRIVER)), + ( + "cuStreamGetPriority", + ("hipStreamGetPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuStreamQuery", ("hipStreamQuery", CONV_STREAM, API_DRIVER)), + ("cuStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_DRIVER)), + ("cuStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_DRIVER)), + ( + "cuStreamWaitValue32", + ("hipStreamWaitValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuStreamWriteValue32", + ("hipStreamWriteValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuStreamBatchMemOp", + ("hipStreamBatchMemOp", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuArray3DCreate", ("hipArray3DCreate", CONV_MEM, API_DRIVER)), + ( + "cuArray3DGetDescriptor", + ("hipArray3DGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuArrayCreate", ("hipArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuArrayDestroy", ("hipArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuArrayGetDescriptor", + ("hipArrayGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuIpcCloseMemHandle", + ("hipIpcCloseMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuIpcGetEventHandle", + ("hipIpcGetEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuIpcGetMemHandle", + ("hipIpcGetMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuIpcOpenEventHandle", + ("hipIpcOpenEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuIpcOpenMemHandle", + ("hipIpcOpenMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemAlloc_v2", ("hipMalloc", CONV_MEM, API_DRIVER)), + ("cuMemAllocHost", ("hipMemAllocHost", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemAllocManaged", + ("hipMemAllocManaged", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemAllocPitch", + ("hipMemAllocPitch__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpy", ("hipMemcpy__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemcpy2D", ("hipMemcpy2D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemcpy2DAsync", + ("hipMemcpy2DAsync__", CONV_MEM, 
API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemcpy2DUnaligned", + ("hipMemcpy2DUnaligned", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpy3D", ("hipMemcpy3D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemcpy3DAsync", + ("hipMemcpy3DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemcpy3DPeer", + ("hipMemcpy3DPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemcpy3DPeerAsync", + ("hipMemcpy3DPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpyAsync", ("hipMemcpyAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemcpyAtoA", ("hipMemcpyAtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemcpyAtoD", ("hipMemcpyAtoD", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemcpyAtoH", ("hipMemcpyAtoH", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemcpyAtoHAsync", + ("hipMemcpyAtoHAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpyDtoA", ("hipMemcpyDtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemcpyDtoD_v2", ("hipMemcpyDtoD", CONV_MEM, API_DRIVER)), + ("cuMemcpyDtoDAsync_v2", ("hipMemcpyDtoDAsync", CONV_MEM, API_DRIVER)), + ("cuMemcpyDtoH_v2", ("hipMemcpyDtoH", CONV_MEM, API_DRIVER)), + ("cuMemcpyDtoHAsync_v2", ("hipMemcpyDtoHAsync", CONV_MEM, API_DRIVER)), + ("cuMemcpyHtoA", ("hipMemcpyHtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemcpyHtoAAsync", + ("hipMemcpyHtoAAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpyHtoD_v2", ("hipMemcpyHtoD", CONV_MEM, API_DRIVER)), + ("cuMemcpyHtoDAsync_v2", ("hipMemcpyHtoDAsync", CONV_MEM, API_DRIVER)), + ( + "cuMemcpyPeerAsync", + ("hipMemcpyPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemcpyPeer", ("hipMemcpyPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ("cuMemFree", ("hipFree", CONV_MEM, API_DRIVER)), + ("cuMemFree_v2", ("hipFree", CONV_MEM, API_DRIVER)), + ("cuMemFreeHost", ("hipHostFree", CONV_MEM, API_DRIVER)), + ( + "cuMemGetAddressRange", + ("hipMemGetAddressRange", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemGetInfo_v2", ("hipMemGetInfo", CONV_MEM, API_DRIVER)), + ("cuMemHostAlloc", ("hipHostMalloc", CONV_MEM, API_DRIVER)), + ( + "cuMemHostGetDevicePointer", + ("hipMemHostGetDevicePointer", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemHostGetFlags", + ("hipMemHostGetFlags", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemHostRegister_v2", ("hipHostRegister", CONV_MEM, API_DRIVER)), + ("cuMemHostUnregister", ("hipHostUnregister", CONV_MEM, API_DRIVER)), + ("cuMemsetD16_v2", ("hipMemsetD16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemsetD16Async", + ("hipMemsetD16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemsetD2D16_v2", ("hipMemsetD2D16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemsetD2D16Async", + ("hipMemsetD2D16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemsetD2D32_v2", ("hipMemsetD2D32", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemsetD2D32Async", + ("hipMemsetD2D32Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemsetD2D8_v2", ("hipMemsetD2D8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemsetD2D8Async", + ("hipMemsetD2D8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemsetD32_v2", ("hipMemset", CONV_MEM, API_DRIVER)), + ("cuMemsetD32Async", ("hipMemsetAsync", CONV_MEM, API_DRIVER)), + ("cuMemsetD8_v2", ("hipMemsetD8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemsetD8Async", + ("hipMemsetD8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMipmappedArrayCreate", 
+ ("hipMipmappedArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMipmappedArrayDestroy", + ("hipMipmappedArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMipmappedArrayGetLevel", + ("hipMipmappedArrayGetLevel", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemPrefetchAsync", + ("hipMemPrefetchAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuMemAdvise", ("hipMemAdvise", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuMemRangeGetAttribute", + ("hipMemRangeGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemRangeGetAttributes", + ("hipMemRangeGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuPointerGetAttribute", + ("hipPointerGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuMemGetAddressRange_v2", + ("hipMemGetAddressRange", CONV_MEM, API_DRIVER), + ), + ( + "cuPointerGetAttributes", + ("hipPointerGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuPointerSetAttribute", + ("hipPointerSetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), + ), + ("CU_TR_FILTER_MODE_POINT", ("hipFilterModePoint", CONV_TEX, API_DRIVER)), + ( + "CU_TR_FILTER_MODE_LINEAR", + ("hipFilterModeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetAddress", + ("hipTexRefGetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetAddressMode", + ("hipTexRefGetAddressMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetArray", + ("hipTexRefGetArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetBorderColor", + ("hipTexRefGetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetFilterMode", + ("hipTexRefGetFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetFlags", + ("hipTexRefGetFlags", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetFormat", + ("hipTexRefGetFormat", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetMaxAnisotropy", + ("hipTexRefGetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetMipmapFilterMode", + ("hipTexRefGetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetMipmapLevelBias", + ("hipTexRefGetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetMipmapLevelClamp", + ("hipTexRefGetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefGetMipmappedArray", + ("hipTexRefGetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetAddress", + ("hipTexRefSetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetAddress2D", + ("hipTexRefSetAddress2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuTexRefSetAddressMode", ("hipTexRefSetAddressMode", CONV_TEX, API_DRIVER)), + ("cuTexRefSetArray", ("hipTexRefSetArray", CONV_TEX, API_DRIVER)), + ( + "cuTexRefSetBorderColor", + ("hipTexRefSetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuTexRefSetFilterMode", ("hipTexRefSetFilterMode", CONV_TEX, API_DRIVER)), + ("cuTexRefSetFlags", ("hipTexRefSetFlags", CONV_TEX, API_DRIVER)), + ("cuTexRefSetFormat", ("hipTexRefSetFormat", CONV_TEX, API_DRIVER)), + ( + "cuTexRefSetMaxAnisotropy", + ("hipTexRefSetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetMipmapFilterMode", + ("hipTexRefSetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetMipmapLevelBias", + ("hipTexRefSetMipmapLevelBias", CONV_TEX, API_DRIVER, 
HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetMipmapLevelClamp", + ("hipTexRefSetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexRefSetMipmappedArray", + ("hipTexRefSetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuTexRefCreate", ("hipTexRefCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuTexRefDestroy", + ("hipTexRefDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuSurfRefGetArray", + ("hipSurfRefGetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuSurfRefSetArray", + ("hipSurfRefSetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexObjectCreate", + ("hipTexObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexObjectDestroy", + ("hipTexObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexObjectGetResourceDesc", + ("hipTexObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexObjectGetResourceViewDesc", + ("hipTexObjectGetResourceViewDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuTexObjectGetTextureDesc", + ("hipTexObjectGetTextureDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuSurfObjectCreate", + ("hipSurfObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuSurfObjectDestroy", + ("hipSurfObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuSurfObjectGetResourceDesc", + ("hipSurfObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsMapResources", + ("hipGraphicsMapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsResourceGetMappedMipmappedArray", + ( + "hipGraphicsResourceGetMappedMipmappedArray", + CONV_GRAPHICS, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGraphicsResourceGetMappedPointer", + ( + "hipGraphicsResourceGetMappedPointer", + CONV_GRAPHICS, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGraphicsResourceSetMapFlags", + ( + "hipGraphicsResourceSetMapFlags", + CONV_GRAPHICS, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGraphicsSubResourceGetMappedArray", + ( + "hipGraphicsSubResourceGetMappedArray", + CONV_GRAPHICS, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGraphicsUnmapResources", + ("hipGraphicsUnmapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsUnregisterResource", + ( + "hipGraphicsUnregisterResource", + CONV_GRAPHICS, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuProfilerInitialize", + ("hipProfilerInitialize", CONV_OTHER, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuProfilerStart", ("hipProfilerStart", CONV_OTHER, API_DRIVER)), + ("cuProfilerStop", ("hipProfilerStop", CONV_OTHER, API_DRIVER)), + ( + "CU_GL_DEVICE_LIST_ALL", + ("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GL_DEVICE_LIST_CURRENT_FRAME", + ("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_GL_DEVICE_LIST_NEXT_FRAME", + ("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuGLGetDevices", ("hipGLGetDevices", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuGraphicsGLRegisterBuffer", + ("hipGraphicsGLRegisterBuffer", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsGLRegisterImage", + ("hipGraphicsGLRegisterImage", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ("cuWGLGetDevice", ("hipWGLGetDevice", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ( + "CU_GL_MAP_RESOURCE_FLAGS_NONE", + ("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + 
), + ( + "CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY", + ( + "HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY", + CONV_GL, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", + ( + "HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", + CONV_GL, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ("cuGLCtxCreate", ("hipGLCtxCreate", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ("cuGLInit", ("hipGLInit", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), + ( + "cuGLMapBufferObject", + ("hipGLMapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLMapBufferObjectAsync", + ("hipGLMapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLRegisterBufferObject", + ("hipGLRegisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLSetBufferObjectMapFlags", + ("hipGLSetBufferObjectMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLUnmapBufferObject", + ("hipGLUnmapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLUnmapBufferObjectAsync", + ("hipGLUnmapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGLUnregisterBufferObject", + ("hipGLUnregisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D9_DEVICE_LIST_ALL", + ("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D9_DEVICE_LIST_CURRENT_FRAME", + ( + "HIP_D3D9_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D9, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D9_DEVICE_LIST_NEXT_FRAME", + ("HIP_D3D9_DEVICE_LIST_NEXT_FRAME", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9CtxCreate", + ("hipD3D9CtxCreate", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9CtxCreateOnDevice", + ("hipD3D9CtxCreateOnDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9GetDevice", + ("hipD3D9GetDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9GetDevices", + ("hipD3D9GetDevices", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9GetDirect3DDevice", + ("hipD3D9GetDirect3DDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsD3D9RegisterResource", + ("hipGraphicsD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D9_MAPRESOURCE_FLAGS_NONE", + ("HIP_D3D9_MAPRESOURCE_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D9_MAPRESOURCE_FLAGS_READONLY", + ( + "HIP_D3D9_MAPRESOURCE_FLAGS_READONLY", + CONV_D3D9, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", + ( + "HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", + CONV_D3D9, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D9_REGISTER_FLAGS_NONE", + ("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D9_REGISTER_FLAGS_ARRAY", + ("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9MapResources", + ("hipD3D9MapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9RegisterResource", + ("hipD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9ResourceGetMappedArray", + ("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9ResourceGetMappedPitch", + ("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9ResourceGetMappedPointer", + ("hipD3D9ResourceGetMappedPointer", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9ResourceGetMappedSize", + ("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_DRIVER, 
HIP_UNSUPPORTED), + ), + ( + "cuD3D9ResourceGetSurfaceDimensions", + ( + "hipD3D9ResourceGetSurfaceDimensions", + CONV_D3D9, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD3D9ResourceSetMapFlags", + ("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9UnmapResources", + ("hipD3D9UnmapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D9UnregisterResource", + ("hipD3D9UnregisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D10_DEVICE_LIST_ALL", + ("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D10_DEVICE_LIST_CURRENT_FRAME", + ( + "HIP_D3D10_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D10_DEVICE_LIST_NEXT_FRAME", + ( + "HIP_D3D10_DEVICE_LIST_NEXT_FRAME", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD3D10GetDevice", + ("hipD3D10GetDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10GetDevices", + ("hipD3D10GetDevices", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsD3D10RegisterResource", + ( + "hipGraphicsD3D10RegisterResource", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D10_MAPRESOURCE_FLAGS_NONE", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_NONE", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D10_MAPRESOURCE_FLAGS_READONLY", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_READONLY", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D10_REGISTER_FLAGS_NONE", + ("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D10_REGISTER_FLAGS_ARRAY", + ("HIP_D3D10_REGISTER_FLAGS_ARRAY", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10CtxCreate", + ("hipD3D10CtxCreate", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10CtxCreateOnDevice", + ("hipD3D10CtxCreateOnDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10GetDirect3DDevice", + ("hipD3D10GetDirect3DDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10MapResources", + ("hipD3D10MapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10RegisterResource", + ("hipD3D10RegisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10ResourceGetMappedArray", + ("hipD3D10ResourceGetMappedArray", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10ResourceGetMappedPitch", + ("hipD3D10ResourceGetMappedPitch", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10ResourceGetMappedPointer", + ( + "hipD3D10ResourceGetMappedPointer", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD3D10ResourceGetMappedSize", + ("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10ResourceGetSurfaceDimensions", + ( + "hipD3D10ResourceGetSurfaceDimensions", + CONV_D3D10, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD310ResourceSetMapFlags", + ("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10UnmapResources", + ("hipD3D10UnmapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D10UnregisterResource", + ("hipD3D10UnregisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "CU_D3D11_DEVICE_LIST_ALL", + ("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_DRIVER, 
HIP_UNSUPPORTED), + ), + ( + "CU_D3D11_DEVICE_LIST_CURRENT_FRAME", + ( + "HIP_D3D11_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D11, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "CU_D3D11_DEVICE_LIST_NEXT_FRAME", + ( + "HIP_D3D11_DEVICE_LIST_NEXT_FRAME", + CONV_D3D11, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD3D11GetDevice", + ("hipD3D11GetDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D11GetDevices", + ("hipD3D11GetDevices", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsD3D11RegisterResource", + ( + "hipGraphicsD3D11RegisterResource", + CONV_D3D11, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuD3D11CtxCreate", + ("hipD3D11CtxCreate", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D11CtxCreateOnDevice", + ("hipD3D11CtxCreateOnDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuD3D11GetDirect3DDevice", + ("hipD3D11GetDirect3DDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsVDPAURegisterOutputSurface", + ( + "hipGraphicsVDPAURegisterOutputSurface", + CONV_VDPAU, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuGraphicsVDPAURegisterVideoSurface", + ( + "hipGraphicsVDPAURegisterVideoSurface", + CONV_VDPAU, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuVDPAUGetDevice", + ("hipVDPAUGetDevice", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuVDPAUCtxCreate", + ("hipVDPAUCtxCreate", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamConsumerAcquireFrame", + ("hipEGLStreamConsumerAcquireFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamConsumerConnect", + ("hipEGLStreamConsumerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamConsumerConnectWithFlags", + ( + "hipEGLStreamConsumerConnectWithFlags", + CONV_EGL, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ( + "cuEGLStreamConsumerDisconnect", + ("hipEGLStreamConsumerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamConsumerReleaseFrame", + ("hipEGLStreamConsumerReleaseFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamProducerConnect", + ("hipEGLStreamProducerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamProducerDisconnect", + ("hipEGLStreamProducerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamProducerPresentFrame", + ("hipEGLStreamProducerPresentFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuEGLStreamProducerReturnFrame", + ("hipEGLStreamProducerReturnFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsEGLRegisterImage", + ("hipGraphicsEGLRegisterImage", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), + ), + ( + "cuGraphicsResourceGetMappedEglFrame", + ( + "hipGraphicsResourceGetMappedEglFrame", + CONV_EGL, + API_DRIVER, + HIP_UNSUPPORTED, + ), + ), + ("cudaDataType_t", ("hipDataType", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("cudaDataType", ("hipDataType", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_16BF", ("HIP_R_16BF", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_16BF", ("HIP_C_16BF", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_16F", ("HIP_R_16F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_16F", ("HIP_C_16F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_32F", ("HIP_R_32F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_32F", ("HIP_C_32F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_64F", ("HIP_R_64F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_64F", ("HIP_C_64F", 
CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_8I", ("HIP_R_8I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_8I", ("HIP_C_8I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_8U", ("HIP_R_8U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_8U", ("HIP_C_8U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_32I", ("HIP_R_32I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_32I", ("HIP_C_32I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_R_32U", ("HIP_R_32U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ("CUDA_C_32U", ("HIP_C_32U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "MAJOR_VERSION", + ("hipLibraryMajorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "MINOR_VERSION", + ("hipLibraryMinorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "PATCH_LEVEL", + ("hipLibraryPatchVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAttachGlobal", + ("hipMemAttachGlobal", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAttachHost", + ("hipMemAttachHost", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAttachSingle", + ("hipMemAttachSingle", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaOccupancyDefault", + ("hipOccupancyDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaOccupancyDisableCachingOverride", + ( + "hipOccupancyDisableCachingOverride", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cudaGetLastError", ("hipGetLastError", CONV_ERROR, API_RUNTIME)), + ("cudaPeekAtLastError", ("hipPeekAtLastError", CONV_ERROR, API_RUNTIME)), + ("cudaGetErrorName", ("hipGetErrorName", CONV_ERROR, API_RUNTIME)), + ("cudaGetErrorString", ("hipGetErrorString", CONV_ERROR, API_RUNTIME)), + ("cudaMemcpy3DParms", ("hipMemcpy3DParms", CONV_MEM, API_RUNTIME)), + ( + "cudaMemcpy3DPeerParms", + ("hipMemcpy3DPeerParms", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemcpy", ("hipMemcpy", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyToArray", ("hipMemcpyToArray", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyToSymbol", ("hipMemcpyToSymbol", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyToSymbolAsync", ("hipMemcpyToSymbolAsync", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyAsync", ("hipMemcpyAsync", CONV_MEM, API_RUNTIME)), + ("cudaMemcpy2D", ("hipMemcpy2D", CONV_MEM, API_RUNTIME)), + ("cudaMemcpy2DAsync", ("hipMemcpy2DAsync", CONV_MEM, API_RUNTIME)), + ("cudaMemcpy2DToArray", ("hipMemcpy2DToArray", CONV_MEM, API_RUNTIME)), + ( + "cudaMemcpy2DArrayToArray", + ("hipMemcpy2DArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpy2DFromArray", + ("hipMemcpy2DFromArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpy2DFromArrayAsync", + ("hipMemcpy2DFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpy2DToArrayAsync", + ("hipMemcpy2DToArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemcpy3D", ("hipMemcpy3D", CONV_MEM, API_RUNTIME)), + ( + "cudaMemcpy3DAsync", + ("hipMemcpy3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpy3DPeer", + ("hipMemcpy3DPeer", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpy3DPeerAsync", + ("hipMemcpy3DPeerAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpyArrayToArray", + ("hipMemcpyArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemcpyFromArrayAsync", + ("hipMemcpyFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemcpyFromSymbol", ("hipMemcpyFromSymbol", CONV_MEM, 
API_RUNTIME)), + ( + "cudaMemcpyFromSymbolAsync", + ("hipMemcpyFromSymbolAsync", CONV_MEM, API_RUNTIME), + ), + ("cudaMemAdvise", ("hipMemAdvise", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaMemRangeGetAttribute", + ("hipMemRangeGetAttribute", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemRangeGetAttributes", + ("hipMemRangeGetAttributes", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAdviseSetReadMostly", + ("hipMemAdviseSetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAdviseUnsetReadMostly", + ("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAdviseSetPreferredLocation", + ( + "hipMemAdviseSetPreferredLocation", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaMemAdviseUnsetPreferredLocation", + ( + "hipMemAdviseUnsetPreferredLocation", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaMemAdviseSetAccessedBy", + ("hipMemAdviseSetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemAdviseUnsetAccessedBy", + ("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemRangeAttributeReadMostly", + ("hipMemRangeAttributeReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemRangeAttributePreferredLocation", + ( + "hipMemRangeAttributePreferredLocation", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaMemRangeAttributeAccessedBy", + ("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemRangeAttributeLastPrefetchLocation", + ( + "hipMemRangeAttributeLastPrefetchLocation", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cudaMemcpyHostToHost", ("hipMemcpyHostToHost", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyHostToDevice", ("hipMemcpyHostToDevice", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyDeviceToHost", ("hipMemcpyDeviceToHost", CONV_MEM, API_RUNTIME)), + ( + "cudaMemcpyDeviceToDevice", + ("hipMemcpyDeviceToDevice", CONV_MEM, API_RUNTIME), + ), + ("cudaMemcpyDefault", ("hipMemcpyDefault", CONV_MEM, API_RUNTIME)), + ("cudaMemset", ("hipMemset", CONV_MEM, API_RUNTIME)), + ("cudaMemsetAsync", ("hipMemsetAsync", CONV_MEM, API_RUNTIME)), + ("cudaMemset2D", ("hipMemset2D", CONV_MEM, API_RUNTIME)), + ( + "cudaMemset2DAsync", + ("hipMemset2DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemset3D", ("hipMemset3D", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaMemset3DAsync", + ("hipMemset3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMemGetInfo", ("hipMemGetInfo", CONV_MEM, API_RUNTIME)), + ( + "cudaArrayGetInfo", + ("hipArrayGetInfo", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaFreeMipmappedArray", + ("hipFreeMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetMipmappedArrayLevel", + ("hipGetMipmappedArrayLevel", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetSymbolAddress", + ("hipGetSymbolAddress", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetSymbolSize", + ("hipGetSymbolSize", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMemPrefetchAsync", + ("hipMemPrefetchAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMallocHost", ("hipHostMalloc", CONV_MEM, API_RUNTIME)), + ("cudaMallocArray", ("hipMallocArray", CONV_MEM, API_RUNTIME)), + ("cudaMalloc", ("hipMalloc", CONV_MEM, API_RUNTIME)), + ("cudaMalloc3D", ("hipMalloc3D", CONV_MEM, API_RUNTIME)), + ("cudaMalloc3DArray", ("hipMalloc3DArray", 
CONV_MEM, API_RUNTIME)), + ( + "cudaMallocManaged", + ("hipMallocManaged", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaMallocMipmappedArray", + ("hipMallocMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaMallocPitch", ("hipMallocPitch", CONV_MEM, API_RUNTIME)), + ("cudaFreeHost", ("hipHostFree", CONV_MEM, API_RUNTIME)), + ("cudaFreeArray", ("hipFreeArray", CONV_MEM, API_RUNTIME)), + ("cudaFree", ("hipFree", CONV_MEM, API_RUNTIME)), + ("cudaHostRegister", ("hipHostRegister", CONV_MEM, API_RUNTIME)), + ("cudaHostUnregister", ("hipHostUnregister", CONV_MEM, API_RUNTIME)), + ("cudaHostAlloc", ("hipHostMalloc", CONV_MEM, API_RUNTIME)), + ("cudaMemoryTypeHost", ("hipMemoryTypeHost", CONV_MEM, API_RUNTIME)), + ("cudaMemoryTypeDevice", ("hipMemoryTypeDevice", CONV_MEM, API_RUNTIME)), + ("make_cudaExtent", ("make_hipExtent", CONV_MEM, API_RUNTIME)), + ("make_cudaPitchedPtr", ("make_hipPitchedPtr", CONV_MEM, API_RUNTIME)), + ("make_cudaPos", ("make_hipPos", CONV_MEM, API_RUNTIME)), + ("cudaHostAllocDefault", ("hipHostMallocDefault", CONV_MEM, API_RUNTIME)), + ("cudaHostAllocPortable", ("hipHostMallocPortable", CONV_MEM, API_RUNTIME)), + ("cudaHostAllocMapped", ("hipHostMallocMapped", CONV_MEM, API_RUNTIME)), + ( + "cudaHostAllocWriteCombined", + ("hipHostMallocWriteCombined", CONV_MEM, API_RUNTIME), + ), + ("cudaHostGetFlags", ("hipHostGetFlags", CONV_MEM, API_RUNTIME)), + ("cudaHostRegisterDefault", ("hipHostRegisterDefault", CONV_MEM, API_RUNTIME)), + ( + "cudaHostRegisterPortable", + ("hipHostRegisterPortable", CONV_MEM, API_RUNTIME), + ), + ("cudaHostRegisterMapped", ("hipHostRegisterMapped", CONV_MEM, API_RUNTIME)), + ( + "cudaHostRegisterIoMemory", + ("hipHostRegisterIoMemory", CONV_MEM, API_RUNTIME), + ), + # ("warpSize", ("hipWarpSize", CONV_SPECIAL_FUNC, API_RUNTIME), (HIP actually uses warpSize...)), + ("cudaEventCreate", ("hipEventCreate", CONV_EVENT, API_RUNTIME)), + ( + "cudaEventCreateWithFlags", + ("hipEventCreateWithFlags", CONV_EVENT, API_RUNTIME), + ), + ("cudaEventDestroy", ("hipEventDestroy", CONV_EVENT, API_RUNTIME)), + ("cudaEventRecord", ("hipEventRecord", CONV_EVENT, API_RUNTIME)), + ("cudaEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_RUNTIME)), + ("cudaEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_RUNTIME)), + ("cudaEventQuery", ("hipEventQuery", CONV_EVENT, API_RUNTIME)), + ("cudaEventDefault", ("hipEventDefault", CONV_EVENT, API_RUNTIME)), + ("cudaEventBlockingSync", ("hipEventBlockingSync", CONV_EVENT, API_RUNTIME)), + ("cudaEventDisableTiming", ("hipEventDisableTiming", CONV_EVENT, API_RUNTIME)), + ("cudaEventInterprocess", ("hipEventInterprocess", CONV_EVENT, API_RUNTIME)), + ("cudaStreamCreate", ("hipStreamCreate", CONV_STREAM, API_RUNTIME)), + ( + "cudaStreamCreateWithFlags", + ("hipStreamCreateWithFlags", CONV_STREAM, API_RUNTIME), + ), + ( + "cudaStreamCreateWithPriority", + ("hipStreamCreateWithPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaStreamDestroy", ("hipStreamDestroy", CONV_STREAM, API_RUNTIME)), + ("cudaStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_RUNTIME)), + ("cudaStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_RUNTIME)), + ("cudaStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_RUNTIME)), + ("cudaStreamQuery", ("hipStreamQuery", CONV_STREAM, API_RUNTIME)), + ("cudaStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_RUNTIME)), + ( + "cudaStreamAttachMemAsync", + ("hipStreamAttachMemAsync", CONV_STREAM, API_RUNTIME, 
HIP_UNSUPPORTED), + ), + ( + "cudaStreamGetPriority", + ("hipStreamGetPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaCpuDeviceId", ("hipCpuDeviceId", CONV_TYPE, API_RUNTIME)), + ("cudaStreamDefault", ("hipStreamDefault", CONV_TYPE, API_RUNTIME)), + ("cudaStreamNonBlocking", ("hipStreamNonBlocking", CONV_TYPE, API_RUNTIME)), + ("cudaStreamGetCaptureInfo", ("hipStreamGetCaptureInfo", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureStatus", ("hipStreamCaptureStatus", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureStatusActive", ("hipStreamCaptureStatusActive", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureMode", ("hipStreamCaptureMode", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureModeGlobal", ("hipStreamCaptureModeGlobal", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureModeRelaxed", ("hipStreamCaptureModeRelaxed", CONV_TYPE, API_RUNTIME)), + ("cudaStreamCaptureModeThreadLocal", ("hipStreamCaptureModeThreadLocal", CONV_TYPE, API_RUNTIME)), + ("cudaStreamBeginCapture", ("hipStreamBeginCapture", CONV_TYPE, API_RUNTIME)), + ("cudaStreamEndCapture", ("hipStreamEndCapture", CONV_TYPE, API_RUNTIME)), + ("cudaGraphInstantiate", ("hipGraphInstantiate", CONV_TYPE, API_RUNTIME)), + ("cudaGraphDestroy", ("hipGraphDestroy", CONV_TYPE, API_RUNTIME)), + ("cudaGraphExecDestroy", ("hipGraphExecDestroy", CONV_TYPE, API_RUNTIME)), + ("cudaGraphLaunch", ("hipGraphLaunch", CONV_TYPE, API_RUNTIME)), + ("cudaGraphGetNodes", ("hipGraphGetNodes", CONV_TYPE, API_RUNTIME)), + ("cudaGraphDebugDotPrint", ("hipGraphDebugDotPrint", CONV_TYPE, API_RUNTIME)), + ("cudaThreadExchangeStreamCaptureMode", ("hipThreadExchangeStreamCaptureMode", CONV_TYPE, API_RUNTIME)), + ("cudaStreamIsCapturing", ("hipStreamIsCapturing", CONV_TYPE, API_RUNTIME)), + ("cudaDeviceSynchronize", ("hipDeviceSynchronize", CONV_DEVICE, API_RUNTIME)), + ("cudaDeviceReset", ("hipDeviceReset", CONV_DEVICE, API_RUNTIME)), + ("cudaSetDevice", ("hipSetDevice", CONV_DEVICE, API_RUNTIME)), + ("cudaGetDevice", ("hipGetDevice", CONV_DEVICE, API_RUNTIME)), + ("cudaGetDeviceCount", ("hipGetDeviceCount", CONV_DEVICE, API_RUNTIME)), + ("cudaChooseDevice", ("hipChooseDevice", CONV_DEVICE, API_RUNTIME)), + ("cudaThreadExit", ("hipDeviceReset", CONV_THREAD, API_RUNTIME)), + ( + "cudaThreadGetCacheConfig", + ("hipDeviceGetCacheConfig", CONV_THREAD, API_RUNTIME), + ), + ( + "cudaThreadGetLimit", + ("hipThreadGetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaThreadSetCacheConfig", + ("hipDeviceSetCacheConfig", CONV_THREAD, API_RUNTIME), + ), + ( + "cudaThreadSetLimit", + ("hipThreadSetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaThreadSynchronize", ("hipDeviceSynchronize", CONV_THREAD, API_RUNTIME)), + ("cudaDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_RUNTIME)), + ( + "cudaDevAttrMaxThreadsPerBlock", + ("hipDeviceAttributeMaxThreadsPerBlock", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxBlockDimX", + ("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxBlockDimY", + ("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxBlockDimZ", + ("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxGridDimX", + ("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxGridDimY", + ("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxGridDimZ", + ("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_RUNTIME), + ), + ( + 
"cudaDevAttrMaxSharedMemoryPerBlock", + ("hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxSharedMemoryPerBlockOptin", + ("hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrTotalConstantMemory", + ("hipDeviceAttributeTotalConstantMemory", CONV_TYPE, API_RUNTIME), + ), + ("cudaDevAttrWarpSize", ("hipDeviceAttributeWarpSize", CONV_TYPE, API_RUNTIME)), + ( + "cudaDevAttrMaxPitch", + ("hipDeviceAttributeMaxPitch", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevAttrMaxRegistersPerBlock", + ("hipDeviceAttributeMaxRegistersPerBlock", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrClockRate", + ("hipDeviceAttributeClockRate", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrTextureAlignment", + ( + "hipDeviceAttributeTextureAlignment", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrGpuOverlap", + ("hipDeviceAttributeGpuOverlap", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevAttrMultiProcessorCount", + ("hipDeviceAttributeMultiprocessorCount", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrKernelExecTimeout", + ( + "hipDeviceAttributeKernelExecTimeout", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrIntegrated", + ("hipDeviceAttributeIntegrated", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevAttrCanMapHostMemory", + ( + "hipDeviceAttributeCanMapHostMemory", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrComputeMode", + ("hipDeviceAttributeComputeMode", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxTexture1DWidth", + ( + "hipDeviceAttributeMaxTexture1DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DWidth", + ( + "hipDeviceAttributeMaxTexture2DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DHeight", + ( + "hipDeviceAttributeMaxTexture2DHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DWidth", + ( + "hipDeviceAttributeMaxTexture3DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DHeight", + ( + "hipDeviceAttributeMaxTexture3DHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DDepth", + ( + "hipDeviceAttributeMaxTexture3DDepth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLayeredWidth", + ( + "hipDeviceAttributeMaxTexture2DLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLayeredHeight", + ( + "hipDeviceAttributeMaxTexture2DLayeredHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLayeredLayers", + ( + "hipDeviceAttributeMaxTexture2DLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrSurfaceAlignment", + ( + "hipDeviceAttributeSurfaceAlignment", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrConcurrentKernels", + ("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrEccEnabled", + ("hipDeviceAttributeEccEnabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaDevAttrPciBusId", ("hipDeviceAttributePciBusId", CONV_TYPE, API_RUNTIME)), + ( + "cudaDevAttrPciDeviceId", + ("hipDeviceAttributePciDeviceId", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrTccDriver", + ("hipDeviceAttributeTccDriver", CONV_TYPE, API_RUNTIME, 
HIP_UNSUPPORTED), + ), + ( + "cudaDevAttrMemoryClockRate", + ("hipDeviceAttributeMemoryClockRate", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrGlobalMemoryBusWidth", + ("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrL2CacheSize", + ("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxThreadsPerMultiProcessor", + ("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrAsyncEngineCount", + ( + "hipDeviceAttributeAsyncEngineCount", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrUnifiedAddressing", + ( + "hipDeviceAttributeUnifiedAddressing", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture1DLayeredWidth", + ( + "hipDeviceAttributeMaxTexture1DLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture1DLayeredLayers", + ( + "hipDeviceAttributeMaxTexture1DLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DGatherWidth", + ( + "hipDeviceAttributeMaxTexture2DGatherWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DGatherHeight", + ( + "hipDeviceAttributeMaxTexture2DGatherHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DWidthAlt", + ( + "hipDeviceAttributeMaxTexture3DWidthAlternate", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DHeightAlt", + ( + "hipDeviceAttributeMaxTexture3DHeightAlternate", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture3DDepthAlt", + ( + "hipDeviceAttributeMaxTexture3DDepthAlternate", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrPciDomainId", + ("hipDeviceAttributePciDomainId", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevAttrTexturePitchAlignment", + ( + "hipDeviceAttributeTexturePitchAlignment", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTextureCubemapWidth", + ( + "hipDeviceAttributeMaxTextureCubemapWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTextureCubemapLayeredWidth", + ( + "hipDeviceAttributeMaxTextureCubemapLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTextureCubemapLayeredLayers", + ( + "hipDeviceAttributeMaxTextureCubemapLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface1DWidth", + ( + "hipDeviceAttributeMaxSurface1DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface2DWidth", + ( + "hipDeviceAttributeMaxSurface2DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface2DHeight", + ( + "hipDeviceAttributeMaxSurface2DHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface3DWidth", + ( + "hipDeviceAttributeMaxSurface3DWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface3DHeight", + ( + "hipDeviceAttributeMaxSurface3DHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface3DDepth", + ( + "hipDeviceAttributeMaxSurface3DDepth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface1DLayeredWidth", + ( + "hipDeviceAttributeMaxSurface1DLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), 
+ ), + ( + "cudaDevAttrMaxSurface1DLayeredLayers", + ( + "hipDeviceAttributeMaxSurface1DLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface2DLayeredWidth", + ( + "hipDeviceAttributeMaxSurface2DLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface2DLayeredHeight", + ( + "hipDeviceAttributeMaxSurface2DLayeredHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurface2DLayeredLayers", + ( + "hipDeviceAttributeMaxSurface2DLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurfaceCubemapWidth", + ( + "hipDeviceAttributeMaxSurfaceCubemapWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurfaceCubemapLayeredWidth", + ( + "hipDeviceAttributeMaxSurfaceCubemapLayeredWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSurfaceCubemapLayeredLayers", + ( + "hipDeviceAttributeMaxSurfaceCubemapLayeredLayers", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture1DLinearWidth", + ( + "hipDeviceAttributeMaxTexture1DLinearWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLinearWidth", + ( + "hipDeviceAttributeMaxTexture2DLinearWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLinearHeight", + ( + "hipDeviceAttributeMaxTexture2DLinearHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DLinearPitch", + ( + "hipDeviceAttributeMaxTexture2DLinearPitch", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DMipmappedWidth", + ( + "hipDeviceAttributeMaxTexture2DMipmappedWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxTexture2DMipmappedHeight", + ( + "hipDeviceAttributeMaxTexture2DMipmappedHeight", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrComputeCapabilityMajor", + ("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrComputeCapabilityMinor", + ("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMaxTexture1DMipmappedWidth", + ( + "hipDeviceAttributeMaxTexture1DMipmappedWidth", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrStreamPrioritiesSupported", + ( + "hipDeviceAttributeStreamPrioritiesSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrGlobalL1CacheSupported", + ( + "hipDeviceAttributeGlobalL1CacheSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrLocalL1CacheSupported", + ( + "hipDeviceAttributeLocalL1CacheSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrMaxSharedMemoryPerMultiprocessor", + ( + "hipDeviceAttributeMaxSharedMemoryPerMultiprocessor", + CONV_TYPE, + API_RUNTIME, + ), + ), + ( + "cudaDevAttrMaxRegistersPerMultiprocessor", + ( + "hipDeviceAttributeMaxRegistersPerMultiprocessor", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrManagedMemory", + ( + "hipDeviceAttributeManagedMemory", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrIsMultiGpuBoard", + ("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDevAttrMultiGpuBoardGroupID", + ( + "hipDeviceAttributeMultiGpuBoardGroupID", + CONV_TYPE, + 
API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrHostNativeAtomicSupported", + ( + "hipDeviceAttributeHostNativeAtomicSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrSingleToDoublePrecisionPerfRatio", + ( + "hipDeviceAttributeSingleToDoublePrecisionPerfRatio", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrPageableMemoryAccess", + ( + "hipDeviceAttributePageableMemoryAccess", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrConcurrentManagedAccess", + ( + "hipDeviceAttributeConcurrentManagedAccess", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrComputePreemptionSupported", + ( + "hipDeviceAttributeComputePreemptionSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevAttrCanUseHostPointerForRegisteredMem", + ( + "hipDeviceAttributeCanUseHostPointerForRegisteredMem", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaPointerGetAttributes", + ("hipPointerGetAttributes", CONV_MEM, API_RUNTIME), + ), + ( + "cudaHostGetDevicePointer", + ("hipHostGetDevicePointer", CONV_MEM, API_RUNTIME), + ), + ( + "cudaGetDeviceProperties", + ("hipGetDeviceProperties", CONV_DEVICE, API_RUNTIME), + ), + ("cudaDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_RUNTIME)), + ( + "cudaDeviceGetByPCIBusId", + ("hipDeviceGetByPCIBusId", CONV_DEVICE, API_RUNTIME), + ), + ( + "cudaDeviceGetStreamPriorityRange", + ( + "hipDeviceGetStreamPriorityRange", + CONV_DEVICE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaSetValidDevices", + ("hipSetValidDevices", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDevP2PAttrPerformanceRank", + ( + "hipDeviceP2PAttributePerformanceRank", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevP2PAttrAccessSupported", + ( + "hipDeviceP2PAttributeAccessSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDevP2PAttrNativeAtomicSupported", + ( + "hipDeviceP2PAttributeNativeAtomicSupported", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaDeviceGetP2PAttribute", + ("hipDeviceGetP2PAttribute", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaComputeModeDefault", + ("hipComputeModeDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaComputeModeExclusive", + ("hipComputeModeExclusive", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaComputeModeProhibited", + ("hipComputeModeProhibited", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaComputeModeExclusiveProcess", + ("hipComputeModeExclusiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetDeviceFlags", + ("hipGetDeviceFlags", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaSetDeviceFlags", ("hipSetDeviceFlags", CONV_DEVICE, API_RUNTIME)), + ("cudaDeviceScheduleAuto", ("hipDeviceScheduleAuto", CONV_TYPE, API_RUNTIME)), + ("cudaDeviceScheduleSpin", ("hipDeviceScheduleSpin", CONV_TYPE, API_RUNTIME)), + ("cudaDeviceScheduleYield", ("hipDeviceScheduleYield", CONV_TYPE, API_RUNTIME)), + ( + "cudaDeviceBlockingSync", + ("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDeviceScheduleBlockingSync", + ("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDeviceScheduleMask", + ("hipDeviceScheduleMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaDeviceMapHost", ("hipDeviceMapHost", CONV_TYPE, API_RUNTIME)), + ( + "cudaDeviceLmemResizeToMax", + 
("hipDeviceLmemResizeToMax", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaDeviceMask", ("hipDeviceMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaDeviceSetCacheConfig", + ("hipDeviceSetCacheConfig", CONV_CACHE, API_RUNTIME), + ), + ( + "cudaDeviceGetCacheConfig", + ("hipDeviceGetCacheConfig", CONV_CACHE, API_RUNTIME), + ), + ( + "cudaFuncAttributes", + ("hipFuncAttributes", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaFuncAttributeMaxDynamicSharedMemorySize", + ("hipFuncAttributeMaxDynamicSharedMemorySize", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaFuncAttributePreferredSharedMemoryCarveout", + ("hipFuncAttributePreferredSharedMemoryCarveout", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaFuncSetAttribute", + ("hipFuncSetAttribute", CONV_EXEC, API_RUNTIME), + ), + ("cudaFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_CACHE, API_RUNTIME)), + ( + "cudaFuncCachePreferNone", + ("hipFuncCachePreferNone", CONV_CACHE, API_RUNTIME), + ), + ( + "cudaFuncCachePreferShared", + ("hipFuncCachePreferShared", CONV_CACHE, API_RUNTIME), + ), + ("cudaFuncCachePreferL1", ("hipFuncCachePreferL1", CONV_CACHE, API_RUNTIME)), + ( + "cudaFuncCachePreferEqual", + ("hipFuncCachePreferEqual", CONV_CACHE, API_RUNTIME), + ), + ( + "cudaFuncGetAttributes", + ("hipFuncGetAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaFuncSetSharedMemConfig", + ("hipFuncSetSharedMemConfig", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetParameterBuffer", + ("hipGetParameterBuffer", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaSetDoubleForDevice", + ("hipSetDoubleForDevice", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaSetDoubleForHost", + ("hipSetDoubleForHost", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaConfigureCall", + ("hipConfigureCall", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaLaunch", ("hipLaunch", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)), + ( + "cudaLaunchCooperativeKernel", + ("hipLaunchCooperativeKernel", CONV_EXEC, API_RUNTIME), + ), + ( + "cudaSetupArgument", + ("hipSetupArgument", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_RUNTIME)), + ( + "cudaRuntimeGetVersion", + ("hipRuntimeGetVersion", CONV_VERSION, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaOccupancyMaxPotentialBlockSize", + ("hipOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_RUNTIME), + ), + ( + "cudaOccupancyMaxPotentialBlockSizeWithFlags", + ( + "hipOccupancyMaxPotentialBlockSizeWithFlags", + CONV_OCCUPANCY, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaOccupancyMaxActiveBlocksPerMultiprocessor", + ( + "hipOccupancyMaxActiveBlocksPerMultiprocessor", + CONV_OCCUPANCY, + API_RUNTIME, + ), + ), + ( + "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + ( + "hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", + CONV_OCCUPANCY, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaOccupancyMaxPotentialBlockSizeVariableSMem", + ( + "hipOccupancyMaxPotentialBlockSizeVariableSMem", + CONV_OCCUPANCY, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags", + ( + "hipOccupancyMaxPotentialBlockSizeVariableSMemWithFlags", + CONV_OCCUPANCY, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cudaDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_RUNTIME)), + ( + "cudaDeviceDisablePeerAccess", + ("hipDeviceDisablePeerAccess", CONV_PEER, API_RUNTIME), + ), + ( + "cudaDeviceEnablePeerAccess", + 
("hipDeviceEnablePeerAccess", CONV_PEER, API_RUNTIME), + ), + ("cudaMemcpyPeerAsync", ("hipMemcpyPeerAsync", CONV_MEM, API_RUNTIME)), + ("cudaMemcpyPeer", ("hipMemcpyPeer", CONV_MEM, API_RUNTIME)), + ( + "cudaIpcMemLazyEnablePeerAccess", + ("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaDeviceSetSharedMemConfig", + ("hipDeviceSetSharedMemConfig", CONV_DEVICE, API_RUNTIME), + ), + ( + "cudaDeviceGetSharedMemConfig", + ("hipDeviceGetSharedMemConfig", CONV_DEVICE, API_RUNTIME), + ), + ( + "cudaSharedMemBankSizeDefault", + ("hipSharedMemBankSizeDefault", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaSharedMemBankSizeFourByte", + ("hipSharedMemBankSizeFourByte", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaSharedMemBankSizeEightByte", + ("hipSharedMemBankSizeEightByte", CONV_TYPE, API_RUNTIME), + ), + ( + "cudaLimitStackSize", + ("hipLimitStackSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaLimitPrintfFifoSize", + ("hipLimitPrintfFifoSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaLimitMallocHeapSize", ("hipLimitMallocHeapSize", CONV_TYPE, API_RUNTIME)), + ( + "cudaLimitDevRuntimeSyncDepth", + ("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaLimitDevRuntimePendingLaunchCount", + ( + "hipLimitDevRuntimePendingLaunchCount", + CONV_TYPE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cudaDeviceGetLimit", ("hipDeviceGetLimit", CONV_DEVICE, API_RUNTIME)), + ( + "cudaProfilerInitialize", + ("hipProfilerInitialize", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaProfilerStart", ("hipProfilerStart", CONV_OTHER, API_RUNTIME)), + ("cudaProfilerStop", ("hipProfilerStop", CONV_OTHER, API_RUNTIME)), + ( + "cudaKeyValuePair", + ("hipKeyValuePair", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED), + ), + ("cudaCSV", ("hipCSV", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)), + ("cudaReadModeElementType", ("hipReadModeElementType", CONV_TEX, API_RUNTIME)), + ( + "cudaReadModeNormalizedFloat", + ("hipReadModeNormalizedFloat", CONV_TEX, API_RUNTIME), + ), + ("cudaFilterModePoint", ("hipFilterModePoint", CONV_TEX, API_RUNTIME)), + ("cudaFilterModeLinear", ("hipFilterModeLinear", CONV_TEX, API_RUNTIME)), + ("cudaBindTexture", ("hipBindTexture", CONV_TEX, API_RUNTIME)), + ("cudaUnbindTexture", ("hipUnbindTexture", CONV_TEX, API_RUNTIME)), + ("cudaBindTexture2D", ("hipBindTexture2D", CONV_TEX, API_RUNTIME)), + ("cudaBindTextureToArray", ("hipBindTextureToArray", CONV_TEX, API_RUNTIME)), + ( + "cudaBindTextureToMipmappedArray", + ("hipBindTextureToMipmappedArray", CONV_TEX, API_RUNTIME), + ), + ( + "cudaGetTextureAlignmentOffset", + ("hipGetTextureAlignmentOffset", CONV_TEX, API_RUNTIME), + ), + ("cudaGetTextureReference", ("hipGetTextureReference", CONV_TEX, API_RUNTIME)), + ( + "cudaChannelFormatKindSigned", + ("hipChannelFormatKindSigned", CONV_TEX, API_RUNTIME), + ), + ( + "cudaChannelFormatKindUnsigned", + ("hipChannelFormatKindUnsigned", CONV_TEX, API_RUNTIME), + ), + ( + "cudaChannelFormatKindFloat", + ("hipChannelFormatKindFloat", CONV_TEX, API_RUNTIME), + ), + ( + "cudaChannelFormatKindNone", + ("hipChannelFormatKindNone", CONV_TEX, API_RUNTIME), + ), + ("cudaCreateChannelDesc", ("hipCreateChannelDesc", CONV_TEX, API_RUNTIME)), + ("cudaGetChannelDesc", ("hipGetChannelDesc", CONV_TEX, API_RUNTIME)), + ("cudaResourceTypeArray", ("hipResourceTypeArray", CONV_TEX, API_RUNTIME)), + ( + "cudaResourceTypeMipmappedArray", + ("hipResourceTypeMipmappedArray", CONV_TEX, API_RUNTIME), + ), + ("cudaResourceTypeLinear", 
("hipResourceTypeLinear", CONV_TEX, API_RUNTIME)), + ("cudaResourceTypePitch2D", ("hipResourceTypePitch2D", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatNone", ("hipResViewFormatNone", CONV_TEX, API_RUNTIME)), + ( + "cudaResViewFormatUnsignedChar1", + ("hipResViewFormatUnsignedChar1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedChar2", + ("hipResViewFormatUnsignedChar2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedChar4", + ("hipResViewFormatUnsignedChar4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedChar1", + ("hipResViewFormatSignedChar1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedChar2", + ("hipResViewFormatSignedChar2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedChar4", + ("hipResViewFormatSignedChar4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedShort1", + ("hipResViewFormatUnsignedShort1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedShort2", + ("hipResViewFormatUnsignedShort2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedShort4", + ("hipResViewFormatUnsignedShort4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedShort1", + ("hipResViewFormatSignedShort1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedShort2", + ("hipResViewFormatSignedShort2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedShort4", + ("hipResViewFormatSignedShort4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedInt1", + ("hipResViewFormatUnsignedInt1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedInt2", + ("hipResViewFormatUnsignedInt2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedInt4", + ("hipResViewFormatUnsignedInt4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedInt1", + ("hipResViewFormatSignedInt1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedInt2", + ("hipResViewFormatSignedInt2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedInt4", + ("hipResViewFormatSignedInt4", CONV_TEX, API_RUNTIME), + ), + ("cudaResViewFormatHalf1", ("hipResViewFormatHalf1", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatHalf2", ("hipResViewFormatHalf2", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatHalf4", ("hipResViewFormatHalf4", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatFloat1", ("hipResViewFormatFloat1", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatFloat2", ("hipResViewFormatFloat2", CONV_TEX, API_RUNTIME)), + ("cudaResViewFormatFloat4", ("hipResViewFormatFloat4", CONV_TEX, API_RUNTIME)), + ( + "cudaResViewFormatUnsignedBlockCompressed1", + ("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed2", + ("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed3", + ("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed4", + ("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedBlockCompressed4", + ("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed5", + ("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatSignedBlockCompressed5", + ("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed6H", + ("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, 
API_RUNTIME), + ), + ( + "cudaResViewFormatSignedBlockCompressed6H", + ("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_RUNTIME), + ), + ( + "cudaResViewFormatUnsignedBlockCompressed7", + ("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_RUNTIME), + ), + ("cudaAddressModeWrap", ("hipAddressModeWrap", CONV_TEX, API_RUNTIME)), + ("cudaAddressModeClamp", ("hipAddressModeClamp", CONV_TEX, API_RUNTIME)), + ("cudaAddressModeMirror", ("hipAddressModeMirror", CONV_TEX, API_RUNTIME)), + ("cudaAddressModeBorder", ("hipAddressModeBorder", CONV_TEX, API_RUNTIME)), + ("cudaCreateTextureObject", ("hipCreateTextureObject", CONV_TEX, API_RUNTIME)), + ( + "cudaDestroyTextureObject", + ("hipDestroyTextureObject", CONV_TEX, API_RUNTIME), + ), + ( + "cudaGetTextureObjectResourceDesc", + ("hipGetTextureObjectResourceDesc", CONV_TEX, API_RUNTIME), + ), + ( + "cudaGetTextureObjectResourceViewDesc", + ("hipGetTextureObjectResourceViewDesc", CONV_TEX, API_RUNTIME), + ), + ( + "cudaGetTextureObjectTextureDesc", + ("hipGetTextureObjectTextureDesc", CONV_TEX, API_RUNTIME), + ), + ( + "cudaBindSurfaceToArray", + ("hipBindSurfaceToArray", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetSurfaceReference", + ("hipGetSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaBoundaryModeZero", + ("hipBoundaryModeZero", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaBoundaryModeClamp", + ("hipBoundaryModeClamp", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaBoundaryModeTrap", + ("hipBoundaryModeTrap", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaFormatModeForced", + ("hipFormatModeForced", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaFormatModeAuto", + ("hipFormatModeAuto", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaCreateSurfaceObject", + ("hipCreateSurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaDestroySurfaceObject", + ("hipDestroySurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGetSurfaceObjectResourceDesc", + ( + "hipGetSurfaceObjectResourceDesc", + CONV_SURFACE, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cudaIpcCloseMemHandle", ("hipIpcCloseMemHandle", CONV_DEVICE, API_RUNTIME)), + ("cudaIpcGetEventHandle", ("hipIpcGetEventHandle", CONV_DEVICE, API_RUNTIME)), + ("cudaIpcGetMemHandle", ("hipIpcGetMemHandle", CONV_DEVICE, API_RUNTIME)), + ("cudaIpcOpenEventHandle", ("hipIpcOpenEventHandle", CONV_DEVICE, API_RUNTIME)), + ("cudaIpcOpenMemHandle", ("hipIpcOpenMemHandle", CONV_DEVICE, API_RUNTIME)), + ( + "cudaGLGetDevices", + ("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsGLRegisterBuffer", + ("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsGLRegisterImage", + ("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaWGLGetDevice", + ("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsMapResources", + ("hipGraphicsMapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsResourceGetMappedMipmappedArray", + ( + "hipGraphicsResourceGetMappedMipmappedArray", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsResourceGetMappedPointer", + ( + "hipGraphicsResourceGetMappedPointer", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsResourceSetMapFlags", + ( + "hipGraphicsResourceSetMapFlags", + CONV_GRAPHICS, 
+ API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsSubResourceGetMappedArray", + ( + "hipGraphicsSubResourceGetMappedArray", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsUnmapResources", + ("hipGraphicsUnmapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsUnregisterResource", + ( + "hipGraphicsUnregisterResource", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFacePositiveX", + ( + "hipGraphicsCubeFacePositiveX", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFaceNegativeX", + ( + "hipGraphicsCubeFaceNegativeX", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFacePositiveY", + ( + "hipGraphicsCubeFacePositiveY", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFaceNegativeY", + ( + "hipGraphicsCubeFaceNegativeY", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFacePositiveZ", + ( + "hipGraphicsCubeFacePositiveZ", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsCubeFaceNegativeZ", + ( + "hipGraphicsCubeFaceNegativeZ", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsMapFlagsNone", + ("hipGraphicsMapFlagsNone", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsMapFlagsReadOnly", + ( + "hipGraphicsMapFlagsReadOnly", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsMapFlagsWriteDiscard", + ( + "hipGraphicsMapFlagsWriteDiscard", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsRegisterFlagsNone", + ( + "hipGraphicsRegisterFlagsNone", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsRegisterFlagsReadOnly", + ( + "hipGraphicsRegisterFlagsReadOnly", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsRegisterFlagsWriteDiscard", + ( + "hipGraphicsRegisterFlagsWriteDiscard", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsRegisterFlagsSurfaceLoadStore", + ( + "hipGraphicsRegisterFlagsSurfaceLoadStore", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsRegisterFlagsTextureGather", + ( + "hipGraphicsRegisterFlagsTextureGather", + CONV_GRAPHICS, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGLDeviceListAll", + ("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLDeviceListCurrentFrame", + ("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLDeviceListNextFrame", + ("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLGetDevices", + ("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsGLRegisterBuffer", + ("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsGLRegisterImage", + ("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaWGLGetDevice", + ("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLMapFlagsNone", + ("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLMapFlagsReadOnly", + ( + "HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY", + CONV_GL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGLMapFlagsWriteDiscard", + ( + "HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", + CONV_GL, + API_RUNTIME, + 
HIP_UNSUPPORTED, + ), + ), + ( + "cudaGLMapBufferObject", + ("hipGLMapBufferObject__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLMapBufferObjectAsync", + ("hipGLMapBufferObjectAsync__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLRegisterBufferObject", + ("hipGLRegisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLSetBufferObjectMapFlags", + ("hipGLSetBufferObjectMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLSetGLDevice", + ("hipGLSetGLDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLUnmapBufferObject", + ("hipGLUnmapBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLUnmapBufferObjectAsync", + ("hipGLUnmapBufferObjectAsync", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGLUnregisterBufferObject", + ("hipGLUnregisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9DeviceListAll", + ("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9DeviceListCurrentFrame", + ( + "HIP_D3D9_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9DeviceListNextFrame", + ( + "HIP_D3D9_DEVICE_LIST_NEXT_FRAME", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9GetDevice", + ("hipD3D9GetDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9GetDevices", + ("hipD3D9GetDevices", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9GetDirect3DDevice", + ("hipD3D9GetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9SetDirect3DDevice", + ("hipD3D9SetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsD3D9RegisterResource", + ( + "hipGraphicsD3D9RegisterResource", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9MapFlags", + ("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9MapFlagsNone", + ( + "HIP_D3D9_MAPRESOURCE_FLAGS_NONE", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9MapFlagsReadOnly", + ( + "HIP_D3D9_MAPRESOURCE_FLAGS_READONLY", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9MapFlagsWriteDiscard", + ( + "HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9RegisterFlagsNone", + ("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9RegisterFlagsArray", + ("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9MapResources", + ("hipD3D9MapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9RegisterResource", + ("hipD3D9RegisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9ResourceGetMappedArray", + ("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9ResourceGetMappedPitch", + ("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9ResourceGetMappedPointer", + ( + "hipD3D9ResourceGetMappedPointer", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9ResourceGetMappedSize", + ("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9ResourceGetSurfaceDimensions", + ( + "hipD3D9ResourceGetSurfaceDimensions", + CONV_D3D9, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D9ResourceSetMapFlags", + ("hipD3D9ResourceSetMapFlags", CONV_D3D9, 
API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9UnmapResources", + ("hipD3D9UnmapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D9UnregisterResource", + ("hipD3D9UnregisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10DeviceListAll", + ("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10DeviceListCurrentFrame", + ( + "HIP_D3D10_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10DeviceListNextFrame", + ( + "HIP_D3D10_DEVICE_LIST_NEXT_FRAME", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10GetDevice", + ("hipD3D10GetDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10GetDevices", + ("hipD3D10GetDevices", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsD3D10RegisterResource", + ( + "hipGraphicsD3D10RegisterResource", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10MapFlagsNone", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_NONE", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10MapFlagsReadOnly", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_READONLY", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10MapFlagsWriteDiscard", + ( + "HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10RegisterFlagsNone", + ("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10RegisterFlagsArray", + ( + "HIP_D3D10_REGISTER_FLAGS_ARRAY", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10GetDirect3DDevice", + ("hipD3D10GetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10MapResources", + ("hipD3D10MapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10RegisterResource", + ("hipD3D10RegisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10ResourceGetMappedArray", + ( + "hipD3D10ResourceGetMappedArray", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10ResourceGetMappedPitch", + ( + "hipD3D10ResourceGetMappedPitch", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10ResourceGetMappedPointer", + ( + "hipD3D10ResourceGetMappedPointer", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10ResourceGetMappedSize", + ("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10ResourceGetSurfaceDimensions", + ( + "hipD3D10ResourceGetSurfaceDimensions", + CONV_D3D10, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D10ResourceSetMapFlags", + ("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10SetDirect3DDevice", + ("hipD3D10SetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10UnmapResources", + ("hipD3D10UnmapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D10UnregisterResource", + ("hipD3D10UnregisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D11DeviceListAll", + ("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D11DeviceListCurrentFrame", + ( + "HIP_D3D11_DEVICE_LIST_CURRENT_FRAME", + CONV_D3D11, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D11DeviceListNextFrame", + ( + "HIP_D3D11_DEVICE_LIST_NEXT_FRAME", + CONV_D3D11, + API_RUNTIME, + HIP_UNSUPPORTED, + ), 
+ ), + ( + "cudaD3D11GetDevice", + ("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D11GetDevices", + ("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsD3D11RegisterResource", + ( + "hipGraphicsD3D11RegisterResource", + CONV_D3D11, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaD3D11GetDevice", + ("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaD3D11GetDevices", + ("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsD3D11RegisterResource", + ( + "hipGraphicsD3D11RegisterResource", + CONV_D3D11, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsVDPAURegisterOutputSurface", + ( + "hipGraphicsVDPAURegisterOutputSurface", + CONV_VDPAU, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaGraphicsVDPAURegisterVideoSurface", + ( + "hipGraphicsVDPAURegisterVideoSurface", + CONV_VDPAU, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaVDPAUGetDevice", + ("hipVDPAUGetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaVDPAUSetVDPAUDevice", + ("hipVDPAUSetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaEGLStreamConsumerAcquireFrame", + ( + "hipEGLStreamConsumerAcquireFrame", + CONV_EGL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaEGLStreamConsumerConnect", + ("hipEGLStreamConsumerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaEGLStreamConsumerConnectWithFlags", + ( + "hipEGLStreamConsumerConnectWithFlags", + CONV_EGL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaEGLStreamConsumerReleaseFrame", + ( + "hipEGLStreamConsumerReleaseFrame", + CONV_EGL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaEGLStreamProducerConnect", + ("hipEGLStreamProducerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaEGLStreamProducerDisconnect", + ("hipEGLStreamProducerDisconnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaEGLStreamProducerPresentFrame", + ( + "hipEGLStreamProducerPresentFrame", + CONV_EGL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ( + "cudaEGLStreamProducerReturnFrame", + ("hipEGLStreamProducerReturnFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsEGLRegisterImage", + ("hipGraphicsEGLRegisterImage", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), + ), + ( + "cudaGraphicsResourceGetMappedEglFrame", + ( + "hipGraphicsResourceGetMappedEglFrame", + CONV_EGL, + API_RUNTIME, + HIP_UNSUPPORTED, + ), + ), + ("cublasInit", ("hipblasInit", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasShutdown", + ("hipblasShutdown", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGetVersion", + ("hipblasGetVersion", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGetError", + ("hipblasGetError", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasAlloc", ("hipblasAlloc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasFree", ("hipblasFree", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasSetKernelStream", + ("hipblasSetKernelStream", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGetAtomicsMode", + ("hipblasGetAtomicsMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSetAtomicsMode", + ("hipblasSetAtomicsMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGetMathMode", + ("hipblasGetMathMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSetMathMode", + ("hipblasSetMathMode", CONV_MATH_FUNC, 
API_BLAS, HIP_UNSUPPORTED), + ), + ("CUBLAS_OP_N", ("HIPBLAS_OP_N", CONV_NUMERIC_LITERAL, API_BLAS)), + ( + "CUBLAS_OP_T", + ("HIPBLAS_OP_T", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_OP_C", + ("HIPBLAS_OP_C", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_SUCCESS", + ("HIPBLAS_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_NOT_INITIALIZED", + ("HIPBLAS_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_ALLOC_FAILED", + ("HIPBLAS_STATUS_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_INVALID_VALUE", + ("HIPBLAS_STATUS_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_MAPPING_ERROR", + ("HIPBLAS_STATUS_MAPPING_ERROR", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_EXECUTION_FAILED", + ("HIPBLAS_STATUS_EXECUTION_FAILED", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_INTERNAL_ERROR", + ("HIPBLAS_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_NOT_SUPPORTED", + ("HIPBLAS_STATUS_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_STATUS_ARCH_MISMATCH", + ("HIPBLAS_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_FILL_MODE_LOWER", + ("HIPBLAS_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_FILL_MODE_UPPER", + ("HIPBLAS_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_DIAG_NON_UNIT", + ("HIPBLAS_DIAG_NON_UNIT", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ("CUBLAS_DIAG_UNIT", ("HIPBLAS_DIAG_UNIT", CONV_NUMERIC_LITERAL, API_BLAS)), + ("CUBLAS_SIDE_LEFT", ("HIPBLAS_SIDE_LEFT", CONV_NUMERIC_LITERAL, API_BLAS)), + ("CUBLAS_SIDE_RIGHT", ("HIPBLAS_SIDE_RIGHT", CONV_NUMERIC_LITERAL, API_BLAS)), + ( + "CUBLAS_POINTER_MODE_HOST", + ("HIPBLAS_POINTER_MODE_HOST", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_POINTER_MODE_DEVICE", + ("HIPBLAS_POINTER_MODE_DEVICE", CONV_NUMERIC_LITERAL, API_BLAS), + ), + ( + "CUBLAS_ATOMICS_NOT_ALLOWED", + ( + "HIPBLAS_ATOMICS_NOT_ALLOWED", + CONV_NUMERIC_LITERAL, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "CUBLAS_ATOMICS_ALLOWED", + ( + "HIPBLAS_ATOMICS_ALLOWED", + CONV_NUMERIC_LITERAL, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "CUBLAS_DATA_FLOAT", + ( + "HIPBLAS_DATA_FLOAT", + CONV_NUMERIC_LITERAL, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "CUBLAS_DATA_DOUBLE", + ( + "HIPBLAS_DATA_DOUBLE", + CONV_NUMERIC_LITERAL, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "CUBLAS_DATA_HALF", + ("HIPBLAS_DATA_HALF", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "CUBLAS_DATA_INT8", + ("HIPBLAS_DATA_INT8", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED), + ), + ("CUBLAS_GEMM_DEFAULT", ("HIPBLAS_GEMM_DEFAULT", CONV_NUMERIC_LITERAL, API_BLAS)), + ("CUBLAS_GEMM_DEFAULT_TENSOR_OP", ("HIPBLAS_GEMM_DEFAULT", CONV_NUMERIC_LITERAL, API_BLAS)), + ("cublasCreate", ("hipblasCreate", CONV_MATH_FUNC, API_BLAS)), + ("cublasDestroy", ("hipblasDestroy", CONV_MATH_FUNC, API_BLAS)), + ("cublasSetVector", ("hipblasSetVector", CONV_MATH_FUNC, API_BLAS)), + ("cublasGetVector", ("hipblasGetVector", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSetVectorAsync", + ("hipblasSetVectorAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGetVectorAsync", + ("hipblasGetVectorAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSetMatrix", ("hipblasSetMatrix", CONV_MATH_FUNC, API_BLAS)), + ("cublasGetMatrix", ("hipblasGetMatrix", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasGetMatrixAsync", + 
("hipblasGetMatrixAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSetMatrixAsync", + ("hipblasSetMatrixAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasXerbla", ("hipblasXerbla", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSnrm2", ("hipblasSnrm2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDnrm2", ("hipblasDnrm2", CONV_MATH_FUNC, API_BLAS)), + ("cublasScnrm2", ("hipblasScnrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDznrm2", ("hipblasDznrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasNrm2Ex", + ("hipblasNrm2Ex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSdot", ("hipblasSdot", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSdotBatched", + ("hipblasSdotBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDdot", ("hipblasDdot", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasDdotBatched", + ("hipblasDdotBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasCdotu", ("hipblasCdotu", CONV_MATH_FUNC, API_BLAS)), + ("cublasCdotc", ("hipblasCdotc", CONV_MATH_FUNC, API_BLAS)), + ("cublasZdotu", ("hipblasZdotu", CONV_MATH_FUNC, API_BLAS)), + ("cublasZdotc", ("hipblasZdotc", CONV_MATH_FUNC, API_BLAS)), + ("cublasSscal", ("hipblasSscal", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSscalBatched", + ("hipblasSscalBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDscal", ("hipblasDscal", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasDscalBatched", + ("hipblasDscalBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasCscal", ("hipblasCscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsscal", ("hipblasCsscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZscal", ("hipblasZscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZdscal", ("hipblasZdscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSaxpy", ("hipblasSaxpy", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSaxpyBatched", + ("hipblasSaxpyBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDaxpy", ("hipblasDaxpy", CONV_MATH_FUNC, API_BLAS)), + ("cublasCaxpy", ("hipblasCaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZaxpy", ("hipblasZaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasScopy", ("hipblasScopy", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasScopyBatched", + ("hipblasScopyBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDcopy", ("hipblasDcopy", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasDcopyBatched", + ("hipblasDcopyBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasCcopy", ("hipblasCcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZcopy", ("hipblasZcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSswap", ("hipblasSswap", CONV_MATH_FUNC, API_BLAS)), + ("cublasDswap", ("hipblasDswap", CONV_MATH_FUNC, API_BLAS)), + ("cublasCswap", ("hipblasCswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZswap", ("hipblasZswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasIsamax", ("hipblasIsamax", CONV_MATH_FUNC, API_BLAS)), + ("cublasIdamax", ("hipblasIdamax", CONV_MATH_FUNC, API_BLAS)), + ("cublasIcamax", ("hipblasIcamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasIzamax", ("hipblasIzamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasIsamin", ("hipblasIsamin", CONV_MATH_FUNC, API_BLAS)), + ("cublasIdamin", ("hipblasIdamin", CONV_MATH_FUNC, API_BLAS)), + ("cublasIcamin", ("hipblasIcamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + 
("cublasIzamin", ("hipblasIzamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSasum", ("hipblasSasum", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSasumBatched", + ("hipblasSasumBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDasum", ("hipblasDasum", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasDasumBatched", + ("hipblasDasumBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasScasum", ("hipblasScasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDzasum", ("hipblasDzasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSrot", ("hipblasSrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDrot", ("hipblasDrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCrot", ("hipblasCrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsrot", ("hipblasCsrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZrot", ("hipblasZrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZdrot", ("hipblasZdrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSrotg", ("hipblasSrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDrotg", ("hipblasDrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCrotg", ("hipblasCrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZrotg", ("hipblasZrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSrotm", ("hipblasSrotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDrotm", ("hipblasDrotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSrotmg", ("hipblasSrotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDrotmg", ("hipblasDrotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSgemv", ("hipblasSgemv", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasSgemvBatched", + ("hipblasSgemvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDgemv", ("hipblasDgemv", CONV_MATH_FUNC, API_BLAS)), + ("cublasCgemv", ("hipblasCgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZgemv", ("hipblasZgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSgbmv", ("hipblasSgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDgbmv", ("hipblasDgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCgbmv", ("hipblasCgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZgbmv", ("hipblasZgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStrmv", ("hipblasStrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtrmv", ("hipblasDtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtrmv", ("hipblasCtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtrmv", ("hipblasZtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStbmv", ("hipblasStbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtbmv", ("hipblasDtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtbmv", ("hipblasCtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtbmv", ("hipblasZtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStpmv", ("hipblasStpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtpmv", ("hipblasDtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtpmv", ("hipblasCtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtpmv", ("hipblasZtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStrsv", ("hipblasStrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtrsv", ("hipblasDtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtrsv", 
("hipblasCtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtrsv", ("hipblasZtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStpsv", ("hipblasStpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtpsv", ("hipblasDtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtpsv", ("hipblasCtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtpsv", ("hipblasZtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStbsv", ("hipblasStbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtbsv", ("hipblasDtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtbsv", ("hipblasCtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtbsv", ("hipblasZtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsymv", ("hipblasSsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsymv", ("hipblasDsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsymv", ("hipblasCsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsymv", ("hipblasZsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChemv", ("hipblasChemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhemv", ("hipblasZhemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsbmv", ("hipblasSsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsbmv", ("hipblasDsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChbmv", ("hipblasChbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhbmv", ("hipblasZhbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSspmv", ("hipblasSspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDspmv", ("hipblasDspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChpmv", ("hipblasChpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhpmv", ("hipblasZhpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSger", ("hipblasSger", CONV_MATH_FUNC, API_BLAS)), + ("cublasDger", ("hipblasDger", CONV_MATH_FUNC, API_BLAS)), + ("cublasCgeru", ("hipblasCgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCgerc", ("hipblasCgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZgeru", ("hipblasZgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZgerc", ("hipblasZgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsyr", ("hipblasSsyr", CONV_MATH_FUNC, API_BLAS)), + ("cublasDsyr", ("hipblasDsyr", CONV_MATH_FUNC, API_BLAS)), + ("cublasCher", ("hipblasCher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZher", ("hipblasZher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSspr", ("hipblasSspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDspr", ("hipblasDspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChpr", ("hipblasChpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhpr", ("hipblasZhpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsyr2", ("hipblasSsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsyr2", ("hipblasDsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCher2", ("hipblasCher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZher2", ("hipblasZher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSspr2", ("hipblasSspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDspr2", ("hipblasDspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChpr2", ("hipblasChpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhpr2", ("hipblasZhpr2", CONV_MATH_FUNC, API_BLAS, 
HIP_UNSUPPORTED)), + ( + "cublasSgemmBatched", + ("hipblasSgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgemmBatched", + ("hipblasDgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasHgemmBatched", + ("hipblasHgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgemmStridedBatched", + ("hipblasSgemmStridedBatched", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasDgemmStridedBatched", + ("hipblasDgemmStridedBatched", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasHgemmStridedBatched", + ("hipblasHgemmStridedBatched", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasCgemmBatched", + ("hipblasCgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgemm3mBatched", + ("hipblasCgemm3mBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgemmBatched", + ("hipblasZgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgemmStridedBatched", + ( + "hipblasCgemmStridedBatched", + CONV_MATH_FUNC, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "cublasCgemm3mStridedBatched", + ( + "hipblasCgemm3mStridedBatched", + CONV_MATH_FUNC, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "cublasZgemmStridedBatched", + ( + "hipblasZgemmStridedBatched", + CONV_MATH_FUNC, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "cublasHgemmStridedBatched", + ( + "hipblasHgemmStridedBatched", + CONV_MATH_FUNC, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ("cublasSgemm", ("hipblasSgemm", CONV_MATH_FUNC, API_BLAS)), + ("cublasDgemm", ("hipblasDgemm", CONV_MATH_FUNC, API_BLAS)), + ("cublasCgemm", ("hipblasCgemm", CONV_MATH_FUNC, API_BLAS)), + ("cublasZgemm", ("hipblasZgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasHgemm", ("hipblasHgemm", CONV_MATH_FUNC, API_BLAS)), + ("cublasSsyrk", ("hipblasSsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsyrk", ("hipblasDsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsyrk", ("hipblasCsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsyrk", ("hipblasZsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCherk", ("hipblasCherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZherk", ("hipblasZherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsyr2k", ("hipblasSsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsyr2k", ("hipblasDsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsyr2k", ("hipblasCsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsyr2k", ("hipblasZsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsyrkx", ("hipblasSsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsyrkx", ("hipblasDsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsyrkx", ("hipblasCsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsyrkx", ("hipblasZsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCher2k", ("hipblasCher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZher2k", ("hipblasZher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCherkx", ("hipblasCherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZherkx", ("hipblasZherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSsymm", ("hipblasSsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsymm", ("hipblasDsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsymm", ("hipblasCsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsymm", ("hipblasZsymm", CONV_MATH_FUNC, 
API_BLAS, HIP_UNSUPPORTED)), + ("cublasChemm", ("hipblasChemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhemm", ("hipblasZhemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStrsm", ("hipblasStrsm", CONV_MATH_FUNC, API_BLAS)), + ("cublasDtrsm", ("hipblasDtrsm", CONV_MATH_FUNC, API_BLAS)), + ("cublasCtrsm", ("hipblasCtrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtrsm", ("hipblasZtrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasStrsmBatched", + ("hipblasStrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrsmBatched", + ("hipblasDtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrsmBatched", + ("hipblasCtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrsmBatched", + ("hipblasZtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasStrmm", ("hipblasStrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtrmm", ("hipblasDtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtrmm", ("hipblasCtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtrmm", ("hipblasZtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSgeam", ("hipblasSgeam", CONV_MATH_FUNC, API_BLAS)), + ("cublasDgeam", ("hipblasDgeam", CONV_MATH_FUNC, API_BLAS)), + ("cublasCgeam", ("hipblasCgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZgeam", ("hipblasZgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasSgetrfBatched", + ("hipblasSgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgetrfBatched", + ("hipblasDgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgetrfBatched", + ("hipblasCgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgetrfBatched", + ("hipblasZgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgetriBatched", + ("hipblasSgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgetriBatched", + ("hipblasDgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgetriBatched", + ("hipblasCgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgetriBatched", + ("hipblasZgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgetrsBatched", + ("hipblasSgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgetrsBatched", + ("hipblasDgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgetrsBatched", + ("hipblasCgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgetrsBatched", + ("hipblasZgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStrsmBatched", + ("hipblasStrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrsmBatched", + ("hipblasDtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrsmBatched", + ("hipblasCtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrsmBatched", + ("hipblasZtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSmatinvBatched", + ("hipblasSmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDmatinvBatched", + ("hipblasDmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCmatinvBatched", + ("hipblasCmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZmatinvBatched", + ("hipblasZmatinvBatched", 
CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgeqrfBatched", + ("hipblasSgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgeqrfBatched", + ("hipblasDgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgeqrfBatched", + ("hipblasCgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgeqrfBatched", + ("hipblasZgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgelsBatched", + ("hipblasSgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgelsBatched", + ("hipblasDgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgelsBatched", + ("hipblasCgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgelsBatched", + ("hipblasZgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSdgmm", ("hipblasSdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDdgmm", ("hipblasDdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCdgmm", ("hipblasCdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZdgmm", ("hipblasZdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStpttr", ("hipblasStpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtpttr", ("hipblasDtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtpttr", ("hipblasCtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtpttr", ("hipblasZtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasStrttp", ("hipblasStrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDtrttp", ("hipblasDtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCtrttp", ("hipblasCtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZtrttp", ("hipblasZtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCreate_v2", ("hipblasCreate_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDestroy_v2", ("hipblasDestroy_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasGetVersion_v2", + ("hipblasGetVersion_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSetStream", ("hipblasSetStream", CONV_MATH_FUNC, API_BLAS)), + ("cublasGetStream", ("hipblasGetStream", CONV_MATH_FUNC, API_BLAS)), + ("cublasSetStream_v2", ("hipblasSetStream_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasGetStream_v2", ("hipblasGetStream_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasGetPointerMode", + ("hipblasGetPointerMode", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasSetPointerMode", + ("hipblasSetPointerMode", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasGetPointerMode_v2", + ("hipblasGetPointerMode_v2", CONV_MATH_FUNC, API_BLAS), + ), + ( + "cublasSetPointerMode_v2", + ("hipblasSetPointerMode_v2", CONV_MATH_FUNC, API_BLAS), + ), + ("cublasSgemv_v2", ("hipblasSgemv_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDgemv_v2", ("hipblasDgemv_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCgemv_v2", + ("hipblasCgemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgemv_v2", + ("hipblasZgemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgbmv_v2", + ("hipblasSgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDgbmv_v2", + ("hipblasDgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgbmv_v2", + ("hipblasCgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgbmv_v2", + ("hipblasZgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStrmv_v2", + ("hipblasStrmv_v2", CONV_MATH_FUNC, API_BLAS, 
HIP_UNSUPPORTED), + ), + ( + "cublasDtrmv_v2", + ("hipblasDtrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrmv_v2", + ("hipblasCtrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrmv_v2", + ("hipblasZtrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStbmv_v2", + ("hipblasStbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtbmv_v2", + ("hipblasDtbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtbmv_v2", + ("hipblasCtbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtbmv_v2", + ("hipblasZtbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStpmv_v2", + ("hipblasStpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtpmv_v2", + ("hipblasDtpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtpmv_v2", + ("hipblasCtpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtpmv_v2", + ("hipblasZtpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStrsv_v2", + ("hipblasStrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrsv_v2", + ("hipblasDtrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrsv_v2", + ("hipblasCtrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrsv_v2", + ("hipblasZtrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStpsv_v2", + ("hipblasStpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtpsv_v2", + ("hipblasDtpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtpsv_v2", + ("hipblasCtpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtpsv_v2", + ("hipblasZtpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStbsv_v2", + ("hipblasStbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtbsv_v2", + ("hipblasDtbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtbsv_v2", + ("hipblasCtbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtbsv_v2", + ("hipblasZtbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSsymv_v2", + ("hipblasSsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsymv_v2", + ("hipblasDsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsymv_v2", + ("hipblasCsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZsymv_v2", + ("hipblasZsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasChemv_v2", + ("hipblasChemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZhemv_v2", + ("hipblasZhemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSsbmv_v2", + ("hipblasSsbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsbmv_v2", + ("hipblasDsbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasChbmv_v2", + ("hipblasChbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZhbmv_v2", + ("hipblasZhbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSspmv_v2", + ("hipblasSspmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDspmv_v2", + ("hipblasDspmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasChpmv_v2", + ("hipblasChpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZhpmv_v2", + ("hipblasZhpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + 
("cublasSger_v2", ("hipblasSger_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDger_v2", ("hipblasDger_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCgeru_v2", + ("hipblasCgeru_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgerc_v2", + ("hipblasCgerc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgeru_v2", + ("hipblasZgeru_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgerc_v2", + ("hipblasZgerc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSsyr_v2", ("hipblasSsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDsyr_v2", ("hipblasDsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCsyr_v2", ("hipblasCsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZsyr_v2", ("hipblasZsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCher_v2", ("hipblasCher_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZher_v2", ("hipblasZher_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSspr_v2", ("hipblasSspr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDspr_v2", ("hipblasDspr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasChpr_v2", ("hipblasChpr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasZhpr_v2", ("hipblasZhpr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasSsyr2_v2", + ("hipblasSsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsyr2_v2", + ("hipblasDsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsyr2_v2", + ("hipblasCsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZsyr2_v2", + ("hipblasZsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCher2_v2", + ("hipblasCher2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZher2_v2", + ("hipblasZher2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSspr2_v2", + ("hipblasSspr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDspr2_v2", + ("hipblasDspr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasChpr2_v2", + ("hipblasChpr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZhpr2_v2", + ("hipblasZhpr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSgemm_v2", ("hipblasSgemm_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDgemm_v2", ("hipblasDgemm_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCgemm_v2", + ("hipblasCgemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgemm3m", + ("hipblasCgemm3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgemm3mEx", + ("hipblasCgemm3mEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgemm_v2", + ("hipblasZgemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZgemm3m", + ("hipblasZgemm3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSgemmEx", + ("hipblasSgemmEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasGemmEx", ("hipblasGemmEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasGemmBatchedEx", + ("hipblasGemmBatchedEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasGemmStridedBatchedEx", + ("hipblasGemmStridedBatchedEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCgemmEx", + ("hipblasCgemmEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasUint8gemmBias", + ("hipblasUint8gemmBias", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSsyrk_v2", + 
("hipblasSsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsyrk_v2", + ("hipblasDsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsyrk_v2", + ("hipblasCsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZsyrk_v2", + ("hipblasZsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsyrkEx", + ("hipblasCsyrkEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsyrk3mEx", + ("hipblasCsyrk3mEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCherk_v2", + ("hipblasCherk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCherkEx", + ("hipblasCherkEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCherk3mEx", + ("hipblasCherk3mEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZherk_v2", + ("hipblasZherk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSsyr2k_v2", + ("hipblasSsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsyr2k_v2", + ("hipblasDsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsyr2k_v2", + ("hipblasCsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZsyr2k_v2", + ("hipblasZsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCher2k_v2", + ("hipblasCher2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZher2k_v2", + ("hipblasZher2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSsymm_v2", + ("hipblasSsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDsymm_v2", + ("hipblasDsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsymm_v2", + ("hipblasCsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZsymm_v2", + ("hipblasZsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasChemm_v2", + ("hipblasChemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZhemm_v2", + ("hipblasZhemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStrsm_v2", + ("hipblasStrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrsm_v2", + ("hipblasDtrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrsm_v2", + ("hipblasCtrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrsm_v2", + ("hipblasZtrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasStrmm_v2", + ("hipblasStrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDtrmm_v2", + ("hipblasDtrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCtrmm_v2", + ("hipblasCtrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZtrmm_v2", + ("hipblasZtrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSnrm2_v2", ("hipblasSnrm2_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDnrm2_v2", ("hipblasDnrm2_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasScnrm2_v2", + ("hipblasScnrm2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDznrm2_v2", + ("hipblasDznrm2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasDotEx", ("hipblasDotEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDotcEx", ("hipblasDotcEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSdot_v2", ("hipblasSdot_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDdot_v2", ("hipblasDdot_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCdotu_v2", + ("hipblasCdotu_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), 
+ ), + ( + "cublasCdotc_v2", + ("hipblasCdotc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZdotu_v2", + ("hipblasZdotu_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZdotc_v2", + ("hipblasZdotc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasScalEx", ("hipblasScalEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSscal_v2", ("hipblasSscal_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDscal_v2", ("hipblasDscal_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCscal_v2", + ("hipblasCscal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCsscal_v2", + ("hipblasCsscal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZscal_v2", + ("hipblasZscal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZdscal_v2", + ("hipblasZdscal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasAxpyEx", ("hipblasAxpyEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasSaxpy_v2", ("hipblasSaxpy_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDaxpy_v2", ("hipblasDaxpy_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCaxpy_v2", + ("hipblasCaxpy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZaxpy_v2", + ("hipblasZaxpy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasScopy_v2", ("hipblasScopy_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDcopy_v2", ("hipblasDcopy_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCcopy_v2", + ("hipblasCcopy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZcopy_v2", + ("hipblasZcopy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSswap_v2", ("hipblasSswap_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDswap_v2", ("hipblasDswap_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasCswap_v2", + ("hipblasCswap_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZswap_v2", + ("hipblasZswap_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasIsamax_v2", ("hipblasIsamax_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasIdamax_v2", ("hipblasIdamax_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasIcamax_v2", + ("hipblasIcamax_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasIzamax_v2", + ("hipblasIzamax_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasIsamin_v2", ("hipblasIsamin_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasIdamin_v2", ("hipblasIdamin_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasIcamin_v2", + ("hipblasIcamin_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasIzamin_v2", + ("hipblasIzamin_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSasum_v2", ("hipblasSasum_v2", CONV_MATH_FUNC, API_BLAS)), + ("cublasDasum_v2", ("hipblasDasum_v2", CONV_MATH_FUNC, API_BLAS)), + ( + "cublasScasum_v2", + ("hipblasScasum_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDzasum_v2", + ("hipblasDzasum_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasSrot_v2", ("hipblasSrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasDrot_v2", ("hipblasDrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ("cublasCrot_v2", ("hipblasCrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasCsrot_v2", + ("hipblasCsrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ("cublasZrot_v2", ("hipblasZrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), + ( + "cublasZdrot_v2", + ("hipblasZdrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSrotg_v2", + ("hipblasSrotg_v2", CONV_MATH_FUNC, 
API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDrotg_v2", + ("hipblasDrotg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasCrotg_v2", + ("hipblasCrotg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasZrotg_v2", + ("hipblasZrotg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSrotm_v2", + ("hipblasSrotm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDrotm_v2", + ("hipblasDrotm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasSrotmg_v2", + ("hipblasSrotmg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasDrotmg_v2", + ("hipblasDrotmg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), + ), + ( + "cublasComputeType_t", + ("hipblasComputeType_t" if rocm_version >= (6, 0, 0) else "hipblasLtComputeType_t", + CONV_MATH_FUNC, API_BLAS) + ), + ( + "CUBLAS_COMPUTE_32I", + ("HIPBLAS_COMPUTE_32I" if rocm_version >= (6, 0, 0) else "HIPBLASLT_COMPUTE_I32", CONV_MATH_FUNC, API_BLAS) + ), + ( + "CUBLAS_COMPUTE_32F", + ("HIPBLAS_COMPUTE_32F" if rocm_version >= (6, 0, 0) else "HIPBLASLT_COMPUTE_F32", CONV_MATH_FUNC, API_BLAS) + ), + ( + "CUBLAS_COMPUTE_64F", + ("HIPBLAS_COMPUTE_64F" if rocm_version >= (6, 0, 0) else "HIPBLASLT_COMPUTE_F64", CONV_MATH_FUNC, API_BLAS) + ), + ("cublasLtEpilogue_t", ("hipblasLtEpilogue_t", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_EPILOGUE_DEFAULT", ("HIPBLASLT_EPILOGUE_DEFAULT", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_EPILOGUE_RELU", ("HIPBLASLT_EPILOGUE_RELU", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_EPILOGUE_BIAS", ("HIPBLASLT_EPILOGUE_BIAS", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_EPILOGUE_RELU_BIAS", ("HIPBLASLT_EPILOGUE_RELU_BIAS", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_EPILOGUE_GELU", ("HIPBLASLT_EPILOGUE_GELU", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_EPILOGUE_GELU_BIAS", ("HIPBLASLT_EPILOGUE_GELU_BIAS", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtHandle_t", ("hipblasLtHandle_t", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulDesc_t", ("hipblasLtMatmulDesc_t", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulDescOpaque_t", ("hipblasLtMatmulDescOpaque_t", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulDescAttributes_t", ("hipblasLtMatmulDescAttributes_t", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_DESC_TRANSA", ("HIPBLASLT_MATMUL_DESC_TRANSA", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_DESC_TRANSB", ("HIPBLASLT_MATMUL_DESC_TRANSB", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_DESC_EPILOGUE", ("HIPBLASLT_MATMUL_DESC_EPILOGUE", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_DESC_BIAS_POINTER", ("HIPBLASLT_MATMUL_DESC_BIAS_POINTER", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_DESC_A_SCALE_POINTER", ("HIPBLASLT_MATMUL_DESC_A_SCALE_POINTER", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_DESC_B_SCALE_POINTER", ("HIPBLASLT_MATMUL_DESC_B_SCALE_POINTER", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_DESC_D_SCALE_POINTER", ("HIPBLASLT_MATMUL_DESC_D_SCALE_POINTER", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_DESC_AMAX_D_POINTER", ("HIPBLASLT_MATMUL_DESC_AMAX_D_POINTER", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE", ("HIPBLASLT_MATMUL_DESC_BIAS_DATA_TYPE", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatrixLayout_t", ("hipblasLtMatrixLayout_t", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatrixLayoutOpaque_t", ("hipblasLtMatrixLayoutOpaque_t", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatrixLayoutAttribute_t", ("hipblasLtMatrixLayoutAttribute_t", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulPreference_t", ("hipblasLtMatmulPreference_t", 
CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulPreferenceOpaque_t", ("hipblasLtMatmulPreferenceOpaque_t", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulPreferenceAttributes_t", ("hipblasLtMatmulPreferenceAttributes_t", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_PREF_SEARCH_MODE", ("HIPBLASLT_MATMUL_PREF_SEARCH_MODE", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES", ("HIPBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulAlgo_t", ("hipblasLtMatmulAlgo_t", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulHeuristicResult_t", ("hipblasLtMatmulHeuristicResult_t", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatrixLayoutCreate", ("hipblasLtMatrixLayoutCreate", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatrixLayoutDestroy", ("hipblasLtMatrixLayoutDestroy", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtCreate", ("hipblasLtCreate", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtDestroy", ("hipblasLtDestroy", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulDescCreate", ("hipblasLtMatmulDescCreate", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulDescDestroy", ("hipblasLtMatmulDescDestroy", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulDescSetAttribute", ("hipblasLtMatmulDescSetAttribute", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulPreferenceCreate", ("hipblasLtMatmulPreferenceCreate", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulPreferenceDestroy", ("hipblasLtMatmulPreferenceDestroy", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulPreferenceSetAttribute", ("hipblasLtMatmulPreferenceSetAttribute", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmulAlgoGetHeuristic", ("hipblasLtMatmulAlgoGetHeuristic", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatmul", ("hipblasLtMatmul", CONV_MATH_FUNC, API_BLAS)), + ( + "CURAND_STATUS_SUCCESS", + ("HIPRAND_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_VERSION_MISMATCH", + ("HIPRAND_STATUS_VERSION_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_NOT_INITIALIZED", + ("HIPRAND_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_ALLOCATION_FAILED", + ("HIPRAND_STATUS_ALLOCATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_TYPE_ERROR", + ("HIPRAND_STATUS_TYPE_ERROR", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_OUT_OF_RANGE", + ("HIPRAND_STATUS_OUT_OF_RANGE", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_LENGTH_NOT_MULTIPLE", + ("HIPRAND_STATUS_LENGTH_NOT_MULTIPLE", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED", + ( + "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED", + CONV_NUMERIC_LITERAL, + API_RAND, + ), + ), + ( + "CURAND_STATUS_LAUNCH_FAILURE", + ("HIPRAND_STATUS_LAUNCH_FAILURE", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_PREEXISTING_FAILURE", + ("HIPRAND_STATUS_PREEXISTING_FAILURE", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_INITIALIZATION_FAILED", + ("HIPRAND_STATUS_INITIALIZATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_ARCH_MISMATCH", + ("HIPRAND_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_STATUS_INTERNAL_ERROR", + ("HIPRAND_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_RAND), + ), + ("CURAND_RNG_TEST", ("HIPRAND_RNG_TEST", CONV_NUMERIC_LITERAL, API_RAND)), + ( + "mtgp32dc_params_fast_11213", + ("mtgp32dc_params_fast_11213", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_DEFAULT", + ("HIPRAND_RNG_PSEUDO_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + 
"CURAND_RNG_PSEUDO_XORWOW", + ("HIPRAND_RNG_PSEUDO_XORWOW", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_MRG32K3A", + ("HIPRAND_RNG_PSEUDO_MRG32K3A", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_MTGP32", + ("HIPRAND_RNG_PSEUDO_MTGP32", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_MT19937", + ("HIPRAND_RNG_PSEUDO_MT19937", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_PSEUDO_PHILOX4_32_10", + ("HIPRAND_RNG_PSEUDO_PHILOX4_32_10", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_QUASI_DEFAULT", + ("HIPRAND_RNG_QUASI_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_QUASI_SOBOL32", + ("HIPRAND_RNG_QUASI_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_QUASI_SCRAMBLED_SOBOL32", + ("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_QUASI_SOBOL64", + ("HIPRAND_RNG_QUASI_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "CURAND_RNG_QUASI_SCRAMBLED_SOBOL64", + ("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND), + ), + ( + "curand_ORDERING_PSEUDO_BEST", + ( + "HIPRAND_ORDERING_PSEUDO_BEST", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_ORDERING_PSEUDO_DEFAULT", + ( + "HIPRAND_ORDERING_PSEUDO_DEFAULT", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_ORDERING_PSEUDO_SEEDED", + ( + "HIPRAND_ORDERING_PSEUDO_SEEDED", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_ORDERING_QUASI_DEFAULT", + ( + "HIPRAND_ORDERING_QUASI_DEFAULT", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_DIRECTION_VECTORS_32_JOEKUO6", + ( + "HIPRAND_DIRECTION_VECTORS_32_JOEKUO6", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6", + ( + "HIPRAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_DIRECTION_VECTORS_64_JOEKUO6", + ( + "HIPRAND_DIRECTION_VECTORS_64_JOEKUO6", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6", + ( + "HIPRAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6", + CONV_NUMERIC_LITERAL, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_CHOOSE_BEST", + ("HIPRAND_CHOOSE_BEST", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_ITR", + ("HIPRAND_ITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_KNUTH", + ("HIPRAND_KNUTH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_HITR", + ("HIPRAND_HITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ("curand_M1", ("HIPRAND_M1", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)), + ("curand_M2", ("HIPRAND_M2", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)), + ( + "curand_BINARY_SEARCH", + ("HIPRAND_BINARY_SEARCH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_DISCRETE_GAUSS", + ("HIPRAND_DISCRETE_GAUSS", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_REJECTION", + ("HIPRAND_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_DEVICE_API", + ("HIPRAND_DEVICE_API", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_FAST_REJECTION", + ("HIPRAND_FAST_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_3RD", + ("HIPRAND_3RD", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + 
"curand_DEFINITION", + ("HIPRAND_DEFINITION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_POISSON", + ("HIPRAND_POISSON", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), + ), + ("curandCreateGenerator", ("hiprandCreateGenerator", CONV_MATH_FUNC, API_RAND)), + ( + "curandCreateGeneratorHost", + ("hiprandCreateGeneratorHost", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandCreatePoissonDistribution", + ("hiprandCreatePoissonDistribution", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandDestroyDistribution", + ("hiprandDestroyDistribution", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandDestroyGenerator", + ("hiprandDestroyGenerator", CONV_MATH_FUNC, API_RAND), + ), + ("curandGenerate", ("hiprandGenerate", CONV_MATH_FUNC, API_RAND)), + ( + "curandGenerateLogNormal", + ("hiprandGenerateLogNormal", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandGenerateLogNormalDouble", + ("hiprandGenerateLogNormalDouble", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandGenerateLongLong", + ("hiprandGenerateLongLong", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ("curandGenerateNormal", ("hiprandGenerateNormal", CONV_MATH_FUNC, API_RAND)), + ( + "curandGenerateNormalDouble", + ("hiprandGenerateNormalDouble", CONV_MATH_FUNC, API_RAND), + ), + ("curandGeneratePoisson", ("hiprandGeneratePoisson", CONV_MATH_FUNC, API_RAND)), + ("curandGenerateSeeds", ("hiprandGenerateSeeds", CONV_MATH_FUNC, API_RAND)), + ("curandGenerateUniform", ("hiprandGenerateUniform", CONV_MATH_FUNC, API_RAND)), + ( + "curandGenerateUniformDouble", + ("hiprandGenerateUniformDouble", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandGetDirectionVectors32", + ("hiprandGetDirectionVectors32", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandGetDirectionVectors64", + ("hiprandGetDirectionVectors64", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandGetProperty", + ("hiprandGetProperty", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandGetScrambleConstants32", + ( + "hiprandGetScrambleConstants32", + CONV_MATH_FUNC, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curandGetScrambleConstants64", + ( + "hiprandGetScrambleConstants64", + CONV_MATH_FUNC, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ("curandGetVersion", ("hiprandGetVersion", CONV_MATH_FUNC, API_RAND)), + ( + "curandSetGeneratorOffset", + ("hiprandSetGeneratorOffset", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandSetGeneratorOrdering", + ("hiprandSetGeneratorOrdering", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curandSetPseudoRandomGeneratorSeed", + ("hiprandSetPseudoRandomGeneratorSeed", CONV_MATH_FUNC, API_RAND), + ), + ( + "curandSetQuasiRandomGeneratorDimensions", + ("hiprandSetQuasiRandomGeneratorDimensions", CONV_MATH_FUNC, API_RAND), + ), + ("curandSetStream", ("hiprandSetStream", CONV_MATH_FUNC, API_RAND)), + ("curand", ("hiprand", CONV_DEVICE_FUNC, API_RAND)), + ("curand4", ("hiprand4", CONV_DEVICE_FUNC, API_RAND)), + ("curand_init", ("hiprand_init", CONV_DEVICE_FUNC, API_RAND)), + ("curand_log_normal", ("hiprand_log_normal", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_log_normal_double", + ("hiprand_log_normal_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_log_normal2", ("hiprand_log_normal2", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_log_normal2_double", + ("hiprand_log_normal2_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_log_normal4", ("hiprand_log_normal4", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_log_normal4_double", + ("hiprand_log_normal4_double", CONV_DEVICE_FUNC, API_RAND), + ), + ( + 
"curand_mtgp32_single", + ("hiprand_mtgp32_single", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ( + "curand_mtgp32_single_specific", + ( + "hiprand_mtgp32_single_specific", + CONV_DEVICE_FUNC, + API_RAND, + HIP_UNSUPPORTED, + ), + ), + ( + "curand_mtgp32_specific", + ("hiprand_mtgp32_specific", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ("curand_normal", ("hiprand_normal", CONV_DEVICE_FUNC, API_RAND)), + ( + "curandMakeMTGP32Constants", + ("hiprandMakeMTGP32Constants", CONV_DEVICE_FUNC, API_RAND), + ), + ( + "curandMakeMTGP32KernelState", + ("hiprandMakeMTGP32KernelState", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_normal_double", ("hiprand_normal_double", CONV_DEVICE_FUNC, API_RAND)), + ("curand_normal2", ("hiprand_normal2", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_normal2_double", + ("hiprand_normal2_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_normal4", ("hiprand_normal4", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_normal4_double", + ("hiprand_normal4_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_uniform", ("hiprand_uniform", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_uniform_double", + ("hiprand_uniform_double", CONV_DEVICE_FUNC, API_RAND), + ), + ( + "curand_uniform2_double", + ("hiprand_uniform2_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_uniform4", ("hiprand_uniform4", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_uniform4_double", + ("hiprand_uniform4_double", CONV_DEVICE_FUNC, API_RAND), + ), + ("curand_discrete", ("hiprand_discrete", CONV_DEVICE_FUNC, API_RAND)), + ("curand_discrete4", ("hiprand_discrete4", CONV_DEVICE_FUNC, API_RAND)), + ("curand_poisson", ("hiprand_poisson", CONV_DEVICE_FUNC, API_RAND)), + ("curand_poisson4", ("hiprand_poisson4", CONV_DEVICE_FUNC, API_RAND)), + ( + "curand_Philox4x32_10", + ("hiprand_Philox4x32_10", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED), + ), + ("mtgp32_kernel_params", ("mtgp32_kernel_params_t", CONV_MATH_FUNC, API_RAND)), + ("CUFFT_FORWARD", ("HIPFFT_FORWARD", CONV_NUMERIC_LITERAL, API_BLAS)), + ("CUFFT_INVERSE", ("HIPFFT_BACKWARD", CONV_NUMERIC_LITERAL, API_BLAS)), + ( + "CUFFT_COMPATIBILITY_DEFAULT", + ( + "HIPFFT_COMPATIBILITY_DEFAULT", + CONV_NUMERIC_LITERAL, + API_BLAS, + HIP_UNSUPPORTED, + ), + ), + ( + "cuComplex", + ("hipComplex" if rocm_version >= (6, 0, 0) else "hipblasComplex", CONV_TYPE, API_BLAS) + ), + ( + "cuDoubleComplex", + ("hipDoubleComplex" if rocm_version >= (6, 0, 0) else "hipblasDoubleComplex", CONV_TYPE, API_BLAS), + ), + ("cufftResult_t", ("hipfftResult_t", CONV_TYPE, API_FFT)), + ("cufftResult", ("hipfftResult", CONV_TYPE, API_FFT)), + ("CUFFT_SUCCESS", ("HIPFFT_SUCCESS", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_INVALID_PLAN", ("HIPFFT_INVALID_PLAN", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_ALLOC_FAILED", ("HIPFFT_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_INVALID_TYPE", ("HIPFFT_INVALID_TYPE", CONV_NUMERIC_LITERAL, API_FFT)), + ( + "CUFFT_INVALID_VALUE", + ("HIPFFT_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_FFT), + ), + ( + "CUFFT_INTERNAL_ERROR", + ("HIPFFT_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_FFT), + ), + ("CUFFT_EXEC_FAILED", ("HIPFFT_EXEC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_SETUP_FAILED", ("HIPFFT_SETUP_FAILED", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_INVALID_SIZE", ("HIPFFT_INVALID_SIZE", CONV_NUMERIC_LITERAL, API_FFT)), + ( + "CUFFT_UNALIGNED_DATA", + ("HIPFFT_UNALIGNED_DATA", CONV_NUMERIC_LITERAL, API_FFT), + ), + ( + "CUFFT_INCOMPLETE_PARAMETER_LIST", + ("HIPFFT_INCOMPLETE_PARAMETER_LIST", 
CONV_NUMERIC_LITERAL, API_FFT), + ), + ( + "CUFFT_INVALID_DEVICE", + ("HIPFFT_INVALID_DEVICE", CONV_NUMERIC_LITERAL, API_FFT), + ), + ("CUFFT_PARSE_ERROR", ("HIPFFT_PARSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_NO_WORKSPACE", ("HIPFFT_NO_WORKSPACE", CONV_NUMERIC_LITERAL, API_FFT)), + ( + "CUFFT_NOT_IMPLEMENTED", + ("HIPFFT_NOT_IMPLEMENTED", CONV_NUMERIC_LITERAL, API_FFT), + ), + ( + "CUFFT_LICENSE_ERROR", + ("HIPFFT_LICENSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT, HIP_UNSUPPORTED), + ), + ( + "CUFFT_NOT_SUPPORTED", + ("HIPFFT_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_FFT), + ), + ("cufftType_t", ("hipfftType_t", CONV_TYPE, API_FFT)), + ("cufftType", ("hipfftType", CONV_TYPE, API_FFT)), + ("CUFFT_R2C", ("HIPFFT_R2C", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_C2R", ("HIPFFT_C2R", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_C2C", ("HIPFFT_C2C", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_D2Z", ("HIPFFT_D2Z", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_Z2D", ("HIPFFT_Z2D", CONV_NUMERIC_LITERAL, API_FFT)), + ("CUFFT_Z2Z", ("HIPFFT_Z2Z", CONV_NUMERIC_LITERAL, API_FFT)), + ( + "cufftCompatibility_t", + ("hipfftCompatibility_t", CONV_TYPE, API_FFT, HIP_UNSUPPORTED), + ), + ( + "cufftCompatibility", + ("hipfftCompatibility", CONV_TYPE, API_FFT, HIP_UNSUPPORTED), + ), + ( + "CUFFT_COMPATIBILITY_FFTW_PADDING", + ( + "HIPFFT_COMPATIBILITY_FFTW_PADDING", + CONV_NUMERIC_LITERAL, + API_FFT, + HIP_UNSUPPORTED, + ), + ), + ("cufftReal", ("hipfftReal", CONV_TYPE, API_FFT)), + ("cufftDoubleReal", ("hipfftDoubleReal", CONV_TYPE, API_FFT)), + ("cufftComplex", ("hipfftComplex", CONV_TYPE, API_FFT)), + ("cufftDoubleComplex", ("hipfftDoubleComplex", CONV_TYPE, API_FFT)), + ("cufftHandle", ("hipfftHandle", CONV_TYPE, API_FFT)), + ("cufftPlan1d", ("hipfftPlan1d", CONV_MATH_FUNC, API_FFT)), + ("cufftPlan2d", ("hipfftPlan2d", CONV_MATH_FUNC, API_FFT)), + ("cufftPlan3d", ("hipfftPlan3d", CONV_MATH_FUNC, API_FFT)), + ("cufftPlanMany", ("hipfftPlanMany", CONV_MATH_FUNC, API_FFT)), + ("cufftMakePlan1d", ("hipfftMakePlan1d", CONV_MATH_FUNC, API_FFT)), + ("cufftMakePlan2d", ("hipfftMakePlan2d", CONV_MATH_FUNC, API_FFT)), + ("cufftMakePlan3d", ("hipfftMakePlan3d", CONV_MATH_FUNC, API_FFT)), + ("cufftMakePlanMany", ("hipfftMakePlanMany", CONV_MATH_FUNC, API_FFT)), + ("cufftMakePlanMany64", ("hipfftMakePlanMany64", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSizeMany64", ("hipfftGetSizeMany64", CONV_MATH_FUNC, API_FFT)), + ("cufftEstimate1d", ("hipfftEstimate1d", CONV_MATH_FUNC, API_FFT)), + ("cufftEstimate2d", ("hipfftEstimate2d", CONV_MATH_FUNC, API_FFT)), + ("cufftEstimate3d", ("hipfftEstimate3d", CONV_MATH_FUNC, API_FFT)), + ("cufftEstimateMany", ("hipfftEstimateMany", CONV_MATH_FUNC, API_FFT)), + ("cufftCreate", ("hipfftCreate", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSize1d", ("hipfftGetSize1d", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSize2d", ("hipfftGetSize2d", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSize3d", ("hipfftGetSize3d", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSizeMany", ("hipfftGetSizeMany", CONV_MATH_FUNC, API_FFT)), + ("cufftGetSize", ("hipfftGetSize", CONV_MATH_FUNC, API_FFT)), + ("cufftSetWorkArea", ("hipfftSetWorkArea", CONV_MATH_FUNC, API_FFT)), + ( + "cufftSetAutoAllocation", + ("hipfftSetAutoAllocation", CONV_MATH_FUNC, API_FFT), + ), + ("cufftXtExec", ("hipfftXtExec", CONV_MATH_FUNC, API_FFT)), + ("cufftXtMakePlanMany", ("hipfftXtMakePlanMany", CONV_MATH_FUNC, API_FFT)), + ("cufftExecC2C", ("hipfftExecC2C", CONV_MATH_FUNC, API_FFT)), + ("cufftExecR2C", ("hipfftExecR2C", CONV_MATH_FUNC, API_FFT)), + 
("cufftExecC2R", ("hipfftExecC2R", CONV_MATH_FUNC, API_FFT)), + ("cufftExecZ2Z", ("hipfftExecZ2Z", CONV_MATH_FUNC, API_FFT)), + ("cufftExecD2Z", ("hipfftExecD2Z", CONV_MATH_FUNC, API_FFT)), + ("cufftExecZ2D", ("hipfftExecZ2D", CONV_MATH_FUNC, API_FFT)), + ("cufftSetStream", ("hipfftSetStream", CONV_MATH_FUNC, API_FFT)), + ("cufftDestroy", ("hipfftDestroy", CONV_MATH_FUNC, API_FFT)), + ("cufftGetVersion", ("hipfftGetVersion", CONV_MATH_FUNC, API_FFT)), + ( + "cufftGetProperty", + ("hipfftGetProperty", CONV_MATH_FUNC, API_FFT, HIP_UNSUPPORTED), + ), + ("nvrtcResult", ("hiprtcResult", CONV_TYPE, API_RTC)), + ("NVRTC_SUCCESS", ("HIPRTC_SUCCESS", CONV_TYPE, API_RTC)), + ( + "NVRTC_ERROR_OUT_OF_MEMORY", + ("HIPRTC_ERROR_OUT_OF_MEMORY", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_PROGRAM_CREATION_FAILURE", + ("HIPRTC_ERROR_PROGRAM_CREATION_FAILURE", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_INVALID_INPUT", + ("HIPRTC_ERROR_INVALID_INPUT", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_INVALID_PROGRAM", + ("HIPRTC_ERROR_INVALID_PROGRAM", CONV_TYPE, API_RTC), + ), + ("NVRTC_ERROR_COMPILATION", ("HIPRTC_ERROR_COMPILATION", CONV_TYPE, API_RTC)), + ( + "NVRTC_ERROR_BUILTIN_OPERATION_FAILURE", + ("HIPRTC_ERROR_BUILTIN_OPERATION_FAILURE", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION", + ("HIPRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID", + ("HIPRTC_ERROR_NAME_EXPRESSION_NOT_VALID", CONV_TYPE, API_RTC), + ), + ( + "NVRTC_ERROR_INTERNAL_ERROR", + ("HIPRTC_ERROR_INTERNAL_ERROR", CONV_TYPE, API_RTC), + ), + ("nvrtcGetErrorString", ("hiprtcGetErrorString", CONV_JIT, API_RTC)), + ("nvrtcVersion", ("hiprtcVersion", CONV_JIT, API_RTC)), + ("nvrtcProgram", ("hiprtcProgram", CONV_TYPE, API_RTC)), + ("nvrtcAddNameExpression", ("hiprtcAddNameExpression", CONV_JIT, API_RTC)), + ("nvrtcCompileProgram", ("hiprtcCompileProgram", CONV_JIT, API_RTC)), + ("nvrtcCreateProgram", ("hiprtcCreateProgram", CONV_JIT, API_RTC)), + ("nvrtcDestroyProgram", ("hiprtcDestroyProgram", CONV_JIT, API_RTC)), + ("nvrtcGetLoweredName", ("hiprtcGetLoweredName", CONV_JIT, API_RTC)), + ("nvrtcGetProgramLog", ("hiprtcGetProgramLog", CONV_JIT, API_RTC)), + ("nvrtcGetProgramLogSize", ("hiprtcGetProgramLogSize", CONV_JIT, API_RTC)), + ("nvrtcGetPTX", ("hiprtcGetCode", CONV_JIT, API_RTC)), + ("nvrtcGetPTXSize", ("hiprtcGetCodeSize", CONV_JIT, API_RTC)), + ("thrust::cuda", ("thrust::hip", CONV_MATH_FUNC, API_BLAS)), + ( + "cudaCpuDeviceId", + ("hipCpuDeviceId", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), + ), + # The caffe2 directory does a string match; pytorch does a word-boundary match. + # Patterns such as 'cub::' will not match for pytorch. + # We list all current uses of cub symbols for this reason. 
+ ("cub::", ("hipcub::", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::ArgMax", ("hipcub::ArgMax", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::ArgMin", ("hipcub::ArgMin", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::BLOCK_REDUCE_WARP_REDUCTIONS", ("hipcub::BLOCK_REDUCE_WARP_REDUCTIONS", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::BlockReduce", ("hipcub::BlockReduce", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::BlockScan", ("hipcub::BlockScan", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::CachingDeviceAllocator", ("hipcub::CachingDeviceAllocator", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::CountingInputIterator", ("hipcub::CountingInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceRadixSort", ("hipcub::DeviceRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceReduce", ("hipcub::DeviceReduce", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceRunLengthEncode", ("hipcub::DeviceRunLengthEncode", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceScan", ("hipcub::DeviceScan", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceSegmentedRadixSort", ("hipcub::DeviceSegmentedRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceSegmentedReduce", ("hipcub::DeviceSegmentedReduce", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::DeviceSelect", ("hipcub::DeviceSelect", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::KeyValuePair", ("hipcub::KeyValuePair", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::Max", ("hipcub::Max", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::Min", ("hipcub::Min", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::Sum", ("hipcub::Sum", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::ArgIndexInputIterator", ("hipcub::ArgIndexInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::TransformInputIterator", ("hipcub::TransformInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("cub::WarpReduce", ("hipcub::WarpReduce", CONV_SPECIAL_FUNC, API_RUNTIME)), + ("nvtxMark", ("roctxMark", CONV_OTHER, API_ROCTX)), + ("nvtxMarkA", ("roctxMarkA", CONV_OTHER, API_ROCTX)), + ("nvtxRangePushA", ("roctxRangePushA", CONV_OTHER, API_ROCTX)), + ("nvtxRangePop", ("roctxRangePop", CONV_OTHER, API_ROCTX)), + ("nvtxRangeStartA", ("roctxRangeStartA", CONV_OTHER, API_ROCTX)), + ("nvtxRangeEnd", ("roctxRangeStop", CONV_OTHER, API_ROCTX)), + ("nvmlReturn_t", ("rsmi_status_t", CONV_OTHER, API_ROCMSMI)), + ("NVML_SUCCESS", ("RSMI_STATUS_SUCCESS", CONV_OTHER, API_ROCMSMI)), + ("NVML_P2P_CAPS_INDEX_READ", ("RSMI_STATUS_SUCCESS", CONV_OTHER, API_ROCMSMI)), + ("NVML_P2P_STATUS_OK", ("RSMI_STATUS_SUCCESS", CONV_OTHER, API_ROCMSMI)), + ("NVML_ERROR_INSUFFICIENT_SIZE", ("RSMI_STATUS_INSUFFICIENT_SIZE", CONV_OTHER, API_ROCMSMI)), + ("nvmlDevice_t", ("uint32_t", CONV_OTHER, API_ROCMSMI)), + ("nvmlGpuP2PStatus_t", ("bool", CONV_OTHER, API_ROCMSMI)), + ("nvmlProcessInfo_t", ("rsmi_process_info_t", CONV_OTHER, API_ROCMSMI)), + ("nvmlGpuP2PCapsIndex_t", ("uint32_t", CONV_OTHER, API_ROCMSMI)), + ] +) + +CUDA_SPECIAL_MAP = collections.OrderedDict( + [ + # SPARSE + ("cusparseStatus_t", ("hipsparseStatus_t", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseHandle_t", ("hipsparseHandle_t", CONV_MATH_FUNC, API_SPECIAL)), + ("cuComplex", ("hipComplex", CONV_TYPE, API_SPECIAL)), + ("cuDoubleComplex", ("hipDoubleComplex", CONV_TYPE, API_SPECIAL)), + ( + "CUSPARSE_POINTER_MODE_HOST", + ("HIPSPARSE_POINTER_MODE_HOST", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ("cusparseOperation_t", ("hipsparseOperation_t", CONV_TYPE, API_SPECIAL)), + ( + "cusparseCreateMatDescr", + ("hipsparseCreateMatDescr", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseCreate", 
("hipsparseCreate", CONV_MATH_FUNC, API_SPECIAL)), + ( + "cusparseDestroyMatDescr", + ("hipsparseDestroyMatDescr", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseDestroy", ("hipsparseDestroy", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseXcoo2csr", ("hipsparseXcoo2csr", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseMatDescr_t", ("hipsparseMatDescr_t", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDiagType_t", ("hipsparseDiagType_t", CONV_TYPE, API_SPECIAL)), + ("CUSPARSE_DIAG_TYPE_UNIT", ("HIPSPARSE_DIAG_TYPE_UNIT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_DIAG_TYPE_NON_UNIT", ("HIPSPARSE_DIAG_TYPE_NON_UNIT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("cusparseSetMatDiagType", ("hipsparseSetMatDiagType", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseFillMode_t", ("hipsparseFillMode_t", CONV_TYPE, API_SPECIAL)), + ("CUSPARSE_FILL_MODE_UPPER", ("HIPSPARSE_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_FILL_MODE_LOWER", ("HIPSPARSE_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("cusparseSetMatFillMode", ("hipsparseSetMatFillMode", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDirection_t", ("hipsparseDirection_t", CONV_TYPE, API_SPECIAL)), + ("CUSPARSE_DIRECTION_ROW", ("HIPSPARSE_DIRECTION_ROW", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_DIRECTION_COLUMN", ("HIPSPARSE_DIRECTION_COLUMN", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("cusparseSolvePolicy_t", ("hipsparseSolvePolicy_t", CONV_TYPE, API_SPECIAL)), + ("CUSPARSE_SOLVE_POLICY_NO_LEVEL", ("HIPSPARSE_SOLVE_POLICY_NO_LEVEL", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SOLVE_POLICY_USE_LEVEL", ("HIPSPARSE_SOLVE_POLICY_USE_LEVEL", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("cusparseCreateBsrsv2Info", ("hipsparseCreateBsrsv2Info", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateBsrsm2Info", ("hipsparseCreateBsrsm2Info", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDestroyBsrsv2Info", ("hipsparseDestroyBsrsv2Info", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDestroyBsrsm2Info", ("hipsparseDestroyBsrsm2Info", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrmm", ("hipsparseSbsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrmm", ("hipsparseDbsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrmm", ("hipsparseCbsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrmm", ("hipsparseZbsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrmv", ("hipsparseSbsrmv", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrmv", ("hipsparseDbsrmv", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrmv", ("hipsparseCbsrmv", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrmv", ("hipsparseZbsrmv", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsv2_bufferSize", ("hipsparseSbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsv2_bufferSize", ("hipsparseDbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsv2_bufferSize", ("hipsparseCbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsv2_bufferSize", ("hipsparseZbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsv2_analysis", ("hipsparseSbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsv2_analysis", ("hipsparseDbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsv2_analysis", ("hipsparseCbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsv2_analysis", ("hipsparseZbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsv2_solve", ("hipsparseSbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsv2_solve", ("hipsparseDbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)), + 
("cusparseCbsrsv2_solve", ("hipsparseCbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsv2_solve", ("hipsparseZbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsm2_bufferSize", ("hipsparseSbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsm2_bufferSize", ("hipsparseDbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsm2_bufferSize", ("hipsparseCbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsm2_bufferSize", ("hipsparseZbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsm2_analysis", ("hipsparseSbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsm2_analysis", ("hipsparseDbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsm2_analysis", ("hipsparseCbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsm2_analysis", ("hipsparseZbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSbsrsm2_solve", ("hipsparseSbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDbsrsm2_solve", ("hipsparseDbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCbsrsm2_solve", ("hipsparseCbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZbsrsm2_solve", ("hipsparseZbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrmm2", ("hipsparseScsrmm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrmm2", ("hipsparseDcsrmm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCcsrmm2", ("hipsparseCcsrmm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZcsrmm2", ("hipsparseZcsrmm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrmm", ("hipsparseScsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrmm", ("hipsparseDcsrmm", CONV_MATH_FUNC, API_SPECIAL)), + ( + "cusparseXcsrsort_bufferSizeExt", + ("hipsparseXcsrsort_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseCreateCsrgemm2Info", ("hipsparseCreateCsrgemm2Info", CONV_MATH_FUNC, API_SPECIAL)), + ( + "cusparseDestroyCsrgemm2Info", + ("hipsparseDestroyCsrgemm2Info", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseXcsrgemm2Nnz", ("hipsparseXcsrgemm2Nnz", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrgemm2_bufferSizeExt", ("hipsparseDcsrgemm2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrgemm2_bufferSizeExt", ("hipsparseScsrgemm2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrgemm2", ("hipsparseDcsrgemm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrgemm2", ("hipsparseScsrgemm2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSetPointerMode", ("hipsparseSetPointerMode", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseXcsrgeam2Nnz", ("hipsparseXcsrgeam2Nnz", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrgeam2_bufferSizeExt", ("hipsparseScsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrgeam2_bufferSizeExt", ("hipsparseDcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCcsrgeam2_bufferSizeExt", ("hipsparseCcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZcsrgeam2_bufferSizeExt", ("hipsparseZcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseScsrgeam2", ("hipsparseScsrgeam2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDcsrgeam2", ("hipsparseDcsrgeam2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCcsrgeam2", ("hipsparseCcsrgeam2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseZcsrgeam2", ("hipsparseZcsrgeam2", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseXcsrsort", ("hipsparseXcsrsort", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseXbsrsm2_zeroPivot", ("hipsparseXbsrsm2_zeroPivot", CONV_MATH_FUNC, API_SPECIAL)), + 
("cusparseXbsrsv2_zeroPivot", ("hipsparseXbsrsv2_zeroPivot", CONV_MATH_FUNC, API_SPECIAL)), + ( + "cusparseXcoosort_bufferSizeExt", + ("hipsparseXcoosort_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL), + ), + ( + "cusparseXcoosortByRow", + ("hipsparseXcoosortByRow", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseSetStream", ("hipsparseSetStream", CONV_MATH_FUNC, API_SPECIAL)), + ( + "cusparseCreateIdentityPermutation", + ("hipsparseCreateIdentityPermutation", CONV_MATH_FUNC, API_SPECIAL), + ), + ( + "cusparseSetMatIndexBase", + ("hipsparseSetMatIndexBase", CONV_MATH_FUNC, API_SPECIAL), + ), + ("cusparseSetMatType", ("hipsparseSetMatType", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMV", ("hipsparseSpMV", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMV_bufferSize", ("hipsparseSpMV_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMM", ("hipsparseSpMM", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMM_bufferSize", ("hipsparseSpMM_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateDnMat", ("hipsparseCreateDnMat", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDnMatSetStridedBatch", ("hipsparseDnMatSetStridedBatch", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCsrSetStridedBatch", ("hipsparseCsrSetStridedBatch", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateDnVec", ("hipsparseCreateDnVec", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateCsr", ("hipsparseCreateCsr", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDestroyDnMat", ("hipsparseDestroyDnMat", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDestroyDnVec", ("hipsparseDestroyDnVec", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDestroySpMat", ("hipsparseDestroySpMat", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpGEMM_destroyDescr", ("hipsparseSpGEMM_destroyDescr", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateCoo", ("hipsparseCreateCoo", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCreateCsr", ("hipsparseCreateCsr", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpGEMM_createDescr", ("hipsparseSpGEMM_createDescr", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseDnMatSetStridedBatch", ("hipsparseDnMatSetStridedBatch", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpGEMM_copy", ("hipsparseSpGEMM_copy", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSDDMM_bufferSize", ("hipsparseSDDMM_bufferSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSDDMM_preprocess", ("hipsparseSDDMM_preprocess", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSDDMM", ("hipsparseSDDMM", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpGEMM_compute", ("hipsparseSpGEMM_compute", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpGEMM_workEstimation", ("hipsparseSpGEMM_workEstimation", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMatGetSize", ("hipsparseSpMatGetSize", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseCsrSetPointers", ("hipsparseCsrSetPointers", CONV_MATH_FUNC, API_SPECIAL)), + ("cusparseSpMVAlg_t", ("hipsparseSpMVAlg_t", CONV_TYPE, API_SPECIAL)), + ("cusparseSpMMAlg_t", ("hipsparseSpMMAlg_t", CONV_TYPE, API_SPECIAL)), + ("cusparseIndexType_t", ("hipsparseIndexType_t", CONV_TYPE, API_SPECIAL)), + # Unsupported ("cusparseMatDescr", ("hipsparseMatDescr", CONV_TYPE, API_SPECIAL)), + # Unsupported ("cusparseDnMatDescr", ("hipsparseDnMatDescr", CONV_TYPE, API_SPECIAL)), + # Unsupported ("cusparseDnVecDescr", ("hipsparseDnVecDescr", CONV_TYPE, API_SPECIAL)), + # Unsupported ("cusparseSpMatDescr", ("hipsparseSpMatDescr", CONV_TYPE, API_SPECIAL)), + # Unsupported ("cusparseSpGEMMDescr", ("hipsparseSpGEMMDescr", CONV_TYPE, API_SPECIAL)), + ("cusparseDnMatDescr_t", ("hipsparseDnMatDescr_t", 
CONV_TYPE, API_SPECIAL)), + ("cusparseDnVecDescr_t", ("hipsparseDnVecDescr_t", CONV_TYPE, API_SPECIAL)), + ("cusparseSpMatDescr_t", ("hipsparseSpMatDescr_t", CONV_TYPE, API_SPECIAL)), + ("cusparseSpGEMMDescr_t", ("hipsparseSpGEMMDescr_t", CONV_TYPE, API_SPECIAL)), + ("CUSPARSE_INDEX_32I", ("HIPSPARSE_INDEX_32I", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_INDEX_64I", ("HIPSPARSE_INDEX_64I", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_ORDER_COL", ("HIPSPARSE_ORDER_COLUMN", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_MV_ALG_DEFAULT", ("HIPSPARSE_MV_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_MM_ALG_DEFAULT", ("HIPSPARSE_MM_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SPMM_COO_ALG1", ("HIPSPARSE_SPMM_COO_ALG1", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SPMM_COO_ALG2", ("HIPSPARSE_SPMM_COO_ALG2", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_COOMV_ALG", ("HIPSPARSE_COOMV_ALG", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SPMM_CSR_ALG1", ("HIPSPARSE_CSRMM_ALG1", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SPGEMM_DEFAULT", ("HIPSPARSE_SPGEMM_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSPARSE_SDDMM_ALG_DEFAULT", ("HIPSPARSE_SDDMM_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ( + "CUSPARSE_STATUS_SUCCESS", + ("HIPSPARSE_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_NOT_INITIALIZED", + ("HIPSPARSE_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_ALLOC_FAILED", + ("HIPSPARSE_STATUS_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_INVALID_VALUE", + ("HIPSPARSE_STATUS_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_MAPPING_ERROR", + ("HIPSPARSE_STATUS_MAPPING_ERROR", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_EXECUTION_FAILED", + ("HIPSPARSE_STATUS_EXECUTION_FAILED", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_INTERNAL_ERROR", + ("HIPSPARSE_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED", + ( + "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED", + CONV_NUMERIC_LITERAL, + API_SPECIAL, + ), + ), + ( + "CUSPARSE_STATUS_ARCH_MISMATCH", + ("HIPSPARSE_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_STATUS_ZERO_PIVOT", + ("HIPSPARSE_STATUS_ZERO_PIVOT", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_OPERATION_TRANSPOSE", + ("HIPSPARSE_OPERATION_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_OPERATION_NON_TRANSPOSE", + ("HIPSPARSE_OPERATION_NON_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE", + ( + "HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE", + CONV_NUMERIC_LITERAL, + API_SPECIAL, + ), + ), + ( + "CUSPARSE_INDEX_BASE_ZERO", + ("HIPSPARSE_INDEX_BASE_ZERO", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_INDEX_BASE_ONE", + ("HIPSPARSE_INDEX_BASE_ONE", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUSPARSE_MATRIX_TYPE_GENERAL", + ("HIPSPARSE_MATRIX_TYPE_GENERAL", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + # SOLVER + ("cublasOperation_t", ("hipsolverOperation_t", CONV_TYPE, API_SPECIAL)), + ("CUBLAS_OP_N", ("HIPSOLVER_OP_N", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ( + "CUBLAS_OP_T", + ("HIPSOLVER_OP_T", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUBLAS_OP_C", + ("HIPSOLVER_OP_C", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ("cublasFillMode_t", 
("hipsolverFillMode_t", CONV_TYPE, API_SPECIAL)), + ( + "CUBLAS_FILL_MODE_LOWER", + ("HIPSOLVER_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ( + "CUBLAS_FILL_MODE_UPPER", + ("HIPSOLVER_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_SPECIAL), + ), + ("cublasSideMode_t", ("hipsolverSideMode_t", CONV_TYPE, API_SPECIAL)), + ("CUBLAS_SIDE_LEFT", ("HIPSOLVER_SIDE_LEFT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUBLAS_SIDE_RIGHT", ("HIPSOLVER_SIDE_RIGHT", CONV_NUMERIC_LITERAL, API_SPECIAL)), + + ("cusolverEigMode_t", ("hipsolverEigMode_t", CONV_TYPE, API_SPECIAL)), + ("CUSOLVER_EIG_MODE_VECTOR", ("HIPSOLVER_EIG_MODE_VECTOR", CONV_NUMERIC_LITERAL, API_SPECIAL)), + ("CUSOLVER_EIG_MODE_NOVECTOR", ("HIPSOLVER_EIG_MODE_NOVECTOR", CONV_NUMERIC_LITERAL, API_SPECIAL)), + + ("syevjInfo_t", ("hipsolverSyevjInfo_t", CONV_TYPE, API_SPECIAL)), + ("cusolverDnCreateSyevjInfo", ("hipsolverDnCreateSyevjInfo", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnXsyevjSetSortEig", ("hipsolverDnXsyevjSetSortEig", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnDestroySyevjInfo", ("hipsolverDnDestroySyevjInfo", CONV_MATH_FUNC, API_SPECIAL)), + + ("gesvdjInfo_t", ("hipsolverGesvdjInfo_t", CONV_TYPE, API_SPECIAL)), + ("cusolverDnCreateGesvdjInfo", ("hipsolverDnCreateGesvdjInfo", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnXgesvdjSetSortEig", ("hipsolverDnXgesvdjSetSortEig", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnDestroyGesvdjInfo", ("hipsolverDnDestroyGesvdjInfo", CONV_MATH_FUNC, API_SPECIAL)), + + ("cusolverDnHandle_t", ("hipsolverDnHandle_t", CONV_TYPE, API_SPECIAL)), + ("cusolverDnCreate", ("hipsolverDnCreate", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnSetStream", ("hipsolverDnSetStream", CONV_MATH_FUNC, API_SPECIAL)), + ("cusolverDnDestroy", ("hipsolverDnDestroy", CONV_MATH_FUNC, API_SPECIAL)), + + # from aten/src/ATen/native/hip/linalg/HIPSolver.cpp + ('cusolverDnParams_t', ('hipsolverDnParams_t', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgeqrf', ('hipsolverDnCgeqrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgeqrf_bufferSize', ('hipsolverDnCgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvd', ('hipsolverDnCgesvd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvd_bufferSize', ('hipsolverDnCgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvdj', ('hipsolverDnCgesvdj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvdjBatched', ('hipsolverDnCgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvdjBatched_bufferSize', ('hipsolverDnCgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvdj_bufferSize', ('hipsolverDnCgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgetrf', ('hipsolverDnCgetrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgetrf_bufferSize', ('hipsolverDnCgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgetrs', ('hipsolverDnCgetrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevd', ('hipsolverDnCheevd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevd_bufferSize', ('hipsolverDnCheevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevj', ('hipsolverDnCheevj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevjBatched', ('hipsolverDnCheevjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevjBatched_bufferSize', ('hipsolverDnCheevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCheevj_bufferSize', ('hipsolverDnCheevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCpotrf', ('hipsolverDnCpotrf', CONV_MATH_FUNC, 
API_SPECIAL)), + ('cusolverDnCpotrfBatched', ('hipsolverDnCpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCpotrf_bufferSize', ('hipsolverDnCpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCpotrs', ('hipsolverDnCpotrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCpotrsBatched', ('hipsolverDnCpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCungqr', ('hipsolverDnCungqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCungqr_bufferSize', ('hipsolverDnCungqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCunmqr', ('hipsolverDnCunmqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCunmqr_bufferSize', ('hipsolverDnCunmqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgeqrf', ('hipsolverDnDgeqrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgeqrf_bufferSize', ('hipsolverDnDgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvd', ('hipsolverDnDgesvd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvd_bufferSize', ('hipsolverDnDgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvdj', ('hipsolverDnDgesvdj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvdjBatched', ('hipsolverDnDgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvdjBatched_bufferSize', ('hipsolverDnDgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvdj_bufferSize', ('hipsolverDnDgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgetrf', ('hipsolverDnDgetrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgetrf_bufferSize', ('hipsolverDnDgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgetrs', ('hipsolverDnDgetrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDorgqr', ('hipsolverDnDorgqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDorgqr_bufferSize', ('hipsolverDnDorgqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDormqr', ('hipsolverDnDormqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDormqr_bufferSize', ('hipsolverDnDormqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDpotrf', ('hipsolverDnDpotrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDpotrfBatched', ('hipsolverDnDpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDpotrf_bufferSize', ('hipsolverDnDpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDpotrs', ('hipsolverDnDpotrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDpotrsBatched', ('hipsolverDnDpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevd', ('hipsolverDnDsyevd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevd_bufferSize', ('hipsolverDnDsyevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevj', ('hipsolverDnDsyevj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevjBatched', ('hipsolverDnDsyevjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevjBatched_bufferSize', ('hipsolverDnDsyevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsyevj_bufferSize', ('hipsolverDnDsyevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgeqrf', ('hipsolverDnSgeqrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgeqrf_bufferSize', ('hipsolverDnSgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvd', ('hipsolverDnSgesvd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvd_bufferSize', ('hipsolverDnSgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvdj', ('hipsolverDnSgesvdj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvdjBatched', ('hipsolverDnSgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)), + 
('cusolverDnSgesvdjBatched_bufferSize', ('hipsolverDnSgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgesvdj_bufferSize', ('hipsolverDnSgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgetrf', ('hipsolverDnSgetrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgetrf_bufferSize', ('hipsolverDnSgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSgetrs', ('hipsolverDnSgetrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSorgqr', ('hipsolverDnSorgqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSorgqr_bufferSize', ('hipsolverDnSorgqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSormqr', ('hipsolverDnSormqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSormqr_bufferSize', ('hipsolverDnSormqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSpotrf', ('hipsolverDnSpotrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSpotrfBatched', ('hipsolverDnSpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSpotrf_bufferSize', ('hipsolverDnSpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSpotrs', ('hipsolverDnSpotrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSpotrsBatched', ('hipsolverDnSpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevd', ('hipsolverDnSsyevd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevd_bufferSize', ('hipsolverDnSsyevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevj', ('hipsolverDnSsyevj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevjBatched', ('hipsolverDnSsyevjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevjBatched_bufferSize', ('hipsolverDnSsyevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsyevj_bufferSize', ('hipsolverDnSsyevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXgeqrf', ('hipsolverDnXgeqrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXgeqrf_bufferSize', ('hipsolverDnXgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXpotrf', ('hipsolverDnXpotrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXpotrf_bufferSize', ('hipsolverDnXpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXpotrs', ('hipsolverDnXpotrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXsyevd', ('hipsolverDnXsyevd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXsyevd_bufferSize', ('hipsolverDnXsyevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgeqrf', ('hipsolverDnZgeqrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgeqrf_bufferSize', ('hipsolverDnZgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvd', ('hipsolverDnZgesvd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvd_bufferSize', ('hipsolverDnZgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvdj', ('hipsolverDnZgesvdj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvdjBatched', ('hipsolverDnZgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvdjBatched_bufferSize', ('hipsolverDnZgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvdj_bufferSize', ('hipsolverDnZgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgetrf', ('hipsolverDnZgetrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgetrf_bufferSize', ('hipsolverDnZgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgetrs', ('hipsolverDnZgetrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevd', ('hipsolverDnZheevd', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevd_bufferSize', ('hipsolverDnZheevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + 
('cusolverDnZheevj', ('hipsolverDnZheevj', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevjBatched', ('hipsolverDnZheevjBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevjBatched_bufferSize', ('hipsolverDnZheevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZheevj_bufferSize', ('hipsolverDnZheevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZpotrf', ('hipsolverDnZpotrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZpotrfBatched', ('hipsolverDnZpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZpotrf_bufferSize', ('hipsolverDnZpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZpotrs', ('hipsolverDnZpotrs', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZpotrsBatched', ('hipsolverDnZpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZungqr', ('hipsolverDnZungqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZungqr_bufferSize', ('hipsolverDnZungqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZunmqr', ('hipsolverDnZunmqr', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZunmqr_bufferSize', ('hipsolverDnZunmqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + + # sytrf + ('cusolverDnDsytrf_bufferSize', ('hipsolverDnDsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsytrf_bufferSize', ('hipsolverDnSsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZsytrf_bufferSize', ('hipsolverDnZsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCsytrf_bufferSize', ('hipsolverDnCsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDsytrf', ('hipsolverDnDsytrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnSsytrf', ('hipsolverDnSsytrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZsytrf', ('hipsolverDnZsytrf', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCsytrf', ('hipsolverDnCsytrf', CONV_MATH_FUNC, API_SPECIAL)), + + # gesdva strided + ( + 'cusolverDnSgesvdaStridedBatched_bufferSize', + ('hipsolverDnSgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL) + ), + ( + 'cusolverDnDgesvdaStridedBatched_bufferSize', + ('hipsolverDnDgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL) + ), + ( + 'cusolverDnCgesvdaStridedBatched_bufferSize', + ('hipsolverDnCgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL) + ), + ( + 'cusolverDnZgesvdaStridedBatched_bufferSize', + ('hipsolverDnZgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL) + ), + ('cusolverDnSgesvdaStridedBatched', ('hipsolverDnSgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnDgesvdaStridedBatched', ('hipsolverDnDgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnCgesvdaStridedBatched', ('hipsolverDnCgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnZgesvdaStridedBatched', ('hipsolverDnZgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)), + + # gesvdj SetXXX + ('cusolverDnXgesvdjSetTolerance', ('hipsolverDnXgesvdjSetTolerance', CONV_MATH_FUNC, API_SPECIAL)), + ('cusolverDnXgesvdjSetMaxSweeps', ('hipsolverDnXgesvdjSetMaxSweeps', CONV_MATH_FUNC, API_SPECIAL)), + ] +) + +PYTORCH_SPECIFIC_MAPPINGS = collections.OrderedDict( + [ + ("USE_CUDA", ("USE_ROCM", API_PYTORCH)), + ("CUDA_VERSION", ("TORCH_HIP_VERSION", API_PYTORCH)), + ("cudaHostAllocator", ("hipHostAllocator", API_PYTORCH)), + ("cudaDeviceAllocator", ("hipDeviceAllocator", API_PYTORCH)), + ("define MAX_NUM_BLOCKS 200", ("define MAX_NUM_BLOCKS 64", API_PYTORCH)), + ("cuda::CUDAGuard", ("hip::HIPGuardMasqueradingAsCUDA", API_PYTORCH)), + ("CUDAGuard", 
("HIPGuardMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::OptionalCUDAGuard", + ("hip::OptionalHIPGuardMasqueradingAsCUDA", API_PYTORCH), + ), + ("OptionalCUDAGuard", ("OptionalHIPGuardMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::CUDAStreamGuard", + ("hip::HIPStreamGuardMasqueradingAsCUDA", API_PYTORCH), + ), + ("CUDAStreamGuard", ("HIPStreamGuardMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::OptionalCUDAStreamGuard", + ("hip::OptionalHIPStreamGuardMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "OptionalCUDAStreamGuard", + ("OptionalHIPStreamGuardMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "cuda::CUDAMultiStreamGuard", + ("hip::HIPMultiStreamGuardMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "CUDAMultiStreamGuard", + ("HIPMultiStreamGuardMasqueradingAsCUDA", API_PYTORCH), + ), + # Only get needs to be transformed this way; all the other ones can go + # straight to the normal versions hip::HIPCachingAllocator + ( + "cuda::CUDACachingAllocator::get", + ("hip::HIPCachingAllocatorMasqueradingAsCUDA::get", API_PYTORCH), + ), + ( + "CUDACachingAllocator::get", + ("HIPCachingAllocatorMasqueradingAsCUDA::get", API_PYTORCH), + ), + ( + "cuda::CUDACachingAllocator::recordStream", + ( + "hip::HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA", + API_PYTORCH, + ), + ), + ( + "CUDACachingAllocator::recordStream", + ( + "HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA", + API_PYTORCH, + ), + ), + ( + "cuda::CUDAAllocator::recordStream", + ( + "hip::HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA", + API_PYTORCH, + ), + ), + ( + "CUDAAllocator::recordStream", + ( + "HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA", + API_PYTORCH, + ), + ), + ("cuda::CUDAStream", ("hip::HIPStreamMasqueradingAsCUDA", API_PYTORCH)), + ("CUDAStream", ("HIPStreamMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::getStreamFromPool", + ("hip::getStreamFromPoolMasqueradingAsCUDA", API_PYTORCH), + ), + ("getStreamFromPool", ("getStreamFromPoolMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::getDefaultCUDAStream", + ("hip::getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "cuda::getStreamFromExternal", + ("hip::getStreamFromExternalMasqueradingAsCUDA", API_PYTORCH), + ), + ("getStreamFromExternal", ("getStreamFromExternalMasqueradingAsCUDA", API_PYTORCH)), + ( + "cuda::getDefaultCUDAStream", + ("hip::getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "getDefaultCUDAStream", + ("getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "cuda::getCurrentCUDAStream", + ("hip::getCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "getCurrentCUDAStream", + ("getCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "cuda::setCurrentCUDAStream", + ("hip::setCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "setCurrentCUDAStream", + ("setCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), + ), + ( + "ATen/cudnn/Handle.h", + ("ATen/miopen/Handle.h", API_PYTORCH), + ), + # TODO: Undo this special-case; see the header for motivation behind this + # hack. It's VERY important this is only applied to PyTorch HIPify. 
+ ( + "c10/cuda/CUDAGuard.h", + ("ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h", API_PYTORCH), + ), + ( + "c10/cuda/CUDACachingAllocator.h", + ("ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h", API_PYTORCH), + ), + ( + "c10/cuda/CUDAStream.h", + ("ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h", API_PYTORCH), + ), + ("gloo/cuda.h", ("gloo/hip.h", API_PYTORCH)), + ( + "gloo/cuda_allreduce_halving_doubling.h", + ("gloo/hip_allreduce_halving_doubling.h", API_PYTORCH), + ), + ( + "gloo/cuda_allreduce_halving_doubling_pipelined.h", + ("gloo/hip_allreduce_halving_doubling_pipelined.h", API_PYTORCH), + ), + ("gloo/cuda_allreduce_ring.h", ("gloo/hip_allreduce_ring.h", API_PYTORCH)), + ( + "gloo/cuda_broadcast_one_to_all.h", + ("gloo/hip_broadcast_one_to_all.h", API_PYTORCH), + ), + ( + "gloo::CudaAllreduceHalvingDoublingPipelined", + ("gloo::HipAllreduceHalvingDoublingPipelined", API_PYTORCH), + ), + ("gloo::CudaBroadcastOneToAll", ("gloo::HipBroadcastOneToAll", API_PYTORCH)), + ("gloo::CudaHostWorkspace", ("gloo::HipHostWorkspace", API_PYTORCH)), + ("gloo::CudaDeviceWorkspace", ("gloo::HipDeviceWorkspace", API_PYTORCH)), + ("CUDNN_RNN_RELU", ("miopenRNNRELU", API_PYTORCH)), + ("CUDNN_RNN_TANH", ("miopenRNNTANH", API_PYTORCH)), + ("CUDNN_LSTM", ("miopenLSTM", API_PYTORCH)), + ("CUDNN_GRU", ("miopenGRU", API_PYTORCH)), + ("cudnnRNNMode_t", ("miopenRNNMode_t", API_PYTORCH)), + ("magma_queue_create_from_cuda", ("magma_queue_create_from_hip", API_PYTORCH)), + ] +) + +CAFFE2_SPECIFIC_MAPPINGS = collections.OrderedDict( + [ + ("cuda_stream", ("hip_stream", API_CAFFE2)), + # if the header is a native hip folder (under hip directory), + # there is no need to add a hip path to it; the trie in hipify script + # takes this mapping order to forbid further replacement + ("/hip/", ("/hip/", API_CAFFE2)), + ("/context_gpu", ("/hip/context_gpu", API_CAFFE2)), + ("/common_gpu", ("/hip/common_gpu", API_CAFFE2)), + ("/cuda_nccl_gpu", ("/hip/hip_nccl_gpu", API_CAFFE2)), + ("/mixed_utils", ("/hip/mixed_utils", API_CAFFE2)), + ("/operator_fallback_gpu", ("/hip/operator_fallback_gpu", API_CAFFE2)), + ( + "/spatial_batch_norm_op_impl", + ("/hip/spatial_batch_norm_op_impl", API_CAFFE2), + ), + ( + "/recurrent_network_executor_gpu", + ("/hip/recurrent_network_executor_gpu", API_CAFFE2), + ), + ( + "/generate_proposals_op_util_nms_gpu", + ("/hip/generate_proposals_op_util_nms_gpu", API_CAFFE2), + ), + ("/max_pool_with_index_gpu", ("/hip/max_pool_with_index_gpu", API_CAFFE2)), + ("/THCCachingAllocator_gpu", ("/hip/THCCachingAllocator_gpu", API_CAFFE2)), + ("/top_k_heap_selection", ("/hip/top_k_heap_selection", API_CAFFE2)), + ("/top_k_radix_selection", ("/hip/top_k_radix_selection", API_CAFFE2)), + ("/GpuAtomics", ("/hip/GpuAtomics", API_CAFFE2)), + ("/GpuDefs", ("/hip/GpuDefs", API_CAFFE2)), + ("/GpuScanUtils", ("/hip/GpuScanUtils", API_CAFFE2)), + ("/GpuBitonicSort", ("/hip/GpuBitonicSort", API_CAFFE2)), + ("/math/reduce.cuh", ("/math/hip/reduce.cuh", API_CAFFE2)), + ("/sgd/adagrad_fused_op_gpu.cuh", ("/sgd/hip/adagrad_fused_op_gpu.cuh", API_CAFFE2)), + ("/operators/segment_reduction_op_gpu.cuh", ("/operators/hip/segment_reduction_op_gpu.cuh", API_CAFFE2)), + ("/gather_op.cuh", ("/hip/gather_op.cuh", API_CAFFE2)), + ("caffe2/core/common_cudnn.h", ("caffe2/core/hip/common_miopen.h", API_CAFFE2)), + ("REGISTER_CUDA_OPERATOR", ("REGISTER_HIP_OPERATOR", API_CAFFE2)), + ("CUDA_1D_KERNEL_LOOP", ("HIP_1D_KERNEL_LOOP", API_CAFFE2)), + ("CUDAContext", ("HIPContext", API_CAFFE2)), + ("CAFFE_CUDA_NUM_THREADS", 
("CAFFE_HIP_NUM_THREADS", API_CAFFE2)), + ("HasCudaGPU", ("HasHipGPU", API_CAFFE2)), + ("__expf", ("expf", API_CAFFE2)), + ("CUBLAS_ENFORCE", ("HIPBLAS_ENFORCE", API_CAFFE2)), + ("CUBLAS_CHECK", ("HIPBLAS_CHECK", API_CAFFE2)), + ("cublas_handle", ("hipblas_handle", API_CAFFE2)), + ("CURAND_ENFORCE", ("HIPRAND_ENFORCE", API_CAFFE2)), + ("CURAND_CHECK", ("HIPRAND_CHECK", API_CAFFE2)), + ("curandGenerateUniform", ("hiprandGenerateUniform", API_CAFFE2)), + ("curand_generator", ("hiprand_generator", API_CAFFE2)), + ("CaffeCudaGetDevice", ("CaffeHipGetDevice", API_CAFFE2)), + # do not rename CUDA_KERNEL_ASSERT, lazyInitCUDA in caffe2 sources + # the ordered dict guarantees this pattern will match first, before "CUDA" + ("CUDA_KERNEL_ASSERT", ("CUDA_KERNEL_ASSERT", API_CAFFE2)), + ("lazyInitCUDA", ("lazyInitCUDA", API_CAFFE2)), + ("CUDA_VERSION", ("TORCH_HIP_VERSION", API_CAFFE2)), + ("CUDA", ("HIP", API_CAFFE2)), + ("Cuda", ("Hip", API_CAFFE2)), + ("cuda_", ("hip_", API_CAFFE2)), + ("_cuda", ("_hip", API_CAFFE2)), + ("CUDNN", ("MIOPEN", API_CAFFE2)), + ("CuDNN", ("MIOPEN", API_CAFFE2)), + ("cudnn", ("miopen", API_CAFFE2)), + ("namespace cuda", ("namespace hip", API_CAFFE2)), + ("cuda::CUDAGuard", ("hip::HIPGuard", API_CAFFE2)), + ("cuda::OptionalCUDAGuard", ("hip::OptionalHIPGuard", API_CAFFE2)), + ("cuda::CUDAStreamGuard", ("hip::HIPStreamGuard", API_CAFFE2)), + ("cuda::OptionalCUDAStreamGuard", ("hip::OptionalHIPStreamGuard", API_CAFFE2)), + ("c10/cuda/CUDAGuard.h", ("c10/hip/HIPGuard.h", API_CAFFE2)), + ("gloo/cuda", ("gloo/hip", API_CAFFE2)), + ] +) + +# We must tread very carefully here. Blanket conversions like are done +# in CAFFE2_SPECIFIC_MAPPINGS are not presently supported on PyTorch, +# because a regex for CUDA will also match a filename like CUDAGuard.h, +# but the HIPIFY script doesn't presently move the file and so the substitution +# will be invalid. Instead, we specifically list out every identifier +# and file from c10/cuda which may be used externally, and do substitutions this +# way. 
+# +# NB: if you want a transformation to ONLY apply to the c10/ directory, +# put it as API_CAFFE2 +C10_MAPPINGS = collections.OrderedDict( + [ + ("CUDA_VERSION", ("TORCH_HIP_VERSION", API_PYTORCH)), + ("CUDA_LAUNCH_BLOCKING=1", ("AMD_SERIALIZE_KERNEL=3", API_C10)), + ("CUDA_LAUNCH_BLOCKING", ("AMD_SERIALIZE_KERNEL", API_C10)), + ("cuda::compat::", ("hip::compat::", API_C10)), + ("c10/cuda/CUDAAlgorithm.h", ("c10/hip/HIPAlgorithm.h", API_C10)), + ("c10/cuda/CUDADeviceAssertion.h", ("c10/hip/HIPDeviceAssertion.h", API_C10)), + ("c10/cuda/CUDADeviceAssertionHost.h", ("c10/hip/HIPDeviceAssertionHost.h", API_C10)), + ("c10/cuda/CUDAException.h", ("c10/hip/HIPException.h", API_C10)), + ("c10/cuda/CUDAMacros.h", ("c10/hip/HIPMacros.h", API_C10)), + ("c10/cuda/CUDAMathCompat.h", ("c10/hip/HIPMathCompat.h", API_C10)), + ("c10/cuda/CUDAFunctions.h", ("c10/hip/HIPFunctions.h", API_C10)), + ("c10/cuda/CUDAMiscFunctions.h", ("c10/hip/HIPMiscFunctions.h", API_C10)), + ("c10/cuda/CUDAStream.h", ("c10/hip/HIPStream.h", API_C10)), + ("c10/cuda/CUDAGraphsC10Utils.h", ("c10/hip/HIPGraphsC10Utils.h", API_C10)), + ("c10/cuda/CUDAAllocatorConfig.h", ("c10/hip/HIPAllocatorConfig.h", API_C10)), + ("c10/cuda/CUDACachingAllocator.h", ("c10/hip/HIPCachingAllocator.h", API_C10)), + ("c10/cuda/impl/CUDATest.h", ("c10/hip/impl/HIPTest.h", API_C10)), + ("c10/cuda/impl/CUDAGuardImpl.h", ("c10/hip/impl/HIPGuardImpl.h", API_C10)), + ( + "c10/cuda/impl/cuda_cmake_macros.h", + ("c10/hip/impl/hip_cmake_macros.h", API_C10), + ), + ("C10_CUDA_CHECK", ("C10_HIP_CHECK", API_C10)), + ("C10_CUDA_CHECK_WARN", ("C10_HIP_CHECK_WARN", API_C10)), + ("C10_CUDA_ERROR_HANDLED", ("C10_HIP_ERROR_HANDLED", API_C10)), + ("C10_CUDA_IGNORE_ERROR", ("C10_HIP_IGNORE_ERROR", API_C10)), + ("C10_CUDA_CLEAR_ERROR", ("C10_HIP_CLEAR_ERROR", API_C10)), + ("c10::cuda", ("c10::hip", API_C10)), + ("cuda::CUDAStream", ("hip::HIPStream", API_C10)), + ("CUDAStream", ("HIPStream", API_C10)), + # This substitution is not permissible, because there's another copy of this + # function in torch/cuda.h + # ("cuda::device_count", ("hip::device_count", API_C10)), + ("cuda::current_device", ("hip::current_device", API_C10)), + ("cuda::set_device", ("hip::set_device", API_C10)), + ("cuda::device_synchronize", ("hip::device_synchronize", API_C10)), + ("cuda::getStreamFromPool", ("hip::getStreamFromPool", API_C10)), + ("getStreamFromPool", ("getStreamFromPool", API_C10)), + ("cuda::getDefaultCUDAStream", ("hip::getDefaultHIPStream", API_C10)), + ("getDefaultCUDAStream", ("getDefaultHIPStream", API_C10)), + ("cuda::getCurrentCUDAStream", ("hip::getCurrentHIPStream", API_C10)), + ("getCurrentCUDAStream", ("getCurrentHIPStream", API_C10)), + ("cuda::get_cuda_check_prefix", ("hip::get_cuda_check_prefix", API_C10)), + ("cuda::setCurrentCUDAStream", ("hip::setCurrentHIPStream", API_C10)), + ("setCurrentCUDAStream", ("setCurrentHIPStream", API_C10)), + ("cuda::CUDACachingAllocator", ("hip::HIPCachingAllocator", API_C10)), + ("CUDACachingAllocator", ("HIPCachingAllocator", API_C10)), + ("cuda::CUDAAllocatorConfig", ("hip::HIPAllocatorConfig", API_C10)), + ("CUDAAllocatorConfig", ("HIPAllocatorConfig", API_C10)), + ("pinned_use_cuda_host_register", ("pinned_use_hip_host_register", API_C10)), + ("c10::cuda::CUDAAllocator", ("c10::hip::HIPAllocator", API_C10)), + ("cuda::CUDAAllocator", ("hip::HIPAllocator", API_C10)), + ("CUDAAllocator", ("HIPAllocator", API_C10)), + ("C10_CUDA_KERNEL_LAUNCH_CHECK", ("C10_HIP_KERNEL_LAUNCH_CHECK", API_C10)) + ] +) + +# NB: C10 mappings are more 
specific than Caffe2 mappings, so run them +# first +CUDA_TO_HIP_MAPPINGS = [ + CUDA_IDENTIFIER_MAP, + CUDA_TYPE_NAME_MAP, + CUDA_INCLUDE_MAP, + CUDA_SPECIAL_MAP, + C10_MAPPINGS, + PYTORCH_SPECIFIC_MAPPINGS, + CAFFE2_SPECIFIC_MAPPINGS, +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/hipify_python.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/hipify_python.py new file mode 100644 index 0000000000000000000000000000000000000000..3fd7132fde3b7e8c5ab8a6c484c2096a87ef7f5b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/hipify_python.py @@ -0,0 +1,1159 @@ +#!/usr/bin/env python3 +""" The Python Hipify script. +## +# Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved. +# 2017-2018 Advanced Micro Devices, Inc. and +# Facebook Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +""" +import argparse +import fnmatch +import re +import shutil +import sys +import os + +from . import constants +from .cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS +from .cuda_to_hip_mappings import MATH_TRANSPILATIONS + +from typing import Dict, List, Iterator, Optional +from collections.abc import Mapping, Iterable +from enum import Enum + +class CurrentState(Enum): + INITIALIZED = 1 + DONE = 2 + +class HipifyResult: + def __init__(self, current_state, hipified_path): + self.current_state = current_state + self.hipified_path = hipified_path + self.status = "" + + def __str__(self): + return ("HipifyResult:: current_state: {}, hipified_path : {}, status: {}".format(self.current_state, + self.hipified_path, self.status)) + +HipifyFinalResult = Dict[str, HipifyResult] +HIPIFY_C_BREADCRUMB = "// !!! 
This is a file automatically generated by hipify!!!\n"
+HIPIFY_FINAL_RESULT: HipifyFinalResult = {}
+
+# Hardcode the PyTorch template map
+"""This dictionary provides the mapping from PyTorch kernel template types
+to their actual types."""
+PYTORCH_TEMPLATE_MAP = {"Dtype": "scalar_t", "T": "scalar_t"}
+
+__all__ = ['InputError', 'openf', 'bcolors', 'GeneratedFileCleaner', 'match_extensions', 'matched_files_iter',
+           'preprocess_file_and_save_result', 'compute_stats', 'add_dim3', 'processKernelLaunches', 'find_closure_group',
+           'find_bracket_group', 'find_parentheses_group', 'replace_math_functions', 'hip_header_magic', 'replace_extern_shared',
+           'get_hip_file_path', 'is_out_of_place', 'is_pytorch_file', 'is_cusparse_file', 'is_special_file',
+           'is_caffe2_gpu_file', 'Trie', 'preprocessor', 'file_specific_replacement', 'file_add_header',
+           'fix_static_global_kernels', 'extract_arguments', 'str2bool', 'CurrentState', 'HipifyResult', 'hipify']
+
+
+class InputError(Exception):
+    # Exception raised for errors in the input.
+
+    def __init__(self, message):
+        super().__init__(message)
+        self.message = message
+
+    def __str__(self):
+        return f"Input error: {self.message}"
+
+
+def openf(filename, mode):
+    return open(filename, mode, errors='ignore')
+
+
+# Color coding for printing
+class bcolors:
+    HEADER = '\033[95m'
+    OKBLUE = '\033[94m'
+    OKGREEN = '\033[92m'
+    WARNING = '\033[93m'
+    FAIL = '\033[91m'
+    ENDC = '\033[0m'
+    BOLD = '\033[1m'
+    UNDERLINE = '\033[4m'
+
+
+# To the programmer, the output of hipify is most likely an intermediate artifact.
+# This class lets users of hipify request a cleanup by running hipify and the
+# compilation inside a `with` block that instantiates this context manager with
+# keep_intermediates=False.
+# The main use case is cpp_extensions, specifically its load method.
+# It is usually a good idea to keep intermediates (in case of errors, or to
+# avoid recompiling unchanged files), but when you don't want to keep them
+# (e.g. in CI), this can be used to remove the generated files.
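+#
+# A minimal usage sketch (hypothetical paths, for illustration only):
+#
+#     with GeneratedFileCleaner(keep_intermediates=False) as clean_ctx:
+#         clean_ctx.makedirs("build/hip", exist_ok=True)
+#         with clean_ctx.open("build/hip/foo_hip.cpp", "w") as f:
+#             f.write("// hipified source goes here\n")
+#     # on exit, the newly created file and directory are removed because
+#     # keep_intermediates=False; pre-existing paths are left untouched.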
+class GeneratedFileCleaner: + """Context Manager to clean up generated files""" + def __init__(self, keep_intermediates=False): + self.keep_intermediates = keep_intermediates + self.files_to_clean = set() + self.dirs_to_clean = [] + + def __enter__(self): + return self + + def open(self, fn, *args, **kwargs): + if not os.path.exists(fn): + self.files_to_clean.add(os.path.abspath(fn)) + return open(fn, *args, **kwargs) + + def makedirs(self, dn, exist_ok=False): + parent, n = os.path.split(dn) + if not n: + parent, n = os.path.split(parent) + if parent and n and not os.path.exists(parent): + self.makedirs(parent, exist_ok=True) + if not os.path.isdir(dn) or not exist_ok: + os.mkdir(dn) + self.dirs_to_clean.append(os.path.abspath(dn)) + + def __exit__(self, type, value, traceback): + if not self.keep_intermediates: + for f in self.files_to_clean: + os.unlink(f) + for d in self.dirs_to_clean[::-1]: + os.rmdir(d) + + +def match_extensions(filename: str, extensions: Iterable) -> bool: + """Helper method to see if filename ends with certain extension""" + return any(filename.endswith(e) for e in extensions) + + +def _fnmatch(filepath, patterns): + return any(fnmatch.fnmatch(filepath, pattern) for pattern in patterns) + + +def matched_files_iter( + root_path: str, + includes: Iterable = (), + ignores: Iterable = (), + extensions: Iterable = (), + out_of_place_only: bool = False, + is_pytorch_extension: bool = False) -> Iterator[str]: + + exact_matches = set(includes) + + # This is a very rough heuristic; really, we want to avoid scanning + # any file which is not checked into source control, but this script + # needs to work even if you're in a Git or Hg checkout, so easier to + # just block the biggest time sinks that won't matter in the + # end. + for (abs_dirpath, dirs, filenames) in os.walk(root_path, topdown=True): + rel_dirpath = os.path.relpath(abs_dirpath, root_path) + if rel_dirpath == '.': + # Blah blah blah O(n) blah blah + if ".git" in dirs: + dirs.remove(".git") + if "build" in dirs: + dirs.remove("build") + if "third_party" in dirs: + dirs.remove("third_party") + dirs.append("third_party/nvfuser") + for filename in filenames: + filepath = os.path.join(abs_dirpath, filename) + rel_filepath = os.path.join(rel_dirpath, filename) + # We respect extensions, UNLESS you wrote the entire + # filename verbatim, in which case we always accept it + if ( + _fnmatch(filepath, includes) + and (not _fnmatch(filepath, ignores)) + and (match_extensions(filepath, extensions) or filepath in exact_matches) + ): + if not is_pytorch_extension: # for pytorch extensions, consider all files + if not is_pytorch_file(rel_filepath) and not is_caffe2_gpu_file(rel_filepath): + continue + if out_of_place_only and not is_out_of_place(rel_filepath): + continue + yield filepath + + +def preprocess_file_and_save_result( + output_directory: str, + filepath: str, + all_files: Iterable, + header_include_dirs: Iterable, + stats: Dict[str, List], + hip_clang_launch: bool, + is_pytorch_extension: bool, + clean_ctx: GeneratedFileCleaner, + show_progress: bool) -> None: + fin_path = os.path.abspath(os.path.join(output_directory, filepath)) + hipify_result = HipifyResult(current_state=CurrentState.INITIALIZED, hipified_path=fin_path) + HIPIFY_FINAL_RESULT[fin_path] = hipify_result + result = preprocessor(output_directory, filepath, all_files, header_include_dirs, stats, + hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress) + + # Show what happened + if show_progress and "ignored" not in result.status: + print( 
+ fin_path, "->", + result.hipified_path, result.status, flush=True) + + HIPIFY_FINAL_RESULT[fin_path] = result + + +def compute_stats(stats): + unsupported_calls = {cuda_call for (cuda_call, _filepath) in stats["unsupported_calls"]} + + # Print the number of unsupported calls + print(f"Total number of unsupported CUDA function calls: {len(unsupported_calls):d}") + + # Print the list of unsupported calls + print(", ".join(unsupported_calls)) + + # Print the number of kernel launches + print(f"\nTotal number of replaced kernel launches: {len(stats['kernel_launches']):d}") + + +def add_dim3(kernel_string, cuda_kernel): + '''adds dim3() to the second and third arguments in the kernel launch''' + count = 0 + closure = 0 + kernel_string = kernel_string.replace("<<<", "").replace(">>>", "") + arg_locs: List[Dict[str, int]] = [{} for _ in range(2)] + arg_locs[count]['start'] = 0 + for ind, c in enumerate(kernel_string): + if count > 1: + break + if c == "(": + closure += 1 + elif c == ")": + closure -= 1 + if (c == "," or ind == len(kernel_string) - 1) and closure == 0: + arg_locs[count]['end'] = ind + (c != ",") + count += 1 + if count < 2: + arg_locs[count]['start'] = ind + 1 + + first_arg_raw = kernel_string[arg_locs[0]['start']:arg_locs[0]['end'] + 1] + second_arg_raw = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']] + + first_arg_clean = kernel_string[arg_locs[0]['start']:arg_locs[0]['end']].replace("\n", "").strip(" ") + second_arg_clean = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']].replace("\n", "").strip(" ") + + first_arg_dim3 = f"dim3({first_arg_clean})" + second_arg_dim3 = f"dim3({second_arg_clean})" + + first_arg_raw_dim3 = first_arg_raw.replace(first_arg_clean, first_arg_dim3) + second_arg_raw_dim3 = second_arg_raw.replace(second_arg_clean, second_arg_dim3) + cuda_kernel = cuda_kernel.replace(first_arg_raw + second_arg_raw, first_arg_raw_dim3 + second_arg_raw_dim3) + return cuda_kernel + + +RE_KERNEL_LAUNCH = re.compile(r'([ ]+)(detail?)::[ ]+\\\n[ ]+') + + +def processKernelLaunches(string, stats): + """ Replace the CUDA style Kernel launches with the HIP style kernel launches.""" + # Concat the namespace with the kernel names. (Find cleaner way of doing this later). + string = RE_KERNEL_LAUNCH.sub(lambda inp: f"{inp.group(1)}{inp.group(2)}::", string) + + def grab_method_and_template(in_kernel): + # The positions for relevant kernel components. + pos = { + "kernel_launch": {"start": in_kernel["start"], "end": in_kernel["end"]}, + "kernel_name": {"start": -1, "end": -1}, + "template": {"start": -1, "end": -1} + } + + # Count for balancing template + count = {"<>": 0} + + # Status for whether we are parsing a certain item. + START = 0 + AT_TEMPLATE = 1 + AFTER_TEMPLATE = 2 + AT_KERNEL_NAME = 3 + + status = START + + # Parse the string character by character + for i in range(pos["kernel_launch"]["start"] - 1, -1, -1): + char = string[i] + + # Handle Templating Arguments + if status in (START, AT_TEMPLATE): + if char == ">": + if status == START: + status = AT_TEMPLATE + pos["template"]["end"] = i + count["<>"] += 1 + + if char == "<": + count["<>"] -= 1 + if count["<>"] == 0 and (status == AT_TEMPLATE): + pos["template"]["start"] = i + status = AFTER_TEMPLATE + + # Handle Kernel Name + if status != AT_TEMPLATE: + if string[i].isalnum() or string[i] in {'(', ')', '_', ':', '#'}: + if status != AT_KERNEL_NAME: + status = AT_KERNEL_NAME + pos["kernel_name"]["end"] = i + + # Case: Kernel name starts the string. 
+ if i == 0: + pos["kernel_name"]["start"] = 0 + + # Finished + return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])] + + else: + # Potential ending point if we're already traversing a kernel's name. + if status == AT_KERNEL_NAME: + pos["kernel_name"]["start"] = i + + # Finished + return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])] + + def find_kernel_bounds(string): + """Finds the starting and ending points for all kernel launches in the string.""" + kernel_end = 0 + kernel_positions = [] + + # Continue until we cannot find any more kernels anymore. + while string.find("<<<", kernel_end) != -1: + # Get kernel starting position (starting from the previous ending point) + kernel_start = string.find("<<<", kernel_end) + + # Get kernel ending position (adjust end point past the >>>) + kernel_end = string.find(">>>", kernel_start) + 3 + if kernel_end <= 0: + raise InputError("no kernel end found") + + # Add to list of traversed kernels + kernel_positions.append({"start": kernel_start, "end": kernel_end, + "group": string[kernel_start: kernel_end]}) + + return kernel_positions + + # Replace comments and string literals from the code so that find_kernel_bounds does not + # wrongly capture kernels in comments and string literals. + # This function replaces them with "x" to keep positions. + def mask_comments(string): + in_comment = '' + prev_c = '' + new_string = '' + for c in string: + if in_comment == '': + # Outside comments + if c == '/' and prev_c == '/': + in_comment = '//' + elif c == '*' and prev_c == '/': + in_comment = '/*' + elif c == '"' and prev_c != '\\' and prev_c != "'": + in_comment = '"' + elif in_comment == '//': + # In // xxx + if c == '\r' or c == '\n': + in_comment = '' + elif in_comment == '/*': + # In /* xxx */ + if c == '/' and prev_c == '*': + in_comment = '' + elif in_comment == '"': + # In "" + if c == '"' and prev_c != '\\': + in_comment = '' + prev_c = c + if in_comment == '': + new_string += c + else: + new_string += 'x' + return new_string + + # Grab positional ranges of all kernel launches + get_kernel_positions = list(find_kernel_bounds(mask_comments(string))) + output_string = string + + # Replace each CUDA kernel with a HIP kernel. 
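+    # Illustrative example (not taken from the original source): a launch such as
+    #     my_kernel<<<grid, block, 0, stream>>>(x, n)
+    # becomes, roughly,
+    #     hipLaunchKernelGGL((my_kernel), dim3(grid), dim3(block), 0, stream, x, n)
+    # with ", 0" padding appended when fewer than four launch parameters are present.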
+ for kernel in get_kernel_positions: + # Get kernel components + params = grab_method_and_template(kernel) + + # Find parenthesis after kernel launch + parenthesis = string.find("(", kernel["end"]) + + # Extract cuda kernel + cuda_kernel = string[params[0]["start"]:parenthesis + 1] + kernel_string = string[kernel['start']:kernel['end']] + end_param_index = 0 if params[1]['end'] == -1 else 1 + kernel_name_with_template = string[params[0]['start']:params[end_param_index]['end'] + 1] + cuda_kernel_dim3 = add_dim3(kernel_string, cuda_kernel) + # Keep number of kernel launch params consistent (grid dims, group dims, stream, dynamic shared size) + num_klp = len(extract_arguments(0, kernel["group"].replace("<<<", "(").replace(">>>", ")"))) + + hip_kernel = "hipLaunchKernelGGL(" + cuda_kernel_dim3[0:-1].replace( + ">>>", ", 0" * (4 - num_klp) + ">>>").replace("<<<", ", ").replace( + ">>>", ", ").replace(kernel_name_with_template, "(" + kernel_name_with_template + ")") + + # Replace cuda kernel with hip kernel + output_string = output_string.replace(cuda_kernel, hip_kernel) + + # Update the statistics + stats["kernel_launches"].append(hip_kernel) + + return output_string + + +def find_closure_group(input_string, start, group): + """Generalization for finding a balancing closure group + + if group = ["(", ")"], then finds the first balanced parentheses. + if group = ["{", "}"], then finds the first balanced bracket. + + Given an input string, a starting position in the input string, and the group type, + find_closure_group returns the positions of group[0] and group[1] as a tuple. + + Example: + >>> find_closure_group("(hi)", 0, ["(", ")"]) + (0, 3) + """ + + inside_parenthesis = False + parens = 0 + pos = start + p_start, p_end = -1, -1 + + while pos < len(input_string): + if input_string[pos] == group[0]: + if inside_parenthesis is False: + inside_parenthesis = True + parens = 1 + p_start = pos + else: + parens += 1 + elif input_string[pos] == group[1] and inside_parenthesis: + parens -= 1 + + if parens == 0: + p_end = pos + return p_start, p_end + + pos += 1 + return None, None + + +def find_bracket_group(input_string, start): + """Finds the first balanced parantheses.""" + return find_closure_group(input_string, start, group=["{", "}"]) + + +def find_parentheses_group(input_string, start): + """Finds the first balanced bracket.""" + return find_closure_group(input_string, start, group=["(", ")"]) + + +RE_ASSERT = re.compile(r"\bassert[ ]*\(") + + +def replace_math_functions(input_string): + """FIXME: Temporarily replace std:: invocations of math functions + with non-std:: versions to prevent linker errors NOTE: This + can lead to correctness issues when running tests, since the + correct version of the math function (exp/expf) might not get + called. Plan is to remove this function once HIP supports + std:: math function calls inside device code + + """ + output_string = input_string + for func in MATH_TRANSPILATIONS: + output_string = output_string.replace(fr'{func}(', f'{MATH_TRANSPILATIONS[func]}(') + + return output_string + + +RE_SYNCTHREADS = re.compile(r":?:?\b(__syncthreads)\b(\w*\()") + + +def hip_header_magic(input_string): + """If the file makes kernel builtin calls and does not include the cuda_runtime.h header, + then automatically add an #include to match the "magic" includes provided by NVCC. + TODO: + Update logic to ignore cases where the cuda_runtime.h is included by another file. + """ + + # Copy the input. 
+ output_string = input_string + + # Check if one of the following headers is already included. + headers = ["hip/hip_runtime.h", "hip/hip_runtime_api.h"] + if any(re.search(fr'#include ("{ext}"|<{ext}>)', output_string) for ext in headers): + return output_string + + # Rough logic to detect if we're inside device code + hasDeviceLogic: int + hasDeviceLogic = "hipLaunchKernelGGL" in output_string + hasDeviceLogic += "__global__" in output_string + hasDeviceLogic += "__shared__" in output_string + hasDeviceLogic += RE_SYNCTHREADS.search(output_string) is not None + + # If device logic found, provide the necessary header. + if hasDeviceLogic: + output_string = '#include "hip/hip_runtime.h"\n' + input_string + + return output_string + + +RE_EXTERN_SHARED = re.compile(r"extern\s+([\w\(\)]+)?\s*__shared__\s+([\w:<>\s]+)\s+(\w+)\s*\[\s*\]\s*;") + + +def replace_extern_shared(input_string): + """Match extern __shared__ type foo[]; syntax and use HIP_DYNAMIC_SHARED() MACRO instead. + https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/hip_kernel_language.md#__shared__ + Example: + "extern __shared__ char smemChar[];" => "HIP_DYNAMIC_SHARED( char, smemChar)" + "extern __shared__ unsigned char smem[];" => "HIP_DYNAMIC_SHARED( unsigned char, my_smem)" + """ + output_string = input_string + output_string = RE_EXTERN_SHARED.sub( + lambda inp: f"HIP_DYNAMIC_SHARED({inp.group(1) or ''} {inp.group(2)}, {inp.group(3)})", output_string) + + return output_string + + +def get_hip_file_path(rel_filepath, is_pytorch_extension=False): + """ + Returns the new name of the hipified file + """ + # At the moment, some PyTorch source files are HIPified in place. The predicate + # is_out_of_place tells us if this is the case or not. + assert not os.path.isabs(rel_filepath) + if not is_pytorch_extension and not is_out_of_place(rel_filepath): + return rel_filepath + + dirpath, filename = os.path.split(rel_filepath) + root, ext = os.path.splitext(filename) + + # Here's the plan: + # + # In general, we need to disambiguate the HIPified filename so that + # it gets a different name from the original filename, so + # that we don't overwrite the original file + # + # There's a lot of different naming conventions across PyTorch + # and Caffe2, but the general recipe is to convert occurrences + # of cuda/gpu to hip, and add hip if there are no occurrences + # of cuda/gpu anywhere. + # + # Concretely, we do the following: + # + # - If there is a directory component named "cuda", replace + # it with "hip", AND + # + # - If the file name contains "CUDA", replace it with "HIP", AND + # + # - ALWAYS replace '.cu' with '.hip', because those files + # contain CUDA kernels that needs to be hipified and processed with + # hip compiler + # + # - If we are not hipifying a PyTorch extension, and the parent + # directory name did not change as a result of the above + # transformations, insert "hip" in the file path + # as the direct parent folder of the file + # + # - If we are hipifying a PyTorch extension, and the parent directory + # name as well as the filename (incl. extension) did not change as + # a result of the above transformations, insert "_hip" in the filename + # + # This isn't set in stone; we might adjust this to support other + # naming conventions. 
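+    #
+    # For example (hypothetical file names, applying the rules above):
+    #     aten/src/ATen/native/cuda/Foo.cu -> aten/src/ATen/native/hip/Foo.hip
+    #     caffe2/operators/my_op_gpu.cc    -> caffe2/operators/hip/my_op_gpu.cc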
+ + if ext == '.cu': + ext = '.hip' + + orig_filename = filename + orig_dirpath = dirpath + + dirpath = dirpath.replace('cuda', 'hip') + dirpath = dirpath.replace('CUDA', 'HIP') + dirpath = dirpath.replace('THC', 'THH') + + root = root.replace('cuda', 'hip') + root = root.replace('CUDA', 'HIP') + # Special case to handle caffe2/core/THCCachingAllocator + if dirpath != "caffe2/core": + root = root.replace('THC', 'THH') + + if not is_pytorch_extension and dirpath == orig_dirpath: + dirpath = os.path.join(dirpath, 'hip') + + if is_pytorch_extension and dirpath == orig_dirpath and (root + ext) == orig_filename: + root = root + "_hip" + + return os.path.join(dirpath, root + ext) + + +def is_out_of_place(rel_filepath): + assert not os.path.isabs(rel_filepath) + if rel_filepath.startswith("torch/"): + return False + if rel_filepath.startswith("third_party/nvfuser/"): + return False + if rel_filepath.startswith("tools/autograd/templates/"): + return False + return True + + +# Keep this synchronized with includes/ignores in build_amd.py +def is_pytorch_file(rel_filepath): + assert not os.path.isabs(rel_filepath) + if rel_filepath.startswith("aten/"): + if rel_filepath.startswith("aten/src/ATen/core/"): + return False + return True + if rel_filepath.startswith("torch/"): + return True + if rel_filepath.startswith("third_party/nvfuser/"): + return True + if rel_filepath.startswith("tools/autograd/templates/"): + return True + return False + + +def is_cusparse_file(rel_filepath): + if is_pytorch_file(rel_filepath): + return "sparse" in rel_filepath.lower() + return False + + +def is_special_file(rel_filepath): + if is_pytorch_file(rel_filepath): + if "sparse" in rel_filepath.lower(): + return True + elif "linalg" in rel_filepath.lower(): + if "batchlinearalgebralibblas" in rel_filepath.lower(): + return False # don't use "special" mappings for this specific linalg cublas file + return True + return False + +def is_caffe2_gpu_file(rel_filepath): + assert not os.path.isabs(rel_filepath) + if rel_filepath.startswith("c10/cuda"): + return True + filename = os.path.basename(rel_filepath) + _, ext = os.path.splitext(filename) + return ('gpu' in filename or ext in ['.cu', '.cuh']) and ('cudnn' not in filename) + +class TrieNode: + """A Trie node whose children are represented as a directory of char: TrieNode. + A special char '' represents end of word + """ + + def __init__(self): + self.children = {} + +class Trie: + """Creates a Trie out of a list of words. The trie can be exported to a Regex pattern. + The corresponding Regex should match much faster than a simple Regex union.""" + + def __init__(self): + """Initialize the trie with an empty root node.""" + self.root = TrieNode() + + def add(self, word): + """Add a word to the Trie. """ + node = self.root + + for char in word: + node.children.setdefault(char, TrieNode()) + node = node.children[char] + node.children[''] = True # Mark the end of the word + + def dump(self): + """Return the root node of Trie. """ + return self.root + + def quote(self, char): + """ Escape a char for regex. """ + return re.escape(char) + + def search(self, word): + """Search whether word is present in the Trie. 
+ Returns True if yes, else return False""" + node = self.root + for char in word: + if char in node.children: + node = node.children[char] + else: + return False + + # make sure to check the end-of-word marker present + return '' in node.children + + def _pattern(self, root): + """Convert a Trie into a regular expression pattern""" + node = root + + if "" in node.children and len(node.children.keys()) == 1: + return None + + alt = [] # store alternative patterns + cc = [] # store char to char classes + q = 0 # for node representing the end of word + for char in sorted(node.children.keys()): + if isinstance(node.children[char], TrieNode): + try: + recurse = self._pattern(node.children[char]) + alt.append(self.quote(char) + recurse) + except Exception: + cc.append(self.quote(char)) + else: + q = 1 + cconly = not len(alt) > 0 + + if len(cc) > 0: + if len(cc) == 1: + alt.append(cc[0]) + else: + alt.append('[' + ''.join(cc) + ']') + + if len(alt) == 1: + result = alt[0] + else: + result = "(?:" + "|".join(alt) + ")" + + if q: + if cconly: + result += "?" + else: + result = f"(?:{result})?" + return result + + def pattern(self): + """Export the Trie to a regex pattern.""" + return self._pattern(self.root) + + def export_to_regex(self): + """Export the Trie to a regex pattern.""" + return self._pattern(self.root) + +CAFFE2_TRIE = Trie() +CAFFE2_MAP = {} +PYTORCH_TRIE = Trie() +PYTORCH_MAP: Dict[str, object] = {} + +# In PyTorch, we map cuBLAS->rocBLAS and cuSPARSE->hipSPARSE. Note the prefix, roc versus hip. +# The 'hip' APIs offer a more direct CUDA-friendly mapping, but calling rocBLAS directly has better performance. +# Unfortunately, the roc* types and hip* types differ, i.e., rocblas_float_complex versus hipComplex. +# In the case of SPARSE, we must use the hip types for complex instead of the roc types, +# but the pytorch mappings assume roc. Therefore, we create a new SPARSE mapping that has a higher priority. +# Its mappings will trigger first, and only when a miss occurs will the lower-priority pytorch mapping take place. +# When a file contains "sparse" in the filename, a mapping marked with API_SPARSE is preferred over other choices. +# Similarly, "linalg" files require rocBLAS -> hipSOLVER so they also need special handling. 
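+# In short, lookups for "special" files consult PYTORCH_SPECIAL_MAP first and fall back to
+# PYTORCH_MAP on a miss (see pt_special_repl in preprocessor below). As a purely hypothetical
+# illustration, a cuBLAS identifier might map to its rocBLAS name in PYTORCH_MAP, while an
+# API_SPECIAL entry remaps the same identifier to a hipBLAS/hipSOLVER name for sparse/linalg files.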
+PYTORCH_SPECIAL_MAP = {} + +for mapping in CUDA_TO_HIP_MAPPINGS: + assert isinstance(mapping, Mapping) + for src, value in mapping.items(): + dst = value[0] + meta_data = value[1:] + if constants.API_CAFFE2 not in meta_data: + PYTORCH_TRIE.add(src) + # if src is already in PYTORCH_MAP and dst belongs to API_SPECIAL + # do not overwrite PYTORCH_MAP, store dst separately + if constants.API_SPECIAL in meta_data and PYTORCH_MAP.get(src, ""): + PYTORCH_SPECIAL_MAP[src] = dst + else: + PYTORCH_MAP[src] = dst + if constants.API_PYTORCH not in meta_data and constants.API_SPECIAL not in meta_data: + CAFFE2_TRIE.add(src) + CAFFE2_MAP[src] = dst +RE_CAFFE2_PREPROCESSOR = re.compile(CAFFE2_TRIE.export_to_regex()) +RE_PYTORCH_PREPROCESSOR = re.compile(fr'(?<=\W)({PYTORCH_TRIE.export_to_regex()})(?=\W)') + +RE_QUOTE_HEADER = re.compile(r'#include "([^"]+)"') +RE_ANGLE_HEADER = re.compile(r'#include <([^>]+)>') +RE_THC_GENERIC_FILE = re.compile(r'#define THC_GENERIC_FILE "([^"]+)"') +RE_CU_SUFFIX = re.compile(r'\.cu\b') # be careful not to pick up .cuh + +""" +Returns a HipifyResult object with the following details: + "hipified_path" : absolute path of hipified source file + "status" : "ok" if hipified file was written out + "skipped" if an identical hipified file already existed or hipified file couldn't be written out + "ignored" if the source file was a hipified file itself or not meant to be hipified + "current_state" : CurrentState.INITIALIZED if source file is first ready to be hipified + CurrentState.DONE if source file is done with hipification process +""" + + +def preprocessor( + output_directory: str, + filepath: str, + all_files: Iterable, + header_include_dirs: Iterable, + stats: Dict[str, List], + hip_clang_launch: bool, + is_pytorch_extension: bool, + clean_ctx: GeneratedFileCleaner, + show_progress: bool) -> HipifyResult: + """ Executes the CUDA -> HIP conversion on the specified file. 
""" + fin_path = os.path.abspath(os.path.join(output_directory, filepath)) + hipify_result = HIPIFY_FINAL_RESULT[fin_path] + if filepath not in all_files: + hipify_result.hipified_path = None + hipify_result.status = "[ignored, not to be hipified]" + hipify_result.current_state = CurrentState.DONE + return hipify_result + + rel_filepath = os.path.relpath(filepath, output_directory) + + with open(fin_path, encoding='utf-8') as fin: + if fin.readline() == HIPIFY_C_BREADCRUMB: + hipify_result.hipified_path = None + hipify_result.status = "[ignored, input is hipified output]" + hipify_result.current_state = CurrentState.DONE + return hipify_result + fin.seek(0) + output_source = fin.read() + + orig_output_source = output_source + + # get_hip_file_path needs a relative path to work correctly + fout_path = os.path.abspath(os.path.join(output_directory, get_hip_file_path(rel_filepath, is_pytorch_extension))) + if not os.path.exists(os.path.dirname(fout_path)): + clean_ctx.makedirs(os.path.dirname(fout_path)) + + # unsupported_calls statistics reporting is broken atm + def pt_repl(m): + return PYTORCH_MAP[m.group(0)] + + def pt_special_repl(m): + # checks SPECIAL map first, and if a miss occurs, falls back to pytorch mappings + return PYTORCH_SPECIAL_MAP.get(m.group(0), pt_repl(m)) + + + if is_pytorch_extension: + output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source) + else: + if is_special_file(rel_filepath): + output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_special_repl, output_source) + elif is_pytorch_file(rel_filepath): + output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source) + else: + def c2_repl(m): + return CAFFE2_MAP[m.group(0)] + output_source = RE_CAFFE2_PREPROCESSOR.sub(c2_repl, output_source) + + # Header rewrites + def mk_repl(templ, include_current_dir=True): + def repl(m): + f = m.group(1) + dirpath, filename = os.path.split(f) + if ( + f.startswith(("ATen/cuda", + "ATen/native/cuda", + "ATen/native/nested/cuda", + "ATen/native/quantized/cuda", + "ATen/native/sparse/cuda", + "ATen/native/transformers/cuda", + "THC/")) or + (f.startswith("THC") and not f.startswith("THCP")) + ): + return templ.format(get_hip_file_path(m.group(1), is_pytorch_extension)) + # if filename is one of the files being hipified for this extension + if (is_pytorch_extension and any(s.endswith(filename) for s in all_files)): + header_dir = None + header_filepath = None + # If include_current_dir True, look first in same dir as the including source file + if include_current_dir: + header_dir_to_check = os.path.dirname(fin_path) + header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f)) + if os.path.exists(header_path_to_check): + header_dir = header_dir_to_check + header_filepath = header_path_to_check + # If not found, look in include dirs one by one and first match wins + if header_filepath is None: + for header_include_dir in header_include_dirs: + header_dir_to_check = os.path.join(output_directory, header_include_dir) + header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f)) + if os.path.exists(header_path_to_check): + header_dir = header_dir_to_check + header_filepath = header_path_to_check + # If header file not found, keep as is + if header_filepath is None: + return m.group(0) + # Hipify header file first if needed + if header_filepath not in HIPIFY_FINAL_RESULT: + preprocess_file_and_save_result(output_directory, + header_filepath, + all_files, header_include_dirs, stats, hip_clang_launch, + is_pytorch_extension, clean_ctx, 
show_progress) + elif header_filepath in HIPIFY_FINAL_RESULT: + header_result = HIPIFY_FINAL_RESULT[header_filepath] + if header_result.current_state == CurrentState.INITIALIZED: + # get_hip_file_path needs a relative path to work correctly + header_rel_path = os.path.relpath(header_filepath, output_directory) + header_fout_path = os.path.abspath(os.path.join(output_directory, + get_hip_file_path(header_rel_path, is_pytorch_extension))) + header_result.hipified_path = header_fout_path + HIPIFY_FINAL_RESULT[header_filepath] = header_result + return templ.format(os.path.relpath(header_fout_path if header_fout_path is not None + else header_filepath, header_dir)) + hipified_header_filepath = HIPIFY_FINAL_RESULT[header_filepath].hipified_path + return templ.format(os.path.relpath(hipified_header_filepath if hipified_header_filepath is not None + else header_filepath, header_dir)) + + return m.group(0) + return repl + output_source = RE_QUOTE_HEADER.sub(mk_repl('#include "{0}"', True), output_source) + output_source = RE_ANGLE_HEADER.sub(mk_repl('#include <{0}>', False), output_source) + output_source = RE_THC_GENERIC_FILE.sub(mk_repl('#define THC_GENERIC_FILE "{0}"'), output_source) + + # CMakeLists.txt rewrites + if filepath.endswith('CMakeLists.txt'): + output_source = output_source.replace('CUDA', 'HIP') + output_source = output_source.replace('THC', 'THH') + output_source = RE_CU_SUFFIX.sub('.hip', output_source) + + # Perform Kernel Launch Replacements + if not hip_clang_launch: + output_source = processKernelLaunches(output_source, stats) + + # Replace std:: with non-std:: versions + if (filepath.endswith((".cu", ".cuh"))) and "PowKernel" not in filepath: + output_source = replace_math_functions(output_source) + + # Include header if device code is contained. + output_source = hip_header_magic(output_source) + + # Replace the extern __shared__ + # NOTE: No longer needed after transition from hcc to hipclang. 
+ # output_source = replace_extern_shared(output_source) + + # Don't write out identical hipified files for extensions if dirpath has not changed + if ( + is_pytorch_extension + and orig_output_source == output_source + and os.path.dirname(fin_path) == os.path.dirname(fout_path) + ): + hipify_result.hipified_path = fin_path + hipify_result.status = "[skipped, no changes]" + hipify_result.current_state = CurrentState.DONE + return hipify_result + + # Add hipify breadcrumb for C-style files to avoid re-hipification + if fin_path != fout_path and match_extensions(fin_path, (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".hpp")): + output_source = HIPIFY_C_BREADCRUMB + output_source + + do_write = True + if os.path.exists(fout_path): + with open(fout_path, encoding='utf-8') as fout_old: + do_write = fout_old.read() != output_source + if do_write: + try: + with clean_ctx.open(fout_path, 'w', encoding='utf-8') as fout: + fout.write(output_source) + hipify_result.hipified_path = fout_path + hipify_result.status = "[ok]" + hipify_result.current_state = CurrentState.DONE + return hipify_result + except PermissionError as e: + print(f"{bcolors.WARNING}Failed to save {fout_path} with \"{e.strerror}\", leaving {fin_path} unchanged.{bcolors.ENDC}", + file=sys.stderr) + hipify_result.hipified_path = fin_path + hipify_result.status = "[skipped, no permissions]" + hipify_result.current_state = CurrentState.DONE + return hipify_result + else: + hipify_result.hipified_path = fout_path + hipify_result.status = "[skipped, already hipified]" + hipify_result.current_state = CurrentState.DONE + return hipify_result + +def file_specific_replacement(filepath, search_string, replace_string, strict=False): + with openf(filepath, "r+") as f: + contents = f.read() + if strict: + contents = re.sub(fr'\b({re.escape(search_string)})\b', lambda x: replace_string, contents) + else: + contents = contents.replace(search_string, replace_string) + f.seek(0) + f.write(contents) + f.truncate() + + +def file_add_header(filepath, header): + with openf(filepath, "r+") as f: + contents = f.read() + if header[0] != "<" and header[-1] != ">": + header = f'"{header}"' + contents = (f'#include {header} \n') + contents + f.seek(0) + f.write(contents) + f.truncate() + + +def fix_static_global_kernels(in_txt): + """Static global kernels in HIP results in a compilation error.""" + in_txt = in_txt.replace(" __global__ static", "__global__") + return in_txt + + +RE_INCLUDE = re.compile(r"#include .*\n") + + +def extract_arguments(start, string): + """ Return the list of arguments in the upcoming function parameter closure. 
+        Example:
+        string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))'
+        arguments (output):
+            '[{'start': 1, 'end': 7},
+            {'start': 8, 'end': 16},
+            {'start': 17, 'end': 19},
+            {'start': 20, 'end': 53}]'
+    """
+
+    arguments = []
+    closures = {
+        "<": 0,
+        "(": 0
+    }
+    current_position = start
+    argument_start_pos = current_position + 1
+
+    # Search for final parenthesis
+    while current_position < len(string):
+        if string[current_position] == "(":
+            closures["("] += 1
+        elif string[current_position] == ")":
+            closures["("] -= 1
+        elif string[current_position] == "<":
+            closures["<"] += 1
+        elif string[current_position] == ">" and string[current_position - 1] != "-" and closures["<"] > 0:
+            closures["<"] -= 1
+
+        # Finished all arguments
+        if closures["("] == 0 and closures["<"] == 0:
+            # Add final argument
+            arguments.append({"start": argument_start_pos, "end": current_position})
+            break
+
+        # Finished current argument
+        if closures["("] == 1 and closures["<"] == 0 and string[current_position] == ",":
+            arguments.append({"start": argument_start_pos, "end": current_position})
+            argument_start_pos = current_position + 1
+
+        current_position += 1
+
+    return arguments
+
+
+def str2bool(v):
+    """ArgumentParser doesn't support type=bool. Thus, this helper method will convert
+    from possible string types to True / False."""
+    if v.lower() in ('yes', 'true', 't', 'y', '1'):
+        return True
+    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
+        return False
+    else:
+        raise argparse.ArgumentTypeError('Boolean value expected.')
+
+
+def hipify(
+    project_directory: str,
+    show_detailed: bool = False,
+    extensions: Iterable = (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".in", ".hpp"),
+    header_extensions: Iterable = (".cuh", ".h", ".hpp"),
+    output_directory: str = "",
+    header_include_dirs: Iterable = (),
+    includes: Iterable = ('*',),
+    extra_files: Iterable = (),
+    out_of_place_only: bool = False,
+    ignores: Iterable = (),
+    show_progress: bool = True,
+    hip_clang_launch: bool = False,
+    is_pytorch_extension: bool = False,
+    hipify_extra_files_only: bool = False,
+    clean_ctx: Optional[GeneratedFileCleaner] = None
+) -> HipifyFinalResult:
+    if project_directory == "":
+        project_directory = os.getcwd()
+
+    # Verify the project directory exists.
+    if not os.path.exists(project_directory):
+        print("The project folder specified does not exist.")
+        sys.exit(1)
+
+    # If no output directory, provide a default one.
+    if not output_directory:
+        # rstrip returns a new string; assign it, otherwise the call is a no-op.
+        project_directory = project_directory.rstrip("/")
+        output_directory = project_directory + "_amd"
+
+    if project_directory != output_directory:
+        includes = [include.replace(project_directory, output_directory) for include in includes]
+        ignores = [ignore.replace(project_directory, output_directory) for ignore in ignores]
+
+    # Copy from project directory to output directory if not done already.
+ if not os.path.exists(output_directory): + shutil.copytree(project_directory, output_directory) + + all_files = list(matched_files_iter(output_directory, includes=includes, + ignores=ignores, extensions=extensions, + out_of_place_only=out_of_place_only, + is_pytorch_extension=is_pytorch_extension)) + all_files_set = set(all_files) + for f in extra_files: + if not os.path.isabs(f): + f = os.path.join(output_directory, f) + if f not in all_files_set: + all_files.append(f) + + # List all files in header_include_paths to ensure they are hipified + from pathlib import Path + for header_include_dir in header_include_dirs: + if os.path.isabs(header_include_dir): + header_include_dir_path = Path(header_include_dir) + else: + header_include_dir_path = Path(os.path.join(output_directory, header_include_dir)) + for path in header_include_dir_path.rglob('*'): + if ( + path.is_file() + and _fnmatch(str(path), includes) + and (not _fnmatch(str(path), ignores)) + and match_extensions(path.name, header_extensions) + ): + all_files.append(str(path)) + + if clean_ctx is None: + clean_ctx = GeneratedFileCleaner(keep_intermediates=True) + + # Preprocessing statistics. + stats: Dict[str, List] = {"unsupported_calls": [], "kernel_launches": []} + + for filepath in (all_files if not hipify_extra_files_only else extra_files): + preprocess_file_and_save_result(output_directory, filepath, all_files, header_include_dirs, + stats, hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress) + + print(bcolors.OKGREEN + "Successfully preprocessed all matching files." + bcolors.ENDC, file=sys.stderr) + + # Show detailed summary + if show_detailed: + compute_stats(stats) + + return HIPIFY_FINAL_RESULT diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/version.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/version.py new file mode 100644 index 0000000000000000000000000000000000000000..1f356cc57bfa00a3b251402604c54702fb414c96 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/hipify/version.py @@ -0,0 +1 @@ +__version__ = '1.0.0' diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5fed94a43361ac10fb13ceaf31c60ff3a3961c8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__init__.py @@ -0,0 +1,414 @@ +#!/usr/bin/env python3 +""" +model_dump: a one-stop shop for TorchScript model inspection. + +The goal of this tool is to provide a simple way to extract lots of +useful information from a TorchScript model and make it easy for humans +to consume. It (mostly) replaces zipinfo, common uses of show_pickle, +and various ad-hoc analysis notebooks. + +The tool extracts information from the model and serializes it as JSON. +That JSON can then be rendered by an HTML+JS page, either by +loading the JSON over HTTP or producing a fully self-contained page +with all of the code and data burned-in. +""" + +# Maintainer notes follow. +""" +The implementation strategy has tension between 3 goals: +- Small file size. +- Fully self-contained. +- Easy, modern JS environment. +Using Preact and HTM achieves 1 and 2 with a decent result for 3. +However, the models I tested with result in ~1MB JSON output, +so even using something heavier like full React might be tolerable +if the build process can be worked out. 
+ +One principle I have followed that I think is very beneficial +is to keep the JSON data as close as possible to the model +and do most of the rendering logic on the client. +This makes for easier development (just refresh, usually), +allows for more laziness and dynamism, and lets us add more +views of the same data without bloating the HTML file. + +Currently, this code doesn't actually load the model or even +depend on any part of PyTorch. I don't know if that's an important +feature to maintain, but it's probably worth preserving the ability +to run at least basic analysis on models that cannot be loaded. + +I think the easiest way to develop this code is to cd into model_dump and +run "python -m http.server", then load http://localhost:8000/skeleton.html +in the browser. In another terminal, run +"python -m torch.utils.model_dump --style=json FILE > \ + torch/utils/model_dump/model_info.json" +every time you update the Python code or model. +When you update JS, just refresh. + +Possible improvements: + - Fix various TODO comments in this file and the JS. + - Make the HTML much less janky, especially the auxiliary data panel. + - Make the auxiliary data panel start small, expand when + data is available, and have a button to clear/contract. + - Clean up the JS. There's a lot of copypasta because + I don't really know how to use Preact. + - Make the HTML render and work nicely inside a Jupyter notebook. + - Add the ability for JS to choose the URL to load the JSON based + on the page URL (query or hash). That way we could publish the + inlined skeleton once and have it load various JSON blobs. + - Add a button to expand all expandable sections so ctrl-F works well. + - Add hyperlinking from data to code, and code to code. + - Add hyperlinking from debug info to Diffusion. + - Make small tensor contents available. + - Do something nice for quantized models + (they probably don't work at all right now). 
+""" + +import sys +import os +import io +import pathlib +import re +import argparse +import zipfile +import json +import pickle +import pprint +import urllib.parse + +from typing import ( + Dict, +) + +import torch.utils.show_pickle + + +DEFAULT_EXTRA_FILE_SIZE_LIMIT = 16 * 1024 + +__all__ = ['get_storage_info', 'hierarchical_pickle', 'get_model_info', 'get_inline_skeleton', + 'burn_in_info', 'get_info_and_burn_skeleton'] + +def get_storage_info(storage): + assert isinstance(storage, torch.utils.show_pickle.FakeObject) + assert storage.module == "pers" + assert storage.name == "obj" + assert storage.state is None + assert isinstance(storage.args, tuple) + assert len(storage.args) == 1 + sa = storage.args[0] + assert isinstance(sa, tuple) + assert len(sa) == 5 + assert sa[0] == "storage" + assert isinstance(sa[1], torch.utils.show_pickle.FakeClass) + assert sa[1].module == "torch" + assert sa[1].name.endswith("Storage") + storage_info = [sa[1].name.replace("Storage", "")] + list(sa[2:]) + return storage_info + + +def hierarchical_pickle(data): + if isinstance(data, (bool, int, float, str, type(None))): + return data + if isinstance(data, list): + return [hierarchical_pickle(d) for d in data] + if isinstance(data, tuple): + return { + "__tuple_values__": hierarchical_pickle(list(data)), + } + if isinstance(data, dict): + return { + "__is_dict__": True, + "keys": hierarchical_pickle(list(data.keys())), + "values": hierarchical_pickle(list(data.values())), + } + if isinstance(data, torch.utils.show_pickle.FakeObject): + typename = f"{data.module}.{data.name}" + if ( + typename.startswith(('__torch__.', 'torch.jit.LoweredWrapper.', 'torch.jit.LoweredModule.')) + ): + assert data.args == () + return { + "__module_type__": typename, + "state": hierarchical_pickle(data.state), + } + if typename == "torch._utils._rebuild_tensor_v2": + assert data.state is None + if len(data.args) == 6: + storage, offset, size, stride, requires_grad, hooks = data.args + else: + storage, offset, size, stride, requires_grad, hooks, metadata = data.args + storage_info = get_storage_info(storage) + return {"__tensor_v2__": [storage_info, offset, size, stride, requires_grad]} + if typename == "torch._utils._rebuild_qtensor": + assert data.state is None + storage, offset, size, stride, quantizer, requires_grad, hooks = data.args + storage_info = get_storage_info(storage) + assert isinstance(quantizer, tuple) + assert isinstance(quantizer[0], torch.utils.show_pickle.FakeClass) + assert quantizer[0].module == "torch" + if quantizer[0].name == "per_tensor_affine": + assert len(quantizer) == 3 + assert isinstance(quantizer[1], float) + assert isinstance(quantizer[2], int) + quantizer_extra = list(quantizer[1:3]) + else: + quantizer_extra = [] + quantizer_json = [quantizer[0].name] + quantizer_extra + return {"__qtensor__": [storage_info, offset, size, stride, quantizer_json, requires_grad]} + if typename == "torch.jit._pickle.restore_type_tag": + assert data.state is None + obj, typ = data.args + assert isinstance(typ, str) + return hierarchical_pickle(obj) + if re.fullmatch(r"torch\.jit\._pickle\.build_[a-z]+list", typename): + assert data.state is None + ls, = data.args + assert isinstance(ls, list) + return hierarchical_pickle(ls) + if typename == "torch.device": + assert data.state is None + name, = data.args + assert isinstance(name, str) + # Just forget that it was a device and return the name. 
+ return name + if typename == "builtin.UnicodeDecodeError": + assert data.state is None + msg, = data.args + assert isinstance(msg, str) + # Hack: Pretend this is a module so we don't need custom serialization. + # Hack: Wrap the message in a tuple so it looks like a nice state object. + # TODO: Undo at least that second hack. We should support string states. + return { + "__module_type__": typename, + "state": hierarchical_pickle((msg,)), + } + raise Exception(f"Can't prepare fake object of type for JS: {typename}") + raise Exception(f"Can't prepare data of type for JS: {type(data)}") + + +def get_model_info( + path_or_file, + title=None, + extra_file_size_limit=DEFAULT_EXTRA_FILE_SIZE_LIMIT): + """Get JSON-friendly information about a model. + + The result is suitable for being saved as model_info.json, + or passed to burn_in_info. + """ + + if isinstance(path_or_file, os.PathLike): + default_title = os.fspath(path_or_file) + file_size = path_or_file.stat().st_size # type: ignore[attr-defined] + elif isinstance(path_or_file, str): + default_title = path_or_file + file_size = pathlib.Path(path_or_file).stat().st_size + else: + default_title = "buffer" + path_or_file.seek(0, io.SEEK_END) + file_size = path_or_file.tell() + path_or_file.seek(0) + + title = title or default_title + + with zipfile.ZipFile(path_or_file) as zf: + path_prefix = None + zip_files = [] + for zi in zf.infolist(): + prefix = re.sub("/.*", "", zi.filename) + if path_prefix is None: + path_prefix = prefix + elif prefix != path_prefix: + raise Exception(f"Mismatched prefixes: {path_prefix} != {prefix}") + zip_files.append(dict( + filename=zi.filename, + compression=zi.compress_type, + compressed_size=zi.compress_size, + file_size=zi.file_size, + )) + + assert path_prefix is not None + version = zf.read(path_prefix + "/version").decode("utf-8").strip() + + def get_pickle(name): + assert path_prefix is not None + with zf.open(path_prefix + f"/{name}.pkl") as handle: + raw = torch.utils.show_pickle.DumpUnpickler(handle, catch_invalid_utf8=True).load() + return hierarchical_pickle(raw) + + model_data = get_pickle("data") + constants = get_pickle("constants") + + # Intern strings that are likely to be re-used. + # Pickle automatically detects shared structure, + # so re-used strings are stored efficiently. + # However, JSON has no way of representing this, + # so we have to do it manually. + interned_strings : Dict[str, int] = {} + + def ist(s): + if s not in interned_strings: + interned_strings[s] = len(interned_strings) + return interned_strings[s] + + code_files = {} + for zi in zf.infolist(): + if not zi.filename.endswith(".py"): + continue + with zf.open(zi) as handle: + raw_code = handle.read() + with zf.open(zi.filename + ".debug_pkl") as handle: + raw_debug = handle.read() + + # Parse debug info and add begin/end markers if not present + # to ensure that we cover the entire source code. 
+ debug_info_t = pickle.loads(raw_debug) + text_table = None + + if (len(debug_info_t) == 3 and + isinstance(debug_info_t[0], str) and + debug_info_t[0] == 'FORMAT_WITH_STRING_TABLE'): + _, text_table, content = debug_info_t + + def parse_new_format(line): + # (0, (('', '', 0), 0, 0)) + num, ((text_indexes, fname_idx, offset), start, end), tag = line + text = ''.join(text_table[x] for x in text_indexes) # type: ignore[index] + fname = text_table[fname_idx] # type: ignore[index] + return num, ((text, fname, offset), start, end), tag + + debug_info_t = map(parse_new_format, content) + + debug_info = list(debug_info_t) + if not debug_info: + debug_info.append((0, (('', '', 0), 0, 0))) + if debug_info[-1][0] != len(raw_code): + debug_info.append((len(raw_code), (('', '', 0), 0, 0))) + + code_parts = [] + for di, di_next in zip(debug_info, debug_info[1:]): + start, source_range, *_ = di + end = di_next[0] + assert end > start + source, s_start, s_end = source_range + s_text, s_file, s_line = source + # TODO: Handle this case better. TorchScript ranges are in bytes, + # but JS doesn't really handle byte strings. + # if bytes and chars are not equivalent for this string, + # zero out the ranges so we don't highlight the wrong thing. + if len(s_text) != len(s_text.encode("utf-8")): + s_start = 0 + s_end = 0 + text = raw_code[start:end] + code_parts.append([text.decode("utf-8"), ist(s_file), s_line, ist(s_text), s_start, s_end]) + code_files[zi.filename] = code_parts + + extra_files_json_pattern = re.compile(re.escape(path_prefix) + "/extra/.*\\.json") + extra_files_jsons = {} + for zi in zf.infolist(): + if not extra_files_json_pattern.fullmatch(zi.filename): + continue + if zi.file_size > extra_file_size_limit: + continue + with zf.open(zi) as handle: + try: + json_content = json.load(handle) + extra_files_jsons[zi.filename] = json_content + except json.JSONDecodeError: + extra_files_jsons[zi.filename] = "INVALID JSON" + + always_render_pickles = { + "bytecode.pkl", + } + extra_pickles = {} + for zi in zf.infolist(): + if not zi.filename.endswith(".pkl"): + continue + with zf.open(zi) as handle: + # TODO: handle errors here and just ignore the file? + # NOTE: For a lot of these files (like bytecode), + # we could get away with just unpickling, but this should be safer. + obj = torch.utils.show_pickle.DumpUnpickler(handle, catch_invalid_utf8=True).load() + buf = io.StringIO() + pprint.pprint(obj, buf) + contents = buf.getvalue() + # Checked the rendered length instead of the file size + # because pickles with shared structure can explode in size during rendering. + if os.path.basename(zi.filename) not in always_render_pickles and \ + len(contents) > extra_file_size_limit: + continue + extra_pickles[zi.filename] = contents + + return {"model": dict( + title=title, + file_size=file_size, + version=version, + zip_files=zip_files, + interned_strings=list(interned_strings), + code_files=code_files, + model_data=model_data, + constants=constants, + extra_files_jsons=extra_files_jsons, + extra_pickles=extra_pickles, + )} + + +def get_inline_skeleton(): + """Get a fully-inlined skeleton of the frontend. + + The returned HTML page has no external network dependencies for code. + It can load model_info.json over HTTP, or be passed to burn_in_info. 
+ """ + + import importlib.resources + + skeleton = importlib.resources.read_text(__package__, "skeleton.html") + js_code = importlib.resources.read_text(__package__, "code.js") + for js_module in ["preact", "htm"]: + js_lib = importlib.resources.read_binary(__package__, f"{js_module}.mjs") + js_url = "data:application/javascript," + urllib.parse.quote(js_lib) + js_code = js_code.replace(f"https://unpkg.com/{js_module}?module", js_url) + skeleton = skeleton.replace(' src="./code.js">', ">\n" + js_code) + return skeleton + + +def burn_in_info(skeleton, info): + """Burn model info into the HTML skeleton. + + The result will render the hard-coded model info and + have no external network dependencies for code or data. + """ + + # Note that Python's json serializer does not escape slashes in strings. + # Since we're inlining this JSON directly into a script tag, a string + # containing "" would end the script prematurely and + # mess up our page. Unconditionally escape fixes that. + return skeleton.replace( + "BURNED_IN_MODEL_INFO = null", + "BURNED_IN_MODEL_INFO = " + json.dumps(info, sort_keys=True).replace("/", "\\/")) + + +def get_info_and_burn_skeleton(path_or_bytesio, **kwargs): + model_info = get_model_info(path_or_bytesio, **kwargs) + skeleton = get_inline_skeleton() + page = burn_in_info(skeleton, model_info) + return page + + +def main(argv, *, stdout=None): + parser = argparse.ArgumentParser() + parser.add_argument("--style", choices=["json", "html"]) + parser.add_argument("--title") + parser.add_argument("model") + args = parser.parse_args(argv[1:]) + + info = get_model_info(args.model, title=args.title) + + output = stdout or sys.stdout + + if args.style == "json": + output.write(json.dumps(info, sort_keys=True) + "\n") + elif args.style == "html": + skeleton = get_inline_skeleton() + page = burn_in_info(skeleton, info) + output.write(page) + else: + raise Exception("Invalid style") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__main__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..5d4bdac389bb1f270d74efb6c876258d46077110 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__main__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python3 +import sys +from . 
import main + +sys.exit(main(sys.argv)) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8be021f4f64b01009e16bd02d83ec0db1a322a72 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__pycache__/__main__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68ff5bb7f23accf0d6047c901c4d8ab05b1f3200 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/__pycache__/__main__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/code.js b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/code.js new file mode 100644 index 0000000000000000000000000000000000000000..173ddfb639d847159ee4fdf46691404bf1bbb7a3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/code.js @@ -0,0 +1,689 @@ +import { h, Component, render } from 'https://unpkg.com/preact?module'; +import htm from 'https://unpkg.com/htm?module'; + +const html = htm.bind(h); + +const BURNED_IN_MODEL_INFO = null; + +// https://stackoverflow.com/a/20732091 +function humanFileSize(size) { + if (size == 0) { return "0 B"; } + var i = Math.floor( Math.log(size) / Math.log(1024) ); + return (size / Math.pow(1024, i)).toFixed(2) * 1 + ' ' + ['B', 'kB', 'MB', 'GB', 'TB'][i]; +} + +function caret(down) { + return down ? "\u25BE" : "\u25B8"; +} + +class Blamer { + constructor() { + this.blame_on_click = false; + this.aux_content_pane = null; + } + + setAuxContentPane(pane) { + this.aux_content_pane = pane; + } + + readyBlame() { + this.blame_on_click = true; + } + + maybeBlame(arg) { + if (!this.blame_on_click) { + return; + } + this.blame_on_click = false; + if (!this.aux_content_pane) { + return; + } + this.aux_content_pane.doBlame(arg); + } +} + +let blame = new Blamer(); + +class Hider extends Component { + constructor() { + super(); + this.state = { shown: null }; + } + + componentDidMount() { + this.setState({ shown: this.props.shown === "true" }); + } + + render({name, children}, {shown}) { + let my_caret = html` this.click()} >${caret(shown)}`; + return html`
+

${my_caret} ${name}

+
${shown ? this.props.children : []}
`; + } + + click() { + this.setState({shown: !this.state.shown}); + } +} + +function ModelSizeSection({model: {file_size, zip_files}}) { + let store_size = 0; + let compr_size = 0; + for (const zi of zip_files) { + if (zi.compression === 0) { + // TODO: Maybe check that compressed_size === file_size. + store_size += zi.compressed_size; + } else { + compr_size += zi.compressed_size; + } + } + let zip_overhead = file_size - store_size - compr_size; + // TODO: Better formatting. Right-align this. + return html` + <${Hider} name="Model Size" shown=true> +
.
+      Model size: ${file_size} (${humanFileSize(file_size)})
+      Stored files: ${store_size} (${humanFileSize(store_size)})
+      Compressed files: ${compr_size} (${humanFileSize(compr_size)})
+      Zip overhead: ${zip_overhead} (${humanFileSize(zip_overhead)})
+    
`; +} + +function StructuredDataSection({name, data, shown}) { + return html` + <${Hider} name=${name} shown=${shown}> +
+ <${StructuredData} data=${data} indent="" prefix=""/> +
`; +} + +class StructuredData extends Component { + constructor() { + super(); + this.state = { shown: false }; + + this.INLINE_TYPES = new Set(["boolean", "number", "string"]) + this.IGNORED_STATE_KEYS = new Set(["training", "_is_full_backward_hook"]) + } + + click() { + this.setState({shown: !this.state.shown}); + } + + expando(data) { + if (data === null || this.INLINE_TYPES.has(typeof(data))) { + return false; + } + if (typeof(data) != "object") { + throw new Error("Not an object"); + } + if (Array.isArray(data)) { + // TODO: Maybe show simple lists and tuples on one line. + return true; + } + if (data.__tuple_values__) { + // TODO: Maybe show simple lists and tuples on one line. + return true; + } + if (data.__is_dict__) { + // TODO: Maybe show simple (empty?) dicts on one line. + return true; + } + if (data.__module_type__) { + return true; + } + if (data.__tensor_v2__) { + return false; + } + if (data.__qtensor__) { + return false; + } + throw new Error("Can't handle data type.", data); + } + + renderHeadline(data) { + if (data === null) { + return "None"; + } + if (typeof(data) == "boolean") { + const sd = String(data); + return sd.charAt(0).toUpperCase() + sd.slice(1); + } + if (typeof(data) == "number") { + return JSON.stringify(data); + } + if (typeof(data) == "string") { + return JSON.stringify(data); + } + if (typeof(data) != "object") { + throw new Error("Not an object"); + } + if (Array.isArray(data)) { + return "list(["; + } + if (data.__tuple_values__) { + return "tuple(("; + } + if (data.__is_dict__) { + return "dict({"; + } + if (data.__module_type__) { + return data.__module_type__ + "()"; + } + if (data.__tensor_v2__) { + const [storage, offset, size, stride, grad] = data.__tensor_v2__; + const [dtype, key, device, numel] = storage; + return this.renderTensor( + "tensor", dtype, key, device, numel, offset, size, stride, grad, []); + } + if (data.__qtensor__) { + const [storage, offset, size, stride, quantizer, grad] = data.__qtensor__; + const [dtype, key, device, numel] = storage; + let extra_parts = []; + if (quantizer[0] == "per_tensor_affine") { + extra_parts.push(`scale=${quantizer[1]}`); + extra_parts.push(`zero_point=${quantizer[2]}`); + } else { + extra_parts.push(`quantizer=${quantizer[0]}`); + } + return this.renderTensor( + "qtensor", dtype, key, device, numel, offset, size, stride, grad, extra_parts); + } + throw new Error("Can't handle data type.", data); + } + + renderTensor( + prefix, + dtype, + storage_key, + device, + storage_numel, + offset, + size, + stride, + grad, + extra_parts) { + let parts = [ + "(" + size.join(",") + ")", + dtype, + ]; + parts.push(...extra_parts); + if (device != "cpu") { + parts.push(device); + } + if (grad) { + parts.push("grad"); + } + // TODO: Check stride and indicate if the tensor is channels-last or non-contiguous + // TODO: Check size, stride, offset, and numel and indicate if + // the tensor doesn't use all data in storage. + // TODO: Maybe show key? + void(offset); + void(stride); + void(storage_key); + void(storage_numel); + return prefix + "(" + parts.join(", ") + ")"; + } + + renderBody(indent, data) { + if (data === null || this.INLINE_TYPES.has(typeof(data))) { + throw "Should not reach here." + } + if (typeof(data) != "object") { + throw new Error("Not an object"); + } + if (Array.isArray(data)) { + let new_indent = indent + "\u00A0\u00A0"; + let parts = []; + for (let idx = 0; idx < data.length; idx++) { + // Does it make sense to put explicit index numbers here? + parts.push(html`
<${StructuredData} prefix=${idx + ": "} indent=${new_indent} data=${data[idx]} />`); + } + return parts; + } + if (data.__tuple_values__) { + // Handled the same as lists. + return this.renderBody(indent, data.__tuple_values__); + } + if (data.__is_dict__) { + let new_indent = indent + "\u00A0\u00A0"; + let parts = []; + for (let idx = 0; idx < data.keys.length; idx++) { + if (typeof(data.keys[idx]) != "string") { + parts.push(html`
${new_indent}Non-string key`); + } else { + parts.push(html`
<${StructuredData} prefix=${data.keys[idx] + ": "} indent=${new_indent} data=${data.values[idx]} />`); + } + } + return parts; + } + if (data.__module_type__) { + const mstate = data.state; + if (mstate === null || typeof(mstate) != "object") { + throw new Error("Bad module state"); + } + let new_indent = indent + "\u00A0\u00A0"; + let parts = []; + if (mstate.__is_dict__) { + // TODO: Less copy/paste between this and normal dicts. + for (let idx = 0; idx < mstate.keys.length; idx++) { + if (typeof(mstate.keys[idx]) != "string") { + parts.push(html`
${new_indent}Non-string key`); + } else if (this.IGNORED_STATE_KEYS.has(mstate.keys[idx])) { + // Do nothing. + } else { + parts.push(html`
<${StructuredData} prefix=${mstate.keys[idx] + ": "} indent=${new_indent} data=${mstate.values[idx]} />`); + } + } + } else if (mstate.__tuple_values__) { + parts.push(html`
<${StructuredData} prefix="" indent=${new_indent} data=${mstate} />`); + } else if (mstate.__module_type__) { + // We normally wouldn't have the state of a module be another module, + // but we use "modules" to encode special values (like Unicode decode + // errors) that might be valid states. Just go with it. + parts.push(html`
<${StructuredData} prefix="" indent=${new_indent} data=${mstate} />`); + } else { + throw new Error("Bad module state"); + } + return parts; + } + if (data.__tensor_v2__) { + throw "Should not reach here." + } + if (data.__qtensor__) { + throw "Should not reach here." + } + throw new Error("Can't handle data type.", data); + } + + render({data, indent, prefix}, {shown}) { + const exp = this.expando(data) ? html` this.click()} >${caret(shown)} ` : ""; + const headline = this.renderHeadline(data); + const body = shown ? this.renderBody(indent, data) : ""; + return html`${indent}${exp}${prefix}${headline}${body}`; + } +} + +function ZipContentsSection({model: {zip_files}}) { + // TODO: Add human-readable sizes? + // TODO: Add sorting options? + // TODO: Add hierarchical collapsible tree? + return html` + <${Hider} name="Zip Contents" shown=false> + + + + + + + + + + + ${zip_files.map(zf => html` + + + + + `)} + +
<th>Mode</th> <th>Size</th> <th>Compressed</th> <th>Name</th>
<td>${{0: "store", 8: "deflate"}[zf.compression] || zf.compression}</td> <td>${zf.file_size}</td> <td>${zf.compressed_size}</td> <td>${zf.filename}</td>
`; +} + +function CodeSection({model: {code_files}}) { + return html` + <${Hider} name="Code" shown=false> +
+ ${Object.entries(code_files).map(([fn, code]) => html`<${OneCodeSection} + filename=${fn} code=${code} />`)} +
`; +} + +class OneCodeSection extends Component { + constructor() { + super(); + this.state = { shown: false }; + } + + click() { + const shown = !this.state.shown; + this.setState({shown: shown}); + } + + render({filename, code}, {shown}) { + const header = html` +

+ this.click()} >${caret(shown)} + ${filename}

+ `; + if (!shown) { + return header; + } + return html` + ${header} +
${code.map(c => this.renderBlock(c))}
+ `; + } + + renderBlock([text, ist_file, line, ist_s_text, s_start, s_end]) { + return html` blame.maybeBlame({ist_file, line, ist_s_text, s_start, s_end})} + >${text}`; + } +} + +function ExtraJsonSection({files}) { + return html` + <${Hider} name="Extra files (JSON)" shown=false> +
+ Use "Log Raw Model Info" for a hierarchical view in the browser console.
+ ${Object.entries(files).map(([fn, json]) => html`<${OneJsonSection} + filename=${fn} json=${json} />`)} +
`; +} + +class OneJsonSection extends Component { + constructor() { + super(); + this.state = { shown: false }; + } + + click() { + const shown = !this.state.shown; + this.setState({shown: shown}); + } + + render({filename, json}, {shown}) { + const header = html` +

+ this.click()} >${caret(shown)} + ${filename}

+ `; + if (!shown) { + return header; + } + return html` + ${header} +
${JSON.stringify(json, null, 2)}
+ `; + } +} + +function ExtraPicklesSection({files}) { + return html` + <${Hider} name="Extra Pickles" shown=false> +
+ ${Object.entries(files).map(([fn, content]) => html`<${OnePickleSection} + filename=${fn} content=${content} />`)} +
`; +} + +class OnePickleSection extends Component { + constructor() { + super(); + this.state = { shown: false }; + } + + click() { + const shown = !this.state.shown; + this.setState({shown: shown}); + } + + render({filename, content}, {shown}) { + const header = html` +

+ this.click()} >${caret(shown)} + ${filename}

+ `; + if (!shown) { + return header; + } + return html` + ${header} +
${content}
+ `; + } +} + +function assertStorageAreEqual(key, lhs, rhs) { + if (lhs.length !== rhs.length || + !lhs.every((val, idx) => val === rhs[idx])) { + throw new Error("Storage mismatch for key '" + key + "'"); + } +} + +function computeTensorMemory(numel, dtype) { + const sizes = { + "Byte": 1, + "Char": 1, + "Short": 2, + "Int": 4, + "Long": 8, + "Half": 2, + "Float": 4, + "Double": 8, + "ComplexHalf": 4, + "ComplexFloat": 8, + "ComplexDouble": 16, + "Bool": 1, + "QInt8": 1, + "QUInt8": 1, + "QInt32": 4, + "BFloat16": 2, + }; + let dtsize = sizes[dtype]; + if (!dtsize) { + throw new Error("Unrecognized dtype: " + dtype); + } + return numel * dtsize; +} + +// TODO: Maybe track by dtype as well. +// TODO: Maybe distinguish between visible size and storage size. +function getTensorStorages(data) { + if (data === null) { + return new Map(); + } + if (typeof(data) == "boolean") { + return new Map(); + } + if (typeof(data) == "number") { + return new Map(); + } + if (typeof(data) == "string") { + return new Map(); + } + if (typeof(data) != "object") { + throw new Error("Not an object"); + } + if (Array.isArray(data)) { + let result = new Map(); + for (const item of data) { + const tensors = getTensorStorages(item); + for (const [key, storage] of tensors.entries()) { + if (!result.has(key)) { + result.set(key, storage); + } else { + const old_storage = result.get(key); + assertStorageAreEqual(key, old_storage, storage); + } + } + } + return result; + } + if (data.__tuple_values__) { + return getTensorStorages(data.__tuple_values__); + } + if (data.__is_dict__) { + return getTensorStorages(data.values); + } + if (data.__module_type__) { + return getTensorStorages(data.state); + } + if (data.__tensor_v2__) { + const [storage, offset, size, stride, grad] = data.__tensor_v2__; + const [dtype, key, device, numel] = storage; + return new Map([[key, storage]]); + } + if (data.__qtensor__) { + const [storage, offset, size, stride, quantizer, grad] = data.__qtensor__; + const [dtype, key, device, numel] = storage; + return new Map([[key, storage]]); + } + throw new Error("Can't handle data type.", data); +} + +function getTensorMemoryByDevice(pickles) { + let all_tensors = []; + for (const [name, pickle] of pickles) { + const tensors = getTensorStorages(pickle); + all_tensors.push(...tensors.values()); + } + let result = {}; + for (const storage of all_tensors.values()) { + const [dtype, key, device, numel] = storage; + const size = computeTensorMemory(numel, dtype); + result[device] = (result[device] || 0) + size; + } + return result; +} + +// Make this a separate component so it is rendered lazily. +class OpenTensorMemorySection extends Component { + render({model: {model_data, constants}}) { + let sizes = getTensorMemoryByDevice(new Map([ + ["data", model_data], + ["constants", constants], + ])); + return html` + + + + + + + + + + ${Object.entries(sizes).map(([dev, size]) => html` + + + + `)} + +
<th>Device</th> <th>Bytes</th> <th>Human</th>
<td>${dev}</td> <td>${size}</td> <td>${humanFileSize(size)}</td>
`; + } +} + +function TensorMemorySection({model}) { + return html` + <${Hider} name="Tensor Memory" shown=false> + <${OpenTensorMemorySection} model=${model} />`; +} + +class AuxContentPane extends Component { + constructor() { + super(); + this.state = { + blame_info: null, + }; + } + + doBlame(arg) { + this.setState({...this.state, blame_info: arg}); + } + + render({model: {interned_strings}}, {blame_info}) { + let blame_content = ""; + if (blame_info) { + const {ist_file, line, ist_s_text, s_start, s_end} = blame_info; + let s_text = interned_strings[ist_s_text]; + if (s_start != 0 || s_end != s_text.length) { + let prefix = s_text.slice(0, s_start); + let main = s_text.slice(s_start, s_end); + let suffix = s_text.slice(s_end); + s_text = html`${prefix}${main}${suffix}`; + } + blame_content = html` +

${interned_strings[ist_file]}:${line}
 + ${s_start}:${s_end}
 + ${s_text}

+ `; + } + return html` + +
+ ${blame_content} + `; + } +} + +class App extends Component { + constructor() { + super(); + this.state = { + err: false, + model: null, + }; + } + + componentDidMount() { + const app = this; + if (BURNED_IN_MODEL_INFO !== null) { + app.setState({model: BURNED_IN_MODEL_INFO}); + } else { + fetch("./model_info.json").then(function(response) { + if (!response.ok) { + throw new Error("Response not ok."); + } + return response.json(); + }).then(function(body) { + app.setState({model: body}); + }).catch(function(error) { + console.log("Top-level error: ", error); + }); + } + } + + componentDidCatch(error) { + void(error); + this.setState({...this.state, err: true}); + } + + render(_, {err}) { + if (this.state.model === null) { + return html`

Loading...

`; + } + + const model = this.state.model.model; + + let error_msg = ""; + if (err) { + error_msg = html`

An error occurred. Check the console.

`; + } + + return html` + ${error_msg} +
+

TorchScript Model (version ${model.version}): ${model.title}

+ + <${ModelSizeSection} model=${model}/> + <${StructuredDataSection} name="Model Data" data=${model.model_data} shown=true/> + <${StructuredDataSection} name="Constants" data=${model.constants} shown=false/> + <${ZipContentsSection} model=${model}/> + <${CodeSection} model=${model}/> + <${ExtraJsonSection} files=${model.extra_files_jsons}/> + <${ExtraPicklesSection} files=${model.extra_pickles}/> + <${TensorMemorySection} model=${model}/> +
+
+ <${AuxContentPane} + err=${this.state.error} + model=${model} + ref=${(p) => blame.setAuxContentPane(p)}/> +
+ `; + } +} + +render(h(App), document.body); diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/htm.mjs b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/htm.mjs new file mode 100644 index 0000000000000000000000000000000000000000..06f25a13d8021ff4f43de442bbf0279f24735d6c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/htm.mjs @@ -0,0 +1,2 @@ +// HTM, Apache License +var n=function(t,s,r,e){var u;s[0]=0;for(var h=1;h=5&&((e||!n&&5===r)&&(h.push(r,0,e,s),r=6),n&&(h.push(r,n,0,s),r=6)),e=""},a=0;a"===t?(r=1,e=""):e=t+e[0]:u?t===u?u="":e+=t:'"'===t||"'"===t?u=t:">"===t?(p(),r=1):r&&("="===t?(r=5,s=e,e=""):"/"===t&&(r<5||">"===n[a][l+1])?(p(),3===r&&(h=h[0]),r=h,(h=h[0]).push(2,0,r),r=0):" "===t||"\t"===t||"\n"===t||"\r"===t?(p(),r=2):e+=t),3===r&&"!--"===e&&(r=4,h=h[0])}return p(),h}(s)),r),arguments,[])).length>1?r:r[0]} diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/preact.mjs b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/preact.mjs new file mode 100644 index 0000000000000000000000000000000000000000..8c85bd948c6772ca8d40fc8d6fab6a220d55a1ef --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_dump/preact.mjs @@ -0,0 +1,2 @@ +// Preact, MIT License +var n,l,u,i,t,o,r={},f=[],e=/acit|ex(?:s|g|n|p|$)|rph|grid|ows|mnc|ntw|ine[ch]|zoo|^ord|itera/i;function c(e,n){for(var t in n)e[t]=n[t];return e}function s(e){var n=e.parentNode;n&&n.removeChild(e)}function a(e,n,t){var _,l,o,r=arguments,i={};for(o in n)"key"==o?_=n[o]:"ref"==o?l=n[o]:i[o]=n[o];if(arguments.length>3)for(t=[t],o=3;o0?v(m.type,m.props,m.key,null,m.__v):m)){if(m.__=t,m.__b=t.__b+1,null===(h=P[p])||h&&m.key==h.key&&m.type===h.type)P[p]=void 0;else for(a=0;a3)for(t=[t],o=3;o + + + TorchScript Model + + + + + + + +