import atexit
import collections
import contextlib
import copy
import cProfile
import dataclasses
import datetime
import dis
import enum
import functools
import gc
import inspect
import itertools
import linecache
import logging
import math
import operator
import os
import pstats
import sys
import textwrap
import time
import types
import typing
import weakref
from contextlib import contextmanager
from functools import lru_cache, wraps
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import torch._logging
import torch._numpy as tnp
from torch._guards import detect_fake_mode  # noqa: F401
from torch._dynamo import config
# NOTE: Make sure `NP_SUPPORTED_MODULES` and `NP_TO_TNP_MODULE` are in sync.
NP_SUPPORTED_MODULES = (np, np.fft, np.linalg, np.random)
NP_TO_TNP_MODULE = {
    np: tnp,
    np.fft: tnp.fft,
    np.linalg: tnp.linalg,
    np.random: tnp.random,
}
import importlib
import torch
import torch._functorch.config
import torch.fx.experimental.symbolic_shapes
from torch import fx
from torch._dispatch.python import enable_python_dispatcher
from torch._subclasses.fake_tensor import FakeTensor
from torch.nn.modules.lazy import LazyModuleMixin
from torch.utils._pytree import tree_map
counters = collections.defaultdict(collections.Counter)
troubleshooting_url = "https://pytorch.org/docs/master/compile/troubleshooting.html"
nnmodule_doc_url = "https://pytorch.org/docs/master/compile/nn-module.html"
nnmodule_doc_url_msg = f"See {nnmodule_doc_url} for more information and limitations."
log = logging.getLogger(__name__)
# profiling compilation time by function
compilation_time_metrics = collections.OrderedDict()
# profiling compilation time by frame phase
frame_phase_timing = collections.OrderedDict()
timer_counter = itertools.count()
def tabulate(rows, headers):
try:
import tabulate
return tabulate.tabulate(rows, headers=headers)
except ImportError:
return "\n".join(
", ".join(map(str, row)) for row in itertools.chain([headers], rows)
)
def dynamo_profiled(func):
@wraps(func)
def profile_wrapper(*args, **kwargs):
global timer_counter
        # Take a single counter value so the data file name and the printed
        # iteration number stay in sync.
        profile_id = next(timer_counter)
        datafn = (
            func.__name__ + f"{profile_id}.profile"
        )  # Name the data file sensibly
prof = cProfile.Profile()
prof.enable()
retval = prof.runcall(func, *args, **kwargs)
prof.disable()
print(f"### Cprofile for {func.__name__} iter {next(timer_counter)} ###")
ps = pstats.Stats(prof)
ps.sort_stats(pstats.SortKey.TIME).print_stats(20)
ps.sort_stats(pstats.SortKey.CUMULATIVE).print_stats(20)
prof.dump_stats(datafn)
return retval
return profile_wrapper
curr_frame = 0
# Note: Called for you by dynamo - you almost never ever want to invoke this yourself.
def increment_frame():
global curr_frame
curr_frame = curr_frame + 1
# Note: Called for you by dynamo - you almost never ever want to invoke this yourself.
def reset_frame_count():
global curr_frame
frame_phase_timing.clear()
compilation_time_metrics.clear()
curr_frame = 0
op_count = 0
def increment_op_count(cnt):
global op_count
op_count += cnt
# Print a report of time spent so far
# Ex:
# TIMING:
# entire_frame_compile:8.574629999999999
# backend_compile:5.26806
def print_time_report():
total = 0
total_by_key = {}
for timings in frame_phase_timing.values():
for key, timing in timings.items():
total += timing
if key not in total_by_key:
total_by_key[key] = timing
else:
total_by_key[key] += timing
out = "TIMING:"
for key, value in total_by_key.items():
out = f"{out} {key}:{round(value, 5)}"
print(out)
# dynamo_timed API works as a function decorator
# By wrapping a function in dynamo_timed, we can store a record in compilation_time_metrics
# where the key is the functions name.
# For example:
#
# @dynamo_timed
# def _foo(...):
#
# Would show up as an entry in our timing dict:
# OrderedDict([('bar.<locals>._foo', [0.083690, 0.23949, 3.1425e-05])])
# This is extremely useful for granular debugging.
#
# For a higher-level mode, pass a phase_name into dynamo_timed
# phase_names record an extra record into a separate compilation timing structure,
# one keyed on frame+name rather than function.
# The frame is incremented outside of this function, in def increment_frame() above.
def dynamo_timed(original_function=None, phase_name=None):
def dynamo_timed_inner(func):
@wraps(func)
def time_wrapper(*args, **kwargs):
key = func.__qualname__
if key not in compilation_time_metrics:
compilation_time_metrics[key] = []
with torch.profiler.record_function(f"{key} (dynamo_timed)"):
t0 = time.time()
r = func(*args, **kwargs)
time_spent = time.time() - t0
compilation_time_metrics[key].append(time_spent)
if phase_name:
frame_key = str(curr_frame)
if frame_key not in frame_phase_timing:
frame_phase_timing[frame_key] = {}
assert (
phase_name not in frame_phase_timing[frame_key]
), f"Duplicate phase name {phase_name} for frame {frame_key}"
frame_phase_timing[frame_key][phase_name] = time_spent
return r
return time_wrapper
if original_function:
return dynamo_timed_inner(original_function)
return dynamo_timed_inner
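# A minimal sketch of phase_name usage (illustrative; the decorated function name
# below is hypothetical): besides compilation_time_metrics, a per-frame entry is
# recorded in frame_phase_timing, keyed on the current frame:
#
#   @dynamo_timed(phase_name="entire_frame_compile")
#   def _compile_frame(...):
#       ...
#
# print_time_report() above would then include an "entire_frame_compile" total.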
def compile_times(repr="str", aggregate=False):
"""
Get metrics about torchdynamo frontend/backend compilation times.
Accumulates information from functions tagged with `@dynamo_timed`.
repr='str' returns a printable string for user interaction, and 'csv'
returns headers, rows which can be logged for output
aggregate causes values from multiple compilations (e.g. split graphs)
to be accumulated into one value. If false, expect more than one value
per metric.
"""
def fmt_fn(values, item_fn=lambda x: x):
if aggregate:
return item_fn(sum(values))
return ", ".join(map(item_fn, values))
if repr == "str":
rows = [
(k, fmt_fn(compilation_time_metrics[k], item_fn=lambda x: f"{x:.4f}"))
for k in compilation_time_metrics
]
out = "TorchDynamo compilation metrics:\n"
out += tabulate(rows, headers=("Function", "Runtimes (s)"))
return out
elif repr == "csv":
values = [
fmt_fn(v, item_fn=lambda x: f"{x:.6f}")
for v in compilation_time_metrics.values()
]
headers = list(compilation_time_metrics.keys())
return headers, values
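# Illustrative usage sketch:
#   print(compile_times(repr="str"))                              # human-readable table
#   headers, values = compile_times(repr="csv", aggregate=True)   # e.g. for logging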
@atexit.register
def dump_compile_times():
log.info(compile_times(repr="str", aggregate=True))
tensortype_to_dtype = {
torch.FloatTensor: (torch.float32, torch.float),
torch.DoubleTensor: (torch.float64, torch.double),
torch.HalfTensor: (torch.float16, torch.half),
torch.BFloat16Tensor: (torch.bfloat16,),
torch.ByteTensor: (torch.uint8,),
torch.CharTensor: (torch.int8,),
torch.LongTensor: (torch.int64, torch.long),
torch.IntTensor: (torch.int32, torch.int),
torch.ShortTensor: (torch.int16, torch.short),
torch.BoolTensor: (torch.bool,),
}
class DuplicateWarningChecker:
def __init__(self, maxsize=4096):
self.maxsize = maxsize
self.reset()
def reset(self):
self.set = collections.OrderedDict()
def add(self, key):
if key in self.set:
self.set.move_to_end(key, last=True)
if not config.verbose:
return False
else:
self.set[key] = None
while len(self.set) > self.maxsize:
self.set.popitem(last=False)
return True
graph_break_dup_warning_checker = DuplicateWarningChecker()
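# Illustrative sketch: add() returns True the first time a key is seen and False on
# repeats (unless config.verbose is set), so callers can emit a warning only once.
# The key below is a made-up example; dynamo uses its own keys in practice:
#   if graph_break_dup_warning_checker.add(("some_file.py", 10)):
#       log.warning("graph break at some_file.py:10")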
def setup_compile_debug():
compile_debug = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
if compile_debug:
torch._logging.set_logs(
dynamo=logging.DEBUG,
aot=logging.DEBUG,
inductor=logging.DEBUG,
output_code=True, # this is off by default
)
return add_file_handler()
return contextlib.ExitStack()
def reset_graph_break_dup_checker():
graph_break_dup_warning_checker.reset()
def add_file_handler():
log_path = os.path.join(get_debug_dir(), "torchdynamo")
if not os.path.exists(log_path):
os.makedirs(log_path)
log_file_handler = logging.FileHandler(os.path.join(log_path, "debug.log"))
logger = logging.getLogger("torch._dynamo")
logger.addHandler(log_file_handler)
exitstack = contextlib.ExitStack()
exitstack.callback(lambda: logger.removeHandler(log_file_handler))
return exitstack
def setup_log_file():
exitstack = contextlib.ExitStack()
if config.log_file_name is not None:
log_file_handler = logging.FileHandler(config.log_file_name)
for logger in logging.get_loggers():
logger.addHandler(log_file_handler)
            exitstack.callback(
                # Bind the current logger; a bare lambda would late-bind and always
                # remove the handler from the last logger in the loop.
                functools.partial(logger.removeHandler, log_file_handler)
            )
return exitstack
return exitstack
def gen_record_file_name(exc, code):
return f"{get_debug_dir()}/error_recordings/\
{code.co_name}_{type(exc).__name__}_{code.co_firstlineno}.rec"
def write_record_to_file(filename, exec_record):
try:
if os.path.exists(filename):
log.warning(
"Unable to write execution record %s; file already exists.", filename
)
else:
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "wb") as f:
exec_record.dump(f)
except Exception:
log.error("Unable to write execution record %s", filename, exc_info=1)
def count_calls(g: fx.Graph):
c = 0
for n in g.nodes:
if "call" in n.op:
c += 1
return c
def identity(x):
return x
def nothing(*args, **kwargs):
pass
class ExactWeakKeyDictionary:
"""Similar to weakref.WeakKeyDictionary, but use `is`/`id` rather than `==` to compare equality"""
def __init__(self):
self.values = dict()
self.refs = dict()
def __getitem__(self, key):
return self.values[id(key)]
def get(self, key, default=None):
return self.values.get(id(key), default)
def __contains__(self, key):
return id(key) in self.values
def __setitem__(self, key, value):
idx = id(key)
if idx not in self.refs:
self.refs[idx] = weakref.ref(key, lambda ref: self._remove_id(idx))
self.values[idx] = value
def _remove_id(self, idx):
if idx in self.values:
del self.values[idx]
if idx in self.refs:
del self.refs[idx]
def clear(self):
self.refs.clear()
self.values.clear()
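# Illustrative sketch: keys are matched by identity, so two equal-but-distinct
# objects occupy separate entries, and entries vanish once the key is collected:
#   d = ExactWeakKeyDictionary()
#   m1, m2 = torch.nn.Module(), torch.nn.Module()
#   d[m1] = "a"
#   assert m1 in d and m2 not in d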
def istype(obj, allowed_types):
"""isinstance() without subclasses"""
if isinstance(allowed_types, (tuple, list, set)):
return type(obj) in allowed_types
return type(obj) is allowed_types
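# Illustrative example: unlike isinstance, subclasses do not match:
#   istype(True, int)      # False, since type(True) is bool, not int
#   isinstance(True, int)  # True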
def is_typing(value):
if sys.version_info < (3, 9):
return isinstance(value, typing._GenericAlias)
else:
return isinstance(
value, (typing._SpecialGenericAlias, typing._UnionGenericAlias)
)
def is_numpy_int_type(value):
return istype(
value,
(
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
),
)
def is_numpy_float_type(value):
return istype(
value,
(
np.float16,
np.float32,
np.float64,
),
)
def is_numpy_ndarray(value):
return istype(value, np.ndarray)
def istensor(obj):
"""Check of obj is a tensor"""
tensor_list = (
torch.Tensor,
torch.nn.Parameter,
*config.traceable_tensor_subclasses,
)
tensor_list = tensor_list + (torch._subclasses.FakeTensor,)
return istype(obj, tensor_list)
def is_lazy_module(mod):
return isinstance(mod, LazyModuleMixin)
@functools.lru_cache(4096)
def print_once(*args):
print(*args)
def make_cell(val=None):
"""Some black magic to create a cell object that usually only exists in a closure"""
x = val
def f():
return x
assert len(f.__closure__) == 1
return f.__closure__[0]
def proxy_args_kwargs(args, kwargs):
try:
proxy_args = tuple(arg.as_proxy() for arg in args)
proxy_kwargs = {key: arg.as_proxy() for key, arg in kwargs.items()}
return proxy_args, proxy_kwargs
except NotImplementedError as e:
from .exc import unimplemented
from .variables.base import typestr
raise unimplemented(
f"call_function args: {typestr(*args)} {typestr(*list(kwargs.values()))}"
) from e
@dataclasses.dataclass
class CompilationMetrics:
frame_key: str
co_name: str
co_filename: str
co_firstlineno: int
cache_size: int
guard_count: Optional[int]
graph_op_count: Optional[int]
graph_node_count: Optional[int]
graph_input_count: Optional[int]
entire_frame_compile_time_s: Optional[float]
backend_compile_time_s: Optional[float]
fail_reason: Optional[str]
@dataclasses.dataclass
class CleanupHook:
"""Remove a global variable when hook is called"""
scope: Dict[str, Any]
name: str
def __call__(self, *args):
CleanupManager.count -= 1
del self.scope[self.name]
@staticmethod
def create(scope, name, val):
assert name not in scope
CleanupManager.count += 1
scope[name] = val
return CleanupHook(scope, name)
class CleanupManager(ExactWeakKeyDictionary):
count = 0
def _remove_id(self, idx):
for hook in self.values[idx]:
hook()
super()._remove_id(idx)
CleanupManager.instance = CleanupManager()
def clone_tensor(x):
"""Clone the tensor and its gradient"""
y = x.clone().requires_grad_(x.requires_grad)
if x.is_leaf and x.grad is not None:
y.grad = x.grad.clone()
return y
def clone_input(x, *, dtype=None):
"""copy while preserving strides"""
# TODO: this is questionable
if isinstance(x, torch._subclasses.FakeTensor):
# this func fails on fake tensors in __torch_dispatch__
return x
def torch_clone(x):
y = torch.clone(x)
if x.is_leaf:
y.requires_grad_(x.requires_grad)
if x.is_leaf and x.grad is not None:
y.grad = clone_input(x.grad, dtype=dtype)
if hasattr(x, "_dynamo_dynamic_indices"):
y._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy()
return y
with torch.no_grad():
if x.device.type == "xla":
# Access data_ptr() for a xla tensor will cause crash
return torch_clone(x)
needed_size = sum(
(shape - 1) * stride for shape, stride in zip(x.size(), x.stride())
)
if x.is_quantized:
result = torch.empty_quantized((needed_size + 32,), x)
else:
result = torch.empty(
needed_size + 32, dtype=dtype or x.dtype, device=x.device
)
cache_line_offset = (
(x.data_ptr() - result.data_ptr()) % 32
) // x.element_size()
result.as_strided_(x.size(), x.stride(), cache_line_offset)
try:
result.copy_(x.clone())
if x.is_leaf:
result.requires_grad_(x.requires_grad)
if x.is_leaf and x.grad is not None:
result.grad = clone_input(x.grad, dtype=dtype)
except RuntimeError:
# RuntimeError: unsupported operation: more than one element of the written-to
# tensor refers to a single memory location. Please clone() the tensor before
# performing the operation.
return torch_clone(x)
if hasattr(x, "_dynamo_dynamic_indices"):
result._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy()
return result
def clone_inputs(example_inputs):
if type(example_inputs) is dict:
res = dict(example_inputs)
for key, value in res.items():
if isinstance(value, tuple):
res[key] = clone_inputs(value)
else:
assert isinstance(value, torch.Tensor), type(value)
res[key] = clone_input(value)
return res
res = list(example_inputs)
for i in range(len(res)):
if isinstance(res[i], torch.Tensor):
res[i] = clone_input(res[i])
return res
@contextmanager
def preserve_rng_state():
with torch.utils._python_dispatch._disable_current_modes():
rng_state = torch.clone(torch.random.get_rng_state())
if torch.cuda.is_available():
cuda_rng_state = torch.clone(torch.cuda.get_rng_state())
try:
yield
finally:
with torch.utils._python_dispatch._disable_current_modes():
torch.random.set_rng_state(rng_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
def is_jit_model(model0):
return isinstance(
model0,
(
torch.jit._trace.TopLevelTracedModule,
torch.jit._script.RecursiveScriptModule,
torch.jit.ScriptFunction,
torch.jit.ScriptModule,
),
)
def torchscript(model, example_inputs, verbose=False):
if is_jit_model(model):
# already done?
return model
try:
return torch.jit.trace(model, example_inputs)
except Exception:
try:
return torch.jit.script(model)
except Exception:
if verbose:
log.exception("jit error")
else:
log.error("Both torch.jit.trace and torch.jit.script failed")
return None
def getfile(obj):
try:
return inspect.getfile(obj)
except TypeError:
return None
def is_namedtuple(obj):
"""Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple"""
return is_namedtuple_cls(type(obj))
def is_namedtuple_cls(cls):
"""Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple"""
try:
if issubclass(cls, tuple):
bases = getattr(cls, "__bases__", []) or [None]
module = getattr(cls, "__module__", None)
return module == "torch.return_types" or (
bases[0] is tuple and hasattr(cls, "_make") and hasattr(cls, "_fields")
)
except TypeError:
pass
return False
@functools.lru_cache(1)
def namedtuple_fields(cls):
"""Get the fields of a namedtuple or a torch.return_types.* quasi-namedtuple"""
if cls is slice:
return ["start", "stop", "step"]
assert issubclass(cls, tuple)
if hasattr(cls, "_fields"):
# normal namedtuples
return cls._fields
@dataclasses.dataclass
class Marker:
index: int
# frustrating ones e.g. torch.return_types.max
assert cls.__module__ == "torch.return_types"
obj = cls(map(Marker, range(cls.n_fields)))
fields = [None] * cls.n_fields
for name in dir(obj):
if name[0] != "_" and isinstance(getattr(obj, name), Marker):
fields[getattr(obj, name).index] = name
return fields
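# Illustrative example (assuming the usual torch.return_types.max field layout):
#   namedtuple_fields(torch.return_types.max)  # -> ["values", "indices"]
#   namedtuple_fields(slice)                   # -> ["start", "stop", "step"]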
def checkpoint_params(gm):
with torch.no_grad():
rng_state = torch.clone(torch.random.get_rng_state())
if torch.cuda.is_available():
cuda_rng_state = torch.clone(torch.cuda.get_rng_state())
saved_state = []
for param in itertools.chain(gm.parameters(), gm.buffers()):
saved_state.append((param, param._version, torch.clone(param)))
def restore():
with torch.no_grad():
torch.random.set_rng_state(rng_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
for param, version, original_value in saved_state:
if param._version != version:
param.copy_(original_value)
return restore
def timed(model, example_inputs, times=1):
if torch.cuda.is_available():
synchronize = torch.cuda.synchronize
else:
synchronize = nothing
synchronize()
gc.collect()
torch.manual_seed(1337)
t0 = time.perf_counter()
for _ in range(times):
result = model(*example_inputs)
synchronize()
t1 = time.perf_counter()
return result, t1 - t0
def check_is_cuda(gm, example_inputs):
return all(x.is_cuda for x in itertools.chain(example_inputs, gm.parameters(True)))
@lru_cache(32)
def rot_n_helper(n):
assert n > 1
vars = [f"v{i}" for i in range(n)]
rotated = reversed(vars[-1:] + vars[:-1])
fn = eval(f"lambda {','.join(vars)}: ({','.join(rotated)})")
fn.__name__ = f"rot_{n}_helper"
return fn
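# Tracing the construction above, e.g. rot_n_helper(3) evaluates to
#   lambda v0, v1, v2: (v1, v0, v2)
# so rot_n_helper(3)(1, 2, 3) == (2, 1, 3). (Illustrative only.)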
def is_safe_constant(v):
if istype(v, (tuple, frozenset)):
return all(map(is_safe_constant, v))
return isinstance(v, (enum.Enum, type)) or istype(
v,
(
types.CodeType,
int,
float,
bool,
str,
bytes,
type(None),
slice,
type(type),
torch.device,
torch.dtype,
),
)
def guard_if_dyn(arg):
from .variables import ConstantVariable, SymNodeVariable
if isinstance(arg, SymNodeVariable):
# This is because SymNodeVariable intentionally doesn't define
# as_python_constant to avoid shunting down some codepaths
# that expect consts. In this case, we know we definitely
# want to specialize though.
return arg.evaluate_expr()
elif isinstance(arg, ConstantVariable):
return arg.as_python_constant()
return arg
def check_constant_args(args, kwargs):
return all(x.is_python_constant() for x in itertools.chain(args, kwargs.values()))
def check_unspec_python_args(args, kwargs):
from torch._dynamo.variables.constant import ConstantVariable
from torch._dynamo.variables.tensor import UnspecializedPythonVariable
unspec_count = 0
for x in itertools.chain(args, kwargs.values()):
if isinstance(x, UnspecializedPythonVariable):
unspec_count += 1
elif not isinstance(x, (UnspecializedPythonVariable, ConstantVariable)):
return False
else:
pass
return unspec_count > 0
def check_numpy_ndarray_args(args, kwargs):
from torch._dynamo.variables.tensor import NumpyNdarrayVariable
return any(
isinstance(x, NumpyNdarrayVariable)
for x in itertools.chain(args, kwargs.values())
)
def specialize_args_kwargs(tx, args, kwargs):
specialized_args = []
specialized_kwargs = {}
for x in args:
specialized_args.append(x.as_specialized(tx))
for k, v in kwargs.items():
specialized_kwargs.update({k: v.as_specialized(tx)})
return specialized_args, specialized_kwargs
dict_values = type(dict().values())
odict_values = type(collections.OrderedDict().values())
tuple_iterator = type(iter(tuple()))
tuple_iterator_len = tuple_iterator.__length_hint__
object_new = object.__new__
def nn_module_new(cls):
obj = object_new(cls)
torch.nn.Module.__init__(obj)
return obj
def product(it):
return functools.reduce(operator.mul, it, 1)
def tuple_iterator_getitem(it, index):
_, (obj,), start = it.__reduce__()
return obj[start + index]
def enum_repr(value, local):
# enum class can override __str__ method. Use __class__ and name attribute
# to extract the class name and key name.
name = value.__class__.__name__
val = value.name
scope = "L" if local else "G"
local_name = f'{scope}["{name}"].{val}'
return local_name
def dict_param_key_ids(value):
return {
id(k) for k in value.keys() if isinstance(k, (torch.nn.Parameter, torch.Tensor))
}
def dict_const_keys(value):
return {
k for k in value.keys() if not isinstance(k, (torch.nn.Parameter, torch.Tensor))
}
def dict_const_keys_repr(const_keys, *, local):
if any(isinstance(k, enum.Enum) for k in const_keys):
        # Work around repr(Enum) returning an invalid global reference before Python 3.11
        # by calling enum_repr and removing quotes to render the enum in guard code.
const_keys_str = f"{ {enum_repr(k, local=local) if isinstance(k, enum.Enum) else repr(k) for k in const_keys} }".replace(
"'", ""
)
else:
const_keys_str = f"{const_keys!r}"
return const_keys_str
def global_key_name(key):
return f"__dict_key_{id(key)}"
from torch._subclasses import ( # noqa: F401
FakeTensorMode,
UnsupportedFakeTensorException,
)
def wrap_fake_exception(fn):
try:
return fn()
except UnsupportedFakeTensorException as e:
from .exc import unimplemented
msg = f"Unsupported: {e.reason} with fake tensor propagation."
log.warning(msg)
raise unimplemented(msg) from e
def deepcopy_to_fake_tensor(obj, fake_mode):
with torch._subclasses.fake_tensor.FakeCopyMode(fake_mode):
return wrap_fake_exception(lambda: copy.deepcopy(obj))
def rmse(ref, res):
"""
Calculate root mean squared error
"""
return torch.sqrt(torch.mean(torch.square(ref - res)))
def same(
ref,
res,
fp64_ref=None,
cos_similarity=False,
tol=1e-4,
equal_nan=False,
exact_dtype=True,
relax_numpy_equality=False,
ignore_non_fp=False,
log_error=log.error,
):
"""Check correctness to see if ref and res match"""
if fp64_ref is None:
fp64_ref = ref
if isinstance(ref, (list, tuple, torch.nn.ParameterList, torch.Size)):
assert isinstance(res, (list, tuple)), f"type mismatch {type(ref)} {type(res)}"
if len(ref) != len(res):
log_error("Length mismatch")
return False
return len(ref) == len(res) and all(
same(
ai,
bi,
fp64_refi,
cos_similarity,
tol,
equal_nan,
exact_dtype,
relax_numpy_equality,
ignore_non_fp,
log_error=log_error,
)
for ai, bi, fp64_refi in zip(ref, res, fp64_ref)
)
elif isinstance(ref, dict):
assert isinstance(res, dict)
assert set(ref.keys()) == set(
res.keys()
), f"keys mismatch {set(ref.keys())} == {set(res.keys())}"
for k in sorted(ref.keys()):
if not (
same(
ref[k],
res[k],
fp64_ref[k],
cos_similarity=cos_similarity,
tol=tol,
equal_nan=equal_nan,
exact_dtype=exact_dtype,
relax_numpy_equality=relax_numpy_equality,
ignore_non_fp=ignore_non_fp,
log_error=log_error,
)
):
log_error("Accuracy failed for key name %s", k)
return False
return True
elif isinstance(ref, torch.Tensor):
assert not isinstance(ref, torch._subclasses.FakeTensor)
assert not isinstance(res, torch._subclasses.FakeTensor)
if ref.is_sparse:
assert res.is_sparse
ref = ref.to_dense()
res = res.to_dense()
assert isinstance(res, torch.Tensor), f"type mismatch {type(ref)} {type(res)}"
if exact_dtype:
if ref.dtype != res.dtype:
log_error("dtype mismatch %s, %s", ref.dtype, res.dtype)
return False
if ref.dtype == torch.bool:
if ignore_non_fp:
return True
# triton stores bool as int8, so add this for more accurate checking
r = torch.allclose(
ref.to(dtype=torch.uint8),
res.to(dtype=torch.uint8),
atol=tol,
rtol=tol,
equal_nan=equal_nan,
)
if not r:
log_error("Accuracy failed: uint8 tensor did not match")
return r
if cos_similarity:
ref = ref.flatten().to(torch.float32)
res = res.flatten().to(torch.float32)
if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=True):
# early exit that handles zero/nan better
# cosine_similarity(zeros(10), zeros(10), dim=0) is 0
return True
score = torch.nn.functional.cosine_similarity(ref, res, dim=0, eps=1e-6)
if score < 0.99:
log.warning("Similarity score=%s", score.cpu().detach().item())
return score >= 0.99
else:
if not exact_dtype:
ref = ref.to(res.dtype)
# First try usual allclose
if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=equal_nan):
return True
# Check error from fp64 version
if fp64_ref.dtype == torch.float64:
ref_error = rmse(fp64_ref, ref).item()
res_error = rmse(fp64_ref, res).item()
multiplier = 2.0
if (
fp64_ref.numel() < 1000
or (ref.ndim == 4 and ref.shape[-1] == ref.shape[-2] == 1)
# large tol means a benchmark has been specified as REQUIRE_HIGHER_TOLERANCE
or tol >= 2 * 1e-2
):
# In the presence of noise, noise might dominate our error
# metric for smaller tensors.
                    # Similarly, for 1x1 kernels, there seems to be high noise with amp.
multiplier = 3.0
passes_test = res_error <= (multiplier * ref_error + tol / 10.0)
if not passes_test:
log_error(
"RMSE (res-fp64): %.5f, (ref-fp64): %.5f and shape=%s",
res_error,
ref_error,
res.size(),
)
# import pdb; pdb.set_trace()
return passes_test
if ignore_non_fp:
return True
log_error("Accuracy failed: allclose not within tol=%s", tol)
return False
elif isinstance(ref, (str, int, type(None), bool, torch.device)):
if ignore_non_fp:
return True
r = ref == res
if not r:
log_error("Accuracy failed (%s): %s != %s", type(ref), ref, res)
return r
elif isinstance(ref, float):
r = math.isclose(ref, res, rel_tol=tol, abs_tol=tol)
if not r:
log_error(
"Accuracy failed (float): %s != %s (within tol=%s)", ref, res, tol
)
return r
elif is_numpy_int_type(ref) or is_numpy_float_type(ref):
if relax_numpy_equality and not (
is_numpy_int_type(res) or is_numpy_float_type(res)
):
ref = ref.item()
r = (type(ref) is type(res)) and (ref == res)
if not r:
log_error("Accuracy failed (numpy): %s != %s", ref, res)
return r
elif is_numpy_ndarray(ref):
return (type(ref) is type(res)) and same(
torch.as_tensor(ref),
torch.as_tensor(res),
fp64_ref,
cos_similarity=cos_similarity,
tol=tol,
equal_nan=equal_nan,
exact_dtype=exact_dtype,
relax_numpy_equality=relax_numpy_equality,
ignore_non_fp=ignore_non_fp,
log_error=log_error,
)
elif type(ref).__name__ in (
"MaskedLMOutput",
"Seq2SeqLMOutput",
"CausalLMOutputWithCrossAttentions",
"LongformerMaskedLMOutput",
"Instances",
"SquashedNormal",
"Boxes",
"Normal",
"TanhTransform",
"Foo",
"Variable",
):
assert type(ref) is type(res)
return all(
same(
getattr(ref, key),
getattr(res, key),
getattr(fp64_ref, key),
cos_similarity=cos_similarity,
tol=tol,
equal_nan=equal_nan,
exact_dtype=exact_dtype,
relax_numpy_equality=relax_numpy_equality,
ignore_non_fp=ignore_non_fp,
log_error=log_error,
)
for key in ref.__dict__.keys()
)
else:
raise RuntimeError(f"unsupported type: {type(ref).__name__}")
def format_func_info(code):
short_filename = code.co_filename.split("/")[-1]
return f"'{code.co_name}' ({short_filename}:{code.co_firstlineno})"
@contextlib.contextmanager
def disable_cache_limit():
prior = config.cache_size_limit
config.cache_size_limit = sys.maxsize
try:
yield
finally:
config.cache_size_limit = prior
# map from transformed code back to original user code
orig_code_map = ExactWeakKeyDictionary()
# keep a record of code_obj -> list of guard failure reasons for logging
guard_failures = collections.defaultdict(list)
# Keep a record of graph break reasons for logging
graph_break_reasons = list()
# keep record of compiled code, if we are in "error if recompile"
# to track code that dynamo has compiled previously
seen_code_map = ExactWeakKeyDictionary()
class CompileProfiler:
"""Utility for profiling how and what dynamo would compile.
Can be used for
* diagnosing recompilation issues
* determining an appropriate compile cache limit
    * (TODO) confirming which functions got compiled/skipped
"""
def __init__(self):
self.frame_count = 0
self.op_count = 0
self.backend_ctx_ctor = lambda: disable_cache_limit()
def __call__(self, gm: torch.fx.GraphModule, example_inputs):
self.frame_count += 1
for node in gm.graph.nodes:
if "call" in node.op:
self.op_count += 1
return gm.forward
def __enter__(self):
self.old_report_guard_failure = config.report_guard_failures
config.report_guard_failures = True
return self
def __exit__(self, typ, val, traceback):
config.report_guard_failures = self.old_report_guard_failure
def get_metrics(self):
return {"guard_failures": guard_failures}
def report(self):
metrics = self.get_metrics()
gf = metrics["guard_failures"]
def num_recompiles(code):
return len(gf[code])
def recompile_reasons(code):
return "\n".join([str(x) for x in gf[code]])
summarized_gf = [
[format_func_info(code), num_recompiles(code), recompile_reasons(code)]
for code in gf
]
def graph_break_report():
if "graph_break" in counters:
graph_breaks = counters["graph_break"]
return tabulate(
[[msg, graph_breaks[msg]] for msg in graph_breaks],
headers=["Graph Break Reason", "Count"],
)
def recompilation_report():
if len(gf):
max_recompiles = max([num_recompiles(code) for code in gf])
recomp_table = tabulate(
summarized_gf,
headers=["Function", "Recompiles", "Recompile Reasons"],
)
return recomp_table + textwrap.dedent(
f"""
Set torch._dynamo.config.cache_size_limit to {max_recompiles} to avoid being cache limited.
"""
)
report = textwrap.dedent(
"""
Torchdynamo Profiler Report
===========================
Graph Breaks
------------
Graph breaks happen when torchdynamo encounters code it can't safely trace.
            If you want to find out why breaks are happening, check below for each break reason.
You may gain additional insight by passing `fullgraph=True` to torch.compile,
to stop at the first break.
"""
)
report += graph_break_report() or "No graph breaks detected."
report += textwrap.dedent(
"""
Recompilation
-------------
            These subgraphs were recompiled more than once due to guard failures.
Guard failures indicate some condition assumed to be static by the tracer changed,
making it unsafe to reuse the compiled program.
"""
)
report += recompilation_report() or "No recompilation detected.\n"
return report
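# Illustrative usage sketch (hedged; `fn` and `example_inputs` are placeholders):
#   prof = CompileProfiler()
#   with prof:
#       opt_fn = torch.compile(fn, backend=prof)
#       opt_fn(*example_inputs)
#       print(prof.report())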
# return same dir unless user changes config between calls
@functools.lru_cache(None)
def _get_debug_dir(root_dir):
dir_name = (
"run_"
+ datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
# use pid to avoid conflicts among ranks
+ "-pid_"
+ str(os.getpid())
)
return os.path.join(root_dir, dir_name)
def get_debug_dir():
debug_root = config.debug_dir_root
return _get_debug_dir(debug_root)
def get_fake_value(node, tx):
"""
Run the computation represented by `node` using fake tensors and return the result.
"""
from .exc import (
TorchRuntimeError,
unimplemented,
Unsupported,
UserError,
UserErrorType,
)
op = node.op
def fake_wrapper(e):
if isinstance(e, torch.Tensor):
assert is_fake(e)
return e
def visit(n: torch.fx.Node):
return n.meta["example_value"]
args, kwargs = torch.fx.node.map_arg((node.args, node.kwargs), visit)
args = tree_map(fake_wrapper, args)
kwargs = tree_map(fake_wrapper, kwargs)
nnmodule = None
if op == "call_method" and len(args) > 0 and isinstance(args[0], torch.nn.Module):
# If the first argument is nn.Module, should copy to fake mode.
args = (deepcopy_to_fake_tensor(args[0], tx.fake_mode),) + tuple(args[1:])
if op == "call_module":
nnmodule = tx.output.nn_modules[node.target]
if is_lazy_module(nnmodule) and hasattr(nnmodule, "_initialize_hook"):
# In the case of a lazy module, we want to run
# the pre-hooks which initialize it.
# Afterwards, lazy module deletes its pre-hooks
# to avoid treating it as lazy on subsequent recompile.
nnmodule._infer_parameters(nnmodule, args)
# no matter it's lazy module or not, we should copy to fake mode.
nnmodule = deepcopy_to_fake_tensor(nnmodule, tx.fake_mode)
try:
with tx.fake_mode, enable_python_dispatcher():
return wrap_fake_exception(
lambda: run_node(tx.output, node, args, kwargs, nnmodule)
)
except Unsupported:
raise
except RuntimeError as e:
cause = e
if e.__cause__ is not None:
cause = e.__cause__
if isinstance(
cause, torch._subclasses.fake_tensor.DataDependentOutputException
):
unimplemented(f"data dependent operator: {cause.func}")
elif isinstance(
cause, torch._subclasses.fake_tensor.DynamicOutputShapeException
):
unimplemented(f"dynamic shape operator: {cause.func}")
elif isinstance(
cause, torch._subclasses.fake_tensor.UnsupportedOperatorException
):
unimplemented(
f"unsupported operator: {cause.func} (see "
"https://docs.google.com/document/d/1GgvOe7C8_NVOMLOCwDaYV1mXXyHMXY7ExoewHqooxrs/edit#heading=h.64r4npvq0w0"
" for how to fix)"
)
elif isinstance(
cause, torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode
):
unimplemented("guard on data-dependent symbolic int/float")
elif isinstance(cause, torch.utils._sympy.value_ranges.ValueRangeError):
raise UserError(UserErrorType.CONSTRAIN_VIOLATION, e.args[0]) from e
raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None
def run_node(tracer, node, args, kwargs, nnmodule):
"""
Runs a given node, with the given args and kwargs.
    Behavior is dictated by a node's op.
run_node is useful for extracting real values out of nodes.
See get_real_value for more info on common usage.
Note: The tracer arg is only used for 'get_attr' ops
Note: The nnmodule arg is only used for 'call_module' ops
    Nodes that are not call_function, call_method, call_module, get_attr, or placeholder
    will raise an AssertionError.
"""
op = node.op
try:
if op == "call_function":
return node.target(*args, **kwargs)
elif op == "call_method":
return getattr(args[0], node.target)(*args[1:], **kwargs)
elif op == "call_module":
assert nnmodule is not None
return nnmodule(*args, **kwargs)
elif op == "get_attr":
return tracer.get_submodule(node.target)
elif op == "placeholder":
assert "example_value" in node.meta
return node.meta["example_value"]
except Exception as e:
fn_str = f"Failed running {op} {node.target}(*{args}, **{kwargs}):\n"
raise RuntimeError(fn_str + str(e)).with_traceback(e.__traceback__) from e
raise AssertionError(op)
def get_real_value(node, tracer):
"""
Run the actual computation represented by `node` and return the result.
This will execute any dependent nodes in the graph as well.
"""
from .exc import TorchRuntimeError
cache = tracer.real_value_cache
if node in cache:
return cache[node]
op = node.op
args, kwargs = torch.fx.node.map_arg(
(node.args, node.kwargs),
lambda n: get_real_value(n, tracer),
)
if op == "call_module":
nn_module = tracer.output_graph.nn_modules[node.target]
if not is_lazy_module(nn_module):
nn_module = copy.deepcopy(nn_module)
else:
# In the case of a lazy module, we want to run
# the pre-hooks which initialize it
nn_module(*args, **kwargs)
else:
nn_module = None
try:
real_value = run_node(tracer, node, args, kwargs, nn_module)
cache[node] = real_value
except RuntimeError as e:
raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None
return real_value
def assert_no_fake_params_or_buffers(gm):
from torch._subclasses.fake_tensor import FakeTensorConfig
def stack_or_hint(t):
if FakeTensorConfig.debug:
import traceback
return f"FAKE TENSOR CREATION TRACEBACK: \n {traceback.format_list(t._debug_trace)}"
else:
return "Enable TORCH_FAKE_TENSOR_DEBUG=1 to get creation stack traces on fake tensors."
for name, buffer in gm.named_buffers():
assert not isinstance(
buffer, torch._subclasses.FakeTensor
), f"Unexpected fake buffer {name} {stack_or_hint(buffer)}"
for name, param in gm.named_parameters():
assert not isinstance(
param, torch._subclasses.FakeTensor
), f"Unexpected fake param {name} {stack_or_hint(param)}"
def fqn(obj: Any):
"""
Returns the fully qualified name of the object.
"""
return f"{obj.__module__}.{obj.__qualname__}"
def ifdynstaticdefault(count1, count2):
if torch._dynamo.config.assume_static_by_default:
return count1
else:
return count2
def import_submodule(mod: types.ModuleType):
"""
Ensure all the files in a given submodule are imported
"""
for filename in sorted(os.listdir(os.path.dirname(mod.__file__))):
if filename.endswith(".py") and filename[0] != "_":
importlib.import_module(f"{mod.__name__}.{filename[:-3]}")
def object_has_getattribute(value: Any):
try:
if isinstance(
inspect.getattr_static(type(value), "__getattribute__"),
types.FunctionType,
):
return True
except AttributeError:
pass
return False
def get_custom_getattr(value: Any):
try:
getattr_fn = inspect.getattr_static(type(value), "__getattr__")
except AttributeError:
getattr_fn = None
if getattr_fn is torch.nn.Module.__getattr__:
# ignore this case of getattr
getattr_fn = None
return getattr_fn
class TensorStaticReason(enum.Enum):
PARAMETER = 2
NOT_TENSOR = 4
NN_MODULE_PROPERTY = 5
def tensor_static_reason_to_message(reason: TensorStaticReason):
if reason == TensorStaticReason.PARAMETER:
return "mark_dynamic on parameter, parameters are always static today."
if reason == TensorStaticReason.NOT_TENSOR:
return "mark_dynamic on a non tensor, how did this happen?"
if reason == TensorStaticReason.NN_MODULE_PROPERTY:
return "tensor is static because it is nn module associated."
raise AssertionError(f"Illegal reason {reason}")
def tensor_always_has_static_shape(
tensor: Union[torch.Tensor, Any], is_tensor: bool, guard_source: "GuardSource"
) -> Tuple[bool, TensorStaticReason]:
"""
Given a tensor, source, and is_tensor flag, determine if a shape should be static.
    Args:
    tensor - the real tensor to evaluate, parameters force a static shape.
    is_tensor - internal dynamo check, essentially "is_tensor": target_cls is TensorVariable,
    tensors not in a TensorVariable for whatever reason are forced static.
    guard_source - the guard source of the tensor, used to check whether it is nn.Module associated.
    Returns a tuple, where the first element is the bool of whether or not this tensor should have a static shape.
    The second element is a TensorStaticReason, useful for passing to tensor_static_reason_to_message if needed.
"""
if guard_source.is_nn_module() and config.force_nn_module_property_static_shapes:
return True, TensorStaticReason.NN_MODULE_PROPERTY
if type(tensor) is torch.nn.Parameter and config.force_parameter_static_shapes:
return True, TensorStaticReason.PARAMETER
if not is_tensor:
return True, TensorStaticReason.NOT_TENSOR
return False, None
class LazyString:
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __str__(self):
return self.func(*self.args, **self.kwargs)
def lazy_format_graph_code(name, gm, maybe_id=None):
def format_name():
if maybe_id is not None:
return f"{name} {maybe_id}"
else:
return name
return LazyString(
lambda: _format_graph_code(
f"===== {format_name()} =====\n",
gm.forward.__code__.co_filename,
gm.print_readable(print_output=False),
)
)
def _format_graph_code(name, filename, graph_str):
return f"TRACED GRAPH\n {name} {filename} {graph_str}\n"
def lazy_format_graph_tabular(fn_name, gm):
def inner():
try:
from tabulate import tabulate # TODO: Check that this is installed
except ImportError:
return (
"Tabulate module missing, please install tabulate to log the graph in tabular format, logging code instead:\n"
+ str(lazy_format_graph_code(fn_name, gm))
)
node_specs = [
[n.op, n.name, n.target, n.args, n.kwargs] for n in gm.graph.nodes
]
graph_str = tabulate(
node_specs, headers=["opcode", "name", "target", "args", "kwargs"]
)
return _format_graph_code(fn_name, gm.forward.__code__.co_filename, graph_str)
return LazyString(inner)
def format_bytecode(prefix, name, filename, line_no, code):
return f"{prefix} {name} {filename} line {line_no} \n{dis.Bytecode(code).dis()}\n"
forward_hook_names = ["_forward_pre_hooks", "_forward_hooks"]
backward_hook_names = ["_backward_pre_hooks", "_backward_hooks"]
state_dict_hook_names = [
"_state_dict_pre_hooks",
"_state_dict_hooks",
"_load_state_dict_pre_hooks",
"_load_state_dict_post_hooks",
]
all_hook_names = forward_hook_names + backward_hook_names + state_dict_hook_names
def nn_module_get_all_hooks(
mod,
check_forward_hooks=False,
check_backward_hooks=False,
check_state_dict_hooks=False,
):
reset_code = torch._C._dynamo.eval_frame.reset_code
"""
    Sometimes it's useful to differentiate between types of hooks such as forward/backward/pre
hooks executed during module.__call__, and state_dict hooks which are executed separately.
"""
hook_dicts_to_check = []
check_all_hooks = (
not check_forward_hooks
and not check_backward_hooks
and not check_state_dict_hooks
)
if check_forward_hooks or check_all_hooks:
hook_dicts_to_check.extend(forward_hook_names)
if check_backward_hooks or check_all_hooks:
hook_dicts_to_check.extend(backward_hook_names)
if check_state_dict_hooks:
hook_dicts_to_check.extend(state_dict_hook_names)
all_hooks = []
for hook_dict_name in hook_dicts_to_check:
hooks = getattr(mod, hook_dict_name, [])
for hook_name in hooks:
hook = hooks[hook_name]
all_hooks.append(hook)
return all_hooks
def nnmodule_has_hooks(
mod,
check_forward_hooks=False,
check_backward_hooks=False,
check_state_dict_hooks=False,
):
"""
Helper function to check if a module has any hooks attached to it.
"""
hooks = nn_module_get_all_hooks(
mod,
check_forward_hooks=check_forward_hooks,
check_backward_hooks=check_backward_hooks,
check_state_dict_hooks=check_state_dict_hooks,
)
return bool(hooks)
def to_numpy_helper(value):
"""Convert tensor and tnp.ndarray to numpy.ndarray."""
if isinstance(value, tnp.ndarray):
return to_numpy_helper(value.tensor)
elif isinstance(value, torch.Tensor):
return value.cpu().numpy()
elif isinstance(value, (tuple, list)):
return type(value)(to_numpy_helper(obj) for obj in value)
else:
return value
def numpy_to_tensor(value):
"""Convert tnp.ndarray to tensor, leave other types intact. If a list/tuple, loop through it to convert."""
if isinstance(value, np.ndarray):
return torch.as_tensor(value)
if isinstance(value, tnp.ndarray):
return value.tensor
elif isinstance(value, (tuple, list)):
return type(value)(numpy_to_tensor(obj) for obj in value)
else:
return value
class numpy_to_tensor_wrapper:
def __init__(self, f):
self.f = f
self.__name__ = "wrapped_" + self.f.__name__
def __repr__(self):
return f"<Wrapped function <original {self.f.__name__}>>"
def __call__(self, *args, **kwargs):
out = self.f(*args, **kwargs)
return numpy_to_tensor(out)
def numpy_attr_wrapper(obj, name):
if isinstance(obj, tnp.ndarray):
out = getattr(obj, name)
return numpy_to_tensor(out)
elif isinstance(obj, torch.Tensor):
out = getattr(tnp.ndarray(obj), name)
return numpy_to_tensor(out)
class numpy_method_wrapper:
"""Convert obj from torch.Tensor to tnp.ndarray and call method. Then convert result back to torch.Tensor."""
def __init__(self, method: str):
self.method = method
self.__name__ = "wrapped_" + self.method
def __repr__(self):
return f"<Wrapped method <original {self.method}>>"
def __call__(self, *args, **kwargs):
obj = args[0]
if isinstance(obj, torch.Tensor):
obj = tnp.ndarray(obj)
method_callable = getattr(obj, self.method)
out = method_callable(*args[1:], **kwargs)
return numpy_to_tensor(out)
def defake(x):
if not isinstance(x, FakeTensor):
return x
if x._has_symbolic_sizes_strides:
size = [
s.node.shape_env.size_hint(s.node.expr)
if isinstance(s, torch.SymInt)
else s
for s in x.size()
]
stride = [
s.node.shape_env.size_hint(s.node.expr)
if isinstance(s, torch.SymInt)
else s
for s in x.stride()
]
else:
size = x.size()
stride = x.stride()
y = torch.empty_strided(
size,
stride,
dtype=x.dtype,
device=x.device,
requires_grad=x.requires_grad,
)
y.zero_()
return y
def is_utils_checkpoint(obj):
    # Lazy import to avoid circular dependencies
import torch.utils.checkpoint
return obj is torch.utils.checkpoint.checkpoint
def build_checkpoint_variable(**options):
import torch._higher_order_ops.wrap as higher_order_ops
from .variables.higher_order_ops import TorchHigherOrderOperatorVariable
    # TODO - This is a temporary situation where we have two versions of
    # checkpointing implementation. We will converge on one and remove the other.
activation_checkpoint_op = higher_order_ops.tag_activation_checkpoint
if torch._functorch.config.functionalize_rng_ops:
activation_checkpoint_op = higher_order_ops.wrap_activation_checkpoint
return TorchHigherOrderOperatorVariable.make(
activation_checkpoint_op,
**options,
)
def is_compile_supported(device_type):
from .eval_frame import is_dynamo_supported
compile_supported = is_dynamo_supported()
if device_type == "cpu":
pass
elif device_type == "cuda" and compile_supported:
from torch._inductor.utils import has_triton
compile_supported = has_triton()
else:
compile_supported = False
return compile_supported
# The following 3.11 source code functions are adapted from
# https://github.com/python/cpython/blob/v3.11.4/Lib/traceback.py
# in order to output source code corresponding to bytecode in 3.11+.
# We need our own versions since we want to support multiline expressions.
def _fix_offset(str: str, offset: int) -> int:
"""
Convert byte offset `offset` of `str` into character offset.
Byte offset is used for 3.11+ instruction column data.
Takes things like unicode characters into consideration.
Unchanged from CPython implementation.
"""
as_utf8 = str.encode("utf-8")
return len(as_utf8[:offset].decode("utf-8", errors="replace"))
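# Illustrative example: byte offsets differ from character offsets for non-ASCII text:
#   _fix_offset("héllo", 3) == 2, because "é" occupies two bytes in UTF-8.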
@dataclasses.dataclass
class _Anchors:
# inclusive
left_end_lineno: int
left_end_offset: int
right_start_lineno: int
# exclusive
right_start_offset: int
def _extract_anchors_from_expr(segment: str) -> Optional[_Anchors]:
"""
Given source code `segment` corresponding to a bytecode
instruction, determine:
- for binary ops, the location of the binary op
- for indexing, the location of the brackets.
`segment` is expected to be a valid Python expression
"""
assert sys.version_info >= (3, 11)
import ast
try:
# Without brackets, `segment` is parsed as a statement.
# We expect an expression, so wrap `segment` in
# brackets to handle multi-line expressions.
tree = ast.parse("(\n" + segment + "\n)")
except SyntaxError:
return None
if len(tree.body) != 1:
return None
lines = segment.split("\n")
# get character index given byte offset
def normalize(lineno, offset):
return _fix_offset(lines[lineno], offset)
# Gets the next valid character index in `lines`, if
# the current location is not valid. Handles empty lines.
def next_valid_char(lineno, col):
while lineno < len(lines) and col >= len(lines[lineno]):
col = 0
lineno += 1
assert lineno < len(lines) and col < len(lines[lineno])
return lineno, col
# Get the next valid character index in `lines`.
def increment(lineno, col):
col += 1
lineno, col = next_valid_char(lineno, col)
assert lineno < len(lines) and col < len(lines[lineno])
return lineno, col
# Get the next valid character at least on the next line
def nextline(lineno, col):
col = 0
lineno += 1
lineno, col = next_valid_char(lineno, col)
assert lineno < len(lines) and col < len(lines[lineno])
return lineno, col
statement = tree.body[0]
if isinstance(statement, ast.Expr):
expr = statement.value
if isinstance(expr, ast.BinOp):
# ast gives locations for BinOp subexpressions, e.g.
# ( left_expr ) + ( right_expr )
# left^^^^^ right^^^^^
# -2 since end_lineno is 1-indexed and because we added an extra
# bracket to `segment` when calling ast.parse
cur_lineno = expr.left.end_lineno - 2
cur_col = normalize(cur_lineno, expr.left.end_col_offset)
cur_lineno, cur_col = next_valid_char(cur_lineno, cur_col)
# Heuristic to find the operator character.
# The original CPython implementation did not look for ), \, or #,
# leading to incorrect anchor location, e.g.
# (x) + (y)
# ~~^~~~~~~
while (ch := lines[cur_lineno][cur_col]).isspace() or ch in ")\\#":
if ch in "\\#":
cur_lineno, cur_col = nextline(cur_lineno, cur_col)
else:
cur_lineno, cur_col = increment(cur_lineno, cur_col)
# binary op is 1 or 2 characters long, on the same line
right_col = cur_col + 1
if (
right_col < len(lines[cur_lineno])
and not (ch := lines[cur_lineno][right_col]).isspace()
and ch not in "\\#"
):
right_col += 1
# right_col can be invalid since it is exclusive
return _Anchors(cur_lineno, cur_col, cur_lineno, right_col)
elif isinstance(expr, ast.Subscript):
# ast gives locations for value and slice subexpressions, e.g.
# ( value_expr ) [ slice_expr ]
# value^^^^^ slice^^^^^
# subscript^^^^^^^^^^^^^^^^^^^^
# find left bracket (first '[' after value)
left_lineno = expr.value.end_lineno - 2
left_col = normalize(left_lineno, expr.value.end_col_offset)
left_lineno, left_col = next_valid_char(left_lineno, left_col)
while lines[left_lineno][left_col] != "[":
left_lineno, left_col = increment(left_lineno, left_col)
# find right bracket (final character of expression)
right_lineno = expr.end_lineno - 2
right_col = normalize(right_lineno, expr.end_col_offset)
return _Anchors(left_lineno, left_col, right_lineno, right_col)
elif isinstance(expr, ast.Call):
# ( func_expr ) (args, kwargs)
# func^^^^^
# call^^^^^^^^^^^^^^^^^^^^^^^^
# find left bracket (first '(' after func)
left_lineno = expr.func.end_lineno - 2
left_col = normalize(left_lineno, expr.func.end_col_offset)
left_lineno, left_col = next_valid_char(left_lineno, left_col)
while lines[left_lineno][left_col] != "(":
left_lineno, left_col = increment(left_lineno, left_col)
# find right bracket (final character of expression)
right_lineno = expr.end_lineno - 2
right_col = normalize(right_lineno, expr.end_col_offset)
return _Anchors(left_lineno, left_col, right_lineno, right_col)
return None
def get_instruction_source_311(code: types.CodeType, inst: dis.Instruction) -> str:
"""
Python 3.11+ only. Returns lines of source code (from code object `code`)
corresponding to `inst`'s location data, and underlines relevant code to `inst`.
Example: CALL on `g`:
f(g(
^^
h(x)))
^^^^^
We need our own implementation since `format_frame_summary` in
Python's `traceback` module doesn't handle multi-line expressions
(and their anchor extraction code is not completely correct).
"""
if inst.positions.lineno is None:
return ""
# The rstrip + "\n" pattern is used throughout this function to handle
# linecache.getline errors. Error lines are treated as empty strings "", but we want
# to treat them as blank lines "\n".
first_line = linecache.getline(code.co_filename, inst.positions.lineno).rstrip()
if inst.positions.end_lineno is None:
return first_line
if inst.positions.col_offset is None or inst.positions.end_col_offset is None:
return first_line
# character index of the start of the instruction
start_offset = _fix_offset(first_line, inst.positions.col_offset)
# character index of the end of the instruction
# compute later since end may be a different line
end_offset = None
# expression corresponding to the instruction so we can get anchors
segment = ""
# underline markers to be printed - start with `~` marker and replace with `^` later
markers = []
# Compute segment and initial markers
if inst.positions.end_lineno == inst.positions.lineno:
end_offset = _fix_offset(first_line, inst.positions.end_col_offset)
segment = first_line[start_offset:end_offset]
markers.append(" " * start_offset + "~" * (end_offset - start_offset))
else:
segment = first_line[start_offset:] + "\n"
markers.append(" " * start_offset + "~" * (len(first_line) - start_offset))
last_line = linecache.getline(
code.co_filename, inst.positions.end_lineno
).rstrip()
end_offset = _fix_offset(last_line, inst.positions.end_col_offset)
for lineno in range(inst.positions.lineno + 1, inst.positions.end_lineno):
line = linecache.getline(code.co_filename, lineno).rstrip()
segment += line + "\n"
# don't underline leading spaces
num_spaces = len(line) - len(line.lstrip())
markers.append(" " * num_spaces + "~" * (len(line) - num_spaces))
segment += last_line[:end_offset]
num_spaces = len(last_line) - len(last_line.lstrip())
markers.append(" " * num_spaces + "~" * (end_offset - num_spaces))
anchors: Optional[_Anchors] = None
try:
anchors = _extract_anchors_from_expr(segment)
except AssertionError:
pass
# replace `~` markers with `^` where necessary
if anchors is None:
markers = [marker.replace("~", "^") for marker in markers]
else:
# make markers mutable
markers = [list(marker) for marker in markers]
# anchor positions do not take start_offset into account
if anchors.left_end_lineno == 0:
anchors.left_end_offset += start_offset
if anchors.right_start_lineno == 0:
anchors.right_start_offset += start_offset
        # Turn `~` markers between anchors to `^`
for line in range(len(markers)):
for col in range(len(markers[line])):
if line < anchors.left_end_lineno:
continue
if line == anchors.left_end_lineno and col < anchors.left_end_offset:
continue
if (
line == anchors.right_start_lineno
and col >= anchors.right_start_offset
):
continue
if line > anchors.right_start_lineno:
continue
if markers[line][col] == "~":
markers[line][col] = "^"
# make markers into strings again
markers = ["".join(marker) for marker in markers]
result = ""
for i in range(len(markers)):
result += (
linecache.getline(code.co_filename, inst.positions.lineno + i).rstrip()
+ "\n"
)
result += markers[i] + "\n"
return result
def is_guard_failure_reporting_enabled():
return (
config.report_guard_failures
or torch._logging._internal.log_state.is_artifact_enabled("recompiles")
)
def get_static_address_type(t):
if isinstance(t, torch.Tensor):
return getattr(t, "_dynamo_static_input_type", None)
return None
# ---------------------------------------------------------------------------
import contextlib
import dis
import functools
import logging
import os.path
import re
import sys
import types
import unittest
from typing import Sequence, Union
from unittest.mock import patch
import torch
from torch import fx
from torch._dynamo.output_graph import OutputGraph
from torch._dynamo import config, eval_frame, optimize_assert, reset
from torch._dynamo.bytecode_transformation import (
create_instruction,
debug_checks,
is_generator,
transform_code_object,
)
from torch._dynamo.guards import CheckFunctionManager, GuardedCode
from .utils import same
unsupported = eval_frame.unsupported
three = 3
log = logging.getLogger(__name__)
def clone_me(x):
if x is None:
return None
return x.detach().clone().requires_grad_(x.requires_grad)
def skip_if_pytest(fn):
@functools.wraps(fn)
def wrapped(*args, **kwargs):
if "PYTEST_CURRENT_TEST" in os.environ:
raise unittest.SkipTest("does not work under pytest")
return fn(*args, **kwargs)
return wrapped
def named_parameters_for_optimized_module(mod):
assert isinstance(mod, eval_frame.OptimizedModule)
return mod._orig_mod.named_parameters
def named_buffers_for_optimized_module(mod):
assert isinstance(mod, eval_frame.OptimizedModule)
return mod._orig_mod.named_buffers
def remove_optimized_module_prefix(name):
return re.sub(r"^_orig_mod[.]", "", name)
def collect_results(model, prediction, loss, example_inputs):
results = []
results.append(prediction)
results.append(loss)
# if isinstance(loss, torch.Tensor) and loss.item() > 1:
# log.warning(
# f"High loss value alert - {loss:.2f}. Can result in unstable gradients."
# )
grads = dict()
params = dict()
for name, param in model.named_parameters():
if isinstance(model, eval_frame.OptimizedModule):
name = remove_optimized_module_prefix(name)
param_copy = param
grad = param.grad
# Treat None and zero grad as same
if param.grad is None:
grad = torch.zeros_like(param)
grads[name + ".grad"] = grad
params[name] = param_copy
results.append(grads)
results.append(params)
buffers = dict()
for name, buffer in model.named_buffers():
if isinstance(model, eval_frame.OptimizedModule):
name = remove_optimized_module_prefix(name)
buffers[name] = buffer
results.append(buffers)
for example in example_inputs:
if isinstance(example, (tuple, list)):
for inp in example:
if isinstance(inp, torch.Tensor):
results.append(inp.grad)
else:
if isinstance(example, torch.Tensor):
results.append(example.grad)
return results
def requires_bwd_pass(out):
if isinstance(out, torch.Tensor):
return out.requires_grad
elif isinstance(out, (list, tuple)):
return any(requires_bwd_pass(x) for x in out)
elif out is None:
return False
elif isinstance(out, int):
return False
raise NotImplementedError("Don't know how to reduce", type(out))
def reduce_to_scalar_loss(out):
"""Reduce the output of a model to get scalar loss"""
if isinstance(out, torch.Tensor):
# Mean does not work on integer tensors
return out.sum() / out.numel()
elif isinstance(out, (list, tuple)):
return sum([reduce_to_scalar_loss(x) for x in out]) / len(out)
elif type(out).__name__ in (
"MaskedLMOutput",
"Seq2SeqLMOutput",
"CausalLMOutputWithCrossAttentions",
):
return reduce_to_scalar_loss(out.logits)
elif type(out).__name__ == "SquashedNormal":
return out.mean.sum()
elif isinstance(out, dict):
return sum([reduce_to_scalar_loss(value) for value in out.values()]) / len(
out.keys()
)
raise NotImplementedError("Don't know how to reduce", type(out))
def debug_dir():
path = os.path.join(os.path.dirname(__file__), "../debug")
if not os.path.exists(path):
os.mkdir(path)
return path
def debug_dump(name, code: types.CodeType, extra=""):
with open(os.path.join(debug_dir(), name), "w") as fd:
fd.write(
f"{dis.Bytecode(code).info()}\n\n{dis.Bytecode(code).dis()}\n\n{extra}\n"
)
def debug_insert_nops(frame, cache_size, hooks, _):
"""used to debug jump updates"""
def insert_nops(instructions, code_options):
instructions.insert(0, create_instruction("NOP"))
instructions.insert(0, create_instruction("NOP"))
if is_generator(frame.f_code):
return None
debug_checks(frame.f_code)
code = transform_code_object(frame.f_code, insert_nops)
graph = OutputGraph(
code_options={},
compiler_fn=None,
root_tx=None,
export=False,
export_constraints=None,
frame_state={"_id": 0},
# TODO: shouldn't this be f_locals/f_globals from frame?
local_scope=locals(),
global_scope=globals(),
f_code=frame.f_code,
)
return GuardedCode(code, CheckFunctionManager(graph).check_fn)
class CompileCounter:
def __init__(self):
self.frame_count = 0
self.op_count = 0
def __call__(self, gm: torch.fx.GraphModule, example_inputs):
self.frame_count += 1
for node in gm.graph.nodes:
if "call" in node.op:
self.op_count += 1
return gm.forward
def clear(self):
self.frame_count = 0
self.op_count = 0
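# Usage sketch (illustrative; assumes torch._dynamo is importable by the caller):
#   cnt = CompileCounter()
#   opt_fn = torch._dynamo.optimize(cnt)(lambda x: torch.sin(x) + 1)
#   opt_fn(torch.randn(8))
#   cnt.frame_count  # 1 compiled frame
#   cnt.op_count     # number of call_* nodes in the captured graph (typically 2 here)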
class CompileCounterWithBackend:
def __init__(self, backend):
self.frame_count = 0
self.op_count = 0
self.backend = backend
self.graphs = []
def __call__(self, gm: torch.fx.GraphModule, example_inputs):
from .backends.registry import lookup_backend
self.frame_count += 1
for node in gm.graph.nodes:
if "call" in node.op:
self.op_count += 1
self.graphs.append(gm)
return lookup_backend(self.backend)(gm, example_inputs)
# Equivalent to backend="eager", but also records graphs that
# we can assert on
class EagerAndRecordGraphs:
def __init__(self):
self.graphs = []
def __call__(self, gm: torch.fx.GraphModule, example_inputs):
self.graphs.append(gm)
return gm
def strip_comment(code):
code = str(code)
return re.sub(r"(?m)^ *#.*\n?", "", code)
def remove_trailing_space(code):
return "\n".join([line.rstrip() for line in code.split("\n")])
def normalize_gm(gm_str):
# strip comments as comments have path to files which may differ from
# system to system.
return remove_trailing_space(strip_comment(gm_str))
def standard_test(self, fn, nargs, expected_ops=None, expected_ops_dynamic=None):
if not config.assume_static_by_default and expected_ops_dynamic is not None:
expected_ops = expected_ops_dynamic
actual = CompileCounter()
if expected_ops is None:
expected = CompileCounter()
try:
gm = torch.fx.symbolic_trace(fn)
            expected(gm, [])  # example_inputs are ignored by CompileCounter.__call__
print("\nfx.symbolic_trace graph:")
gm.graph.print_tabular()
expected_ops = expected.op_count
except Exception:
pass # Silently ignore FX errors (not our issue)
args1 = [torch.randn(10, 10) for _ in range(nargs)]
args2 = [torch.randn(10, 10) for _ in range(nargs)]
correct1 = fn(*args1)
correct2 = fn(*args2)
reset()
opt_fn = optimize_assert(actual)(fn)
val1a = opt_fn(*args1)
val2a = opt_fn(*args2)
val1b = opt_fn(*args1)
val2b = opt_fn(*args2)
reset()
self.assertTrue(same(val1a, correct1))
self.assertTrue(same(val1b, correct1))
self.assertTrue(same(val2a, correct2))
self.assertTrue(same(val2b, correct2))
self.assertEqual(actual.frame_count, 1)
if expected_ops is not None:
self.assertEqual(actual.op_count, expected_ops)
def dummy_fx_compile(gm: fx.GraphModule, example_inputs):
return gm.forward
def format_speedup(speedup, pvalue, is_correct=True, pvalue_threshold=0.1):
if not is_correct:
return "ERROR"
if pvalue > pvalue_threshold:
return f"{speedup:.3f}x SAME"
return f"{speedup:.3f}x p={pvalue:.2f}"
def rand_strided(
size: Sequence[int],
stride: Sequence[int],
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
extra_size: int = 0,
):
needed_size = (
sum((shape - 1) * stride for shape, stride in zip(size, stride))
+ 1
+ extra_size
)
if dtype.is_floating_point:
buffer = torch.randn(needed_size, dtype=dtype, device=device)
else:
buffer = torch.zeros(size=[needed_size], dtype=dtype, device=device)
return torch.as_strided(buffer, size, stride)
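# Example: rand_strided((2, 3), (4, 1)) allocates a buffer of
# (2-1)*4 + (3-1)*1 + 1 = 7 elements and returns a 2x3 view with row stride 4,
# i.e. rows occupy offsets 0-2 and 4-6 with a one-element gap between them.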
def _make_fn_with_patches(fn, *patches):
@functools.wraps(fn)
def _fn(*args, **kwargs):
with contextlib.ExitStack() as stack:
for module, attr, val in patches:
stack.enter_context(patch.object(module, attr, val))
return fn(*args, **kwargs)
return _fn
def make_test_cls_with_patches(cls, cls_prefix, fn_suffix, *patches, xfail_prop=None):
class DummyTestClass(cls):
pass
DummyTestClass.__name__ = f"{cls_prefix}{cls.__name__}"
DummyTestClass.__qualname__ = DummyTestClass.__name__
for name in dir(cls):
if name.startswith("test_"):
fn = getattr(cls, name)
if not callable(fn):
continue
new_name = f"{name}{fn_suffix}"
new_fn = _make_fn_with_patches(fn, *patches)
new_fn.__name__ = new_name
if xfail_prop is not None and hasattr(fn, xfail_prop):
new_fn = unittest.expectedFailure(new_fn)
setattr(DummyTestClass, new_name, new_fn)
return DummyTestClass
# test Python 3.11+ specific features
def skipIfNotPy311(fn):
if sys.version_info >= (3, 11):
return fn
    return unittest.skip("requires Python 3.11+")(fn)
# Controls tests generated in test/inductor/test_torchinductor_dynamic_shapes.py
# and test/dynamo/test_dynamic_shapes.py
def expectedFailureDynamic(fn):
fn._expected_failure_dynamic = True
return fn
# Controls tests generated in test/inductor/test_torchinductor_codegen_dynamic_shapes.py
def expectedFailureCodegenDynamic(fn):
fn._expected_failure_codegen_dynamic = True
return fn
# Controls test generated in test/inductor/test_cpp_wrapper.py
def expectedFailureDynamicWrapper(fn):
fn._expected_failure_dynamic_wrapper = True
return fn
|
import argparse
import csv
import functools
import gc
import io
import itertools
import logging
import numpy as np
import os
import re
import sys
import time
import torch
from torch import nn
from torch.jit import fuser, optimized_execution
from os.path import abspath
from scipy.stats import ttest_ind
import importlib
import glob
import collections
import random
import torch._lazy
import torch._lazy.metrics as metrics
import torch._lazy.ts_backend
def set_seeds(seed=1337):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.cuda.manual_seed_all(seed)
def get_unique_suffix():
return f"{time.time()}_{os.getpid()}"
def get_benchmark_cls(model_name):
if ("Benchmark(dims=[" in model_name):
# just evaluate the model name + args
# it should create a model with the right dim
return eval(model_name)
try:
module = importlib.import_module(f'.models.{model_name}', package="torchbenchmark")
Model = getattr(module, 'Model', None)
if Model is None:
raise RuntimeError(f"{module} does not define attribute Model, skip it")
if not hasattr(Model, 'name'):
Model.name = model_name
return Model
except ModuleNotFoundError as e:
raise RuntimeError(f"Could not find dependent module {e.name} for Model {model_name}, skip it")
# from caffe2.python import workspace
# workspace.GlobalInit(['caffe2', '--caffe2_log_level=-5'])
import torch._lazy.metrics
torch._lazy.ts_backend.init()
os.environ["KALDI_ROOT"] = "/tmp" # avoids some spam
log = logging.getLogger(__name__)
# Models that are known to crash or otherwise not work with lazy tensor are
# disabled, but should be removed from these lists once fixed
SKIP = {
"densenet121": "Disabled by torchbench upstream due to OOM on T4 CI machine",
"timm_nfnet": "Disabled by torchbench upstream due to OOM on T4 CI machine",
"moco": "Distributed/ProcessGroupNCCL: Tensors must be CUDA and dense",
"tacotron2": "Disabled by torchbench upstream due to OOM on T4 CI machine",
}
SKIP_TRAIN_ONLY = {
"squeezenet1_1": "Disabled by torchbench upstream due to OOM on T4 CI machine",
"demucs": "Disabled by torchbench upstream due to OOM on T4 CI machine",
}
current_name = ""
current_device = ""
@functools.lru_cache(maxsize=None)
def output_csv(name, headers):
output = csv.writer(
io.TextIOWrapper(
open(name, "wb", buffering=0),
"utf-8",
write_through=True,
),
delimiter=",",
quotechar='"',
quoting=csv.QUOTE_MINIMAL
)
output.writerow(headers)
return output
class HardSwishBenchmark:
def __init__(self, dims):
self.name = "HardSwishBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'
self.dims = dims
# test and extra_args are placeholders to match TorchBench API
def __call__(self, device, test, extra_args):
return HardSwish(self.dims, device)
class HardSwish(nn.Module):
def __init__(self, dims, device='cuda'):
super(HardSwish, self).__init__()
self.name = "HardSwish[" + ','.join([str(d) for d in dims]) + ']'
self.example_inputs = (
torch.randn(*dims, device=device, dtype=torch.float32),
)
def get_module(self):
return self, self.example_inputs
def name(self):
return self.name
def forward(self, x):
return x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
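# Note: the expression above is the standard hard-swish activation,
# x * relu6(x + 3) / 6, written out with clamp, presumably so the benchmark
# exercises the raw pointwise ops rather than a single fused library call.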
class DivAddMulBenchmark:
"""This wrapper helps interface with the same iterator as torchbench models
"""
def __init__(self, dims):
self.name = "DivAddMulBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'
self.dims = dims
# test and extra_args are placeholders to match TorchBench API
def __call__(self, device, test, extra_args):
return DivAddMul(self.dims, device)
class DivAddMul(nn.Module):
def __init__(self, dims, device='cuda'):
super(DivAddMul, self).__init__()
self.attention_head_size = dims[1]
self.W = torch.ones(*dims[-2:], device=device, dtype=torch.float32)
self.name = "DivAddMul[" + ','.join([str(d) for d in dims]) + ']'
self.example_inputs = (
torch.ones(*dims, device=device, dtype=torch.float32),
torch.randn(*dims, device=device, dtype=torch.float32),
)
def get_module(self):
return self, self.example_inputs
def name(self):
return self.name
def forward(self, inputs, mask):
out3 = ((inputs / 0.1) + mask) * 2.0
out5 = out3.matmul(self.W)
out8 = ((out5 / 0.1) + mask) * 2.00
return out8
toy_models = [
HardSwishBenchmark,
DivAddMulBenchmark,
]
toy_dims = [
[1, 1, 1, 1],
[32, 16, 128, 128],
[128, 16, 128, 128],
[256, 16, 128, 128],
]
for dims in toy_dims:
# The toy benchmarks don't support training..
# and it's too late to add it inside the generator func below...
SKIP_TRAIN_ONLY["DivAddMulBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'] = "This model has no train()"
SKIP_TRAIN_ONLY["HardSwishBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'] = "This model has no train()"
def iter_toy_model_names():
for dims in toy_dims:
for model in toy_models:
yield model(dims=dims).name
def pick_grad(args, name):
if args.test == 'train':
return torch.enable_grad()
if name in ("maml",):
return torch.enable_grad()
else:
return torch.no_grad()
def short_name(name, limit=20):
"""Truncate a model name to limit chars"""
return name if len(name) <= limit else f"{name[:limit - 3].rstrip('_')}..."
def iter_torchbench_model_names():
from torchbenchmark import _list_model_paths
for model_path in _list_model_paths():
model_name = os.path.basename(model_path)
yield model_name
def iter_models(args, dirpath):
for name in itertools.chain(iter_toy_model_names(), iter_torchbench_model_names()):
if (
(len(args.filter) and (not re.search("|".join(args.filter), name, re.I)))
or (len(args.exclude) and re.search("|".join(args.exclude), name, re.I))
):
save_error(name, args.test, "disabled via cmdline filter/exclude", dirpath)
continue
if name in SKIP:
save_error(name, args.test, f"SKIP because {SKIP[name]}", dirpath)
continue
if name in SKIP_TRAIN_ONLY and args.test == "train":
save_error(name, args.test, f"SKIP_TRAIN_ONLY because {SKIP_TRAIN_ONLY[name]}", dirpath)
continue
yield name
def call_model_with(model, inputs):
if isinstance(inputs, tuple) or isinstance(inputs, list):
return model(*inputs)
elif isinstance(inputs, dict):
return model(**inputs)
    elif isinstance(inputs, torch.Tensor):
return model(inputs)
raise RuntimeError("invalid example inputs ", inputs)
class CudaSync:
def __init__(self, sync_every_iter=False):
self.sync_every_iter = sync_every_iter
def iter_sync(self):
if self.sync_every_iter:
torch.cuda.synchronize()
def final_sync(self):
torch.cuda.synchronize()
class NoOpSync:
def __init__(self, sync_every_iter=False):
pass
def iter_sync(self):
pass
def final_sync(self):
pass
class LazySync:
def __init__(self, sync_every_iter=False, skip_final_sync=False):
self.sync_every_iter = sync_every_iter
self.skip_final_sync = skip_final_sync
def iter_sync(self):
torch._lazy.mark_step()
if self.sync_every_iter:
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
def final_sync(self):
torch._lazy.mark_step()
if self.skip_final_sync:
return
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
def dump_lazy_metrics(reset=False):
    met = {name: int(metrics.counter_value(name)) for name in metrics.counter_names() if int(metrics.counter_value(name)) > 0}
if reset:
metrics.reset()
return met
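# The returned dict maps counter names to counts, e.g. (illustrative values)
# {"CachedCompile": 40, "aten::nonzero": 2}; keys containing "aten::" indicate
# eager fallbacks and are reported separately by the experiments below.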
def timed(args, benchmark, sync, times=1):
results = None
sync.final_sync()
set_seeds()
if args.test == 'eval':
model, example_inputs = benchmark.get_module()
if current_device == 'lazy':
torch.cuda.set_sync_debug_mode(2)
elif current_device == 'cuda':
torch.cuda.set_sync_debug_mode(0)
# keep the lazy tensor results alive until the final sync
t0 = time.perf_counter()
for i in range(times):
if args.test == 'eval':
results = call_model_with(model, example_inputs)
elif args.test == 'train':
benchmark.train()
# for the last i, let final_sync take care of it
if i < times - 1:
# may be just an async 'mark_step' for lazy, or no-op for cuda
sync.iter_sync()
if current_device in ['lazy', 'cuda']:
# don't assume torch.cuda present unless using cuda
torch.cuda.set_sync_debug_mode(0)
# should be a hard sync for lazy and cuda
# unless strictly measuring lazy trace overhead, then no-op
sync.final_sync()
t1 = time.perf_counter()
return results, t1 - t0
def to_device(tensors, device):
"""Handles moving tensor or tensors (in various containers) to a new device.
Used for various purposes (either correctness checking, or even as an impromptu
means of synchronization.) Note: this method doesn't apply a cuda sync, do that outside.
"""
try:
import transformers.modeling_outputs
if (
isinstance(tensors, transformers.modeling_outputs.MaskedLMOutput) or
isinstance(tensors, transformers.modeling_outputs.Seq2SeqLMOutput)
):
# huggingface transformers return classes as model output with many attributes
# we don't want to sync (such as hidden states of every layer) - just sync the logits
tensors = tensors.logits
except ImportError:
pass
try:
import torchbenchmark.models.soft_actor_critic.nets
import torchbenchmark.models.drq.drqutils
if (
isinstance(tensors, torchbenchmark.models.soft_actor_critic.nets.SquashedNormal) or
isinstance(tensors, torchbenchmark.models.drq.drqutils.SquashedNormal)
):
# a SquashedNormal is a py class that holds a loc and scale torch tensor,
# so convert it to a tuple for compatibility with downstream check_results
tensors = (tensors.loc, tensors.scale)
except ImportError:
pass
if isinstance(tensors, tuple) or isinstance(tensors, list):
return tuple(to_device(i, device) for i in tensors)
elif isinstance(tensors, dict):
return {k: to_device(tensors[k], device) for k in tensors}
elif isinstance(tensors, torch.Tensor):
return tensors.to(device)
raise RuntimeError("invalid example tensors ", tensors)
def lazy_overhead_experiment(args, results, benchmark, lazy_benchmark):
timings = np.zeros((args.repeat, 2), np.float64)
ref_sync = CudaSync if current_device == 'cuda' else NoOpSync
warmup0 = time.perf_counter()
for rep in range(args.warmup):
# interleave the runs to handle frequency scaling and load changes
timed(args, benchmark, sync=ref_sync(sync_every_iter=True))
timed(args, lazy_benchmark, sync=LazySync(sync_every_iter=True))
warmup_time = time.perf_counter() - warmup0
bench0 = time.perf_counter()
dump_lazy_metrics(reset=True)
for rep in range(args.repeat):
# interleave the runs to handle frequency scaling and load changes
_, timings[rep, 0] = timed(args, benchmark, sync=ref_sync(sync_every_iter=True))
_, timings[rep, 1] = timed(args, lazy_benchmark, sync=LazySync(skip_final_sync=True))
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
lazy_metrics = dump_lazy_metrics(reset=True)
bench_time = time.perf_counter() - bench0
pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
median = np.median(timings, axis=0)
fallbacks = ";".join([f"{m}:{lazy_metrics[m]}" for m in lazy_metrics if "aten::" in m])
ops = int(sum([lazy_metrics[m] for m in lazy_metrics if 'lazy::' in m or 'aten::' in m]) / args.repeat)
trace_us = median[1] / 1e-6
us_per_op = trace_us / ops
overhead = median[1] / median[0]
results.append(overhead)
output_csv(
os.path.join(args.output_dir, f"lazy-overheads_{args.test}_{get_unique_suffix()}.csv"),
("dev", "name", "test", "overhead", "pvalue", "ops", "trace_us", "us_per_op", "fallbacks"),
).writerow([current_device, current_name, args.test, f"{overhead:.4f}", f"{pvalue:.4e}",
f"{ops}", f"{trace_us:.4f}", f"{us_per_op:.4f}", f"{fallbacks}"])
print(f"{short_name(current_name, limit=30):<30} {current_device:<4} {args.test:<5} "
f"{'trace overheads':<20} overhead: {overhead:.3f} pvalue: {pvalue:.2e} us_per_op {us_per_op:.3f}")
if args.verbose:
print(f"CIDEBUGOUTPUT,lazy_overhead_experiment,"
f"{current_name},{args.test},{current_device},{overhead:.4f},"
f"{pvalue:.4e},{args.warmup},{args.repeat},{warmup_time:.2f},{bench_time:.2f}")
return (overhead, pvalue)
def lazy_compute_experiment(args, experiment, results, benchmark, lazy_benchmark, sync_every_iter=False):
timings = np.zeros((args.repeat, 2), np.float64)
ref_sync = CudaSync(sync_every_iter=sync_every_iter) if current_device == 'cuda' else NoOpSync()
lazy_sync = LazySync(sync_every_iter=sync_every_iter)
# interleave the runs to handle frequency scaling and load changes
warmup0 = time.perf_counter()
for rep in range(args.warmup):
# warmup
timed(args, benchmark, sync=ref_sync)
timed(args, lazy_benchmark, sync=lazy_sync)
warmup_time = time.perf_counter() - warmup0
# fresh metrics for each timed run
dump_lazy_metrics(reset=True)
bench0 = time.perf_counter()
for rep in range(args.repeat):
# measure
_, timings[rep, 0] = timed(args, benchmark, times=args.inner_loop_repeat, sync=ref_sync)
_, timings[rep, 1] = timed(args, lazy_benchmark, times=args.inner_loop_repeat, sync=lazy_sync)
bench_time = time.perf_counter() - bench0
lazy_metrics = dump_lazy_metrics(reset=True)
if 'CachedCompile' not in lazy_metrics or lazy_metrics['CachedCompile'] != args.repeat * args.inner_loop_repeat:
print("WARNING: lazy cached compile count indicates fallbacks, or something else")
fallbacks = {k: v for (k, v) in lazy_metrics.items() if 'aten::' in k}
if len(fallbacks):
print(f"WARNING: lazy-eager fallbacks detected for [{fallbacks}]")
if args.dump_lazy_counters:
print(lazy_metrics)
pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
median = np.median(timings, axis=0)
speedup = median[0] / median[1]
results.append(speedup)
output_csv(
os.path.join(args.output_dir, f"lazy-compute_{args.test}_{get_unique_suffix()}.csv"),
("name", "dev", "experiment", "test", "speedup", "pvalue"),
).writerow([current_name, current_device, experiment, args.test, f"{speedup:.4f}", f"{pvalue:.2e}"])
print(f"{short_name(current_name, limit=30):<30} {current_device:<4} "
f"{args.test:<5} {experiment:<20} speedup: {speedup:.3f} pvalue: {pvalue:.2e}")
if args.verbose:
print(f"CIDEBUGOUTPUT,lazy_compute_experiment,"
f"{current_name},{current_device},{experiment},{args.test},{speedup:.4f},"
f"{pvalue:.2e},{args.warmup},{args.repeat},{warmup_time:.2f},{bench_time:.2f}")
return (speedup, pvalue)
def check_eval_correctness(args, benchmark, lazy_benchmark, name):
try:
set_seeds()
model, example_inputs = benchmark.get_module()
model.eval()
correct_result = call_model_with(model, example_inputs)
set_seeds()
lazy_model, lazy_inputs = lazy_benchmark.get_module()
lazy_model.eval()
lazy_result = call_model_with(lazy_model, lazy_inputs)
if not check_results(correct_result, lazy_result, args.device, args.allclose_atol):
print(f"INCORRECT: {name}")
save_error(name, args.test, "Incorrect results.", args.output_dir)
return False
except Exception as e:
print(f"ERROR: {name}: {e}")
save_error(name, args.test, e, args.output_dir)
return False
return True
def just_run_once(args, lazy_benchmark):
set_seeds()
if args.test == 'eval':
model, example_inputs = lazy_benchmark.get_module()
results.append(call_model_with(model, example_inputs))
elif args.test == 'train':
lazy_benchmark.train()
torch._lazy.mark_step()
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
def check_results_impl(correct_result, lazy_result, atol):
# recursive helper for dealing with nested data structures
    if type(correct_result) is tuple:
        # check every element, not just the first one
        return all(check_results_impl(c, l, atol) for c, l in zip(correct_result, lazy_result))
    if type(correct_result) is dict:
        for k in correct_result:
            assert k in lazy_result, f"key {k} missing from lazy_result"
        return all(
            check_results_impl(correct_result[k], lazy_result[k], atol) for k in correct_result
        )
assert type(correct_result) is torch.Tensor, f"Expect torch.Tensor but got {type(correct_result)}."
ans = torch.allclose(correct_result, lazy_result, atol=atol)
if not ans:
print(f"correct_result:\n{correct_result}, lazy_result:\n{lazy_result}")
return ans
def check_results(correct_result, lazy_result, device, atol):
# to_device has recursive logic and special handling for
# extracting relevant tensors from huggingface data structures
correct_result = to_device(correct_result, device)
lazy_result = to_device(lazy_result, device)
return check_results_impl(correct_result, lazy_result, atol)
def check_fuser(args):
if args.fuser == 'noopt':
return
if args.fuser is None:
args.fuser = 'fuser1' if args.device == 'cpu' else 'fuser2'
if args.device == 'cpu':
assert args.fuser in ['fuser0', 'fuser1']
if args.fuser == 'fuser1':
assert torch._C._llvm_enabled(), "Can't use fuser1 (nnc) for CPU without building torch with llvm."
if args.device == 'cuda':
assert args.fuser in ['fuser0', 'fuser1', 'fuser2']
def run_tracing_execute_noops(test, lazy_benchmark):
ltm.set_noop_execution_mode(True)
if test == 'eval':
model, example_inputs = lazy_benchmark.get_module()
    # doesn't actually collect a profile, but runs just the lazy trace
    # so you can use a profiler on top of the program.
    # note: depends on making the backend do a 'no-op' for ExecuteComputation
results = []
for i in range(300):
if test == 'eval':
results.append(call_model_with(model, example_inputs))
elif test == 'train':
lazy_benchmark.train()
# we still do a mark step, to preserve the ratio of how often we split the graph
# and run through the process of 'compile and execute' (even though these are now noops)
torch._lazy.mark_step()
ltm.set_noop_execution_mode(False)
def merge_with_prefix(prefix, tmp_dir, out_dir, headers):
results = []
rfnames = glob.glob(os.path.join(tmp_dir, prefix + "*"))
    for rfname in rfnames:
        with open(rfname) as rf:
            results.extend(rf.readlines()[1:])  # skip header
# the header shouldn't require quotations and the results should already be properly
# quoted via output_csv
with open(os.path.join(out_dir, prefix + "acc.csv"), "a+") as acc_csv:
acc_csv.write(",".join(headers) + "\n")
for l in results:
acc_csv.write(l)
def merge_reformat(tmp_dir, args, table):
    out_dir = args.output_dir
# depending on the type of an experiment, fields can be in a different order
# `get_field` deals with all three types including `error`
def get_field(row, name, file_type):
headers = {
"error": ("name", "test", "error"),
"lazy-compute" : ("name", "dev", "experiment", "test", "speedup", "pvalue"),
"lazy-overheads" : ("dev", "name", "test", "overhead", "pvalue", "ops", "trace_us", "us_per_op", "fallbacks")
}
header = headers[file_type]
r = row[header.index(name)] if name in header else "N/A"
return r
csv_files = glob.glob(os.path.join(tmp_dir, "*.csv"))
for csvf in csv_files:
with open(csvf, "r") as csvfile:
prefix = os.path.basename(csvf).split("_")[0]
csvreader = csv.reader(csvfile, delimiter=",", quotechar='"')
# This skips the first row of the CSV file.
next(csvreader)
for r in csvreader:
key = (get_field(r, "name", prefix), get_field(r, "test", prefix))
entry = table[key]
if prefix == "error":
entry["error"] = f'{entry.get("error", "")} {get_field(r, "error", prefix)}'
elif prefix == "lazy-overheads":
entry["overhead"] = get_field(r, "overhead", prefix)
entry["ops"] = get_field(r, "ops", prefix)
entry["trace_us"] = get_field(r, "trace_us", prefix)
entry["us_per_op"] = get_field(r, "us_per_op", prefix)
entry["fallbacks"] = get_field(r, "fallbacks", prefix)
else:
entry[get_field(r, "experiment", prefix)] = get_field(r, "speedup", prefix)
amortized_header = f"amortized {args.inner_loop_repeat}x"
headers = ("name", "test", amortized_header, "unamortized", "overhead", "error", "rc",
"ops", "trace_us", "us_per_op", "fallbacks")
cw = output_csv(
os.path.join(out_dir, f"{args.test}_reformat.csv"),
headers
)
for k, v in table.items():
cw.writerow((k[0], k[1], v.get(amortized_header, 'N/A'),
v.get('unamortized', 'N/A'), v.get('overhead', 'N/A'), v.get('error', 'N/A'), v.get('rc'),
v.get('ops', 'N/A'), v.get('trace_us', 'N/A'), v.get('us_per_op', 'N/A'), v.get('fallbacks', 'N/A')))
def save_error(name, test, error, dir):
output_csv(
os.path.join(dir, f"error_{get_unique_suffix()}.csv"),
("name", "test", "error"),
).writerow([name, test, error])
if __name__ == "__main__" :
parser = argparse.ArgumentParser()
parser.add_argument("--filter", "-k", action="append", default=[], help="filter benchmarks")
parser.add_argument("--exclude", "-x", action="append", default=[], help="filter benchmarks")
parser.add_argument("--device", "-d", default='cuda', help="cpu or cuda")
parser.add_argument("--warmup", type=int, default=4, help="number of warmup runs")
parser.add_argument("--timeout", type=int, default=60 * 10, help="time allocated to each model")
parser.add_argument("--repeat", "-n", type=int, default=4, help="number of timing runs (samples)")
parser.add_argument("--inner_loop_repeat", type=int, default=10, help="repeat the computation this many times per sample")
parser.add_argument("--fuser", type=str, choices=['noopt', 'fuser0', 'fuser1', 'fuser2'], help="0=legacy, 1=nnc, 2=nvfuser")
parser.add_argument("--test", type=str, choices=['eval', 'train'], default='eval')
parser.add_argument("--verbose", action='store_true')
parser.add_argument("--torchbench_dir", type=str, help="path to torchbenchmark repo")
parser.add_argument("--output_dir", type=str, default=".", help="path to write output files")
parser.add_argument("--dump_lazy_counters", action='store_true', help="dump lazy counter values after each timing run")
parser.add_argument("--just_run_once", action="store_true")
parser.add_argument("--run_tracing_execute_noops", action='store_true',
help="Run the tracing portion only, with noop backend, useful for running under a profiler.")
parser.add_argument("--run_in_subprocess", "-s", type=str,
help="which model run in subprocess. This will ignore filter and exclude")
parser.add_argument("--allclose_atol", type=float, default=1e-4,
help="Absolute tolerance to check lazy result again the correct result")
parser.add_argument("--precision", choices=["fp32", "fp16", "amp"], default="fp32", help="enable fp16 modes from: fp32, fp16/half, or amp")
args = parser.parse_args()
results = []
check_fuser(args)
# torchbench_dir = abspath(args.torchbench_dir) if args.torchbench_dir else abspath("../../benchmark")
# assert os.path.exists(os.path.join(torchbench_dir, "torchbenchmark")), "set --torchbench_dir to installed torchbench repo"
# sys.path.append(torchbench_dir)
copy_argv = [] + sys.argv
if args.run_in_subprocess:
try:
from fastNLP.core import logger
logger.setLevel(logging.WARNING)
current_name = args.run_in_subprocess
benchmark_cls = get_benchmark_cls(args.run_in_subprocess)
current_device = args.device
if args.device == 'cuda':
assert 'LTC_TS_CUDA' in os.environ and bool(os.environ['LTC_TS_CUDA']), "set LTC_TS_CUDA for cuda device"
with pick_grad(args, current_name):
with fuser(args.fuser) if args.fuser != 'noopt' else optimized_execution(False):
if args.fuser == 'noopt':
# TODO(whc) cleaner way to configure the fusers; seems i have to set both optimized_execution(False)
# _and_ disable fusers to get no-optimization
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
if args.fuser == 'fuser2':
# special case to disable nvfuser horizontal fusion as it is currently broken
# TODO(whc) remove this once it's fixed
torch._C._jit_set_nvfuser_horizontal_mode(False)
# no try since we should've already filtered out models we can't create
set_seeds()
benchmark = benchmark_cls(test=args.test, device=args.device, extra_args=["--precision", args.precision])
set_seeds()
lazy_benchmark = benchmark_cls(test=args.test, device='lazy', extra_args=["--precision", args.precision])
# TODO: might be redundant
gc.collect()
if args.run_tracing_execute_noops:
print(f"Profiling {current_name}")
run_tracing_execute_noops(args.test, lazy_benchmark)
# when profiling, we really don't want to do anything else
exit(0)
if args.just_run_once:
just_run_once(args, lazy_benchmark)
exit(0)
if args.test == 'eval':
if not check_eval_correctness(args, benchmark, lazy_benchmark, current_name):
exit(3)
lazy_overhead_experiment(args, results, benchmark, lazy_benchmark)
lazy_compute_experiment(args, f"amortized {args.inner_loop_repeat}x", results, benchmark, lazy_benchmark)
lazy_compute_experiment(args, "unamortized", results, benchmark, lazy_benchmark, sync_every_iter=True)
except Exception as e:
print(f"ERROR: {current_name}: {e}")
save_error(current_name, args.test, e, args.output_dir)
exit(13)
exit(0)
import psutil
import subprocess
import tempfile
dirpath = tempfile.mkdtemp()
table = collections.defaultdict(dict)
for model_name in iter_models(args, dirpath):
# if `--run_in_subprocess` is specified, it will override any filters and excludes
# pass the rest of arguments intact such as device, test, repeat, etc
# note, the latest output_dir will override the original one and this is exactly what we want
# for child processes
launch_command = f"python {' '.join(copy_argv)} --run_in_subprocess '{model_name}' --output_dir={dirpath}"
env = os.environ
env["LTC_TS_CUDA"] = "1" if args.device == "cuda" else "0"
rc = 0
try:
if args.verbose:
cp = subprocess.run("nvidia-smi --query-gpu=timestamp,utilization.memory,memory.total,memory.free,memory.used"
" --format=csv,noheader",
capture_output=True, text=True, shell=True)
print(f"CIDEBUGOUTPUT,BEFORE subprocess.run,{model_name},{cp.stdout}")
proc = subprocess.Popen(launch_command,
env=env,
shell=True,
stderr=subprocess.STDOUT)
outs, errs = proc.communicate(timeout=args.timeout)
rc = proc.poll()
except subprocess.TimeoutExpired:
print(f"{model_name} timed out after {args.timeout // 60} minutes! Include it in SKIP or SKIP_TRAIN_ONLY")
save_error(model_name, args.test, "Timed out.", dirpath)
            # to make timeouts easy to spot, they will also have
            # "timed out" in the error column
rc = 17
process = psutil.Process(proc.pid)
for p in process.children(recursive=True):
p.kill()
process.kill()
if args.verbose:
cp = subprocess.run("nvidia-smi --query-gpu=timestamp,utilization.memory,memory.total,memory.free,memory.used"
" --format=csv,noheader",
capture_output=True, text=True, shell=True)
print(f"CIDEBUGOUTPUT,AFTER subprocess.run,{model_name},{args.test},{cp.stdout}")
entry = table[(model_name, args.test)]
entry["rc"] = rc
merge_with_prefix("lazy-overheads_", dirpath, args.output_dir, ("dev", "name", "test", "overhead", "pvalue"))
merge_with_prefix("lazy-compute_", dirpath, args.output_dir, ("name", "dev", "experiment", "test", "speedup", "pvalue"))
merge_with_prefix("error_", dirpath, args.output_dir, ("name", "test", "error"))
merge_reformat(dirpath, args, table)
|
"""
Run PyTorch nightly benchmarking.
"""
import re
import argparse
import itertools
import json
import math
import os
import yaml
import numpy
from typing import List, Tuple, Dict, Optional, Any
from ..utils import REPO_PATH, add_path, get_output_json, get_default_output_json_path
from . import BM_NAME
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models, load_model_isolated, TorchBenchModelConfig, \
list_devices, list_tests
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_DELTA_THRESHOLD = 0.07
DEFAULT_TARGET_SCORE = 1000.0
def generate_model_configs(devices: List[str], tests: List[str], model_names: List[str]) -> List[TorchBenchModelConfig]:
"""Use the default batch size and default mode."""
if not model_names:
model_names = list_models()
cfgs = itertools.product(*[devices, tests, model_names])
result = [TorchBenchModelConfig(
name=model_name,
device=device,
test=test,
batch_size=None,
extra_args=[],
extra_env=None,
) for device, test, model_name in cfgs]
return result
def get_metrics(_config: TorchBenchModelConfig) -> List[str]:
return ["latencies",]
def compute_score(results, reference_latencies: Dict[str, float]) -> float:
# sanity checks
latency_results = {k: v for k, v in results.items() if k.endswith("_latency")}
test_set = set(latency_results.keys())
reference_set = set(reference_latencies.keys())
test_only_set = test_set.difference(reference_set)
    assert not test_only_set, f"Tests {test_only_set} only appear in the result json, not in the reference yaml."
    reference_only_set = reference_set.difference(test_set)
    assert not reference_only_set, f"Tests {reference_only_set} only appear in the reference yaml, not in the result json."
# check that for every test in reference_latencies, we can find the corresponding tests in latency_results
total_score = 0.0
weight = 1.0 / len(reference_latencies)
for key, ref_latency in reference_latencies.items():
test_latency = latency_results[key]
ref_latency = float(ref_latency)
delta = (test_latency - ref_latency) / test_latency
# If less than threshold, treat it as noise
if abs(delta) <= DEFAULT_DELTA_THRESHOLD:
test_latency = ref_latency
total_score += weight * math.log(ref_latency / test_latency)
score = math.exp(total_score) * DEFAULT_TARGET_SCORE
return score
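# Worked example (sketch): with reference latencies {a: 10, b: 20} and measured
# {a: 9, b: 20.5}, test b is within the 7% noise threshold and is snapped to its
# reference (contributing log(1) = 0), while a contributes 0.5 * log(10/9) ~= 0.0527,
# so the score is exp(0.0527) * 1000 ~= 1054.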
def result_to_output_metrics(results: List[Tuple[TorchBenchModelConfig, TorchBenchModelMetrics]]) -> Dict[str, float]:
# metrics name examples:
# test_eval[timm_regnet-cuda-eager]_latency
# test_eval[timm_regnet-cuda-eager]_cmem
# test_eval[timm_regnet-cuda-eager]_gmem
result_metrics = {}
for _config_id, (config, metrics) in enumerate(results):
metrics_base = f"test_{config.test}[{config.name}-{config.device}-eager]"
latency_metric = f"{metrics_base}_latency"
median_latency = numpy.median(metrics.latencies)
assert median_latency, f"Run failed for metric {latency_metric}"
result_metrics[latency_metric] = median_latency
if metrics.cpu_peak_mem:
cpu_peak_mem = f"{metrics_base}_cmem"
result_metrics[cpu_peak_mem] = metrics.cpu_peak_mem
if metrics.gpu_peak_mem:
gpu_peak_mem = f"{metrics_base}_gmem"
result_metrics[gpu_peak_mem] = metrics.gpu_peak_mem
return result_metrics
def validate(candidates: List[str], choices: List[str]) -> List[str]:
"""Validate the candidates provided by the user is valid"""
for candidate in candidates:
assert candidate in choices, f"Specified {candidate}, but not in available list: {choices}."
return candidates
def generate_model_configs_from_yaml(yaml_file: str) -> Tuple[List[TorchBenchModelConfig], Dict[str, float], Any]:
yaml_file_path = os.path.join(CURRENT_DIR, yaml_file)
with open(yaml_file_path, "r") as yf:
config_obj = yaml.safe_load(yf)
devices = config_obj["metadata"]["devices"]
configs = []
reference_latencies = {}
for device in devices:
for c in config_obj[device]:
if not c["stable"]:
continue
config = TorchBenchModelConfig(
name=c["model"],
device=device,
test=c["test"],
batch_size=c["batch_size"] if "batch_size" in c else None,
extra_args=[],
extra_env=None,
)
configs.append(config)
metrics_base = f"test_{config.test}[{config.name}-{config.device}-eager]"
latency_metric_key = f"{metrics_base}_latency"
reference_latencies[latency_metric_key] = c["median_latency"]
return configs, reference_latencies, config_obj
def parse_test_name(test_name: str) -> TorchBenchModelConfig:
regex = "test_(.*)\[(.*)-(.*)-eager\]"
test, model, device = re.match(regex, test_name).groups()
return TorchBenchModelConfig(
name=model,
device=device,
test=test,
batch_size=None,
extra_args=[],
extra_env=None,
)
def generate_model_configs_from_bisect_yaml(bisect_yaml_file: str) -> List[TorchBenchModelConfig]:
def _remove_suffix(test_name: str):
index_last_underscore = test_name.rfind("_")
return test_name[:index_last_underscore]
with open(bisect_yaml_file, "r") as yf:
bisect_obj = yaml.safe_load(yf)
# remove the suffix
bisect_tests = [ _remove_suffix(test_name) for test_name in bisect_obj["details"] ]
bisect_tests = set(bisect_tests)
configs = [ parse_test_name(test_name_str) for test_name_str in sorted(bisect_tests) ]
return configs
def parse_str_to_list(candidates):
if isinstance(candidates, list):
return candidates
candidates = list(map(lambda x: x.strip(), candidates.split(",")))
return candidates
def run_config(config: TorchBenchModelConfig, dryrun: bool=False) -> Optional[TorchBenchModelMetrics]:
"""This function only handles NotImplementedError, all other errors will fail."""
metrics = get_metrics(config)
print(f"Running {config} ...", end='', flush=True)
if dryrun:
print(" [Skip: Dryrun]", flush=True)
return None
# We do not allow RuntimeError in this test
try:
# load the model instance in subprocess
model = load_model_isolated(config)
# get the model test metrics
result: TorchBenchModelMetrics = get_model_test_metrics(model, metrics=metrics)
except NotImplementedError as e:
print(" [NotImplemented]", flush=True)
return None
print(" [Done]", flush=True)
return result
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cuda", help="Devices to run, splited by comma.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run, splited by comma.")
parser.add_argument("--model", "-m", default=None, type=str, help="Only run the specifice models, splited by comma.")
parser.add_argument("--config", "-c", default=None, help="YAML config to specify tests to run.")
parser.add_argument("--run-bisect", help="Run with the output of regression detector.")
parser.add_argument("--dryrun", action="store_true", help="Dryrun the command.")
parser.add_argument("--score", default=None, help="Generate score from the past run json only.")
parser.add_argument("--output", default=get_default_output_json_path(BM_NAME), help="Specify the path of the output file")
return parser.parse_args(args)
def run(args: List[str]):
args = parse_args(args)
if args.score:
assert args.config, f"To compute score, you must specify the config YAML using --config."
configs, reference_latencies, config_obj = generate_model_configs_from_yaml(args.config)
with open(args.score, "r") as sp:
run_result = json.load(sp)
input_metrics = run_result["metrics"]
score = compute_score(input_metrics, reference_latencies)
score_version = config_obj["metadata"]["score_version"]
score_name = f"{score_version}_score"
print(f"TorchBench {score_name}: {score}.")
exit(0)
elif args.config:
configs, reference_latencies, config_obj = generate_model_configs_from_yaml(args.config)
elif args.run_bisect:
configs = generate_model_configs_from_bisect_yaml(args.run_bisect)
reference_latencies = None
else:
# If not specified, use the entire model set
if not args.model:
args.model = list_models()
devices = validate(parse_str_to_list(args.device), list_devices())
tests = validate(parse_str_to_list(args.test), list_tests())
models = validate(parse_str_to_list(args.model), list_models())
configs = generate_model_configs(devices, tests, model_names=models)
reference_latencies = None
results = []
try:
for config in configs:
metrics = run_config(config, dryrun=args.dryrun)
if metrics:
results.append([config, metrics])
except KeyboardInterrupt:
print("User keyboard interrupted!")
if not args.dryrun:
metrics = result_to_output_metrics(results)
if reference_latencies:
score = compute_score(metrics, reference_latencies)
score_version = config_obj["metadata"]["score_version"]
score_name = f"{score_version}_score"
metrics[score_name] = score
result = get_output_json(BM_NAME, metrics)
import torch
result["environ"]["device"] = torch.cuda.get_device_name()
with open(args.output, 'w') as f:
json.dump(result, f, indent=4)
|
BM_NAME = "rocm-test" |
"""
Test user-customized invoke function.
"""
import argparse
from typing import List
from ..utils import REPO_PATH, add_path, get_output_json, dump_output
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models, load_model_isolated, TorchBenchModelConfig, \
list_devices, list_tests, inject_model_invoke
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
from typing import Optional
def user_defined_invoke(self):
print(f"Model {self.name} invoke has been replaced!")
self.output_metrics_list = [1.0, 2.0, 3.0, 4.0]
    self.output_metrics_dict = {
"m1": 1.0,
"m2": 2.0,
"m3": 3.0,
}
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cuda", help="Devices to run, splited by comma.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run, splited by comma.")
parser.add_argument("--bs", type=int, default=1, help="Test batch size")
parser.add_argument("--model", "-m", default=None, type=str, help="Only run the specifice models, splited by comma.")
parser.add_argument("--inject", action="store_true", help="Inject user defined invoke function to the model.")
return parser.parse_args(args)
def get_metrics(_config: TorchBenchModelConfig) -> List[str]:
return ["latencies"]
def run_config(config: TorchBenchModelConfig, dryrun: bool=False) -> Optional[TorchBenchModelMetrics]:
"""This function only handles NotImplementedError, all other errors will fail."""
metrics = get_metrics(config)
print(f"Running {config} ...", end='')
if dryrun:
return None
# We do not allow RuntimeError in this test
    result = {}
try:
        # load the model instance in a separate subprocess (via load_model_isolated)
model = load_model_isolated(config)
inject_model_invoke(model, user_defined_invoke)
# get the model test metrics
model.invoke()
result["list_result"] = model.get_model_attribute("output_metrics_list")
result["dict_output"] = model.get_model_attribute("output_metrics_dict")
except NotImplementedError as e:
print(" [NotImplemented]")
return None
print(" [Done]")
return result
def run(args: List[str]):
args = parse_args(args)
config = TorchBenchModelConfig(
name=args.model,
device=args.device,
test=args.test,
batch_size=args.bs,
extra_args=[],
extra_env=None,
)
result = run_config(config)
print(result)
|
import torch
from ..utils import dump_output
from .cases import benchmark_cases
from .util import benchmark
import pprint
from typing import List
BM_NAME = 'functorch'
def run_benchmarks():
metrics = {}
for case_ctor in benchmark_cases:
case = case_ctor()
runtime_ms = benchmark(case)
metrics[case.name()] = runtime_ms
return metrics
def run(args: List[str]):
metrics = run_benchmarks()
result = {
'name': BM_NAME,
'environ': {
'pytorch_git_version': torch.version.git_version,
},
'metrics': metrics,
}
pprint.pprint(result)
dump_output(BM_NAME, result)
|
import torch
import torch.nn as nn
from functorch import vmap, jacfwd, jacrev
from .util import BenchmarkCase
# batched hessians of fully connected layers is a popular quantity
# in physics-related models.
# This test case is from https://github.com/pytorch/functorch/issues/989
# We haven't been able to get the full model yet, so, this test case
# is going into the functorch userbenchmark instead of torchbenchmark.
class VmapHessianFC(BenchmarkCase):
def __init__(self):
device = 'cuda'
D1 = 2 # x, y
D2 = 3 # u, v, p
B = 10000
x = torch.randn(B, D1).to(device)
model = nn.Sequential(
nn.Linear(D1, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, D2),
).to(device)
self.model = model
self.x = x
def name(self):
return 'vmap_hessian_fc_cuda'
def run(self):
def predict(x):
out = self.model(x)
return out, out
hessian, pred = vmap(
jacfwd(jacrev(predict, argnums=0, has_aux=True), argnums=0, has_aux=True),
in_dims=0,
)(
self.x
)
|
from abc import ABC, abstractmethod
from typing import Any, Callable
from torch.utils.benchmark import Timer
from torch.utils._pytree import tree_flatten
class BenchmarkCase(ABC):
@abstractmethod
def name(self) -> str:
pass
@abstractmethod
def run(self) -> Callable:
pass
def time(fn: Callable, test_runs: int) -> float:
t = Timer(stmt="fn()", globals={"fn": fn})
times = t.blocked_autorange()
return times.median * 1000 # time in ms
def benchmark(case: BenchmarkCase, warmup_runs: int = 10, test_runs: int = 20) -> float:
for _ in range(warmup_runs):
case.run()
return time(case.run, test_runs)
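# Usage sketch (MyCase below is a hypothetical subclass, for illustration only):
#   class MyCase(BenchmarkCase):
#       def name(self): return "noop"
#       def run(self): return None
#   runtime_ms = benchmark(MyCase())
# Note that `test_runs` is effectively unused by `time`, since
# Timer.blocked_autorange chooses its own number of measurement runs.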
|
from .util import BenchmarkCase
from torchbenchmark.models.lennard_jones import Model as LJModel
from torchbenchmark.models.functorch_maml_omniglot import Model as FTMamlOmniglot
from torchbenchmark.models.functorch_dp_cifar10 import Model as FTDPCifar10
from .vmap_hessian_fc import VmapHessianFC
from .simple_models import (
SimpleCNN,
SimpleMLP,
VmapWrapper,
EnsembleMultiWrapper,
EnsembleSingleWrapper,
PerSampleGradWrapper,
)
class TorchBenchModelWrapper(BenchmarkCase):
def __init__(self, name, model, device):
self.model = model('train', device)
self.name_ = f'{name}_{device}'
def name(self):
return self.name_
def run(self):
return self.model.train()
# functorch user benchmark
# ------------------------
# This userbenchmark is used for regression testing of:
# - microbenchmarks,
# - low-quality models that shouldn't go into torchbenchmark
# - pieces of models where we do not have access to the full model.
# - models in torchbenchmark that have not yet made it to a release branch
# (and therefore are not being tracked for regressions).
#
# When adding a functorch-related benchmark, please prefer finding a high-quality
# model that uses the benchmark and adding it to the torchbenchmark suite.
# There is better infra support there and other folks use those models
# for cross-cutting tests.
benchmark_cases = [
# [models from torchbench that haven't made it to stable yet]
lambda: TorchBenchModelWrapper('lennard_jones', LJModel, 'cpu'),
lambda: TorchBenchModelWrapper('lennard_jones', LJModel, 'cuda'),
lambda: TorchBenchModelWrapper('functorch_maml_omniglot', FTMamlOmniglot, 'cpu'),
lambda: TorchBenchModelWrapper('functorch_maml_omniglot', FTMamlOmniglot, 'cuda'),
lambda: TorchBenchModelWrapper('functorch_dp_cifar10', FTDPCifar10, 'cuda'),
# end [models from torchbench that haven't made it to stable yet]
VmapHessianFC,
# [combinations from functorch tutorials]
lambda: VmapWrapper(SimpleMLP, 'cpu'),
lambda: VmapWrapper(SimpleMLP, 'cuda'),
lambda: EnsembleMultiWrapper(SimpleMLP, 'cpu'),
lambda: EnsembleMultiWrapper(SimpleMLP, 'cuda'),
lambda: EnsembleMultiWrapper(SimpleCNN, 'cuda'),
lambda: EnsembleSingleWrapper(SimpleMLP, 'cpu'),
lambda: EnsembleSingleWrapper(SimpleMLP, 'cuda'),
lambda: EnsembleSingleWrapper(SimpleCNN, 'cuda'),
lambda: PerSampleGradWrapper(SimpleMLP, 'cpu'),
lambda: PerSampleGradWrapper(SimpleMLP, 'cuda'),
lambda: PerSampleGradWrapper(SimpleCNN, 'cuda'),
# end [combinations from functorch tutorials]
]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from functorch import vmap, grad, combine_state_for_ensemble, make_functional_with_buffers
import functools
from .util import BenchmarkCase
class SimpleMLP(nn.Module):
def __init__(self):
super(SimpleMLP, self).__init__()
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 10)
def forward(self, x):
x = x.flatten(1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
@classmethod
def make_input(cls, bs=None):
shape = [64, 1, 28, 28]
if bs is None:
return torch.randn(*shape)
return torch.randn(bs, *shape)
@classmethod
def make_target(cls, bs=None):
shape = [64]
if bs is None:
return torch.randint(10, shape)
        return torch.randint(10, [bs] + shape)  # class-index targets, matching the bs=None branch
class SimpleCNN(nn.Module):
def __init__(self):
super(SimpleCNN, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
output = x
return output
@classmethod
def make_input(cls, bs=None):
shape = [64, 1, 28, 28]
if bs is None:
return torch.randn(*shape)
return torch.randn(bs, *shape)
@classmethod
def make_target(cls, bs=None):
shape = [64]
if bs is None:
return torch.randint(10, shape)
        return torch.randint(10, [bs] + shape)  # class-index targets, matching the bs=None branch
class VmapWrapper(BenchmarkCase):
def __init__(self, model_cls, device):
self.name_ = f'{model_cls.__name__}_vmap_{device}'
self.model = model_cls().to(device)
self.inputs = model_cls.make_input().to(device)
def name(self):
return self.name_
def run(self):
vmap(self.model)(self.inputs)
def ensemble_setup(self, model_cls, device):
num_models = 10
models = [model_cls().to(device) for _ in range(num_models)]
fmodel, params, buffers = combine_state_for_ensemble(models)
self.fmodel = fmodel
self.params = params
self.buffers = buffers
self.inputs = model_cls.make_input(num_models).to(device)
class EnsembleMultiWrapper(BenchmarkCase):
def __init__(self, model_cls, device):
self.name_ = f'{model_cls.__name__}_ensemble_multi_{device}'
ensemble_setup(self, model_cls, device)
def name(self):
return self.name_
def run(self):
vmap(self.fmodel)(self.params, self.buffers, self.inputs)
class EnsembleSingleWrapper(BenchmarkCase):
def __init__(self, model_cls, device):
self.name_ = f'{model_cls.__name__}_ensemble_single_{device}'
ensemble_setup(self, model_cls, device)
self.inputs = self.inputs[0]
def name(self):
return self.name_
def run(self):
vmap(self.fmodel, (0, 0, None))(self.params, self.buffers, self.inputs)
def loss_fn(predictions, targets):
return F.nll_loss(predictions, targets)
def compute_loss(fmodel, params, buffers, sample, target):
sample = sample.unsqueeze(0) # prepend batch dimension for processing
target = target.unsqueeze(0)
prediction = fmodel(params, buffers, sample)
return loss_fn(prediction, target)
class PerSampleGradWrapper(BenchmarkCase):
def __init__(self, model_cls, device):
self.name_ = f'{model_cls.__name__}_persamplegrad_{device}'
model = model_cls().to(device)
self.model = make_functional_with_buffers(model)
self.inputs = model_cls.make_input().to(device)
self.targets = model_cls.make_target().to(device)
def name(self):
return self.name_
def run(self):
fmodel, params, buffers = self.model
loss = functools.partial(compute_loss, fmodel)
vmap(grad(loss), (None, None, 0, 0))(params, buffers, self.inputs, self.targets)
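# The call above follows the functorch per-sample-gradient recipe:
# grad(loss) differentiates the loss w.r.t. params for a single (sample, target)
# pair, and vmap with in_dims=(None, None, 0, 0) maps that computation over the
# batch dimension of inputs/targets while broadcasting params and buffers,
# yielding one gradient per sample instead of a single aggregated batch gradient.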
|
import argparse
import traceback
import torch
import numpy as np
import json
import os
import time
from datetime import datetime
from typing import List, Union
from torchbenchmark.util.experiment.instantiator import (
TorchBenchModelConfig,
load_model_isolated,
list_models,
)
from torchbenchmark import (
ModelTask,
load_canary_model_by_name,
load_model_by_name,
ModelNotFoundError,
)
from torchbenchmark.util.model import BenchmarkModel
def cli(args: List[str]):
"""Parse input arguments, extracting model specification and batch size"""
    arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"--model",
help="Full or partial name of a model to run. If partial, picks the first match.",
default="",
type=str,
)
arg_parser.add_argument(
"--bs",
help="Input batch size to test.",
default=1,
type=int,
)
arg_parser.add_argument(
"--num_warmup",
help="Number of inference warmup iterations.",
default=10,
type=int,
)
arg_parser.add_argument(
"--num_iter",
help="Number of inference iterations for benchmarking.",
default=100,
type=int,
)
    parsed_args, unknown = arg_parser.parse_known_args(args)
return vars(parsed_args), unknown
def save_metrics(metrics):
"""Save metrics to a JSON file with formatted filename"""
metrics_json = {
"name": "torch_trt",
"environ": {
"metrics_version": "v0.1",
"pytorch_git_version": torch.version.git_version,
},
"metrics": metrics,
}
# Obtain target save directory for JSON metrics from current save directory
current_dir = os.path.dirname(os.path.abspath(__file__))
target_dir = os.path.normpath(
os.path.join(current_dir, "../../.userbenchmark/torch_trt/")
)
os.makedirs(target_dir, exist_ok=True)
# Format filename and path to save metrics
metrics_file = "metrics-{}.json".format(
datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
)
metrics_save_path = os.path.join(target_dir, metrics_file)
with open(metrics_save_path, "w") as f:
json.dump(metrics_json, f, indent=4)
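# Example filename produced above (timestamp is illustrative):
#   .userbenchmark/torch_trt/metrics-20240101120000.json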
def run_single_model(
model: Union[BenchmarkModel, ModelTask],
selected_ir: str,
num_warmup: int,
num_iter: int,
):
"""Run inference benchmarking on a single model"""
# Get basic metrics for the model
metrics = run_one_step(model.invoke, model, num_warmup, num_iter, selected_ir)
# Get PT2 compilation time for the model
try:
if isinstance(model, ModelTask):
pt2_compilation_time = model.get_model_attribute("pt2_compilation_time")
name = model.get_model_attribute("name")
batch_size = model.get_model_attribute("batch_size")
precision = model.get_model_attribute("dargs", "precision")
else:
pt2_compilation_time = getattr(model, "pt2_compilation_time", None)
name = getattr(model, "name", None)
batch_size = getattr(model, "batch_size", None)
precision = getattr(model, "precision", None)
if pt2_compilation_time is not None and pt2_compilation_time:
metrics[
f"{name}.bs_{batch_size}.precision_{precision}."
+ f"ir_{selected_ir}.pt2_compilation_time"
] = pt2_compilation_time
    except Exception:
        # compilation-time metrics are best-effort; ignore models that do not expose them
        pass
return metrics
def run_one_step(
func,
model: Union[BenchmarkModel, ModelTask],
num_warmup: int,
num_iter: int,
selected_ir: str,
):
"""Run one step of inference benchmarking on a single model"""
# Warmup model inference
for _ in range(num_warmup):
func()
result_summary = []
# Run inference for the specified number of iterations
for _ in range(num_iter):
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
# Collect time_ns() instead of time() which does not provide better precision than 1
# second according to https://docs.python.org/3/library/time.html#time.time.
t0 = time.time_ns()
start_event.record()
func()
end_event.record()
torch.cuda.synchronize()
t1 = time.time_ns()
result_summary.append(
(start_event.elapsed_time(end_event), (t1 - t0) / 1_000_000)
)
# Get median times for GPU and CPU Walltime
gpu_time = np.median(list(map(lambda x: x[0], result_summary)))
cpu_walltime = np.median(list(map(lambda x: x[1], result_summary)))
# Differentiate model attribute access based on input type
if isinstance(model, ModelTask):
num_batches = model.get_model_attribute("NUM_BATCHES")
name = model.get_model_attribute("name")
batch_size = model.get_model_attribute("batch_size")
precision = model.get_model_attribute("dargs", "precision")
else:
num_batches = getattr(model, "NUM_BATCHES", None)
name = getattr(model, "name", None)
batch_size = getattr(model, "batch_size", None)
precision = getattr(model, "precision", None)
if num_batches is not None:
median_gpu_time_per_batch = gpu_time / num_batches
median_cpu_walltime_per_batch = cpu_walltime / num_batches
else:
median_gpu_time_per_batch = gpu_time
median_cpu_walltime_per_batch = cpu_walltime
# Store metrics as dictionary
metrics = {
f"{name}.bs_{batch_size}.precision_{precision}."
+ f"ir_{selected_ir}.median_gpu_time_ms_per_batch": median_gpu_time_per_batch,
f"{name}.bs_{batch_size}.precision_{precision}."
+ f"ir_{selected_ir}.median_cpu_walltime_ms_per_batch": median_cpu_walltime_per_batch,
}
return metrics
def run(args: List[str]):
"""Run inference and extract requested metrics"""
parsed_args, unknown_args = cli(args)
# Attempt to extract specified IR for logging purposes
try:
ir_idx = unknown_args.index("--ir")
selected_ir = unknown_args[ir_idx + 1]
except (ValueError, IndexError):
selected_ir = "default"
# Parse model string if specified, otherwise run all models
# Adapted from benchmark/run.py
if parsed_args["model"]:
        Model = None
        try:
            Model = load_model_by_name(parsed_args["model"])
except ModuleNotFoundError:
traceback.print_exc()
exit(-1)
except ModelNotFoundError:
print(
f"Warning: The model {parsed_args['model']} cannot be found at core set."
)
if not Model:
try:
Model = load_canary_model_by_name(parsed_args["model"])
except ModuleNotFoundError:
traceback.print_exc()
exit(-1)
except ModelNotFoundError:
print(
f"Error: The model {parsed_args['model']} cannot be found at either core or canary model set."
)
exit(-1)
# For single models, use a BenchmarkModel instance
model = Model(
device="cuda",
test="eval",
batch_size=parsed_args["bs"],
extra_args=[
"--backend",
]
+ unknown_args,
)
all_metrics = run_single_model(
model,
selected_ir,
parsed_args["num_warmup"],
parsed_args["num_iter"],
)
else:
all_metrics = {}
# For all models, use ModelTask instances
for model_name in list_models():
config = TorchBenchModelConfig(
name=model_name,
test="eval",
device="cuda",
batch_size=parsed_args["bs"],
extra_args=[
"--backend",
]
+ unknown_args,
)
try:
Model = load_model_isolated(config=config)
except ValueError as e:
print(
f"Loading model {model_name} failed with:\n{e}\nSkipping the model."
)
continue
metrics = run_single_model(
Model,
selected_ir,
parsed_args["num_warmup"],
parsed_args["num_iter"],
)
all_metrics = {**all_metrics, **metrics}
# Delete model instance and clean up workspace
del Model
save_metrics(all_metrics)
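# Illustrative example (hypothetical model name and values) of the metric keys produced by
# run_one_step and persisted via save_metrics:
#   "resnet50.bs_8.precision_fp32.ir_default.median_gpu_time_ms_per_batch": 12.3
#   "resnet50.bs_8.precision_fp32.ir_default.median_cpu_walltime_ms_per_batch": 13.1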
|
import torch
import argparse
import json
import os
import time
import torch.utils.jit.log_extract as log_extract
from datetime import datetime
from typing import Any, List
def parse_fusers(extra_args: List[str]):
parser = argparse.ArgumentParser()
    parser.add_argument(
        "--fusers",
        nargs="*",
        default=[],
        choices=["no_fuser", "fuser1", "fuser2", "nnc-static", "nnc-dynamic", "nvfuser"],
        help="List of fusers to run tests on")
    parser.add_argument("--filters", nargs="*", default=[], help="List of fuser microbenchmarks to test")
    parser.add_argument("--output", help="Specify the output file name")
args = parser.parse_args(extra_args)
return args
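# Usage sketch (illustrative): parse_fusers consumes the extra CLI args forwarded by the
# userbenchmark driver, e.g.
#   args = parse_fusers(["--fusers", "no_fuser", "fuser2", "--filters", "autogen-0"])
#   args.fusers  -> ["no_fuser", "fuser2"]
#   args.filters -> ["autogen-0"]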
class NVFuserBenchmark:
def __init__(self, name, ir, warmup_runs=10, test_runs=20):
self.name = name
self.ir = ir
self.warmup_runs = warmup_runs
self.test_runs = test_runs
def run_test(self, inputs, fuser_name: str) -> float:
if fuser_name == "no_fuser":
return log_extract.run_baseline_no_fusion(self.ir, inputs)
elif fuser_name == "nnc-static":
return log_extract.run_nnc(self.ir, inputs, dynamic=False)
elif fuser_name == "nnc-dynamic" or fuser_name == "fuser1":
return log_extract.run_nnc(self.ir, inputs, dynamic=True)
elif fuser_name == "fuser2" or fuser_name == "nvfuser":
return log_extract.run_nvfuser(self.ir, inputs)
        raise ValueError(f"Unknown fuser: {fuser_name}")
def get_inputs(self) -> List[Any]:
_, inputs = log_extract.load_graph_and_inputs(self.ir)
return inputs
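# Usage sketch (illustrative; mirrors what run_nvfuser_microbenchmarks does below for a
# single benchmark/fuser pair):
#   from userbenchmark.nvfuser.ir import ir_list
#   name, ir = ir_list[0]
#   bench = NVFuserBenchmark(name, ir)
#   runtime_ms = bench.run_test(bench.get_inputs(), "no_fuser")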
def dump_metrics(metrics, output_name):
output = {
"name": "nvfuser",
"environ": {"pytorch_git_version": torch.version.git_version},
"metrics": metrics,
}
current_dir = os.path.dirname(os.path.abspath(__file__))
target_dir = os.path.normpath(os.path.join(current_dir, "../../.userbenchmark/nvfuser/"))
os.makedirs(target_dir, exist_ok=True)
fname = "metrics-{}.json".format(datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S"))
full_fname = os.path.join(target_dir, fname)
if output_name is not None:
full_fname = output_name
with open(full_fname, 'w') as f:
json.dump(output, f, indent=4)
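# The dumped JSON has the following shape (values are illustrative placeholders); metric
# keys follow the "<fuser>:<benchmark name>" convention used in run_nvfuser_microbenchmarks:
# {
#     "name": "nvfuser",
#     "environ": {"pytorch_git_version": "<git sha>"},
#     "metrics": {
#         "no_fuser:autogen-0": 0.456,
#         "nvfuser:autogen-0": 0.123
#     }
# }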
def run_nvfuser_microbenchmarks(extra_args: List[str]):
from userbenchmark.nvfuser.ir import ir_list
benchmarks = [NVFuserBenchmark(name, ir) for name, ir in ir_list]
args = parse_fusers(extra_args)
filters, fusers = args.filters, args.fusers
if len(filters) > 0:
benchmarks = [x for x in benchmarks if x.name in filters]
if len(fusers) == 0:
fusers = ["no_fuser", "nnc-static", "nnc-dynamic", "nvfuser"]
metrics = {}
for b in benchmarks:
outputs = []
for fuser in fusers:
inputs = b.get_inputs()
runtime = b.run_test(inputs, fuser)
outputs.append((fuser, runtime))
metrics[f"{fuser}:{b.name}"] = runtime
print(f"{b.name}:", "; ".join(f"{name} = {time:.3f} ms" for name, time in outputs))
dump_metrics(metrics, args.output)
def run(args: List[str]):
run_nvfuser_microbenchmarks(extra_args=args)
|
# contains the list of microbenchmark TorchScript IR strings
# format: list of tuples (name, IR)
ir_list = [("autogen-0", """graph(%0 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(4096, 512, strides=[512, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : int[] = prim::Constant[value=[4096, 512]]()
%4 : int[] = prim::Constant[value=[1, 4096, 512]]()
%5 : Float(1, 4096, 512, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %4)
%6 : Float(1, 4096, 512, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%5, %0, %2)
%7 : Float(4096, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%6, %3)
%8 : Float(1, 4096, 512, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %4)
%9 : Float(1, 4096, 512, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0) = aten::relu(%8)
%10 : Float(4096, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %3)
return (%10)
"""), ("autogen-1", """graph(%0 : Float(1, 12, 4096, 64, strides=[3145728, 64, 768, 1], requires_grad=0, device=cuda:0),
%1 : Float(requires_grad=0, device=cuda:0)):
%2 : int[] = prim::Constant[value=[1, 12, 64, 64, 64]]()
%3 : Float(1, 12, 4096, 64, strides=[3145728, 64, 768, 1], requires_grad=0, device=cuda:0) = aten::div(%0, %1)
%4 : Float(1, 12, 64, 64, 64, strides=[768, 64, 49152, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %2)
return (%4)
"""), ("autogen-2", """graph(%0 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0),
%1 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0),
%6 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%10 : int):
%11 : float = prim::Constant[value=1.0000000000000001e-05]()
%12 : float = prim::Constant[value=0.10000000000000001]()
%13 : bool = prim::Constant[value=0]()
%14 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0), %15 : Tensor, %16 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %13, %12, %11)
%17 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0), %18 : Tensor, %19 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %13, %12, %11)
%20 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0) = aten::add(%17, %14, %10)
%21 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0) = aten::relu(%20)
return (%21)
"""), ("autogen-3", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%7 : Double(requires_grad=0, device=cuda:0),
%8 : Double(requires_grad=0, device=cuda:0),
%9 : Double(requires_grad=0, device=cuda:0),
%10 : Double(requires_grad=0, device=cuda:0),
%11 : Double(requires_grad=0, device=cuda:0),
%12 : Double(requires_grad=0, device=cuda:0),
%13 : Double(requires_grad=0, device=cuda:0),
%14 : Double(requires_grad=0, device=cuda:0),
%15 : Double(requires_grad=0, device=cuda:0),
%16 : Double(requires_grad=0, device=cuda:0),
%17 : Double(requires_grad=0, device=cuda:0),
%18 : Double(requires_grad=0, device=cuda:0),
%19 : Double(requires_grad=0, device=cuda:0),
%20 : Double(requires_grad=0, device=cuda:0),
%21 : Double(requires_grad=0, device=cuda:0),
%22 : Double(requires_grad=0, device=cuda:0),
%23 : Double(1, 1, 26, strides=[26, 26, 1], requires_grad=0, device=cuda:0),
%24 : Double(requires_grad=0, device=cuda:0),
%25 : Double(requires_grad=0, device=cuda:0),
%26 : Double(requires_grad=0, device=cuda:0),
%27 : int,
%28 : int,
%29 : int,
%30 : int,
%31 : int,
%32 : int,
%33 : int,
%34 : int,
%35 : int,
%36 : int,
%37 : int,
%38 : int,
%39 : int,
%40 : int,
%41 : int,
%42 : int,
%43 : int,
%44 : int,
%45 : int,
%46 : int,
%47 : int,
%48 : int,
%49 : int,
%50 : int,
%51 : int,
%52 : int,
%53 : int,
%54 : int,
%55 : int,
%56 : int,
%57 : int,
%58 : int,
%59 : int,
%60 : int,
%61 : int,
%62 : int,
%63 : int,
%64 : int,
%65 : int,
%66 : int):
%67 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %16)
%68 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%67, %12)
%69 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %26)
%70 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %25)
%71 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%70, %10, %66)
%72 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %71)
%73 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%72, %24, %65)
%74 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%73, %69, %64)
%75 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%74, %23)
%76 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %22)
%77 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%76, %9, %63)
%78 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %77)
%79 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%78, %8, %62)
%80 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %79)
%81 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%80, %21, %61)
%82 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sqrt(%6)
%83 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%82, %81)
%84 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %20)
%85 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%84, %7, %60)
%86 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %85)
%87 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%86, %19, %59)
%88 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%87, %83, %58)
%89 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %88)
%90 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %18)
%91 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%90, %5, %57)
%92 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %91)
%93 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%92, %3, %56)
%94 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %93)
%95 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%94, %17, %55)
%96 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%95, %89, %54)
%97 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%96, %74)
%98 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %16)
%99 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%98, %15, %53)
%100 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %99)
%101 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%100, %12)
%102 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %14)
%103 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%102, %13, %52)
%104 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %103)
%105 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%104, %12)
%106 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%105, %11, %51)
%107 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%106, %101, %50)
%108 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::pow(%107, %49)
%109 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%108, %97, %48)
%110 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sqrt(%109)
%111 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%105, %11, %47)
%112 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%111, %101, %46)
%113 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%112, %110, %45)
%114 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%113, %75, %44)
%115 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::reciprocal(%114)
%116 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%115, %2)
%117 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%105, %11, %43)
%118 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%117, %101, %42)
%119 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%118, %110, %41)
%120 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::reciprocal(%119)
%121 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%120, %2)
%122 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%110, %121)
%123 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%122, %116)
%124 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%75, %0)
%125 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%124, %123)
%126 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%125, %2, %40)
%127 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%70, %0)
%128 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%127, %10, %39)
%129 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%76, %0)
%130 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%129, %9, %38)
%131 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %130)
%132 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%78, %8, %37)
%133 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%132, %131, %36)
%134 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%82, %133)
%135 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%84, %0)
%136 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%135, %7, %35)
%137 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%136, %134, %34)
%138 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %137)
%139 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%90, %0)
%140 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%139, %5, %33)
%141 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %140)
%142 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%92, %3, %32)
%143 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%142, %141, %31)
%144 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%143, %138, %30)
%145 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%144, %74)
%146 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%102, %2)
%147 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%146, %1, %29)
%148 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%147, %68, %28)
%149 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%107, %0)
%150 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%149, %148)
%151 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%150, %145, %27)
return (%151, %148, %146, %144, %128, %126, %123, %121, %119, %116, %114, %110, %109, %108, %107, %104, %102, %100, %96, %82, %75, %74, %68, %67)
"""), ("batchnorm-silu", """graph(%0 : Float(32, 480, 14, 14, strides=[94080, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(480, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(480, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(480, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(480, strides=[1], requires_grad=0, device=cuda:0)):
%5 : float = prim::Constant[value=1.0000000000000001e-05]()
%6 : float = prim::Constant[value=0.10000000000000001]()
%7 : bool = prim::Constant[value=0]()
%8 : Float(32, 480, 14, 14, strides=[94080, 196, 14, 1], requires_grad=0, device=cuda:0), %9 : Tensor, %10 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %7, %6, %5)
%11 : Float(32, 480, 14, 14, strides=[94080, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::silu(%8)
return (%11)
"""), ("autogen-4", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : float = prim::Constant[value=9.9999999999999998e-13]()
%8 : int[] = prim::Constant[value=[768]]()
%9 : int[] = prim::Constant[value=[4096, 768]]()
%10 : int[] = prim::Constant[value=[1, 4096, 768]]()
%11 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %10)
%12 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %3, %6)
%13 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %9)
%14 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%14, %2, %5)
%16 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0), %17 : Tensor, %18 : Tensor = aten::native_layer_norm(%15, %8, %0, %1, %7)
%19 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %9)
return (%19)
"""), ("autogen-5", """graph(%0 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0),
%1 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0),
%6 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%10 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0),
%11 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%12 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%13 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%14 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%15 : int,
%16 : int):
%17 : float = prim::Constant[value=1.0000000000000001e-05]()
%18 : float = prim::Constant[value=0.10000000000000001]()
%19 : bool = prim::Constant[value=0]()
%20 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0), %21 : Tensor, %22 : Tensor = aten::native_batch_norm(%10, %11, %12, %13, %14, %19, %18, %17)
%23 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0), %24 : Tensor, %25 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %19, %18, %17)
%26 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0), %27 : Tensor, %28 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %19, %18, %17)
%29 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::add(%26, %23, %16)
%30 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::add(%29, %20, %15)
return (%30)
"""), ("autogen-6", """graph(%0 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : Double(requires_grad=0, device=cuda:0),
%7 : Double(requires_grad=0, device=cuda:0),
%8 : Double(requires_grad=0, device=cuda:0),
%9 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%10 : Double(requires_grad=0, device=cuda:0),
%11 : Double(requires_grad=0, device=cuda:0),
%12 : Double(requires_grad=0, device=cuda:0),
%13 : Double(requires_grad=0, device=cuda:0),
%14 : Double(requires_grad=0, device=cuda:0),
%15 : Double(requires_grad=0, device=cuda:0),
%16 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%17 : Double(requires_grad=0, device=cuda:0),
%18 : Double(requires_grad=0, device=cuda:0),
%19 : Double(requires_grad=0, device=cuda:0),
%20 : Double(requires_grad=0, device=cuda:0),
%21 : Double(requires_grad=0, device=cuda:0),
%22 : Double(requires_grad=0, device=cuda:0),
%23 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%24 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%25 : Double(requires_grad=0, device=cuda:0),
%26 : Double(requires_grad=0, device=cuda:0),
%27 : Double(requires_grad=0, device=cuda:0),
%28 : Double(requires_grad=0, device=cuda:0),
%29 : Double(requires_grad=0, device=cuda:0),
%30 : Double(requires_grad=0, device=cuda:0),
%31 : Double(requires_grad=0, device=cuda:0),
%32 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%33 : Double(requires_grad=0, device=cuda:0),
%34 : Double(requires_grad=0, device=cuda:0),
%35 : Double(requires_grad=0, device=cuda:0),
%36 : Double(requires_grad=0, device=cuda:0),
%37 : Double(requires_grad=0, device=cuda:0),
%38 : Double(requires_grad=0, device=cuda:0),
%39 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%40 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%41 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%42 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%43 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%44 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%45 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%46 : Double(requires_grad=0, device=cuda:0),
%47 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%48 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%49 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%50 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%51 : Double(1, 1, 26, strides=[26, 26, 1], requires_grad=0, device=cuda:0),
%52 : int,
%53 : int,
%54 : int,
%55 : int,
%56 : int,
%57 : int,
%58 : int,
%59 : int,
%60 : int,
%61 : int,
%62 : int,
%63 : int,
%64 : int,
%65 : int,
%66 : int,
%67 : int,
%68 : int,
%69 : int,
%70 : int,
%71 : int,
%72 : int,
%73 : int,
%74 : int,
%75 : int,
%76 : int,
%77 : int,
%78 : int,
%79 : int,
%80 : int,
%81 : int,
%82 : int,
%83 : int,
%84 : int,
%85 : int,
%86 : int,
%87 : int,
%88 : int,
%89 : int,
%90 : int,
%91 : int,
%92 : int,
%93 : int,
%94 : int):
%95 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%50, %51)
%96 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%24, %50)
%97 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%49, %96, %94)
%98 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::reciprocal(%47)
%99 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%98, %3)
%100 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%99, %97)
%101 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%100, %22)
%102 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%45, %46, %93)
%103 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%102, %44, %92)
%104 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%103, %101, %91)
%105 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%104, %95, %90)
%106 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::pow(%48, %89)
%107 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%42, %47)
%108 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%107, %22)
%109 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%108, %41)
%110 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%109, %106)
%111 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%110, %105)
%112 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%45, %46, %88)
%113 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%112, %44, %87)
%114 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%113, %101, %86)
%115 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::pow(%43, %85)
%116 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%108, %115)
%117 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%116, %40)
%118 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%117, %114)
%119 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%42, %99)
%120 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%119, %41)
%121 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%120, %40)
%122 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%121, %97)
%123 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%95, %22)
%124 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%123, %39)
%125 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%124, %122, %84)
%126 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%125, %118, %83)
%127 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%126, %111, %82)
%128 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::reciprocal(%2)
%129 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%128, %3)
%130 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %38)
%131 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %37)
%132 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%131, %36, %81)
%133 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%132, %130, %80)
%134 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %133)
%135 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%134, %35, %79)
%136 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%135, %22)
%137 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%136, %23)
%138 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %34)
%139 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%138, %33, %78)
%140 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%139, %24)
%141 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%139, %32)
%142 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%141, %31)
%143 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%142, %129)
%144 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%143, %140, %77)
%145 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%144, %137, %76)
%146 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%145, %129)
%147 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %30)
%148 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%147, %29, %75)
%149 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %148)
%150 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %28)
%151 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%150, %27, %74)
%152 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %151)
%153 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%152, %26, %73)
%154 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %153)
%155 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%154, %25, %72)
%156 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%155, %149, %71)
%157 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%156, %146, %70)
%158 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%157, %23)
%159 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%135, %24)
%160 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%23, %129)
%161 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%140, %22)
%162 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%161, %160)
%163 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%162, %159, %69)
%164 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%163, %129)
%165 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %21)
%166 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%165, %20, %68)
%167 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %166)
%168 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%167, %19, %67)
%169 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %168)
%170 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%169, %18, %66)
%171 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %170)
%172 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%171, %17, %65)
%173 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%16, %172)
%174 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %15)
%175 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %14)
%176 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%175, %13, %64)
%177 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %176)
%178 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%177, %12, %63)
%179 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %178)
%180 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%179, %11, %62)
%181 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %180)
%182 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%181, %10, %61)
%183 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%182, %174, %60)
%184 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%183, %173, %59)
%185 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %184)
%186 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %8)
%187 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%186, %7, %58)
%188 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %187)
%189 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%188, %6, %57)
%190 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %189)
%191 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%190, %5, %56)
%192 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %191)
%193 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%192, %3, %55)
%194 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%193, %185, %54)
%195 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%194, %164, %53)
%196 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%195, %2)
%197 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%196, %158, %52)
%198 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%197, %129)
%199 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%198, %1)
%200 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%199, %99)
%201 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%200, %127)
%202 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%201, %0)
return (%202, %198, %197, %195, %190, %188, %186, %179, %177, %175, %169, %167, %165, %163, %161, %160, %157, %152, %150, %145, %142, %139, %136, %135, %134, %131, %130, %129, %99, %97, %95)
"""), ("autogen-7", """graph(%0 : Float(8, 197, 6, 64, strides=[75648, 64, 12608, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[1576, 384]]()
%2 : int[] = prim::Constant[value=[8, 197, 384]]()
%3 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%4 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %1)
return (%4)
"""), ("autogen-8", """graph(%0 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%1 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%2 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%3 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%4 : Double(requires_grad=0, device=cuda:0),
%5 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0)):
%6 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::log(%5)
%7 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%3, %4)
%8 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%7, %2)
%9 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%8, %1)
%10 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %6)
%11 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%10, %0)
return (%11, %6)
"""), ("autogen-9", """graph(%0 : Float(1, 12, 1, 64, 64, strides=[768, 64, 49152, 768, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[1, 12, 64, 64, 1, 1]]()
%2 : int[] = prim::Constant[value=[1, 12, 64, 64, 1]]()
%3 : int[] = prim::Constant[value=[1, 12, 64, 64]]()
%4 : Float(1, 12, 64, 64, strides=[768, 64, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %3)
%5 : Float(1, 12, 64, 64, 1, strides=[768, 64, 768, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %2)
%6 : Float(1, 12, 64, 64, 1, 1, strides=[768, 64, 768, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %1)
return (%6, %4)
"""), ("autogen-10", """graph(%0 : Long(1, 1, 26, strides=[26, 26, 1], requires_grad=0, device=cuda:0),
%1 : Long(200, 200, strides=[200, 1], requires_grad=0, device=cuda:0)):
%2 : int[] = prim::Constant[value=[200, 200, 1]]()
%3 : Long(200, 200, 1, strides=[200, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %2)
%4 : Bool(200, 200, 26, strides=[5200, 26, 1], requires_grad=0, device=cuda:0) = aten::ge(%0, %3)
return (%4)
"""), ("autogen-11", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : int[] = prim::Constant[value=[1, 512, 12, 64]]()
%4 : int[] = prim::Constant[value=[512, 768]]()
%5 : int[] = prim::Constant[value=[1, 512, 768]]()
%6 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %5)
%7 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%6, %0, %2)
%8 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %4)
%9 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %5)
%10 : Float(1, 512, 12, 64, strides=[393216, 768, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %3)
return (%10)
"""), ("autogen-12", """graph(%0 : Float(32, 360, 14, 14, strides=[70560, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(32, 360, 1, 1, strides=[360, 1, 1, 1], requires_grad=0, device=cuda:0)):
%2 : Float(32, 360, 1, 1, strides=[360, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::sigmoid(%1)
%3 : Float(32, 360, 14, 14, strides=[70560, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::mul(%0, %2)
return (%3)
"""), ("autogen-13", """graph(%0 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(32, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0),
%5 : Float(256, strides=[1], requires_grad=0, device=cuda:0)):
%6 : float = prim::Constant[value=1.0000000000000001e-05]()
%7 : float = prim::Constant[value=0.10000000000000001]()
%8 : bool = prim::Constant[value=0]()
%9 : int[] = prim::Constant[value=[1, 256, 1, 1]]()
%10 : Float(1, 256, 1, 1, strides=[256, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %9)
%11 : Float(32, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0) = aten::div(%4, %10)
%12 : Float(32, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0), %13 : Tensor, %14 : Tensor = aten::native_batch_norm(%11, %0, %1, %2, %3, %8, %7, %6)
return (%12, %13, %14)
"""), ("autogen-14", """graph(%0 : Float(8, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0),
%1 : Float(8, 2048, 2048, strides=[1, 16384, 8], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 1, 2048, 2048, strides=[4194304, 4194304, 2048, 1], requires_grad=0, device=cuda:0),
%5 : Float(1, 1, 1, 2048, strides=[2048, 2048, 2048, 1], requires_grad=0, device=cuda:0),
%6 : int,
%7 : int,
%8 : int):
%9 : bool = prim::Constant[value=0]()
%10 : int = prim::Constant[value=-1]()
%11 : int[] = prim::Constant[value=[8, 2048, 2048]]()
%12 : int[] = prim::Constant[value=[1, 8, 2048, 2048]]()
%13 : Float(1, 1, 2048, 2048, strides=[4194304, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %5)
%14 : Float(1, 1, 2048, 2048, strides=[4194304, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %13, %8)
%15 : Float(1, 1, 2048, 2048, strides=[4194304, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %2)
%16 : Float(1, 8, 2048, 2048, strides=[8, 1, 16384, 8], requires_grad=0, device=cuda:0) = aten::reshape(%1, %12)
%17 : Float(1, 8, 2048, 2048, strides=[8, 1, 16384, 8], requires_grad=0, device=cuda:0) = aten::add(%16, %15, %7)
%18 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %12)
%19 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::add(%18, %17, %6)
%20 : Float(8, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%19, %11)
%21 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%20, %12)
%22 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%21, %10, %9)
return (%22, %17)
"""), ("batchnorm-silu-mean", """graph(%0 : Float(32, 240, 14, 14, strides=[47040, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(240, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(240, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(240, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(240, strides=[1], requires_grad=0, device=cuda:0)):
%5 : NoneType = prim::Constant()
%6 : bool = prim::Constant[value=1]()
%7 : int[] = prim::Constant[value=[2, 3]]()
%8 : float = prim::Constant[value=1.0000000000000001e-05]()
%9 : float = prim::Constant[value=0.10000000000000001]()
%10 : bool = prim::Constant[value=0]()
%11 : Float(32, 240, 14, 14, strides=[47040, 196, 14, 1], requires_grad=0, device=cuda:0), %12 : Tensor, %13 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %10, %9, %8)
%14 : Float(32, 240, 14, 14, strides=[47040, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::silu(%11)
%15 : Float(32, 240, 1, 1, strides=[240, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%14, %7, %6, %5)
return (%15, %14)
"""), ("autogen-15", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : int[] = prim::Constant[value=[512, 768]]()
%4 : int[] = prim::Constant[value=[1, 512, 768]]()
%5 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %4)
%6 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%5, %0, %2)
%7 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%6, %3)
%8 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %4)
%9 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %3)
return (%9, %8)
"""), ("autogen-16", """graph(%0 : Float(1, 1, 512, 512, strides=[262144, 262144, 512, 1], requires_grad=0, device=cuda:0),
%1 : Float(12, 512, 512, strides=[262144, 512, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : bool = prim::Constant[value=0]()
%4 : int = prim::Constant[value=-1]()
%5 : int[] = prim::Constant[value=[12, 512, 512]]()
%6 : int[] = prim::Constant[value=[1, 12, 512, 512]]()
%7 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %6)
%8 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%7, %0, %2)
%9 : Float(12, 512, 512, strides=[262144, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %5)
%10 : Float(12, 512, 512, strides=[262144, 512, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%9, %4, %3)
return (%10)
"""), ("autogen-17", """graph(%0 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0),
%1 : Float(768, 64, 128, strides=[8192, 128, 1], requires_grad=0, device=cuda:0),
%2 : int,
%3 : int,
%4 : int):
%5 : NoneType = prim::Constant()
%6 : bool = prim::Constant[value=1]()
%7 : int[] = prim::Constant[value=[-1]]()
%8 : int[] = prim::Constant[value=[1, 12, 64, 64, 128]]()
%9 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %8)
%10 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%9, %0, %4)
%11 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::exp(%10)
%12 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::sum(%11, %7, %6, %5)
%13 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::log(%12)
%14 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%13, %0, %3)
%15 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%9, %14, %2)
%16 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::exp(%15)
return (%16)
"""), ("autogen-18", """graph(%0 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0),
%3 : Float(1, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0),
%4 : int):
%5 : int[] = prim::Constant[value=[1576, 384]]()
%6 : float = prim::Constant[value=9.9999999999999995e-07]()
%7 : int[] = prim::Constant[value=[384]]()
%8 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %3, %4)
%9 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0), %10 : Tensor, %11 : Tensor = aten::native_layer_norm(%8, %7, %0, %1, %6)
%12 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %5)
return (%12, %8)
"""), ("autogen-19", """graph(%0 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 4096, 256, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0),
%3 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0),
%4 : int):
%5 : int[] = prim::Constant[value=[4096, 256]]()
%6 : float = prim::Constant[value=9.9999999999999998e-13]()
%7 : int[] = prim::Constant[value=[256]]()
%8 : int[] = prim::Constant[value=[1, 4096, 256]]()
%9 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %8)
%10 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %9, %4)
%11 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0), %12 : Tensor, %13 : Tensor = aten::native_layer_norm(%10, %7, %0, %1, %6)
%14 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %5)
return (%14, %10)
"""), ("autogen-20", """graph(%0 : Float(16, 512, 7, 7, strides=[25088, 49, 7, 1], requires_grad=0, device=cuda:0),
%1 : Float(16, 512, 7, 7, strides=[25088, 49, 7, 1], requires_grad=0, device=cuda:0),
%2 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%6 : int):
%7 : int[] = prim::Constant[value=[16, 512]]()
%8 : NoneType = prim::Constant()
%9 : bool = prim::Constant[value=1]()
%10 : int[] = prim::Constant[value=[-1, -2]]()
%11 : float = prim::Constant[value=1.0000000000000001e-05]()
%12 : float = prim::Constant[value=0.10000000000000001]()
%13 : bool = prim::Constant[value=0]()
%14 : Float(16, 512, 7, 7, strides=[25088, 49, 7, 1], requires_grad=0, device=cuda:0), %15 : Tensor, %16 : Tensor = aten::native_batch_norm(%1, %2, %3, %4, %5, %13, %12, %11)
%17 : Float(16, 512, 7, 7, strides=[25088, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::add(%14, %0, %6)
%18 : Float(16, 512, 7, 7, strides=[25088, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::relu(%17)
%19 : Float(16, 512, 1, 1, strides=[512, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%18, %10, %9, %8)
%20 : Float(16, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%19, %7)
return (%20)
"""), ("autogen-21", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0),
%4 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[512, 768]]()
%8 : float = prim::Constant[value=9.9999999999999998e-13]()
%9 : int[] = prim::Constant[value=[768]]()
%10 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%3, %4, %6)
%11 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%10, %2, %5)
%12 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0), %13 : Tensor, %14 : Tensor = aten::native_layer_norm(%11, %9, %0, %1, %8)
%15 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %7)
return (%15, %12)
"""), ("autogen-22", """graph(%0 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%1 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%4 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%5 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%6 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%7 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%8 : Double(requires_grad=0, device=cuda:0),
%9 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%10 : Double(requires_grad=0, device=cuda:0),
%11 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%12 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%13 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%14 : Double(requires_grad=0, device=cuda:0),
%15 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%16 : Double(requires_grad=0, device=cuda:0),
%17 : Double(requires_grad=0, device=cuda:0),
%18 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%19 : int,
%20 : int,
%21 : int,
%22 : int,
%23 : int,
%24 : int,
%25 : int,
%26 : int,
%27 : int):
%28 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::pow(%18, %27)
%29 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::reciprocal(%28)
%30 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%29, %17)
%31 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%15, %16)
%32 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %14)
%33 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%30, %12)
%34 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%0, %7)
%35 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%32, %0)
%36 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%11, %8)
%37 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%36, %10, %26)
%38 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%37, %9, %25)
%39 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%38, %8)
%40 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%39, %4)
%41 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %7)
%42 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%3, %5)
%43 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%3, %4)
%44 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%43, %2)
%45 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%44, %34)
%46 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%35, %45, %24)
%47 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%1, %33)
%48 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%46, %47, %23)
%49 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%48, %31, %22)
%50 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%49, %42, %21)
%51 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%50, %40, %20)
%52 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%51, %41, %19)
%53 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%52, %0)
return (%53, %43, %42, %38, %36, %34, %33, %31, %30)
"""), ("autogen-23", """graph(%0 : Float(32, 2, 256, 28, 28, strides=[401408, 200704, 784, 28, 1], requires_grad=0, device=cuda:0),
%1 : Float(32, 2, 1, 256, strides=[512, 256, 512, 1], requires_grad=0, device=cuda:0)):
%2 : NoneType = prim::Constant()
%3 : int[] = prim::Constant[value=[1]]()
%4 : int[] = prim::Constant[value=[32, 2, 256, 1, 1]]()
%5 : int[] = prim::Constant[value=[32, 512, 1, 1]]()
%6 : int[] = prim::Constant[value=[32, 512]]()
%7 : bool = prim::Constant[value=0]()
%8 : int = prim::Constant[value=1]()
%9 : Float(32, 2, 1, 256, strides=[512, 256, 256, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%1, %8, %7)
%10 : Float(32, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %6)
%11 : Float(32, 512, 1, 1, strides=[512, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%10, %5)
%12 : Float(32, 2, 256, 1, 1, strides=[512, 256, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %4)
%13 : Float(32, 2, 256, 28, 28, strides=[401408, 200704, 784, 28, 1], requires_grad=0, device=cuda:0) = aten::mul(%0, %12)
%14 : Float(32, 256, 28, 28, strides=[200704, 784, 28, 1], requires_grad=0, device=cuda:0) = aten::sum(%13, %3, %7, %2)
return (%14)
"""), ("autogen-24", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1024, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int,
%7 : float):
%8 : int[] = prim::Constant[value=[1024, 3072]]()
%9 : int[] = prim::Constant[value=[1, 1024, 3072]]()
%10 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %9)
%11 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::pow(%10, %7)
%12 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%11, %3)
%13 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%10, %12, %6)
%14 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %2)
%15 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::tanh(%14)
%16 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %1, %5)
%17 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%10, %0)
%18 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%17, %16)
%19 : Float(1024, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%18, %8)
return (%19)
"""), ("autogen-25", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(16, 128, 1, strides=[128, 1, 1], requires_grad=0, device=cuda:0),
%4 : Double(requires_grad=0, device=cuda:0),
%5 : int,
%6 : int,
%7 : int):
%8 : int[] = prim::Constant[value=[2048, 768]]()
%9 : NoneType = prim::Constant()
%10 : bool = prim::Constant[value=1]()
%11 : int[] = prim::Constant[value=[-1]]()
%12 : Float(16, 128, 1, strides=[128, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%3, %4, %7)
%13 : Float(16, 128, 1, strides=[128, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%2, %11, %10, %9)
%14 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %13, %6)
%15 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%1, %14)
%16 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::div(%15, %12)
%17 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %0, %5)
%18 : Float(2048, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%17, %8)
return (%18)
"""), ("autogen-26", """graph(%0 : Float(1, 8, 2048, 2048, strides=[8, 1, 16384, 8], requires_grad=0, device=cuda:0),
%1 : Float(8, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : bool = prim::Constant[value=0]()
%4 : int = prim::Constant[value=-1]()
%5 : int[] = prim::Constant[value=[8, 2048, 2048]]()
%6 : int[] = prim::Constant[value=[1, 8, 2048, 2048]]()
%7 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %6)
%8 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::add(%7, %0, %2)
%9 : Float(8, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %5)
%10 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %6)
%11 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%10, %4, %3)
return (%11)
"""), ("autogen-27", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : float = prim::Constant[value=9.9999999999999998e-13]()
%8 : int[] = prim::Constant[value=[768]]()
%9 : int[] = prim::Constant[value=[512, 768]]()
%10 : int[] = prim::Constant[value=[1, 512, 768]]()
%11 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %10)
%12 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %3, %6)
%13 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %9)
%14 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%14, %2, %5)
%16 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0), %17 : Tensor, %18 : Tensor = aten::native_layer_norm(%15, %8, %0, %1, %7)
%19 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %9)
return (%19, %16)
"""), ("autogen-28", """graph(%0 : Float(128, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(128, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0),
%3 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0),
%4 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[512, 128]]()
%8 : float = prim::Constant[value=9.9999999999999998e-13]()
%9 : int[] = prim::Constant[value=[128]]()
%10 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%3, %4, %6)
%11 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%10, %2, %5)
%12 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0), %13 : Tensor, %14 : Tensor = aten::native_layer_norm(%11, %9, %0, %1, %8)
%15 : Float(512, 128, strides=[128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %7)
return (%15)
"""), ("autogen-29", """graph(%0 : Float(720, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0),
%1 : Float(720, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0),
%2 : Float(1, 12, 60, 64, 64, 1, strides=[64, 245760, 4096, 64, 1, 64], requires_grad=0, device=cuda:0),
%3 : Float(1, 12, 60, 64, 64, 1, strides=[64, 245760, 4096, 64, 1, 64], requires_grad=0, device=cuda:0),
%4 : int,
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[720, 64, 64]]()
%8 : int[] = prim::Constant[value=[1, 12, 60, 64, 64]]()
%9 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %8)
%10 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%2, %8)
%11 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %8)
%12 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %8)
%13 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::add(%12, %11, %6)
%14 : Float(720, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %7)
%15 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%14, %8)
%16 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %10, %5)
%17 : Float(720, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %7)
%18 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%17, %8)
%19 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::add(%18, %9, %4)
%20 : Float(720, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%19, %7)
%21 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%20, %8)
return (%21)
"""), ("autogen-30", """graph(%0 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0),
%3 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0),
%4 : int):
%5 : int[] = prim::Constant[value=[4096, 256]]()
%6 : float = prim::Constant[value=9.9999999999999998e-13]()
%7 : int[] = prim::Constant[value=[256]]()
%8 : int[] = prim::Constant[value=[1, 4096, 256]]()
%9 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %8)
%10 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %9, %4)
%11 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0), %12 : Tensor, %13 : Tensor = aten::native_layer_norm(%10, %7, %0, %1, %6)
%14 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %5)
return (%14)
"""), ("autogen-31", """graph(%0 : Float(1, 64, 64, 256, strides=[1048576, 16384, 256, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[4096, 256]]()
%2 : int[] = prim::Constant[value=[1, 4096, 256]]()
%3 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%4 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %2)
%5 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %1)
return (%5)
"""), ("autogen-32", """graph(%0 : Float(1, 12, 64, 64, 64, strides=[3145728, 262144, 4096, 64, 1], requires_grad=0, device=cuda:0),
%1 : Float(1, 4096, strides=[4096, 1], requires_grad=0, device=cuda:0)):
%2 : int[] = prim::Constant[value=[1, 12, 4096, 64]]()
%3 : int[] = prim::Constant[value=[1, 1, 4096, 1]]()
%4 : Float(1, 1, 4096, 1, strides=[4096, 4096, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %3)
%5 : Float(1, 12, 4096, 64, strides=[3145728, 262144, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%6 : Float(1, 12, 4096, 64, strides=[3145728, 262144, 64, 1], requires_grad=0, device=cuda:0) = aten::mul(%5, %4)
return (%6, %4)
"""), ("autogen-33", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(12, 64, 4096, strides=[262144, 4096, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[12, 64, 4096]]()
%8 : bool = prim::Constant[value=0]()
%9 : int = prim::Constant[value=-1]()
%10 : int[] = prim::Constant[value=[1, 12, 64, 4096]]()
%11 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %4, %6)
%12 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::mul(%11, %2)
%13 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %10)
%14 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %0)
%15 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::add(%14, %12, %5)
%16 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%15, %9, %8)
%17 : Float(12, 64, 4096, strides=[262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %7)
return (%17)
"""), ("autogen-34", """graph(%0 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0),
%3 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : float = prim::Constant[value=9.9999999999999995e-07]()
%8 : int[] = prim::Constant[value=[384]]()
%9 : int[] = prim::Constant[value=[1576, 384]]()
%10 : int[] = prim::Constant[value=[8, 197, 384]]()
%11 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %10)
%12 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %3, %6)
%13 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %9)
%14 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %14, %5)
%16 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0), %17 : Tensor, %18 : Tensor = aten::native_layer_norm(%15, %8, %0, %1, %7)
%19 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %9)
return (%19, %15)
"""), ("autogen-35", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0),
%4 : Float(2048, 512, strides=[512, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int,
%7 : int):
%8 : int[] = prim::Constant[value=[2048, 512]]()
%9 : NoneType = prim::Constant()
%10 : bool = prim::Constant[value=1]()
%11 : int[] = prim::Constant[value=[-1]]()
%12 : int[] = prim::Constant[value=[1, 2048, 512]]()
%13 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %12)
%14 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%3, %13, %7)
%15 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::pow(%14, %6)
%16 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%15, %11, %10, %9)
%17 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %2, %5)
%18 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::rsqrt(%17)
%19 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %18)
%20 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%1, %19)
%21 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%20, %0)
%22 : Float(2048, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%21, %8)
return (%22)
"""), ("autogen-36", """graph(%0 : Float(32, 512, 28, 28, strides=[401408, 784, 28, 1], requires_grad=0, device=cuda:0),
%1 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(512, strides=[1], requires_grad=0, device=cuda:0)):
%5 : bool = prim::Constant[value=1]()
%6 : int[] = prim::Constant[value=[2, 3]]()
%7 : NoneType = prim::Constant()
%8 : int[] = prim::Constant[value=[1]]()
%9 : int[] = prim::Constant[value=[32, 2, 256, 28, 28]]()
%10 : float = prim::Constant[value=1.0000000000000001e-05]()
%11 : float = prim::Constant[value=0.10000000000000001]()
%12 : bool = prim::Constant[value=0]()
%13 : Float(32, 512, 28, 28, strides=[401408, 784, 28, 1], requires_grad=0, device=cuda:0), %14 : Tensor, %15 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %12, %11, %10)
%16 : Float(32, 512, 28, 28, strides=[401408, 784, 28, 1], requires_grad=0, device=cuda:0) = aten::relu(%13)
%17 : Float(32, 2, 256, 28, 28, strides=[401408, 200704, 784, 28, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %9)
%18 : Float(32, 256, 28, 28, strides=[200704, 784, 28, 1], requires_grad=0, device=cuda:0) = aten::sum(%17, %8, %12, %7)
%19 : Float(32, 256, 1, 1, strides=[256, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%18, %6, %5, %7)
return (%19, %17)
"""), ("autogen-37", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(720, 64, 192, strides=[12288, 192, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[1, 12, 60, 64, 192]]()
%8 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %4, %6)
%9 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::mul(%8, %2)
%10 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %7)
%11 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::mul(%10, %0)
%12 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %9, %5)
return (%12)
"""), ("autogen-38", """graph(%0 : Float(1, 4096, 256, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0),
%1 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(256, strides=[1], requires_grad=0, device=cuda:0)):
%3 : int[] = prim::Constant[value=[4096, 256]]()
%4 : float = prim::Constant[value=9.9999999999999998e-13]()
%5 : int[] = prim::Constant[value=[256]]()
%6 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0), %7 : Tensor, %8 : Tensor = aten::native_layer_norm(%0, %5, %1, %2, %4)
%9 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%6, %3)
return (%9)
"""), ("autogen-39", """graph(%0 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0),
%1 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0),
%2 : int,
%3 : int,
%4 : int):
%5 : NoneType = prim::Constant()
%6 : bool = prim::Constant[value=1]()
%7 : int[] = prim::Constant[value=[-1]]()
%8 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%0, %1, %4)
%9 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::exp(%8)
%10 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::sum(%9, %7, %6, %5)
%11 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::log(%10)
%12 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %1, %3)
%13 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%0, %12, %2)
%14 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::exp(%13)
return (%14)
"""), ("autogen-40", """graph(%0 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%6 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%10 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%11 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%12 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%13 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%14 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%15 : int,
%16 : int):
%17 : float = prim::Constant[value=0.001]()
%18 : float = prim::Constant[value=0.01]()
%19 : bool = prim::Constant[value=0]()
%20 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %21 : Tensor, %22 : Tensor = aten::native_batch_norm(%10, %11, %12, %13, %14, %19, %18, %17)
%23 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %24 : Tensor, %25 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %19, %18, %17)
%26 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%23, %20, %16)
%27 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %28 : Tensor, %29 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %19, %18, %17)
%30 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%27, %26, %15)
return (%30)
"""), ("autogen-41", """graph(%0 : Float(12, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[1, 12, 1, 64, 64]]()
%2 : int[] = prim::Constant[value=[12, 64, 64]]()
%3 : int = prim::Constant[value=2]()
%4 : int[] = prim::Constant[value=[1, 12, 64, 64]]()
%5 : Float(1, 12, 64, 64, strides=[49152, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %4)
%6 : Float(1, 12, 1, 64, 64, strides=[49152, 4096, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::unsqueeze(%5, %3)
%7 : Float(1, 12, 64, 64, strides=[49152, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%6, %4)
%8 : Float(12, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %2)
%9 : Float(1, 12, 64, 64, strides=[49152, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %4)
%10 : Float(1, 12, 1, 64, 64, strides=[49152, 4096, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %1)
return (%10)
"""), ("autogen-42", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(3072, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(512, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0),
%6 : int,
%7 : int,
%8 : float,
%9 : int):
%10 : int[] = prim::Constant[value=[512, 3072]]()
%11 : int[] = prim::Constant[value=[1, 512, 3072]]()
%12 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %11)
%13 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%12, %4, %9)
%14 : Float(512, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%14, %11)
%16 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::pow(%15, %8)
%17 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%16, %3)
%18 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %17, %7)
%19 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%18, %2)
%20 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::tanh(%19)
%21 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%20, %1, %6)
%22 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%15, %0)
%23 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%22, %21)
%24 : Float(512, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%23, %10)
return (%24)
"""), ("autogen-43", """graph(%0 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%6 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%10 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%11 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%12 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%13 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%14 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%15 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%16 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%17 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%18 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%19 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%20 : int,
%21 : int,
%22 : int):
%23 : float = prim::Constant[value=0.001]()
%24 : float = prim::Constant[value=0.01]()
%25 : bool = prim::Constant[value=0]()
%26 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %27 : Tensor, %28 : Tensor = aten::native_batch_norm(%15, %16, %17, %18, %19, %25, %24, %23)
%29 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %30 : Tensor, %31 : Tensor = aten::native_batch_norm(%10, %11, %12, %13, %14, %25, %24, %23)
%32 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%29, %26, %22)
%33 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %34 : Tensor, %35 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %25, %24, %23)
%36 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%33, %32, %21)
%37 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %38 : Tensor, %39 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %25, %24, %23)
%40 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%37, %36, %20)
return (%40)
"""), ("autogen-44", """graph(%0 : Float(128, 1024, 7, 7, strides=[50176, 49, 7, 1], requires_grad=0, device=cuda:0),
%1 : Float(1024, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1024, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(1024, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(1024, strides=[1], requires_grad=0, device=cuda:0)):
%5 : NoneType = prim::Constant()
%6 : int[] = prim::Constant[value=[2, 3]]()
%7 : float = prim::Constant[value=1.0000000000000001e-05]()
%8 : float = prim::Constant[value=0.10000000000000001]()
%9 : bool = prim::Constant[value=0]()
%10 : Float(128, 1024, 7, 7, strides=[50176, 49, 7, 1], requires_grad=0, device=cuda:0), %11 : Tensor, %12 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %9, %8, %7)
%13 : Float(128, 1024, 7, 7, strides=[50176, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::relu(%10)
%14 : Float(128, 1024, strides=[1024, 1], requires_grad=0, device=cuda:0) = aten::mean(%13, %6, %9, %5)
return (%14)
"""), ("autogen-45", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Double(requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%8 : int,
%9 : int,
%10 : int):
%11 : float = prim::Constant[value=9.9999999999999998e-13]()
%12 : int[] = prim::Constant[value=[768]]()
%13 : int[] = prim::Constant[value=[4096, 768]]()
%14 : int[] = prim::Constant[value=[1, 4096, 768]]()
%15 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %14)
%16 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %6, %10)
%17 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %13)
%18 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%17, %14)
%19 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%18, %5)
%20 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%19, %18)
%21 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%20, %3, %9)
%22 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%18, %4)
%23 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%22, %21)
%24 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::tanh(%23)
%25 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%24, %3, %8)
%26 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%18, %2)
%27 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%26, %25)
%28 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0), %29 : Tensor, %30 : Tensor = aten::native_layer_norm(%27, %12, %0, %1, %11)
%31 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%28, %13)
return (%31)
"""), ("autogen-46", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(3072, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(4096, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0),
%6 : int,
%7 : int,
%8 : int):
%9 : int[] = prim::Constant[value=[4096, 3072]]()
%10 : int[] = prim::Constant[value=[1, 4096, 3072]]()
%11 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %10)
%12 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %4, %8)
%13 : Float(4096, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %9)
%14 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %3)
%16 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%15, %14)
%17 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %1, %7)
%18 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %2)
%19 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%18, %17)
%20 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::tanh(%19)
%21 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%20, %1, %6)
%22 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %0)
%23 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%22, %21)
%24 : Float(4096, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%23, %9)
return (%24)
"""), ("autogen-47", """graph(%0 : Float(1, 12, 4096, 64, strides=[3145728, 64, 768, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[768, 64, 64]]()
%2 : int[] = prim::Constant[value=[1, 12, 64, 64, 64]]()
%3 : Float(1, 12, 64, 64, 64, strides=[768, 64, 49152, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%4 : Float(768, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %1)
return (%4, %3)
"""), ("autogen-48", """graph(%0 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0),
%3 : Float(2048, 512, strides=[512, 1], requires_grad=0, device=cuda:0),
%4 : int,
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[2048, 512]]()
%8 : NoneType = prim::Constant()
%9 : bool = prim::Constant[value=1]()
%10 : int[] = prim::Constant[value=[-1]]()
%11 : int[] = prim::Constant[value=[1, 2048, 512]]()
%12 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %11)
%13 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %12, %6)
%14 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::pow(%13, %5)
%15 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%14, %10, %9, %8)
%16 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %1, %4)
%17 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::rsqrt(%16)
%18 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %17)
%19 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%0, %18)
%20 : Float(2048, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%19, %7)
return (%20, %13)
"""), ("autogen-49", """graph(%0 : Long(requires_grad=0, device=cuda:0),
%1 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0),
%2 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0),
%3 : int,
%4 : int):
%5 : NoneType = prim::Constant()
%6 : bool = prim::Constant[value=0]()
%7 : int[] = prim::Constant[value=[1]]()
%8 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%1, %2, %4)
%9 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::div(%8, %0)
%10 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::pow(%9, %3)
%11 : Float(96, 128, 128, strides=[16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mean(%10, %7, %6, %5)
%12 : Float(96, 128, strides=[128, 1], requires_grad=0, device=cuda:0) = aten::mean(%11, %7, %6, %5)
%13 : Float(96, strides=[1], requires_grad=0, device=cuda:0) = aten::mean(%12, %7, %6, %5)
return (%13)
"""), ("autogen-50", """graph(%0 : Float(1, 12, 1, 4096, 64, 1, strides=[64, 262144, 64, 64, 1, 64], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[1, 12, 1, 4096, 64]]()
%2 : Float(1, 12, 1, 4096, 64, strides=[3145728, 262144, 262144, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %1)
%3 : Float(1, 12, 1, 4096, 64, strides=[3145728, 262144, 262144, 64, 1], requires_grad=0, device=cuda:0) = aten::neg(%2)
return (%3, %2)
"""), ("autogen-51", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(12, 512, 512, strides=[262144, 512, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 512, strides=[512, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : bool = prim::Constant[value=0]()
%8 : int = prim::Constant[value=-1]()
%9 : int[] = prim::Constant[value=[1, 12, 512, 512]]()
%10 : int[] = prim::Constant[value=[1, 1, 1, 512]]()
%11 : int[] = prim::Constant[value=[1, 1, 512]]()
%12 : Float(1, 1, 512, strides=[512, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %11)
%13 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %10)
%14 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %13, %6)
%15 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %2)
%16 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %9)
%17 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::div(%16, %0)
%18 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%17, %15, %5)
%19 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%18, %8, %7)
return (%19, %15)
"""), ("autogen-52", """graph(%0 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0),
%6 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%10 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0),
%11 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%12 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%13 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%14 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%15 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0),
%16 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%17 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%18 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%19 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%20 : int,
%21 : int,
%22 : int):
%23 : float = prim::Constant[value=1.0000000000000001e-05]()
%24 : float = prim::Constant[value=0.10000000000000001]()
%25 : bool = prim::Constant[value=0]()
%26 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0), %27 : Tensor, %28 : Tensor = aten::native_batch_norm(%15, %16, %17, %18, %19, %25, %24, %23)
%29 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0), %30 : Tensor, %31 : Tensor = aten::native_batch_norm(%10, %11, %12, %13, %14, %25, %24, %23)
%32 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0), %33 : Tensor, %34 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %25, %24, %23)
%35 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0), %36 : Tensor, %37 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %25, %24, %23)
%38 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%35, %32, %22)
%39 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%38, %29, %21)
%40 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%39, %26, %20)
return (%40)
"""), ("autogen-53", """graph(%0 : Float(128, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(128, strides=[1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Double(requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : Float(128, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(512, 128, strides=[128, 1], requires_grad=0, device=cuda:0),
%8 : int,
%9 : int,
%10 : float,
%11 : int):
%12 : float = prim::Constant[value=1.0000000000000001e-05]()
%13 : int[] = prim::Constant[value=[128]]()
%14 : int[] = prim::Constant[value=[512, 128]]()
%15 : int[] = prim::Constant[value=[1, 512, 128]]()
%16 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %15)
%17 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %6, %11)
%18 : Float(512, 128, strides=[128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%17, %14)
%19 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%18, %15)
%20 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::pow(%19, %10)
%21 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%20, %5)
%22 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%19, %21, %9)
%23 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%22, %4)
%24 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::tanh(%23)
%25 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%24, %3, %8)
%26 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%19, %2)
%27 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%26, %25)
%28 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0), %29 : Tensor, %30 : Tensor = aten::native_layer_norm(%27, %13, %0, %1, %12)
%31 : Float(512, 128, strides=[128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%28, %14)
return (%31)
"""), ("autogen-54", """graph(%0 : Float(32, 1000, 13, 13, strides=[169000, 169, 13, 1], requires_grad=0, device=cuda:0)):
%1 : NoneType = prim::Constant()
%2 : bool = prim::Constant[value=1]()
%3 : int[] = prim::Constant[value=[-1, -2]]()
%4 : Float(32, 1000, 13, 13, strides=[169000, 169, 13, 1], requires_grad=0, device=cuda:0) = aten::relu(%0)
%5 : Float(32, 1000, 1, 1, strides=[1000, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%4, %3, %2, %1)
return (%5)
"""), ("autogen-55", """graph(%0 : Float(96, strides=[1], requires_grad=0, device=cuda:0),
%1 : Long(requires_grad=0, device=cuda:0),
%2 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0),
%3 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0),
%4 : Float(96, strides=[1], requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : int,
%7 : int,
%8 : int,
%9 : int):
%10 : NoneType = prim::Constant()
%11 : bool = prim::Constant[value=0]()
%12 : int[] = prim::Constant[value=[1]]()
%13 : Float(96, strides=[1], requires_grad=0, device=cuda:0) = aten::add(%4, %5, %9)
%14 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %3, %8)
%15 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::div(%14, %1)
%16 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::pow(%15, %7)
%17 : Float(96, 128, 128, strides=[16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mean(%16, %12, %11, %10)
%18 : Float(96, 128, strides=[128, 1], requires_grad=0, device=cuda:0) = aten::mean(%17, %12, %11, %10)
%19 : Float(96, strides=[1], requires_grad=0, device=cuda:0) = aten::mean(%18, %12, %11, %10)
%20 : Float(96, strides=[1], requires_grad=0, device=cuda:0) = aten::sub(%0, %19, %6)
%21 : Float(96, strides=[1], requires_grad=0, device=cuda:0) = aten::div(%20, %13)
return (%21)
"""), ("autogen-56", """graph(%0 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0),
%3 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : float = prim::Constant[value=9.9999999999999995e-07]()
%8 : int[] = prim::Constant[value=[384]]()
%9 : int[] = prim::Constant[value=[1576, 384]]()
%10 : int[] = prim::Constant[value=[8, 197, 384]]()
%11 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %10)
%12 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %3, %6)
%13 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %9)
%14 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %14, %5)
%16 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0), %17 : Tensor, %18 : Tensor = aten::native_layer_norm(%15, %8, %0, %1, %7)
return (%16, %17, %18)
"""), ("autogen-57", """graph(%0 : Float(32, 960, 7, 7, strides=[47040, 49, 7, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[32, 960]]()
%2 : NoneType = prim::Constant()
%3 : bool = prim::Constant[value=1]()
%4 : int[] = prim::Constant[value=[-1, -2]]()
%5 : Float(32, 960, 1, 1, strides=[960, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%0, %4, %3, %2)
%6 : Float(32, 960, strides=[960, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %1)
return (%6)
"""), ("autogen-59", """graph(%0 : Long(1, 12, 4096, strides=[49152, 4096, 1], requires_grad=0, device=cuda:0),
%1 : Long(requires_grad=0, device=cuda:0),
%2 : Long(1, 12, 1, 4096, strides=[49152, 4096, 4096, 1], requires_grad=0, device=cuda:0),
%3 : Long(1, 12, 1, 1, strides=[1, 0, 1, 1], requires_grad=0, device=cuda:0),
%4 : int,
%5 : int):
%6 : int[] = prim::Constant[value=[1, 12, 4096]]()
%7 : Long(1, 12, 1, 4096, strides=[49152, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %3, %5)
%8 : Long(1, 12, 4096, strides=[49152, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %6)
%9 : Long(1, 12, 4096, strides=[49152, 4096, 1], requires_grad=0, device=cuda:0) = aten::mul(%8, %1)
%10 : Long(1, 12, 4096, strides=[49152, 4096, 1], requires_grad=0, device=cuda:0) = aten::add(%9, %0, %4)
return (%10)
"""), ("autogen-60", """graph(%0 : Float(requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Float(1, 12, 4096, 64, strides=[3145728, 262144, 64, 1], requires_grad=0, device=cuda:0),
%3 : int,
%4 : int):
%5 : NoneType = prim::Constant()
%6 : bool = prim::Constant[value=1]()
%7 : int[] = prim::Constant[value=[-1]]()
%8 : int[] = prim::Constant[value=[1, 12, 64, 64, 64]]()
%9 : Float(1, 12, 64, 64, 64, strides=[3145728, 262144, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%2, %8)
%10 : Float(1, 12, 64, 64, 64, strides=[3145728, 262144, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::pow(%9, %4)
%11 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%10, %7, %6, %5)
%12 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %1, %3)
%13 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::rsqrt(%12)
%14 : Float(1, 12, 64, 64, 64, strides=[3145728, 262144, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %13)
%15 : Float(1, 12, 64, 64, 64, strides=[3145728, 262144, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %0)
return (%15, %9)
"""), ("autogen-61", """graph(%0 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(2048, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%3 : int,
%4 : int):
%5 : int[] = prim::Constant[value=[2048, 768]]()
%6 : int[] = prim::Constant[value=[16, 128, 768]]()
%7 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%2, %6)
%8 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%7, %1, %4)
%9 : Float(2048, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %5)
%10 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %6)
%11 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%0, %10, %3)
%12 : Float(2048, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %5)
return (%12, %11)
"""), ("autogen-62", """graph(%0 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0),
%1 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0),
%6 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%10 : int):
%11 : int[] = prim::Constant[value=[32, 2048]]()
%12 : NoneType = prim::Constant()
%13 : bool = prim::Constant[value=1]()
%14 : int[] = prim::Constant[value=[-1, -2]]()
%15 : float = prim::Constant[value=1.0000000000000001e-05]()
%16 : float = prim::Constant[value=0.10000000000000001]()
%17 : bool = prim::Constant[value=0]()
%18 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0), %19 : Tensor, %20 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %17, %16, %15)
%21 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0), %22 : Tensor, %23 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %17, %16, %15)
%24 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::add(%21, %18, %10)
%25 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::relu(%24)
%26 : Float(32, 2048, 1, 1, strides=[2048, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%25, %14, %13, %12)
%27 : Float(32, 2048, strides=[2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%26, %11)
return (%27)
"""), ("autogen-63", """graph(%0 : Float(480, 1, 1, 3, strides=[13, 3, 3, 1], requires_grad=0, device=cuda:0),
%1 : Long(requires_grad=0, device=cuda:0),
%2 : Float(480, 1, 64, 2, 64, 2, strides=[16384, 16384, 64, 8192, 1, 4096], requires_grad=0, device=cuda:0),
%3 : int,
%4 : int):
%5 : int[] = prim::Constant[value=[480, 128, 128, 1]]()
%6 : int[] = prim::Constant[value=[480, 128, 128]]()
%7 : int[] = prim::Constant[value=[480, 1, 128, 128]]()
%8 : Float(480, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%2, %7)
%9 : Float(480, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sigmoid(%8)
%10 : Float(480, 128, 128, strides=[16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %6)
%11 : Float(480, 128, 128, strides=[16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%1, %10, %4)
%12 : Float(480, 128, 128, strides=[16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%1, %11, %3)
%13 : Float(480, 128, 128, 1, strides=[16384, 128, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %5)
%14 : Float(480, 128, 128, 3, strides=[49152, 384, 3, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %0)
return (%14, %13)
"""), ("autogen-64", """graph(%0 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%7 : Double(requires_grad=0, device=cuda:0),
%8 : int,
%9 : int,
%10 : int,
%11 : int):
%12 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %7)
%13 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %5)
%14 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%13, %3, %11)
%15 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%2, %14)
%16 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%0, %1, %10)
%17 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %15, %9)
%18 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%17, %12, %8)
return (%18)
"""), ("autogen-65", """graph(%0 : Float(20005, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(2048, 20005, strides=[20005, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : int[] = prim::Constant[value=[2048, 20005]]()
%4 : int[] = prim::Constant[value=[16, 128, 20005]]()
%5 : Float(16, 128, 20005, strides=[2560640, 20005, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %4)
%6 : Float(16, 128, 20005, strides=[2560640, 20005, 1], requires_grad=0, device=cuda:0) = aten::add(%5, %0, %2)
%7 : Float(2048, 20005, strides=[20005, 1], requires_grad=0, device=cuda:0) = aten::reshape(%6, %3)
%8 : Float(16, 128, 20005, strides=[2560640, 20005, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %4)
return (%8)
"""), ("autogen-66", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 1024, 768, strides=[786432, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(1024, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%4 : int):
%5 : int[] = prim::Constant[value=[1024, 768]]()
%6 : float = prim::Constant[value=1.0000000000000001e-05]()
%7 : int[] = prim::Constant[value=[768]]()
%8 : int[] = prim::Constant[value=[1, 1024, 768]]()
%9 : Float(1, 1024, 768, strides=[786432, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %8)
%10 : Float(1, 1024, 768, strides=[786432, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %9, %4)
%11 : Float(1, 1024, 768, strides=[786432, 768, 1], requires_grad=0, device=cuda:0), %12 : Tensor, %13 : Tensor = aten::native_layer_norm(%10, %7, %0, %1, %6)
%14 : Float(1, 1024, 768, strides=[786432, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %8)
%15 : Float(1024, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%14, %5)
return (%15)
"""), ("autogen-67", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(720, 64, 192, strides=[12288, 192, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 60, 64, 1, strides=[3840, 64, 1, 1], requires_grad=0, device=cuda:0),
%5 : Float(1, 60, 1, 192, strides=[11520, 192, 1, 1], requires_grad=0, device=cuda:0),
%6 : int,
%7 : int):
%8 : int[] = prim::Constant[value=[1, 12, 60, 64, 192]]()
%9 : int = prim::Constant[value=1]()
%10 : Float(1, 60, 64, 192, strides=[737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %5)
%11 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::unsqueeze(%10, %9)
%12 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %11, %7)
%13 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::mul(%12, %2)
%14 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %8)
%15 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %0)
%16 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %13, %6)
return (%16, %11)
"""), ("autogen-68", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(1, 512, 768, 1, 1, strides=[768, 768, 1, 768, 768], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[512, 768]]()
%8 : float = prim::Constant[value=9.9999999999999998e-13]()
%9 : int[] = prim::Constant[value=[768]]()
%10 : int[] = prim::Constant[value=[1, 512, 768]]()
%11 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %10)
%12 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %3, %6)
%13 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %12, %5)
%14 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0), %15 : Tensor, %16 : Tensor = aten::native_layer_norm(%13, %9, %0, %1, %8)
%17 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%14, %7)
return (%17, %14)
"""), ("autogen-69", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(12, 64, 4096, strides=[262144, 4096, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 4096, strides=[4096, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[12, 64, 4096]]()
%8 : bool = prim::Constant[value=0]()
%9 : int = prim::Constant[value=-1]()
%10 : int[] = prim::Constant[value=[1, 12, 64, 4096]]()
%11 : int[] = prim::Constant[value=[1, 1, 1, 4096]]()
%12 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %11)
%13 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %12, %6)
%14 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %2)
%15 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %10)
%16 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::mul(%15, %0)
%17 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %14, %5)
%18 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%17, %9, %8)
%19 : Float(12, 64, 4096, strides=[262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%18, %7)
return (%19, %12)
"""), ("autogen-70", """graph(%0 : Long(1, 12, 64, 64, strides=[49152, 4096, 64, 1], requires_grad=0, device=cuda:0),
%1 : Long(1, 12, 64, 128, strides=[98304, 8192, 128, 1], requires_grad=0, device=cuda:0)):
%2 : int[] = prim::Constant[value=[1, 12, 64, 64, 1]]()
%3 : int[] = prim::Constant[value=[1, 12, 64, 1, 128]]()
%4 : Long(1, 12, 64, 1, 128, strides=[98304, 8192, 128, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %3)
%5 : Long(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%6 : Bool(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::ne(%5, %4)
return (%6)
"""), ("autogen-71", """graph(%0 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : int,
%4 : int):
%5 : int[] = prim::Constant[value=[2048, 512]]()
%6 : NoneType = prim::Constant()
%7 : bool = prim::Constant[value=1]()
%8 : int[] = prim::Constant[value=[-1]]()
%9 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::pow(%1, %4)
%10 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%9, %8, %7, %6)
%11 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%10, %2, %3)
%12 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::rsqrt(%11)
%13 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%1, %12)
%14 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%0, %13)
%15 : Float(2048, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%14, %5)
return (%15)
"""), ("autogen-72", """graph(%0 : Long(2232, strides=[1], requires_grad=0, device=cuda:0),
%1 : Long(2232, strides=[1], requires_grad=0, device=cuda:0),
%2 : Long(requires_grad=0, device=cuda:0),
%3 : Long(1, 12, 62, 3, strides=[2232, 186, 3, 1], requires_grad=0, device=cuda:0),
%4 : int,
%5 : int):
%6 : int[] = prim::Constant[value=[2232]]()
%7 : Long(2232, strides=[1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %6)
%8 : Long(2232, strides=[1], requires_grad=0, device=cuda:0) = aten::mul(%1, %2)
%9 : Long(2232, strides=[1], requires_grad=0, device=cuda:0) = aten::add(%7, %8, %5)
%10 : Long(2232, strides=[1], requires_grad=0, device=cuda:0) = aten::add(%7, %0, %4)
return (%10, %9)
"""), ("autogen-73", """graph(%0 : Long(requires_grad=0, device=cuda:0),
%1 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0),
%2 : Long(requires_grad=0, device=cuda:0),
%3 : Float(96, 1, 1, 128, 128, strides=[81920, 16384, 16384, 128, 1], requires_grad=0, device=cuda:0),
%4 : Float(96, 1, 3, 128, 128, strides=[245760, 49152, 1, 384, 3], requires_grad=0, device=cuda:0),
%5 : Float(96, 1, 1, 128, 128, strides=[81920, 16384, 16384, 128, 1], requires_grad=0, device=cuda:0),
%6 : Float(96, 1, 3, 128, 128, strides=[245760, 49152, 1, 384, 3], requires_grad=0, device=cuda:0),
%7 : Float(96, 1, 1, 128, 128, strides=[81920, 16384, 16384, 128, 1], requires_grad=0, device=cuda:0),
%8 : Float(96, 1, 3, 128, 128, strides=[245760, 49152, 1, 384, 3], requires_grad=0, device=cuda:0),
%9 : Float(96, 1, 1, 128, 128, strides=[81920, 16384, 16384, 128, 1], requires_grad=0, device=cuda:0),
%10 : Float(96, 1, 3, 128, 128, strides=[245760, 49152, 1, 384, 3], requires_grad=0, device=cuda:0),
%11 : Float(96, 1, 1, 128, 128, strides=[81920, 16384, 16384, 128, 1], requires_grad=0, device=cuda:0),
%12 : Float(96, 1, 3, 128, 128, strides=[245760, 49152, 1, 384, 3], requires_grad=0, device=cuda:0),
%13 : int,
%14 : int,
%15 : int,
%16 : int,
%17 : int,
%18 : int,
%19 : int,
%20 : int,
%21 : int,
%22 : int):
%23 : int[] = prim::Constant[value=[96, 1, 128, 128]]()
%24 : int[] = prim::Constant[value=[96, 3, 128, 128]]()
%25 : Float(96, 3, 128, 128, strides=[245760, 1, 384, 3], requires_grad=0, device=cuda:0) = aten::reshape(%12, %24)
%26 : Float(96, 1, 128, 128, strides=[81920, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %23)
%27 : Float(96, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %26, %22)
%28 : Float(96, 3, 128, 128, strides=[245760, 1, 384, 3], requires_grad=0, device=cuda:0) = aten::reshape(%10, %24)
%29 : Float(96, 1, 128, 128, strides=[81920, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %23)
%30 : Float(96, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %29, %21)
%31 : Float(96, 3, 128, 128, strides=[245760, 1, 384, 3], requires_grad=0, device=cuda:0) = aten::reshape(%8, %24)
%32 : Float(96, 1, 128, 128, strides=[81920, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %23)
%33 : Float(96, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %32, %20)
%34 : Float(96, 3, 128, 128, strides=[245760, 1, 384, 3], requires_grad=0, device=cuda:0) = aten::reshape(%6, %24)
%35 : Float(96, 1, 128, 128, strides=[81920, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %23)
%36 : Float(96, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %35, %19)
%37 : Float(96, 3, 128, 128, strides=[245760, 1, 384, 3], requires_grad=0, device=cuda:0) = aten::reshape(%4, %24)
%38 : Float(96, 1, 128, 128, strides=[81920, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %23)
%39 : Float(96, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %38, %18)
%40 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::div(%1, %0)
%41 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%40, %39)
%42 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%41, %37, %17)
%43 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%42, %36)
%44 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%43, %34, %16)
%45 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%44, %33)
%46 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%45, %31, %15)
%47 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%46, %30)
%48 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%47, %28, %14)
%49 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%48, %27)
%50 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%49, %25, %13)
%51 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%50, %0)
return (%51)
"""), ("autogen-74", """graph(%0 : Long(200, 200, strides=[204, 1], requires_grad=0, device=cuda:0),
%1 : Long(requires_grad=0, device=cuda:0),
%2 : int,
%3 : int):
%4 : Long(200, 200, strides=[200, 1], requires_grad=0, device=cuda:0) = aten::sub(%0, %1, %3)
%5 : Bool(200, 200, strides=[200, 1], requires_grad=0, device=cuda:0) = aten::ge(%4, %2)
return (%5, %4)
"""), ("autogen-75", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(12, 512, 512, strides=[262144, 512, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : bool = prim::Constant[value=0]()
%8 : int = prim::Constant[value=-1]()
%9 : int[] = prim::Constant[value=[1, 12, 512, 512]]()
%10 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %4, %6)
%11 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%10, %2)
%12 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %9)
%13 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::div(%12, %0)
%14 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%13, %11, %5)
%15 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%14, %8, %7)
return (%15, %11)
"""), ("autogen-76", """graph(%0 : Float(2048, 2048, strides=[2048, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[2048, 2048]]()
%2 : int[] = prim::Constant[value=[1, 2048, 2048]]()
%3 : Float(1, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%4 : Float(1, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::relu(%3)
%5 : Float(2048, 2048, strides=[2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %1)
return (%5)
""")]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from typing import List, Tuple
import click
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
CacheAlgorithm,
EmbeddingLocation,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn, Tensor
logging.basicConfig(level=logging.DEBUG)
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:split_table_batched_embeddings"
)
# pyre-ignore
def benchmark_same_input(iters: int, f, *args) -> float:
"""
Returns average execution time in milliseconds across "iters".
"""
# Warm-up
f(*args)
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(iters):
f(*args)
end_event.record()
torch.cuda.synchronize()
return start_event.elapsed_time(end_event) / iters
# pyre-ignore
def benchmark_different_inputs(f, args) -> float:
"""
    Returns average execution time in milliseconds across the provided inputs.
"""
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for arg in args:
f(arg)
end_event.record()
torch.cuda.synchronize()
return start_event.elapsed_time(end_event) / len(args)
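# A minimal usage sketch for the two timing helpers above (not executed; the
# helper name and tensors are illustrative assumptions, not part of this
# benchmark). benchmark_same_input replays one argument tuple "iters" times,
# while benchmark_different_inputs walks a list of pre-built inputs once each.
def _example_benchmark_usage() -> None:
    xs = [torch.randn(1024, 1024, device="cuda") for _ in range(10)]
    same_ms = benchmark_same_input(100, torch.relu, xs[0])
    diff_ms = benchmark_different_inputs(torch.relu, xs)
    logging.info(f"same-input: {same_ms:.3f} ms, different-inputs: {diff_ms:.3f} ms")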
def get_num_cached_tables(num_tables: int, cached_tables_ratio: float) -> int:
"""
    Determines the number of cached tables from num_tables and cached_tables_ratio.
"""
return round(num_tables * cached_tables_ratio)
def create_table_offsets(
num_tables: int, cached_tables_ratio: float, num_embeddings: int
) -> Tensor:
"""
    Returns the "table size cumsum" tensor describing which tables use UVM caching.
"""
num_cached_tables = get_num_cached_tables(num_tables, cached_tables_ratio)
np_list = np.arange(0, num_embeddings * num_cached_tables, num_embeddings)
num_uncached_tables = num_tables - num_cached_tables
while num_uncached_tables > 0:
added = random.randint(1, num_uncached_tables)
pos = random.randint(0, len(np_list) - 1)
np_list = np.insert(np_list, pos, [np_list[pos]] * added)
num_uncached_tables -= added
cache_hash_size_cumsum: Tensor = torch.tensor(np_list).cuda()
return cache_hash_size_cumsum
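# A minimal sketch of the layout create_table_offsets produces (the helper below
# is illustrative and never called). Cached tables advance the cumsum by
# num_embeddings while uncached tables repeat the previous value, which is
# presumably how downstream caching kernels tell the two kinds apart.
def _example_table_offsets_layout() -> None:
    # With num_tables=4, cached_tables_ratio=0.5, num_embeddings=10 the cached
    # tables contribute [0, 10] and the two uncached tables are inserted as
    # duplicates, e.g. [0, 0, 10, 10] (exact positions are random).
    example = create_table_offsets(
        num_tables=4, cached_tables_ratio=0.5, num_embeddings=10
    )
    logging.info(f"example cache_hash_size_cumsum: {example.tolist()}")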
def create_embedding_specs(
num_tables: int,
cached_tables_ratio: float,
num_embeddings: int,
embedding_dims: int,
) -> List[Tuple[str, int, int, SparseType, EmbeddingLocation]]:
"""
Returns embedding specs to be used with IntNBitTableBatchedEmbeddingBagsCodegen.
"""
num_cached_tables = get_num_cached_tables(num_tables, cached_tables_ratio)
num_uncached_tables = num_tables - num_cached_tables
embedding_specs = []
for _ in range(min(num_cached_tables, num_uncached_tables)):
embedding_specs.append(
(
"",
num_embeddings,
embedding_dims,
SparseType.INT8,
EmbeddingLocation.DEVICE,
)
)
embedding_specs.append(
(
"",
num_embeddings,
embedding_dims,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
)
if num_cached_tables > num_uncached_tables:
for _ in range(num_cached_tables - num_uncached_tables):
embedding_specs.append(
(
"",
num_embeddings,
embedding_dims,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
)
else:
for _ in range(num_uncached_tables - num_cached_tables):
embedding_specs.append(
(
"",
num_embeddings,
embedding_dims,
SparseType.INT8,
EmbeddingLocation.DEVICE,
)
)
return embedding_specs
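# A minimal sketch of what create_embedding_specs returns (illustrative helper,
# not wired into the CLI): DEVICE and MANAGED_CACHING entries are interleaved
# pairwise and the remainder is padded with whichever kind is left over, so the
# cached/uncached mix matches get_num_cached_tables().
def _example_embedding_specs_layout() -> None:
    specs = create_embedding_specs(
        num_tables=4, cached_tables_ratio=0.5, num_embeddings=10, embedding_dims=8
    )
    locations = [location for (_, _, _, _, location) in specs]
    # Expected for 2 cached / 2 uncached tables:
    # [DEVICE, MANAGED_CACHING, DEVICE, MANAGED_CACHING]
    logging.info(f"embedding locations: {locations}")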
def create_request(
num_tables: int, num_embeddings: int, batch: int, avg_pooling_factor: int
) -> Tuple[Tensor, Tensor]:
"""
    Returns (indices, offsets), the inputs to the embedding bag operators.
"""
indices: Tensor = torch.randint(
0, num_embeddings, (num_tables * batch * avg_pooling_factor,), dtype=torch.int32
).cuda()
    # Pooling factors are intentionally diversified among
    # [1, pf / 2, pf, pf * 2, pf * 4, pf * 8], where pf == avg_pooling_factor.
pooling_factors = []
for _ in range(num_tables - 1):
half_avg_pooling_factor = avg_pooling_factor // 2
if half_avg_pooling_factor > 0:
pooling_factors.append(
random.choices(
[
1,
half_avg_pooling_factor,
avg_pooling_factor,
2 * avg_pooling_factor,
4 * avg_pooling_factor,
8 * avg_pooling_factor,
],
weights=[5, 10, 15, 1, 1, 3],
)[0]
)
else:
pooling_factors.append(
random.choices(
[1, avg_pooling_factor, 2 * avg_pooling_factor], weights=[2, 20, 1]
)[0]
)
    # The last pooling factor is whatever remainder is needed to reach
    # num_tables * avg_pooling_factor.
curr_total_pooling_factors = sum(pooling_factors)
pooling_factors.append(num_tables * avg_pooling_factor - curr_total_pooling_factors)
offsets_list = [0]
for pooling_factor in pooling_factors:
        if pooling_factor == 1:
            # Offsets are cumulative across all (table, batch) bags.
            for _ in range(batch):
                offsets_list.append(offsets_list[-1] + pooling_factor)
else:
finish_offset = offsets_list[-1] + pooling_factor * batch
for _ in range(batch - 1):
selected = max(
int(random.gauss(pooling_factor, 0.1 * pooling_factor)), 1
)
last_offset = offsets_list[-1]
offsets_list.append(last_offset + selected)
offsets_list.append(finish_offset)
offsets: Tensor = torch.tensor(offsets_list, dtype=torch.int32).cuda()
return (indices, offsets)
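# A minimal sanity-check sketch (assumed helper, not called by the CLI): the
# offsets returned by create_request are a cumulative sum over all
# num_tables * batch bags, so per-bag pooling factors can be recovered as
# adjacent differences.
def _example_request_shapes() -> None:
    indices, offsets = create_request(
        num_tables=2, num_embeddings=100, batch=4, avg_pooling_factor=10
    )
    lengths = offsets[1:] - offsets[:-1]  # per-bag pooling factors
    assert offsets[0].item() == 0
    assert lengths.numel() == 2 * 4  # one bag per (table, sample) pair
    # The final offset should equal indices.numel() == 2 * 4 * 10.
    logging.info(f"per-bag lengths: {lengths.tolist()}")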
@click.group()
def cli() -> None:
pass
@cli.command()
@click.option("--iters", default=100)
@click.option("--num-tables", default=50)
@click.option("--cached-tables-ratio", default=1.0)
@click.option("--batch", default=100)
@click.option("--avg-pooling-factor", default=100)
def linearize_cache_indices(
iters: int,
num_tables: int,
cached_tables_ratio: float,
batch: int,
avg_pooling_factor: int,
) -> None:
num_embeddings: int = 1000000
cache_hash_size_cumsum = create_table_offsets(
num_tables, cached_tables_ratio, num_embeddings
)
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
t_ms = benchmark_same_input(
iters,
lambda indices, offsets: torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum, indices, offsets
),
indices,
offsets,
)
logging.info(
f"Across {iters} runs, T: {num_tables}, Cached T: {get_num_cached_tables(num_tables, cached_tables_ratio)}, BS: {batch}, {t_ms * 1.0e3:.0f}us"
)
@cli.command()
@click.option("--iters", default=100)
@click.option("--num-tables", default=50)
@click.option("--cached-tables-ratio", default=1.0)
@click.option("--batch", default=100)
@click.option("--avg-pooling-factor", default=100)
@click.option("--cache-load-factor", default=0.2)
def lxu_cache_lookup(
iters: int,
num_tables: int,
cached_tables_ratio: float,
batch: int,
avg_pooling_factor: int,
cache_load_factor: float,
) -> None:
num_embeddings: int = 1000000
embedding_dims: int = 128
embedding_specs = create_embedding_specs(
num_tables, cached_tables_ratio, num_embeddings, embedding_dims
)
tbe: nn.Module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs, cache_load_factor=cache_load_factor
)
tbe.fill_random_weights()
# Imitate execution flow by performing prefetching once.
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
tbe.prefetch(indices, offsets)
linearized_indices = torch.ops.fbgemm.linearize_cache_indices(
tbe.cache_hash_size_cumsum, indices, offsets
)
t_ms = benchmark_same_input(
iters,
lambda linearized_indices, lxu_cache_state: torch.ops.fbgemm.lxu_cache_lookup(
linearized_indices, lxu_cache_state, tbe.total_cache_hash_size
),
linearized_indices,
tbe.lxu_cache_state,
)
# Run once again to obtain cache miss ratio.
locations = torch.ops.fbgemm.lxu_cache_lookup(
linearized_indices, tbe.lxu_cache_state, tbe.total_cache_hash_size
)
num_invalid_accesses = torch.sum(linearized_indices == tbe.total_cache_hash_size)
num_valid_accesses = linearized_indices.numel() - num_invalid_accesses
num_misses = torch.sum(locations == -1) - num_invalid_accesses
logging.info(
f"Across {iters} runs, T: {num_tables}, Cached T: {get_num_cached_tables(num_tables, cached_tables_ratio)}, "
f"BS: {batch}, cache_load_factor: {cache_load_factor}, {t_ms * 1.0e3:.0f}us, "
f"cache miss: {num_misses.item() / num_valid_accesses * 100}%"
)
@cli.command()
@click.option("--iters", default=100)
@click.option("--num-tables", default=50)
@click.option("--cached-tables-ratio", default=1.0)
@click.option("--batch", default=100)
@click.option("--avg-pooling-factor", default=100)
@click.option("--cache-load-factor", default=0.2)
def lru_cache_populate_byte(
iters: int,
num_tables: int,
cached_tables_ratio: float,
batch: int,
avg_pooling_factor: int,
cache_load_factor: float,
) -> None:
num_warm_ups: int = 5
num_embeddings: int = 1000000
embedding_dims: int = 128
embedding_specs = create_embedding_specs(
num_tables, cached_tables_ratio, num_embeddings, embedding_dims
)
cc: nn.Module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs, cache_load_factor=cache_load_factor
)
cc.fill_random_weights()
warm_up_requests = []
for _ in range(num_warm_ups):
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
warm_up_requests.append(
torch.ops.fbgemm.linearize_cache_indices(
cc.cache_hash_size_cumsum, indices, offsets
)
)
requests = []
for _ in range(iters):
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
requests.append(
torch.ops.fbgemm.linearize_cache_indices(
cc.cache_hash_size_cumsum, indices, offsets
)
)
timestep: int = 1
def populate(linear_indices: Tensor) -> None:
nonlocal timestep
torch.ops.fbgemm.lru_cache_populate_byte(
cc.weights_uvm,
cc.cache_hash_size_cumsum,
cc.total_cache_hash_size,
cc.cache_index_table_map,
cc.weights_offsets,
cc.weights_tys,
cc.D_offsets,
linear_indices,
cc.lxu_cache_state,
cc.lxu_cache_weights,
timestep,
cc.lxu_state,
)
timestep += 1
for warm_up_request in warm_up_requests:
populate(warm_up_request)
t_ms = benchmark_different_inputs(
populate,
requests,
)
# Replay to figure out UVM access BW, which would be PCIe bound.
replay_cc: nn.Module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs, cache_load_factor=cache_load_factor
)
replay_cc.fill_random_weights()
replay_timestep: int = 1
def replay_populate(linear_indices: Tensor) -> None:
nonlocal replay_timestep
torch.ops.fbgemm.lru_cache_populate_byte(
replay_cc.weights_uvm,
replay_cc.cache_hash_size_cumsum,
replay_cc.total_cache_hash_size,
replay_cc.cache_index_table_map,
replay_cc.weights_offsets,
replay_cc.weights_tys,
replay_cc.D_offsets,
linear_indices,
replay_cc.lxu_cache_state,
replay_cc.lxu_cache_weights,
replay_timestep,
replay_cc.lxu_state,
)
replay_timestep += 1
for warm_up_request in warm_up_requests:
replay_populate(warm_up_request)
total_rows = 0
for request in requests:
prev = replay_cc.lxu_cache_state.clone().detach()
replay_populate(request)
after = replay_cc.lxu_cache_state.clone().detach()
diff = after - prev
total_rows += diff.count_nonzero().item()
logging.info(
f"Across {iters} runs, T: {num_tables}, Cached T: {get_num_cached_tables(num_tables, cached_tables_ratio)}, "
f"BS: {batch}, cache_load_factor: {cache_load_factor}, {t_ms * 1.0e3:.0f}us, "
f"BW (just UVM accesses): {total_rows * embedding_dims / iters / t_ms * 1000 / 1024 / 1024} MB/s"
)
@cli.command()
@click.option("--iters", default=100)
@click.option("--num-tables", default=50)
@click.option("--cached-tables-ratio", default=1.0)
@click.option("--batch", default=100)
@click.option("--avg-pooling-factor", default=100)
@click.option("--cache-load-factor", default=0.2)
def lfu_cache_populate_byte(
iters: int,
num_tables: int,
cached_tables_ratio: float,
batch: int,
avg_pooling_factor: int,
cache_load_factor: float,
) -> None:
num_warm_ups: int = 5
num_embeddings: int = 1000000
embedding_dims: int = 128
embedding_specs = create_embedding_specs(
num_tables, cached_tables_ratio, num_embeddings, embedding_dims
)
cc: nn.Module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs,
cache_load_factor=cache_load_factor,
cache_algorithm=CacheAlgorithm.LFU,
)
cc.fill_random_weights()
warm_up_requests = []
for _ in range(num_warm_ups):
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
warm_up_requests.append(
torch.ops.fbgemm.linearize_cache_indices(
cc.cache_hash_size_cumsum, indices, offsets
)
)
requests = []
for _ in range(iters):
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
requests.append(
torch.ops.fbgemm.linearize_cache_indices(
cc.cache_hash_size_cumsum, indices, offsets
)
)
def populate(linear_indices: Tensor) -> None:
torch.ops.fbgemm.lfu_cache_populate_byte(
cc.weights_uvm,
cc.cache_hash_size_cumsum,
cc.total_cache_hash_size,
cc.cache_index_table_map,
cc.weights_offsets,
cc.weights_tys,
cc.D_offsets,
linear_indices,
cc.lxu_cache_state,
cc.lxu_cache_weights,
cc.lxu_state,
)
for warm_up_request in warm_up_requests:
populate(warm_up_request)
t_ms = benchmark_different_inputs(
populate,
requests,
)
# Replay to figure out UVM access BW, which would be PCIe bound.
replay_cc: nn.Module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs,
cache_load_factor=cache_load_factor,
cache_algorithm=CacheAlgorithm.LFU,
)
replay_cc.fill_random_weights()
def replay_populate(linear_indices: Tensor) -> None:
torch.ops.fbgemm.lfu_cache_populate_byte(
replay_cc.weights_uvm,
replay_cc.cache_hash_size_cumsum,
replay_cc.total_cache_hash_size,
replay_cc.cache_index_table_map,
replay_cc.weights_offsets,
replay_cc.weights_tys,
replay_cc.D_offsets,
linear_indices,
replay_cc.lxu_cache_state,
replay_cc.lxu_cache_weights,
replay_cc.lxu_state,
)
for warm_up_request in warm_up_requests:
replay_populate(warm_up_request)
total_rows = 0
for request in requests:
prev = replay_cc.lxu_cache_state.clone().detach()
replay_populate(request)
after = replay_cc.lxu_cache_state.clone().detach()
diff = after - prev
total_rows += diff.count_nonzero().item()
logging.info(
f"Across {iters} runs, T: {num_tables}, Cached T: {get_num_cached_tables(num_tables, cached_tables_ratio)}, "
f"BS: {batch}, cache_load_factor: {cache_load_factor}, {t_ms * 1.0e3:.0f}us, "
f"BW (just UVM accesses): {total_rows * embedding_dims / iters / t_ms * 1000 / 1024 / 1024} MB/s"
)
if __name__ == "__main__":
cli()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import click
import torch
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
logging.basicConfig(level=logging.DEBUG)
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
@click.group()
def cli() -> None:
pass
@cli.command()
@click.option("--flush-gpu-cache-size-mb", default=40)
@click.option("--iters", default=100)
@click.option("--batch-size", default=25)
@click.option("--m", default=2048)
@click.option("--n", default=100)
@click.option("--k", default=256)
@click.option("--num_warmups", default=2)
def stride_gemm(
flush_gpu_cache_size_mb: int,
iters: int,
batch_size: int,
m: int,
n: int,
k: int,
num_warmups: int,
) -> None:
A = torch.rand(m, batch_size, k).half().cuda()
B = torch.rand(batch_size, k, n).half().cuda()
bias = torch.rand(batch_size, n).half().cuda()
bias_permute102 = bias.unsqueeze(1)
# A100 40MB L2 cache
elapse, _ = benchmark_torch_function(
torch.ops.fbgemm.permute102_baddbmm_permute102,
(bias, A, B),
flush_gpu_cache_size_mb,
iters=iters,
num_warmups=num_warmups,
)
logging.info(
f"stride gemm fused: time: {elapse}, TFLOPS/sec: {2.0 * batch_size * m * n * k / elapse / 1.0e12: .2f}"
)
def ref_stride_gemm(
bias_permute102: torch.Tensor, A: torch.Tensor, B: torch.Tensor
) -> torch.Tensor:
A_permute102 = A.permute(1, 0, 2)
C_permute102 = torch.baddbmm(bias_permute102, A_permute102, B)
C_ref = C_permute102.permute(1, 0, 2) # (m, batch_size, n)
return C_ref
# A100 40MB L2 cache
elapse_ref, _ = benchmark_torch_function(
ref_stride_gemm,
(bias_permute102, A, B),
flush_gpu_cache_size_mb,
iters=iters,
num_warmups=num_warmups,
)
logging.info(
f"stride gemm unfused: time: {elapse_ref}, TFLOPS/sec: {2.0 * batch_size * m * n * k / elapse_ref / 1.0e12: .2f}"
)
if __name__ == "__main__":
cli()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import time
from typing import Tuple
import click
import numpy as np
import torch
from fbgemm_gpu.bench.bench_utils import benchmark_requests
from fbgemm_gpu.split_embedding_utils import generate_requests, round_up
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.ssd_split_table_batched_embeddings_ops import (
CacheAlgorithm,
EmbeddingLocation,
PoolingMode,
SparseType,
SSDIntNBitTableBatchedEmbeddingBags,
)
logging.basicConfig(level=logging.DEBUG)
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:ssd_split_table_batched_embeddings"
)
logging.basicConfig(level=logging.DEBUG)
@click.group()
def cli() -> None:
pass
def benchmark_ssd_function(
iters: int,
warmup_iters: int,
# pyre-fixme[2]: Parameter must be annotated.
s,
buf: torch.Tensor,
indices: torch.Tensor,
indices_per_itr: int,
) -> Tuple[float, float]:
actions_count_cpu = torch.tensor([indices_per_itr]).long().cpu()
# warmup
for i in range(warmup_iters):
start = i * indices_per_itr
end = start + indices_per_itr
indices_this_itr = indices[start:end]
        # Warm up the same get/set path that is timed below
s.get(indices_this_itr, buf, actions_count_cpu)
s.set(indices_this_itr, buf, actions_count_cpu)
logging.info("Finished warmup")
total_time_read_ns = 0
total_time_write_ns = 0
for i in range(iters):
start = (i + warmup_iters) * indices_per_itr
end = start + indices_per_itr
indices_this_itr = indices[start:end]
# Benchmark code
start = time.time_ns()
s.get(indices_this_itr, buf, actions_count_cpu)
read_end = time.time_ns()
s.set(indices_this_itr, buf, actions_count_cpu)
end = time.time_ns()
total_time_read_ns += read_end - start
total_time_write_ns += end - read_end
if i % 10 == 0:
logging.info(
f"{i}, {(read_end - start) / 10**6}, {(end - read_end) / 10**6}"
)
return (total_time_read_ns / iters, total_time_write_ns / iters)
def benchmark_read_write(
ssd_prefix: str,
batch_size: int,
bag_size: int,
num_embeddings: int,
embedding_dim: int,
iters: int,
warmup_iters: int,
num_shards: int,
num_threads: int,
) -> None:
import tempfile
idx_dtype = torch.int64
data_dtype = torch.float32
np.random.seed(42)
torch.random.manual_seed(43)
elem_size = 4
with tempfile.TemporaryDirectory(prefix=ssd_prefix) as ssd_directory:
ssd_db = torch.classes.fbgemm.EmbeddingRocksDBWrapper(
ssd_directory,
num_shards,
num_threads,
0, # ssd_memtable_flush_period,
0, # ssd_memtable_flush_offset,
4, # ssd_l0_files_per_compact,
embedding_dim,
0, # ssd_rate_limit_mbps,
1, # ssd_size_ratio,
8, # ssd_compaction_trigger,
536870912, # 512MB ssd_write_buffer_size,
8, # ssd_max_write_buffer_num,
-0.01, # ssd_uniform_init_lower
0.01, # ssd_uniform_init_upper
32, # row_storage_bitwidth
)
total_indices = (warmup_iters + iters) * batch_size * bag_size
indices_per_itr = batch_size * bag_size
indices = torch.randint(
low=0, high=num_embeddings, size=(total_indices,), dtype=idx_dtype
)
buf = torch.empty((batch_size * bag_size, embedding_dim), dtype=data_dtype)
read_lat_ns, write_lat_ns = benchmark_ssd_function(
iters, warmup_iters, ssd_db, buf, indices, indices_per_itr
)
total_bytes = batch_size * embedding_dim * bag_size * elem_size
byte_seconds_per_ns = total_bytes * 1e9
gibps_rd = byte_seconds_per_ns / (read_lat_ns * 2**30)
gibps_wr = byte_seconds_per_ns / (write_lat_ns * 2**30)
gibps_tot = 2 * byte_seconds_per_ns / ((read_lat_ns + write_lat_ns) * 2**30)
logging.info(
f"Batch Size: {batch_size}, "
f"Bag_size: {bag_size:3d}, "
f"Read_us: {read_lat_ns / 1000:8.0f}, "
f"Write_us: {write_lat_ns / 1000:8.0f}, "
f"Total_us: {(read_lat_ns + write_lat_ns) / 1000:8.0f}, "
f"TMaxQPS: {1e9 * batch_size / (read_lat_ns + write_lat_ns):8.0f}, "
f"GiBps Rd: {gibps_rd:3.2f}, "
f"GiBps Wr: {gibps_wr:3.2f}, "
f"GiBps R+W: {gibps_tot:3.2f}, "
)
del ssd_db
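# A minimal sketch of the bandwidth conversion used above (the helper name is an
# assumption and it is not called by the benchmark). Latencies are in
# nanoseconds, so bytes / (lat_ns / 1e9) gives bytes per second, and dividing by
# 2**30 converts to GiB/s -- the same arithmetic as
# byte_seconds_per_ns / (lat_ns * 2**30).
def _bytes_and_ns_to_gibps(total_bytes: int, latency_ns: float) -> float:
    return total_bytes * 1e9 / (latency_ns * 2**30)
# Example: moving 512 KiB in 100 us is 512 * 1024 * 1e9 / (1e5 * 2**30), about 4.9 GiB/s.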
@cli.command()
# @click.option("--num-tables", default=64)
@click.option("--num-embeddings", default=int(1.5e9))
@click.option("--embedding-dim", default=128)
@click.option("--batch-size", default=1024)
@click.option("--bag-size", default=1)
@click.option("--iters", default=1000)
@click.option("--warmup-iters", default=100)
@click.option(
"--ssd-prefix", default="/tmp/ssd_benchmark_embedding"
) # Check P556577690 and https://fburl.com/t9lf4d7v
@click.option("--num-shards", default=8)
@click.option("--num-threads", default=8)
def ssd_read_write(
ssd_prefix: str,
num_embeddings: int,
embedding_dim: int,
bag_size: int,
batch_size: int,
iters: int,
warmup_iters: int,
num_shards: int,
num_threads: int,
) -> None:
benchmark_read_write(
ssd_prefix,
batch_size,
bag_size,
num_embeddings,
embedding_dim,
iters,
warmup_iters,
num_shards,
num_threads,
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--iters", default=100)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--use-cache", is_flag=True, default=False)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--enforce-hbm", is_flag=True, default=False)
@click.option("--ssd-cache-loc", default="device")
def nbit_ssd(
    alpha: float,
bag_size: int, # L
batch_size: int, # B
embedding_dim: int, # D
weights_precision: SparseType,
iters: int,
mixed: bool,
num_embeddings: int, # E
num_tables: int, # T
reuse: float,
weighted: bool,
flush_gpu_cache_size_mb: int,
output_dtype: SparseType,
use_cache: bool,
cache_algorithm: str,
cache_load_factor: float,
enforce_hbm: bool,
ssd_cache_loc: str,
) -> None:
import tempfile
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
cache_alg = CacheAlgorithm.LRU
managed_type = (
EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED
)
ssd_cache_location = (
EmbeddingLocation.MANAGED
if ssd_cache_loc == "managed"
else EmbeddingLocation.DEVICE
)
logging.info(f"T: {T}")
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb_uvm = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
managed_type,
)
for d in Ds
],
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
enforce_hbm=enforce_hbm,
).cuda()
emb_uvm.fill_random_weights()
feature_table_map = list(range(T))
C = max(T * B * L, 1)
emb_ssd = SSDIntNBitTableBatchedEmbeddingBags(
embedding_specs=[("", E, d, weights_precision) for d in Ds],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=C,
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
ssd_shards=2,
pooling_mode=PoolingMode.SUM,
ssd_cache_location=ssd_cache_location, # adjust the cache locations
).cuda()
emb_cpu = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
EmbeddingLocation.HOST,
)
for d in Ds
],
output_dtype=output_dtype,
device="cpu",
)
emb_cpu.fill_random_weights()
requests = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
)
requests_gpu = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes = (
output_size_multiplier * B * sum(Ds) + param_size_multiplier * B * sum(Ds) * L
)
nparams_byte = sum(w.numel() for (w, _) in emb_cpu.split_embedding_weights())
logging.info(
f"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
f"{B * (L * sum(Ds)) * param_size_multiplier / 1.0e9: .2f} GB"
)
# UVM
torch.cuda.cudart().cudaProfilerStart()
torch.cuda.nvtx.range_push("uvm forward")
time_per_iter = benchmark_requests(
# pyre-ignore
requests_gpu,
lambda indices, offsets, per_sample_weights: emb_uvm.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"UVM NBit Forward, {weights_precision}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
torch.cuda.nvtx.range_pop()
torch.cuda.cudart().cudaProfilerStop()
# SSD
torch.cuda.cudart().cudaProfilerStart()
torch.cuda.nvtx.range_push("ssd forward")
time_per_iter = benchmark_requests(
# pyre-ignore
requests_gpu,
lambda indices, offsets, per_sample_weights: emb_ssd.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"SSD NBit Forward, {weights_precision}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
torch.cuda.nvtx.range_pop()
torch.cuda.cudart().cudaProfilerStop()
# CPU
requests_cpu = [
(a.int().cpu(), b.int().cpu(), c if c else None) for (a, b, c) in requests
]
time_per_iter = benchmark_requests(
# pyre-ignore
requests_cpu,
lambda indices, offsets, per_sample_weights: emb_cpu.forward(
indices.int().cpu(),
offsets.int().cpu(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"CPU NBit Forward, {weights_precision}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
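# A hedged note on the bandwidth accounting above (the helper is illustrative and
# unused): each forward pass writes one pooled output row of width d per sample
# and table (output_size_multiplier * B * sum(Ds)) and reads L quantized rows per
# sample and table (param_size_multiplier * B * sum(Ds) * L); read_write_bytes is
# the sum of the two, and BW = read_write_bytes / time_per_iter.
def _nbit_forward_bytes(
    B: int, L: int, sum_d: int, param_bytes: float, output_bytes: float
) -> float:
    return output_bytes * B * sum_d + param_bytes * B * sum_d * L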
if __name__ == "__main__":
cli()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import random
from typing import List, Tuple
import click
import fbgemm_gpu
import torch
from torch.profiler import profile
logging.basicConfig(level=logging.DEBUG)
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import benchmark_torch_function
else:
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
@click.group()
def cli() -> None:
pass
@cli.command()
@click.option("--batch-size", type=int, default=128)
@click.option("--embedding-dim", type=int, default=128)
@click.option("--max-len", type=int, default=128)
@click.option("--elem-type", type=str, default="half")
def device(
batch_size: int,
embedding_dim: int,
max_len: int,
elem_type: str,
) -> None:
lengths = torch.randint(max_len, size=(batch_size,))
total_lengths = lengths.sum().item()
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
dtype = (
torch.float16
if elem_type == "half" or elem_type == "float16"
else torch.float32
)
# pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, float, int]`.
values_2d = torch.rand(total_lengths, embedding_dim, dtype=dtype)
if torch.cuda.is_available():
offsets = offsets.cuda()
values_2d = values_2d.cuda()
time, output = benchmark_torch_function(
torch.ops.fbgemm.jagged_2d_to_dense, (values_2d, offsets, max_len), iters=1000
)
offsets_nbytes = offsets.numel() * offsets.element_size()
values_nbytes = values_2d.numel() * values_2d.element_size()
dense_nbytes = output.numel() * output.element_size()
num_bytes = offsets_nbytes + values_nbytes + dense_nbytes
logging.info(f"jagged_2d_to_dense {time} sec {num_bytes / time / 1e9} GB/s")
total_L = values_2d.size(0)
time, jagged_output = benchmark_torch_function(
torch.ops.fbgemm.dense_to_jagged, (output, [offsets], total_L), iters=1000
)
num_bytes = offsets_nbytes + 2 * values_nbytes
logging.info(f"dense_to_jagged (2d) {time} sec {num_bytes / time / 1e9} GB/s")
time, jagged_output = benchmark_torch_function(
torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output,
(values_2d, [offsets], output),
iters=1000,
)
num_bytes = offsets_nbytes + 3 * values_nbytes
logging.info(
f"jagged_dense_elementwise_add_jagged_output {time} sec {num_bytes / time / 1e9} GB/s"
)
time, jagged_output = benchmark_torch_function(
torch.ops.fbgemm.jagged_dense_elementwise_mul,
(values_2d, [offsets], output),
iters=1000,
)
num_bytes = offsets_nbytes + 3 * values_nbytes
logging.info(
f"jagged_dense_elementwise_mul {time} sec {num_bytes / time / 1e9} GB/s"
)
output_sq = output * output
time, jagged_output = benchmark_torch_function(
torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output,
(values_2d, [offsets], output, output_sq),
iters=1000,
)
num_bytes = offsets_nbytes + 4 * values_nbytes
logging.info(
f"jagged_dense_dense_elementwise_add_jagged_output {time} sec {num_bytes / time / 1e9} GB/s"
)
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Union[bool, float, int]`.
values_1d = torch.rand(total_lengths)
if torch.cuda.is_available():
values_1d = values_1d.cuda()
values_nbytes = values_1d.numel() * values_1d.element_size()
time, output = benchmark_torch_function(
lambda: torch.ops.fbgemm.jagged_1d_to_dense(
values_1d, offsets, max_len, padding_value=0
),
(),
iters=1000,
)
dense_nbytes = output.numel() * output.element_size()
num_bytes = offsets_nbytes + values_nbytes + dense_nbytes
logging.info(f"jagged_1d_to_dense {time} sec {num_bytes / time / 1e9} GB/s")
total_L = values_1d.size(0)
output_1d = torch.unsqueeze(output, -1)
time, jagged_output = benchmark_torch_function(
torch.ops.fbgemm.dense_to_jagged, (output_1d, [offsets], total_L), iters=1000
)
num_bytes = offsets_nbytes + 2 * values_nbytes
logging.info(f"dense_to_jagged (1d) {time} sec {num_bytes / time / 1e9} GB/s")
@cli.command()
@click.option("--batch-size", type=int, default=1)
@click.option("--h-dim", type=int, default=3)
@click.option("--embedding-dim", type=int, default=16)
@click.option("--max-len", type=int, default=10)
@click.option("--elem-type", type=str, default="half")
def batched_dense_vec_jagged_2d_mul(
batch_size: int,
h_dim: int,
embedding_dim: int,
max_len: int,
elem_type: str,
) -> None:
lengths = torch.randint(2 * max_len, size=(batch_size,)) # Allow for truncation
total_lengths = lengths.sum().item()
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
dtype = (
torch.float16
if elem_type == "half" or elem_type == "float16"
else torch.float32
)
# pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, float, int]`.
values_2d = torch.rand(total_lengths, h_dim * embedding_dim, dtype=dtype)
dense = torch.rand(batch_size * h_dim, max_len, dtype=dtype)
if torch.cuda.is_available():
offsets = offsets.cuda()
values_2d = values_2d.cuda()
dense = dense.cuda()
time, output = benchmark_torch_function(
torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul,
(dense, values_2d, offsets),
iters=1000,
)
# Account for the fact that each matmul inner dim was limited to max_len
computed_lengths = torch.minimum(lengths, torch.ones(batch_size) * max_len)
total_computed_lengths = computed_lengths.sum().item()
num_flops = total_computed_lengths * h_dim * embedding_dim * 2.0
logging.info(
f"batched_dense_vec_jagged_2d_mul {time} sec {num_flops / time / 1e9} GFLOP/s"
)
@cli.command()
@click.option("--batch-size", type=int, default=1024)
@click.option("--max-len", type=int, default=10)
@click.option("--dtype", type=str, default="float")
def jagged_1d_to_truncated_values(
batch_size: int,
max_len: int,
dtype: str,
) -> None:
lengths = torch.randint(2 * max_len, size=(batch_size,)) # Allow for truncation
total_lengths = lengths.sum().item()
torch_dtype = torch.float16 if dtype in ["half", "float16"] else torch.float32
# pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, float, int]`.
values = torch.rand(total_lengths, dtype=torch_dtype)
def ref(values: torch.Tensor, lengths: torch.Tensor, max_len: int) -> torch.Tensor:
dense_values = torch.ops.fbgemm.jagged_to_padded_dense(
values,
[torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)],
[max_len],
padding_value=0,
)
truncated_lengths = torch.clamp(lengths, max=max_len)
mask2d = torch.arange(max_len).expand(
batch_size, -1
) < truncated_lengths.unsqueeze(-1)
return dense_values[mask2d].view(-1)
time_ref, output_ref = benchmark_torch_function(
ref,
(values, lengths, max_len),
)
time, output = benchmark_torch_function(
torch.ops.fbgemm.jagged_1d_to_truncated_values,
(values, lengths, max_len),
)
torch.testing.assert_close(output, output_ref)
bytes = (values.numel() + output.numel()) * (
4 if torch_dtype == torch.float else 2
) + lengths.numel() * 4
logging.info(f"reference {time_ref} sec {bytes / time_ref / 1e9} GB/s")
logging.info(f"truncate_jagged_1d {time} sec {bytes / time / 1e9} GB/s")
@cli.command()
@click.option("--batch-size", type=int, default=1024)
@click.option("--max-len", type=int, default=256)
def masked_select_jagged_1d(
batch_size: int,
max_len: int,
) -> None:
lengths = torch.randint(2 * max_len, size=(batch_size,)) # Allow for truncation
total_lengths = int(lengths.sum().item())
dtype = torch.long
values = torch.randint(2**16, (total_lengths,), dtype=dtype)
mask = torch.randint(2, (total_lengths,)) > 0
def ref(
values: torch.Tensor, lengths: torch.Tensor, mask: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
masked_values_ref = values[mask]
cum_count = torch.cumsum(mask, 0)
cum_count = torch.cat((cum_count, torch.tensor([0])))
cum_length = cum_count[torch.cumsum(lengths, 0) - 1]
cum_length_shift_right = torch.roll(cum_length, 1)
cum_length_shift_right[0] = 0
masked_lengths_ref = cum_length - cum_length_shift_right
return masked_values_ref, masked_lengths_ref
time_ref, (masked_values_ref, masked_lengths_ref) = benchmark_torch_function(
ref,
(values, lengths, mask),
)
time, (masked_values, masked_lengths) = benchmark_torch_function(
torch.ops.fbgemm.masked_select_jagged_1d,
(values, lengths, mask),
)
torch.testing.assert_close(masked_values, masked_values_ref)
torch.testing.assert_close(masked_lengths, masked_lengths_ref)
bytes = (2 * values.numel() + 2 * lengths.numel() + 2 * masked_values.numel()) * 4
logging.info(f"reference {time_ref} sec {bytes / time_ref / 1e9} GB/s")
logging.info(f"masked_select_jagged_1d {time} sec {bytes / time / 1e9} GB/s")
@cli.command()
@click.option("--num-batches", type=int, default=40)
@click.option("--max-seq-length", type=int, default=400)
@click.option("--input-batch-size", type=int, default=1024)
@click.option("--output-batch-size", type=int, default=512)
@click.option("--jagged-tensor-type", type=str, default="float")
@click.option("--has-weights", is_flag=True, default=False)
@click.option("--weight-type", type=str, default="float")
def keyed_jagged_index_select_dim1(
num_batches: int,
max_seq_length: int,
input_batch_size: int,
output_batch_size: int,
jagged_tensor_type: str,
has_weights: bool,
weight_type: str,
) -> None:
jagged_tensor_types = {
"float": torch.float,
"half": torch.half,
"int": torch.int,
"long": torch.long,
}
weight_types = {"float": torch.float, "half": torch.half}
if jagged_tensor_type not in jagged_tensor_types.keys():
raise AssertionError(
f"--jagged-tensor-type ({jagged_tensor_type}) is not supported"
)
if weight_type not in weight_types.keys():
raise AssertionError(f"--weight-type ({weight_type}) is not supported")
jagged_tensor_dtype = jagged_tensor_types[jagged_tensor_type]
is_float = jagged_tensor_dtype in [torch.float, torch.half]
weight_dtype = weight_types[weight_type]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(input_batch_size * num_batches,),
dtype=torch.long,
device="cuda",
)
# Imitate KeyedJaggedTensor offsets
offsets = torch.concat(
[torch.zeros(1, dtype=torch.long, device="cuda"), lengths.cumsum(0)]
)
    # Select random batch positions within [0, input_batch_size).
    indices = torch.randint(
        low=0,
        high=input_batch_size,
        size=(output_batch_size,),
        dtype=torch.long,
        device="cuda",
    )
if is_float:
values = torch.rand(
int(offsets[-1].item()),
dtype=jagged_tensor_dtype,
device="cuda",
)
else:
values = torch.randint(
2**16,
(int(offsets[-1].item()),),
dtype=jagged_tensor_dtype,
device="cuda",
)
weights = (
torch.rand(int(offsets[-1].item()), dtype=weight_dtype, device="cuda")
if has_weights
else None
)
# Only float tensors can require grad
if is_float:
values.requires_grad = True
time, output = benchmark_torch_function(
torch.ops.fbgemm.keyed_jagged_index_select_dim1,
(values, lengths, offsets, indices, input_batch_size, weights),
iters=1000,
)
output = output[0]
# Prepare inputs for the reference run
ref_inputs = []
for k in range(num_batches):
key_lengths = lengths[k * input_batch_size : (k + 1) * input_batch_size]
start_offset = offsets[k * input_batch_size]
end_offset = offsets[(k + 1) * input_batch_size]
key_values = values[start_offset:end_offset].view(-1, 1)
if has_weights:
# pyre-ignore[16]
key_weights = weights[start_offset:end_offset].view(-1, 1)
else:
key_weights = torch.empty(0)
ref_inputs.append((key_values, key_lengths, indices, key_weights))
def keyed_jagged_index_select_dim1_ref(
inputs: List[torch.Tensor],
has_weights: bool,
) -> Tuple[torch.Tensor, torch.Tensor]:
outputs = []
output_weights = []
for key_values, key_lengths, indices, _ in inputs:
outputs.append(
torch.ops.fbgemm.jagged_index_select(key_values, key_lengths, indices)[
0
].view(-1)
)
if has_weights:
for _, key_lengths, indices, key_weights in inputs:
output_weights.append(
torch.ops.fbgemm.jagged_index_select(
key_weights, key_lengths, indices
)[0].view(-1)
)
return torch.concat(outputs), torch.concat(
output_weights
) if has_weights else torch.empty(0)
time_ref, output_ref = benchmark_torch_function(
keyed_jagged_index_select_dim1_ref, (ref_inputs, has_weights)
)
output_ref = output_ref[0]
logging.info(
f"keyed_jagged_index_select_dim1 forward time: {time * 1e3} ms, ref {time_ref * 1e3}"
)
if not is_float:
return
grad = torch.rand_like(output)
time, _ = benchmark_torch_function(
functools.partial(output.backward, retain_graph=True), (grad,), iters=1000
)
time_ref, _ = benchmark_torch_function(
functools.partial(output_ref.backward, retain_graph=True), (grad,), iters=1000
)
logging.info(
f"keyed_jagged_index_select_dim1 backward time: {time * 1e3} ms, ref {time_ref * 1e3}"
)
@cli.command()
@click.option("--max-seq-length", type=int, default=400)
@click.option("--input-batch-size", type=int, default=1024)
@click.option("--slice-length", type=int, default=10)
@click.option("--jagged-tensor-type", type=str, default="float")
def jagged_slice_cpu(
max_seq_length: int,
input_batch_size: int,
slice_length: int,
jagged_tensor_type: str,
) -> None:
jagged_tensor_types = {
"float": torch.float,
"half": torch.half,
"int": torch.int,
"long": torch.long,
}
if jagged_tensor_type not in jagged_tensor_types.keys():
raise AssertionError(
f"--jagged-tensor-type ({jagged_tensor_type}) is not supported"
)
jagged_tensor_dtype = jagged_tensor_types[jagged_tensor_type]
is_float = jagged_tensor_dtype in [torch.float, torch.half]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(input_batch_size,),
dtype=torch.long,
)
start_list = [random.randint(0, max(len_ - 1, 0)) for len_ in lengths.tolist()]
start = torch.tensor(start_list)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
if is_float:
values = torch.rand(
int(offsets[-1].item()),
dtype=jagged_tensor_dtype,
)
else:
values = torch.randint(
2**16,
(int(offsets[-1].item()),),
dtype=jagged_tensor_dtype,
)
time, output = benchmark_torch_function(
torch.ops.fbgemm.jagged_slice,
(values, lengths, start, slice_length),
iters=1000,
)
def jagged_slice_ref(
x_values: torch.Tensor,
offsets: torch.Tensor,
start: torch.Tensor,
max_L: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
end_offsets_ = max_L + start + offsets[:-1]
end_offsets = torch.where(end_offsets_ > offsets[1:], offsets[1:], end_offsets_)
start_offsets = start + offsets[:-1]
indices_to_select: List[torch.Tensor] = []
for i in range(end_offsets.size(0)):
indices_to_select.append(
torch.arange(start_offsets[i].item(), end_offsets[i].item())
)
output_ref = torch.index_select(x_values, 0, torch.cat(indices_to_select))
new_lengths = end_offsets - start_offsets
new_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(new_lengths)
return output_ref, new_offsets
time_ref, output = benchmark_torch_function(
jagged_slice_ref, (values, offsets, start, slice_length)
)
logging.info(f"jagged_slice forward time: {time * 1e3} ms, ref {time_ref * 1e3} ms")
profiler = profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA,
],
schedule=torch.profiler.schedule(
wait=200,
warmup=100,
active=100,
),
record_shapes=True,
profile_memory=True,
with_stack=True,
with_flops=True,
)
profiler.start()
for _ in range(500):
torch.ops.fbgemm.jagged_slice(values, lengths, start, slice_length)
profiler.step()
profiler.stop()
logging.info(
"\n"
+ profiler.key_averages().table(sort_by="self_cuda_time_total", row_limit=10)
)
flops = sum(e.flops for e in profiler.events())
logging.info(f"Total Compute: {flops / 1e9} gflops")
if __name__ == "__main__":
cli()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import logging
import signal
from typing import List, Tuple
import click
import fbgemm_gpu
import numpy as np
import tabulate
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
EmbeddingLocation,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch import Tensor
from torch.profiler import profile, ProfilerActivity
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import benchmark_torch_function
else:
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:merge_pooled_embeddings")
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:merge_pooled_embeddings_cpu"
)
def get_gpu_device(gpu_num) -> torch.device:
return torch.device(f"cuda:{gpu_num}")
# Merged indices with shape (T, B, L) -> (flattened indices with shape
# (T * B * L), offsets with shape (T * B + 1)).
# Reference: https://fburl.com/code/5ueyfv5j
def get_table_batched_offsets_from_dense(
merged_indices: torch.Tensor,
gpu_num,
) -> Tuple[torch.Tensor, torch.Tensor]:
(T, B, L) = merged_indices.size()
lengths = np.ones((T, B)) * L
flat_lengths = lengths.flatten()
return (
merged_indices.int().contiguous().view(-1).to(device=get_gpu_device(gpu_num)),
torch.tensor(
([0] + np.cumsum(flat_lengths).tolist()), device=get_gpu_device(gpu_num)
).int(),
)
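# A minimal worked example of the flattening above (illustrative helper, not
# used by the benchmark): with T=2 tables, B=3 samples and L=4 indices per bag,
# every bag has the same length, so the offsets are simply 0, 4, 8, ..., T * B * L.
def _example_table_batched_offsets() -> None:
    T, B, L = 2, 3, 4
    merged = torch.zeros(T, B, L, dtype=torch.int32)
    indices, offsets = get_table_batched_offsets_from_dense(merged, gpu_num=0)
    # indices.numel() == T * B * L == 24, offsets.tolist() == [0, 4, 8, ..., 24]
    logging.info(f"offsets: {offsets.tolist()}")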
# Reference: https://fburl.com/code/o5600si0
def generate_requests(
num_gpus: int,
B: int,
T: int,
L: int,
E: int,
# inter-batch indices reuse rate
reuse: float = 0.0,
) -> List[Tuple[torch.IntTensor, torch.IntTensor, None]]:
rs = []
for gpu_num in range(num_gpus):
all_indices = torch.randint(
low=0,
high=E,
size=(T, B, L),
device=get_gpu_device(gpu_num),
dtype=torch.int32,
)
# each bag is usually sorted
(all_indices, _) = torch.sort(all_indices)
all_indices = all_indices.reshape(T, B * L)
rs.append(
get_table_batched_offsets_from_dense(all_indices.view(T, B, L), gpu_num)
)
return rs
def _get_random_tensor(
num_ads: int,
embedding_dimension: int,
ads_tables: int,
data_type: str,
gpu_idx: int,
include_quantization: bool,
):
if data_type == "FP16" or include_quantization:
result_tensor = torch.randn(
num_ads,
embedding_dimension * ads_tables,
dtype=torch.float16,
device=torch.device(f"cuda:{gpu_idx}"),
)
elif data_type == "INT8":
assert (
embedding_dimension % 2
) == 0, "needs to align to 2 bytes (half type size) for INT8"
result_tensor = torch.randint(
0,
255,
# 2 FP16 numbers for scale and bias, total of 4 bytes overhead
size=(num_ads, (embedding_dimension + 4) * ads_tables),
dtype=torch.uint8,
device=torch.device(f"cuda:{gpu_idx}"),
)
elif data_type == "INT4":
assert (
embedding_dimension % 4
) == 0, "needs to align to 2 bytes (half type size) for INT4"
result_tensor = torch.randint(
0,
255,
# Using torch.uint8 for int4 storage
size=(num_ads, (embedding_dimension // 2 + 4) * ads_tables),
dtype=torch.uint8,
device=torch.device(f"cuda:{gpu_idx}"),
)
else:
raise ValueError
return result_tensor
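# Row-size sketch for the tensors above (illustrative numbers only): with
# embedding_dimension=300 and ads_tables=1, an FP16 row stores 300 half values
# (600 bytes), an INT8 row stores 300 + 4 = 304 bytes (4 bytes of FP16 scale/bias
# overhead), and an INT4 row stores 300 // 2 + 4 = 154 bytes.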
def generate_tbe(
batch_indices,
num_ads: int,
embedding_dimension: int,
num_of_embeddings: int,
pooling_factor: int,
ads_tables: int,
fused_tbe: bool,
data_type: str,
num_gpus: int,
):
B = num_ads
D = embedding_dimension
E = num_of_embeddings
L = pooling_factor
T = ads_tables
Ds = [D] * T
managed_option = EmbeddingLocation.DEVICE
output_dtype = SparseType.FP16
if fused_tbe:
assert data_type == "INT8" # INT4 not implemented yet
output_dtype = SparseType.INT8
emb = [
IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
str(idx),
E,
d,
SparseType.INT4,
managed_option,
)
for d in Ds
],
output_dtype=output_dtype,
device=get_gpu_device(idx),
bounds_check_mode=BoundsCheckMode.NONE,
)
for idx in range(num_gpus)
]
for e in emb:
e.fill_random_weights()
requests = generate_requests(num_gpus, B, T, L, E)
# https://fburl.com/code/doxxjc8c
SIZE_OF_FLOAT = 4
num_elem_per_byte = 1 if data_type == "INT8" else 2
assert embedding_dimension % (2 * num_elem_per_byte) == 0
col_sizes = (
[
(embedding_dimension + num_elem_per_byte - 1) // num_elem_per_byte
+ 2 * SIZE_OF_FLOAT
]
* ads_tables
* num_gpus
)
offset = torch.tensor([0] + col_sizes, device=batch_indices.device)
tbe_offset = torch.cumsum(offset, dim=0).to(torch.int).cuda()
return emb, requests, tbe_offset
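# Offset-layout sketch (assumed values, for illustration only): with data_type="INT8"
# (num_elem_per_byte=1), embedding_dimension=304, ads_tables=2, and num_gpus=1, each
# per-table column occupies 304 + 2 * SIZE_OF_FLOAT = 312 bytes, so
# col_sizes = [312, 312] and tbe_offset = cumsum([0, 312, 312]) = [0, 312, 624].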
def print_p2p_bandwidth(
num_gpus, iters, pooled_ad_embeddings, bytes_per_element
) -> None:
print("Pairwise GPU Copy Bandwidth (GB/s)")
p2p_copy_bw = np.zeros((num_gpus, num_gpus))
for i in range(num_gpus):
for j in range(num_gpus):
with torch.cuda.device(i):
t, _ = benchmark_torch_function(
lambda: pooled_ad_embeddings[i].copy_(pooled_ad_embeddings[j])
if i != j
else pooled_ad_embeddings[i].clone(),
(),
flush_gpu_cache_size_mb=0,
iters=iters,
)
p2p_copy_bw[i, j] = (
pooled_ad_embeddings[i].numel() * bytes_per_element / t / 1.0e9
)
table = tabulate.tabulate(
p2p_copy_bw,
headers=[f"GPU {i}" for i in range(num_gpus)],
tablefmt="fancy_grid",
floatfmt=".0f",
)
print(table)
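# Bandwidth formula used above, as a worked example (hypothetical timing): copying a
# 1024 x 30400 FP16 tensor (1024 * 30400 * 2 bytes ~= 62.3 MB) in t = 1 ms reports
# 62.3e6 / 1e-3 / 1e9 ~= 62 GB/s in the printed table.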
def benchmark( # noqa C901
all_to_one_only: bool,
sum_reduce_to_one_only: bool,
num_ads: int,
embedding_dimension: int,
ads_tables: int,
iters: int = 10,
p2p_bw: bool = False,
dst_device: int = 0,
data_type: str = "FP16",
mode: str = "P2P",
skip_dequantization: bool = False,
num_of_embeddings: int = 10000,
pooling_factor: int = 25,
) -> str:
assert torch.cuda.is_available()
torch.cuda.set_device(dst_device)
num_gpus = torch.cuda.device_count()
batch_indices = torch.zeros(num_ads).long().cuda()
    include_quantization = mode != "P2P"
    # Quantized (INT8/INT4) tensors are materialized as uint8, i.e. 1 byte per
    # stored element; FP16, and every quantization mode (which starts from FP16),
    # uses 2 bytes per element.
    bytes_per_element = 2 if (data_type == "FP16" or include_quantization) else 1
total_elements = num_ads * embedding_dimension * ads_tables * num_gpus
logging.debug(
f"B: {num_ads}, D: {embedding_dimension}, T: {ads_tables}, Data Type: {data_type}, Num GPUs: {num_gpus}, Destination GPU: {dst_device}"
)
fused_tbe = mode == "P2P_FUSED_TBE"
include_tbe = fused_tbe or mode == "P2P_TBE"
if include_tbe:
emb, requests, tbe_offset = generate_tbe(
batch_indices,
num_ads,
embedding_dimension,
num_of_embeddings,
pooling_factor,
ads_tables,
fused_tbe,
data_type,
num_gpus,
)
pooled_ad_embeddings = [
_get_random_tensor(
num_ads,
embedding_dimension,
ads_tables,
data_type,
gpu_idx,
include_quantization,
)
for gpu_idx in range(num_gpus)
]
if p2p_bw:
print_p2p_bandwidth(num_gpus, iters, pooled_ad_embeddings, bytes_per_element)
def pool_func_with_quantization(
batch_indices,
include_quantization,
include_tbe,
fused_tbe,
skip_dequantization,
data_type,
):
if include_tbe:
embedding_results = []
for idx, (indices, offsets) in enumerate(requests):
with torch.cuda.device(idx):
embedding_results.append(emb[idx].forward(indices, offsets))
else:
embedding_results = pooled_ad_embeddings
if data_type == "FP16" or (not fused_tbe and not include_quantization):
if all_to_one_only:
return torch.ops.fbgemm.all_to_one_device(
pooled_ad_embeddings, batch_indices.device
)
elif sum_reduce_to_one_only:
return torch.ops.fbgemm.sum_reduce_to_one(
pooled_ad_embeddings, batch_indices.device
)
else:
return torch.ops.fbgemm.merge_pooled_embeddings(
embedding_results, batch_indices.size(0), batch_indices.device
)
assert data_type == "INT8" or data_type == "INT4"
assert not all_to_one_only # not supported
if fused_tbe:
pooled_quantized_result = torch.ops.fbgemm.merge_pooled_embeddings(
embedding_results, batch_indices.size(0), batch_indices.device
)
else:
quantized = []
for t in embedding_results:
t_split_by_table = torch.split(t, embedding_dimension, dim=1)
quantized_split_by_table = [
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(t.float())
if data_type == "INT8"
else torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
t.float(), 4
)
for t in t_split_by_table
]
result = torch.cat(quantized_split_by_table, dim=1)
quantized.append(result)
pooled_quantized_result = torch.ops.fbgemm.merge_pooled_embeddings(
quantized, batch_indices.size(0), batch_indices.device
)
if skip_dequantization:
return pooled_quantized_result
PooledEmbeddingDequantizeDataTypeFP16 = 1
if data_type == "INT8":
return torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatMixedDim(
pooled_quantized_result,
tbe_offset,
PooledEmbeddingDequantizeDataTypeFP16,
)
else:
# TODO: the result here is wrong. Once MixedDim version for FusedNBit quantization is done, switch to that.
# Since their performance is similar, keep using Fused8BitRowwiseQuantizedToHalf for now.
return torch.ops.fbgemm.Fused8BitRowwiseQuantizedToHalf(
pooled_quantized_result
).half()
streams = [torch.cuda.Stream(device=i) for i in range(num_gpus)]
import contextlib
with contextlib.ExitStack() as stack:
for stream in streams:
stack.enter_context(torch.cuda.stream(stream))
# warm up
merged = pool_func_with_quantization(
batch_indices,
include_quantization,
include_tbe,
fused_tbe,
skip_dequantization,
data_type,
)
if all_to_one_only:
merged = torch.stack(merged)
t, _ = benchmark_torch_function(
pool_func_with_quantization,
(
batch_indices,
include_quantization,
include_tbe,
fused_tbe,
skip_dequantization,
data_type,
),
flush_gpu_cache_size_mb=0,
iters=iters,
)
with profile(activities=[ProfilerActivity.CUDA]) as prof:
pool_func_with_quantization(
batch_indices,
include_quantization,
include_tbe,
fused_tbe,
skip_dequantization,
data_type,
)
print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
    if isinstance(merged, Tensor):
        # all_to_one_device returns a list of tensors (stacked into one above);
        # the other paths return a single Tensor. Normalize to a list so the
        # element count below covers both cases.
        merged = [merged]
output_num_el = sum([a.numel() for a in merged])
# Assume tensors gathered are all the same size.
num_el_transferred = output_num_el * (num_gpus - 1) / num_gpus
logging.debug(
f"Mode: {mode}, Data Type: {data_type}, B: {num_ads}, D: {embedding_dimension}, T: {ads_tables}, Num GPUs: {num_gpus}, Destination GPU: {dst_device}, all_to_one_only: {all_to_one_only}, "
f"Number of elements: {total_elements / 1.0e6:.2f}, Million, Number of elements per GPU: {total_elements / 1.0e6 / num_gpus:.2f}, Billion elements per sec: {total_elements / t / 1.0e9:.1f}, "
f"Output Size: {output_num_el * bytes_per_element / 1.0e6:.0f}MB, Num elements transferred: {num_el_transferred / 1.0e6}, All-to-one BW: {output_num_el * bytes_per_element / t / 1.0e9:.1f}GB/s, link BW: {num_el_transferred * bytes_per_element / t / 1.0e9:.1f}GB/s, "
f"t: {t * 1.0e3:.2f}ms"
)
# return result in CSV format
return (
f"{mode}, {data_type}, {num_ads}, {embedding_dimension}, {ads_tables}, {num_gpus}, {dst_device}, {all_to_one_only}, "
f"{total_elements / 1.0e6:.2f}, {total_elements / 1.0e6 / num_gpus:.2f}, {total_elements / 1.0e9 / t:.1f}, "
f"{output_num_el * bytes_per_element / 1.0e6:.0f}, {output_num_el * bytes_per_element / t / 1.0e9:.1f}, "
f"{num_el_transferred * bytes_per_element / 1.0e9 / t:.1f}, "
f"{t * 1.0e3:.2f}"
)
@click.command()
@click.option("--all-to-one-only", is_flag=True, default=False)
@click.option("--sum-reduce-to-one-only", is_flag=True, default=False)
@click.option("--num_ads", default=1024, type=int)
@click.option("--embedding_dimension", default=300, type=int)
@click.option("--ads_tables", default=100, type=int)
@click.option("--iters", default=10, type=int)
@click.option("--p2p_bw", is_flag=True, default=False)
@click.option("--dst_device", default=0, type=int)
@click.option(
"--data_type",
type=click.Choice(["FP16", "INT8", "INT4"]),
default="FP16",
)
# P2P: merge_pooled_embeddings() or all_to_one_device() for tensor with "--data_type"
# P2P_QUANT: for INT8/INT4 data type, start with FP16, then quantize -> P2P -> dequantize to FP16
# P2P_TBE: add TBE in front of P2P_QUANT. When "--data_type" is FP16, the flow is TBE -> P2P; for INT8/INT4, the flow is TBE -> quantize -> P2P -> dequantize
# P2P_FUSED_TBE: similar to P2P_TBE except fuse the quantization into TBE
@click.option(
"--mode",
type=click.Choice(["P2P", "P2P_QUANT", "P2P_TBE", "P2P_FUSED_TBE"]),
default="P2P",
)
# For quantized communication, whether to dequantize back to FP16 at the end.
@click.option("--skip_dequantization", is_flag=True, default=False)
@click.option("--num_of_embeddings", default=100000, type=int)
@click.option("--pooling_factor", default=25, type=int)
@click.option("--sweep", is_flag=True, default=False)
def main(
all_to_one_only: bool,
sum_reduce_to_one_only: bool,
num_ads: int,
embedding_dimension: int,
ads_tables: int,
iters: int,
p2p_bw: bool,
dst_device: int,
data_type: str,
mode: str,
skip_dequantization: bool,
num_of_embeddings: int,
pooling_factor: int,
sweep: bool,
) -> None:
csv_header = (
"mode, data_type, num_ads, embedding_dimension, ads_tables, num_gpus, dst_device, all_to_one_only, "
"number of elements (Million), number of elements per GPU (Million), throughput (billion elements per sec), "
"output size (MB), all-to-one BW (GB/s), link BW (GB/s), t (ms)"
)
if sweep:
def handler(signum, frame):
logging.error("timeout")
raise TimeoutError()
results = []
num_gpu = torch.cuda.device_count()
for num_ads in [128, 256, 512, 1024, 2048]:
            # Scale num_ads so every GPU count sweeps through the same total number of elements
num_ads *= 8 // num_gpu
for embedding_dimension in [16, 64, 112, 304]:
for ads_tables in [25, 50, 100, 400, 800]:
if num_ads * embedding_dimension * ads_tables > 983040000:
continue # Skip tests that are too large
                    # signal.alarm() delivers SIGALRM, so register the timeout handler for it
                    signal.signal(signal.SIGALRM, handler)
signal.alarm(600)
logging.info(
f"config: num_ads: {num_ads}, embedding_dimension: {embedding_dimension}, ads_tables: {ads_tables}"
)
try:
result = benchmark(
all_to_one_only,
sum_reduce_to_one_only,
num_ads,
embedding_dimension,
ads_tables,
iters,
p2p_bw,
dst_device,
data_type,
mode,
skip_dequantization,
num_of_embeddings,
pooling_factor,
)
results.append(result)
except (TimeoutError, RuntimeError) as err:
logging.error(
f"B: {num_ads}, D: {embedding_dimension}, T: {ads_tables}, Data Type: {data_type}, Num GPU: {num_gpu}, time out or failed: {err}"
)
print(csv_header)
print(*results, sep="\n")
return
result = benchmark(
all_to_one_only,
sum_reduce_to_one_only,
num_ads,
embedding_dimension,
ads_tables,
iters,
p2p_bw,
dst_device,
data_type,
mode,
skip_dequantization,
num_of_embeddings,
pooling_factor,
)
print(csv_header)
print(result)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import subprocess
def run(args):
with open(args.shapes_file, "r") as f:
shapes = json.load(f)
num_embeddings_list = ",".join([str(shape[0]) for shape in shapes])
embedding_dims_list = ",".join([str(shape[1]) for shape in shapes])
cmds = [
args.python,
args.benchmark_path,
args.benchmark_cmd,
"--batch-size",
str(args.batch_size),
"--bag-size-list",
str(args.bag_size),
"--embedding-dim-list",
embedding_dims_list,
"--num-embeddings-list",
num_embeddings_list,
"--weights-precision",
args.weights_precision,
"--output-dtype",
args.output_dtype,
"--warmup-runs",
str(args.warmup_runs),
"--runs-of-iters",
str(args.warmup_runs + args.test_runs),
]
if not args.use_gpu:
cmds.append("--use-cpu")
if args.dry_run:
print("Command to be executed:")
print(" ".join(cmds))
return 0
p = subprocess.Popen(
cmds, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
)
output = ""
for line in iter(p.stdout.readline, ""):
print(line, end="")
if args.output:
output += line
p.stdout.close()
p.wait()
if args.output:
with open(args.output, "w") as outf:
outf.write(output)
return p.returncode
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--python",
type=str,
default="python3.10",
help="The python interpreter used to run the benchmark",
)
parser.add_argument(
"--benchmark-path",
type=str,
default="split_table_batched_embeddings_benchmark.py",
help="Path to the benchmark script",
)
parser.add_argument(
"--benchmark-cmd",
type=str,
default="nbit-device-with-spec",
help="The subcommand of the benchmark",
)
parser.add_argument("--batch-size", type=int, default=32, help="Batch size")
parser.add_argument(
"--bag-size", type=int, default=13, help="Bag size or pooling factor"
)
parser.add_argument(
"--shapes-file",
type=str,
required=True,
help="Path to the JSON file that describes a list of shapes [rows, embedding-dims]. "
+ "Its content should look like '[[123, 2], [456, 16], [789, 16], ...]'",
)
parser.add_argument(
"--weights-precision",
type=str,
default="fp16",
help="Weight data type",
)
parser.add_argument(
"--output-dtype", type=str, default="fp16", help="Output data type"
)
parser.add_argument(
"--warmup-runs", type=int, default=5, help="Number of warmup runs"
)
parser.add_argument("--test-runs", type=int, default=5, help="Number of test runs")
parser.add_argument(
"--output", type=str, default="", help="Also log the benchmark output to a file"
)
parser.add_argument("--use-gpu", action="store_true", help="Use GPU instead of CPU")
parser.add_argument(
"--dry-run",
action="store_true",
help="Only print out the command that will execute",
)
args = parser.parse_args()
returncode = run(args)
exit(returncode)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import functools
from math import sqrt
from typing import List, Tuple
import click
import fbgemm_gpu
import fbgemm_gpu.batched_unary_embeddings_ops as batched_unary_embeddings_ops
import numpy as np
import torch
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import benchmark_torch_function
else:
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
def generate_unary_feature(
batch_size: int, num_embeddings: int
) -> Tuple[List, List, List]:
lengths = []
offsets = []
indices = []
offset = 0
for _ in range(batch_size):
n_indices = 1
indices += (
np.round(np.random.random(n_indices) * (num_embeddings - 1))
.astype(int)
.tolist()
)
offsets.append(offset)
offset += 1
lengths.append(n_indices)
offsets.append(offset)
return (lengths, offsets, indices)
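# A minimal worked example (hypothetical numbers): generate_unary_feature(3, 10)
# returns lengths=[1, 1, 1], offsets=[0, 1, 2, 3], and three random indices in
# [0, 9], i.e. exactly one index per sample, which is the unary-feature layout
# consumed by the embedding modules below.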
class MyModule(torch.nn.Module):
def __init__(self, num_tasks: int, hash_sizes: List[int]) -> None:
super().__init__()
self.num_tasks = num_tasks
self.hash_sizes = hash_sizes
self.emb_modules = torch.nn.ModuleList()
for _ in range(num_tasks):
for h in self.hash_sizes:
emb = torch.nn.EmbeddingBag(
num_embeddings=h,
embedding_dim=1,
mode="sum",
sparse=False,
include_last_offset=True,
)
emb.weight = torch.nn.Parameter(
torch.empty([h, 1]).uniform_(-sqrt(1 / h), sqrt(1 / h))
)
self.emb_modules.append(emb)
def forward(
self, offsets: List[torch.Tensor], indices: List[torch.Tensor]
) -> torch.Tensor:
tt_list = []
for n in range(self.num_tasks):
t_list = []
for i in range(len(self.hash_sizes)):
t = self.emb_modules[n * len(self.hash_sizes) + i](
offsets=offsets[i].long(), input=indices[i].long()
)
t_list.append(t)
tt = torch.cat(t_list, dim=1)
tt_list.append(tt)
return torch.cat(tt_list).view(self.num_tasks, -1, len(self.hash_sizes))
@click.command()
@click.option("--batch-size", default=512)
@click.option("--num-tables", default=2)
@click.option("--num-tasks", default=3)
@click.option("--repeats", default=100)
def main(batch_size, num_tables, num_tasks, repeats) -> None:
device = torch.device("cuda", 0)
torch.cuda.set_device(device)
hash_sizes = list(np.random.choice(range(50, 250), size=(num_tables)))
lengths = []
offsets = []
indices = []
for h in hash_sizes:
l, o, i = generate_unary_feature(batch_size, h)
lengths.append(torch.IntTensor(l).to(device))
offsets.append(torch.IntTensor(o).to(device))
indices.append(torch.IntTensor(i).to(device))
lengths_tensor = torch.cat(lengths)
indices_tensor = torch.cat(indices)
offsets_tensor = torch.zeros(
lengths_tensor.numel() + 1,
dtype=lengths_tensor.dtype,
device=lengths_tensor.device,
)
offsets_tensor[1:] = torch.ops.fbgemm.asynchronous_inclusive_cumsum(
lengths_tensor.view(-1)
)
# forward
ref_emb = MyModule(num_tasks, hash_sizes).to(device)
unary_emb = batched_unary_embeddings_ops.BatchedUnaryEmbeddingBag(
num_tasks, hash_sizes
).to(device)
for i, param in enumerate(unary_emb.split_embedding_weights()):
param.detach().copy_(ref_emb.emb_modules[i].weight)
output_ref = ref_emb(offsets, indices)
output = unary_emb(offsets_tensor, indices_tensor)
torch.testing.assert_close(output_ref, output)
# backward
d_output = torch.randn([num_tasks, batch_size, len(hash_sizes)]).to(device) * 0.1
output_ref.backward(d_output)
output.backward(d_output)
d_weight_ref = []
for emb in ref_emb.emb_modules:
d_weight_ref.append(emb.weight.grad)
d_weight_ref = torch.cat(d_weight_ref).view(num_tasks, -1)
d_weight = unary_emb.weight.grad
# pyre-fixme[16]: Optional type has no attribute `squeeze`.
torch.testing.assert_close(d_weight_ref, d_weight.squeeze())
# A100 40MB L2 cache
elapse, _ = benchmark_torch_function(ref_emb, (offsets, indices), iters=repeats)
print("PyTorch EmbeddingBag forward", elapse)
elapse, _ = benchmark_torch_function(
unary_emb,
(offsets_tensor, indices_tensor),
iters=repeats,
)
print("Batched Unary Emb forward", elapse)
output = ref_emb(offsets, indices)
output.backward(d_output, retain_graph=True)
elapse, _ = benchmark_torch_function(
functools.partial(output.backward, retain_graph=True),
(d_output,),
iters=repeats,
)
print("PyTorch EmbeddingBag backward", elapse)
output = unary_emb(offsets_tensor, indices_tensor)
elapse, _ = benchmark_torch_function(
functools.partial(output.backward, retain_graph=True),
(d_output,),
iters=repeats,
)
print("Batched Unary Emb backward", elapse)
if __name__ == "__main__":
main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
import statistics
import threading
import time
from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple
import torch
from fbgemm_gpu.split_embedding_utils import ( # noqa: F401
b_indices,
generate_requests, # noqa: F401
get_device, # noqa: F401
round_up, # noqa: F401
)
logging.basicConfig(level=logging.DEBUG)
def benchmark_torch_function( # noqa: C901
# pyre-fixme[2]: Parameter must be annotated.
f,
# pyre-fixme[2]: Parameter must be annotated.
args,
flush_gpu_cache_size_mb: int = 40,
iters: int = 10,
num_warmups: int = 2,
device: str = "cuda",
name: str = "",
num_threads: int = 1,
copy_f_for_multi_thread_test: bool = False,
) -> Tuple[float, torch.Tensor]:
logging.info(f"Start to benchmark {name}...")
if device != "" and device != "cuda":
torch.cuda.set_device(device)
for _ in range(num_warmups):
output = f(*args)
assert num_threads > 0
if torch.cuda.is_available() and (num_threads == 1):
cache = torch.empty(
int(flush_gpu_cache_size_mb * 1024 * 1024 // 4),
dtype=torch.float,
device=device,
)
start_event = [torch.cuda.Event(enable_timing=True) for i in range(iters)]
end_event = [torch.cuda.Event(enable_timing=True) for i in range(iters)]
torch.cuda.synchronize(device)
for i in range(iters):
# flush the cache
if flush_gpu_cache_size_mb:
cache.zero_()
start_event[i].record()
with torch.cuda.nvtx.range(f"RunCudaModule_{name}"):
output = f(*args)
end_event[i].record()
torch.cuda.synchronize(device)
times = torch.tensor(
[s.elapsed_time(e) for s, e in zip(start_event, end_event)]
)
elapsed_time = torch.mean(times).item() * 1.0e-3
elif torch.cuda.is_available() and (num_threads > 1):
cache = torch.empty(
int(flush_gpu_cache_size_mb * 1024 * 1024 // 4),
dtype=torch.float,
device=device,
)
duration_ms_list: List[float] = []
f_list = [f]
# make deepcopy of f if necessary
for _ in range(num_threads - 1):
f_list.append(copy.deepcopy(f) if copy_f_for_multi_thread_test else f)
@torch.inference_mode()
# pyre-ignore[53]
def forward(idx: int) -> None:
stream = torch.cuda.Stream()
f_temp = f_list[idx]
start_event = [
torch.cuda.Event(enable_timing=True)
for i in range(iters // num_threads)
]
end_event = [
torch.cuda.Event(enable_timing=True)
for i in range(iters // num_threads)
]
torch.cuda.synchronize(device)
with torch.cuda.stream(stream):
for i in range(iters // num_threads):
# flush the cache
if flush_gpu_cache_size_mb:
cache.zero_()
start_event[i].record()
with torch.cuda.nvtx.range(f"RunCudaModule_{name}"):
_ = f_temp(*args)
end_event[i].record()
torch.cuda.synchronize(device)
times = torch.tensor(
[s.elapsed_time(e) for s, e in zip(start_event, end_event)]
)
duration_ms = torch.sum(times).item()
duration_ms_list.append(duration_ms)
threads = [
threading.Thread(target=forward, args=(idx,)) for idx in range(num_threads)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
elapsed_time = sum(duration_ms_list) * 1.0e-3 / num_threads / iters
torch.cuda.synchronize(device)
if copy_f_for_multi_thread_test:
# clean the copies of f and clean the HBM cache
for idx in reversed(range(num_threads - 1)):
del f_list[idx + 1]
torch.cuda.empty_cache()
else:
start_time = time.time()
for _ in range(iters):
with torch.cuda.nvtx.range(f"RunCPUModule_{name}"):
output = f(*args)
elapsed_time = (time.time() - start_time) / iters
# pyre-fixme[61]: `output` is undefined, or not always defined.
return float(elapsed_time), output
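# Usage sketch (hypothetical call, not part of this module's flow):
#   elapsed_sec, out = benchmark_torch_function(torch.mm, (a, b), iters=20)
# The first value is the average wall time per call in seconds (CUDA events on GPU,
# time.time() on CPU); the second is an output of f.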
def benchmark_requests(
requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]],
func: Callable[[torch.Tensor, torch.Tensor, Optional[torch.Tensor]], torch.Tensor],
flush_gpu_cache_size_mb: int = 0,
check_median: bool = False,
num_warmups: int = 0,
bwd_only: bool = False,
grad: Optional[torch.Tensor] = None,
# Used to label benchmark iterations differently in nsys profile result
# so that we can compare performance of two different models for example.
# If empty string is provided, it won't have any effect.
nvtx_range: str = "",
# Can be used to clear model's stats after warmup for example.
callback_after_warmup: Optional[Callable[[], None]] = None,
) -> float:
times = []
if num_warmups > 0:
indices, offsets, weights = requests[0]
for _ in range(num_warmups):
out = func(indices, offsets, weights)
if bwd_only:
out.backward(grad)
if callback_after_warmup is not None:
callback_after_warmup()
if torch.cuda.is_available():
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for it, (indices, offsets, weights) in enumerate(requests):
if bwd_only:
            # When benchmarking the backward pass only, run the forward pass outside the timed region
out = func(indices, offsets, weights)
start_time = time.time()
if torch.cuda.is_available():
if flush_gpu_cache_size_mb:
_ = torch.rand(
flush_gpu_cache_size_mb * 1024 * 1024 // 4,
dtype=torch.float,
device="cuda",
)
torch.cuda.synchronize()
start_event.record()
if nvtx_range:
torch.cuda.nvtx.range_push(f"{nvtx_range}-{it}")
if bwd_only:
out.backward(grad)
else:
func(indices, offsets, weights)
if nvtx_range:
torch.cuda.nvtx.range_pop()
if torch.cuda.is_available():
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
times.append(it_time)
else:
it_time = time.time() - start_time
times.append(it_time)
avg_time = sum(times) / len(requests)
median_time = statistics.median(times)
return median_time if check_median else avg_time
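# Return-value note with a small example: for per-request times of [1.0, 1.2, 5.0] ms
# the mean is ~2.4 ms while the median is 1.2 ms, so check_median=True keeps the
# reported time robust to occasional outliers (e.g. a stray synchronization).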
def benchmark_requests_refer(
requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]],
T: int,
B: int,
L: int,
E: int,
D: int,
pooling_mode: str,
weighted: bool,
flush_gpu_cache_size_mb: int = 0,
check_median: bool = False,
) -> float:
do_pooling = pooling_mode in ["sum", "mean"]
if do_pooling:
nn_embedding_list = [
torch.nn.EmbeddingBag(E, D, mode=pooling_mode, sparse=True).cuda()
] * T
else:
nn_embedding_list = [torch.nn.Embedding(E, D, sparse=True).cuda()] * T
times = []
if torch.cuda.is_available():
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for indices, _, weights in requests:
indices_list = indices.view(T, B, L).split(1)
if weighted:
assert weights is not None
weights_list = weights.view(T, B, L).split(1)
start_time = time.time()
if torch.cuda.is_available():
if flush_gpu_cache_size_mb:
_ = torch.rand(
flush_gpu_cache_size_mb * 1024 * 1024 // 4,
dtype=torch.float,
device="cuda",
)
torch.cuda.synchronize()
start_event.record()
nn_embedding_output = (
[
b_indices(nn_embedding, x, use_cpu=False, do_pooling=do_pooling)
for (nn_embedding, x) in zip(nn_embedding_list, indices_list)
]
if not weighted
else [
b_indices(
nn_embedding,
x,
per_sample_weights=xw.view(-1),
use_cpu=False,
do_pooling=do_pooling,
)
for (nn_embedding, x, xw) in zip(
nn_embedding_list,
indices_list,
# pyre-fixme[61]: `weights_list` is undefined, or not always
# defined.
weights_list,
)
]
)
if do_pooling:
final_output = torch.cat(
[f.view(B, -1) for f in nn_embedding_output], dim=1
)
else:
final_output = torch.cat(nn_embedding_output, dim=0).view( # noqa: F841
-1, D
)
if torch.cuda.is_available():
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
times.append(it_time)
else:
it_time = time.time() - start_time
times.append(it_time)
avg_time = sum(times) / len(requests)
median_time = statistics.median(times)
return median_time if check_median else avg_time
def benchmark_pipelined_requests(
requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]],
func1: Callable[[torch.Tensor, torch.Tensor, Optional[torch.Tensor]], None],
func2: Callable[[torch.Tensor, torch.Tensor, Optional[torch.Tensor]], None],
flush_gpu_cache_size_mb: int = 0,
check_median: bool = False,
) -> Tuple[float, float]:
torch.cuda.synchronize()
start_events = [
(torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))
for _ in requests
]
end_events = [
(torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))
for _ in requests
]
for (indices, offsets, indices_weights), start_event, end_event in zip(
requests, start_events, end_events
):
if flush_gpu_cache_size_mb:
_ = torch.rand(
flush_gpu_cache_size_mb * 1024 * 1024 // 4,
dtype=torch.float,
device="cuda",
)
torch.cuda.synchronize()
start_event[0].record()
func1(indices, offsets, indices_weights)
end_event[0].record()
start_event[1].record()
func2(indices, offsets, indices_weights)
end_event[1].record()
torch.cuda.synchronize()
avg_time = (
sum(
start_event[0].elapsed_time(end_event[0]) * 1.0e-3
for start_event, end_event in zip(start_events, end_events)
)
/ len(requests),
sum(
start_event[1].elapsed_time(end_event[1]) * 1.0e-3
for start_event, end_event in zip(start_events, end_events)
)
/ len(requests),
)
median_time = (
statistics.median(
start_event[0].elapsed_time(end_event[0]) * 1.0e-3
for start_event, end_event in zip(start_events, end_events)
),
statistics.median(
start_event[1].elapsed_time(end_event[1]) * 1.0e-3
for start_event, end_event in zip(start_events, end_events)
),
)
return median_time if check_median else avg_time
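# Return-value sketch (illustrative): the tuple is (time of func1, time of func2) in
# seconds per request, averaged, or the per-stage medians when check_median=True;
# callers typically pass a prefetch stage as func1 and a forward+backward stage as func2.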
@dataclass
class VBEBenchmarkOutput:
avg: float
fwd: float
bwd: float
compressed_avg: float
compressed_fwd: float
reindex: float
compressed_bwd: float
def benchmark_vbe(
baseline_requests: List[Tuple[torch.Tensor, torch.Tensor]],
compressed_requests: List[Tuple[torch.Tensor, torch.Tensor]],
baseline_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
compressed_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
reindex: torch.Tensor,
embedding_dim: int,
) -> VBEBenchmarkOutput:
times = []
fwd_times = []
bwd_times = []
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for indices, offsets in baseline_requests:
time = 0.0
start_event.record()
# forward
out = baseline_func(indices, offsets)
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
fwd_times.append(it_time)
time += it_time
grad = torch.rand_like(out)
start_event.record()
# backward
out.backward(grad)
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
bwd_times.append(it_time)
time += it_time
times.append(time)
avg = statistics.median(times)
fwd = statistics.median(fwd_times)
bwd = statistics.median(bwd_times)
times.clear()
fwd_times.clear()
bwd_times.clear()
reindex_times = []
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for indices, offsets in compressed_requests:
time = 0.0
start_event.record()
# forward
out = compressed_func(indices, offsets)
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
fwd_times.append(it_time)
time += it_time
start_event.record()
# reindex
out = out.reshape(-1, embedding_dim)
out = torch.ops.fbgemm.index_select_dim0(out, reindex)
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
reindex_times.append(it_time)
time += it_time
grad = torch.rand_like(out)
start_event.record()
# backward
out.backward(grad)
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
bwd_times.append(it_time)
time += it_time
times.append(time)
compressed_avg = statistics.median(times)
compressed_fwd = statistics.median(fwd_times)
reindex = statistics.median(reindex_times)
compressed_bwd = statistics.median(bwd_times)
return VBEBenchmarkOutput(
avg, fwd, bwd, compressed_avg, compressed_fwd, reindex, compressed_bwd
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import math
import os
import random
import statistics
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import click
import fbgemm_gpu
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import EmbOptimType as OptimType, SparseType
from fbgemm_gpu.split_embedding_utils import generate_requests, get_device, round_up
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
EmbeddingLocation,
PoolingMode,
RecordCacheMetrics,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
rounded_row_size_in_bytes,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
DenseTableBatchedEmbeddingBagsCodegen,
SplitTableBatchedEmbeddingBagsCodegen,
)
from torch import Tensor
haveAIBench = False
try:
from aibench_observer.utils.observer import emitMetric
haveAIBench = True
except Exception:
haveAIBench = False
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import (
benchmark_pipelined_requests,
benchmark_requests,
benchmark_requests_refer,
benchmark_torch_function,
benchmark_vbe,
)
else:
from fbgemm_gpu.bench.bench_utils import (
benchmark_pipelined_requests,
benchmark_requests,
benchmark_requests_refer,
benchmark_torch_function,
benchmark_vbe,
)
logging.basicConfig(level=logging.DEBUG)
@click.group()
def cli() -> None:
pass
@cli.command()
# recommended value: alpha=1.15 for training and alpha=1.09 for inference
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.FP32)
@click.option("--stoc", is_flag=True, default=False)
@click.option("--iters", default=100)
@click.option("--warmup-runs", default=0)
@click.option("--managed", default="device")
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.0)
@click.option("--row-wise/--no-row-wise", default=True)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--pooling", type=str, default="sum")
@click.option("--weighted-num-requires-grad", type=int, default=None)
@click.option("--bounds-check-mode", type=int, default=BoundsCheckMode.NONE.value)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--dense", is_flag=True, default=False)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP32)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
def device( # noqa C901
alpha: float,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
stoc: bool,
iters: int,
warmup_runs: int,
managed: str,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
row_wise: bool,
weighted: bool,
pooling: str,
weighted_num_requires_grad: Optional[int],
bounds_check_mode: int,
flush_gpu_cache_size_mb: int,
dense: bool,
output_dtype: SparseType,
requests_data_file: Optional[str],
tables: Optional[str],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
if weighted_num_requires_grad:
assert weighted_num_requires_grad <= T
weighted_requires_grad_tables = np.random.choice(
T, replace=False, size=(weighted_num_requires_grad,)
).tolist()
feature_requires_grad = (
torch.tensor(
[1 if t in weighted_requires_grad_tables else 0 for t in range(T)]
)
.to(get_device())
.int()
)
else:
feature_requires_grad = None
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
optimizer = OptimType.EXACT_ROWWISE_ADAGRAD if row_wise else OptimType.EXACT_ADAGRAD
if managed == "device":
managed_option = (
EmbeddingLocation.DEVICE
if torch.cuda.is_available()
else EmbeddingLocation.HOST
)
else:
managed_option = EmbeddingLocation.MANAGED
if pooling is None or pooling == "sum":
pooling = "sum"
pooling_mode = PoolingMode.SUM
do_pooling = True
elif pooling == "mean":
pooling_mode = PoolingMode.MEAN
do_pooling = True
else: # "none"
pooling_mode = PoolingMode.NONE
do_pooling = False
if dense:
emb = DenseTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
)
for d in Ds
],
pooling_mode=pooling_mode,
use_cpu=not torch.cuda.is_available(),
)
else:
emb = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
managed_option,
ComputeDevice.CUDA
if torch.cuda.is_available()
else ComputeDevice.CPU,
)
for d in Ds
],
optimizer=optimizer,
learning_rate=0.1,
eps=0.1,
weights_precision=weights_precision,
stochastic_rounding=stoc,
output_dtype=output_dtype,
pooling_mode=pooling_mode,
bounds_check_mode=BoundsCheckMode(bounds_check_mode),
)
emb = emb.to(get_device())
if weights_precision == SparseType.INT8:
emb.init_embedding_weights_uniform(-0.0003, 0.0003)
nparams = sum(w.numel() for w in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
if do_pooling:
read_write_bytes = (
output_size_multiplier * B * sum(Ds)
+ param_size_multiplier * B * sum(Ds) * L
)
else:
read_write_bytes = (
output_size_multiplier * B * sum(Ds) * L
+ param_size_multiplier * B * sum(Ds) * L
)
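    # Worked example with the CLI defaults (B=512, T=32, D=128 so sum(Ds)=4096, L=20,
    # FP32 weights and outputs): output traffic is 4 * 512 * 4096 ~= 8.4 MB and weight
    # traffic is 4 * 512 * 4096 * 20 ~= 167.8 MB, so read_write_bytes ~= 176 MB per
    # iteration; the logged BW divides this by the measured time per iteration.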
logging.info(
f"Embedding parameters: {nparams / 1.0e9: .2f} GParam, "
f"{nparams * param_size_multiplier / 1.0e9: .2f} GB"
)
logging.info(
f"Accessed weights per batch: {B * sum(Ds) * L * param_size_multiplier / 1.0e9: .2f} GB"
)
requests = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
)
# forward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.long(),
offsets.long(),
per_sample_weights,
feature_requires_grad=feature_requires_grad,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
num_warmups=warmup_runs,
)
logging.info(
f"Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
if output_dtype == SparseType.INT8:
# backward bench not representative
return
if do_pooling:
grad_output = torch.randn(B, sum(Ds)).to(get_device())
else:
grad_output = torch.randn(B * T * L, D).to(get_device())
# backward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb(
indices.long(),
offsets.long(),
per_sample_weights,
feature_requires_grad=feature_requires_grad,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
bwd_only=True,
grad=grad_output,
)
logging.info(
f"Backward, B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, "
f"BW: {2 * read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, "
f"T: {time_per_iter * 1.0e6:.0f}us"
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.FP32)
@click.option("--stoc", is_flag=True, default=False)
@click.option("--iters", default=100)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--uvm-tables", default=1)
@click.option("--uvm-bag-size", default=1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP32)
@click.option("--use-cache", is_flag=True, default=False)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--enforce-hbm", is_flag=True, default=False)
def uvm(
alpha: bool,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
stoc: bool,
iters: int,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
uvm_tables: int,
uvm_bag_size: int,
weighted: bool,
flush_gpu_cache_size_mb: int,
requests_data_file: Optional[str],
tables: Optional[str],
output_dtype: SparseType,
use_cache: bool,
cache_algorithm: str,
cache_load_factor: float,
enforce_hbm: bool,
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
T_uvm = uvm_tables
assert T_uvm <= T
assert (
T_uvm > 0
), f"T_uvm specified {T_uvm} <= 0. If not testing UVM, please use device benchmark."
T_gpu = T - T_uvm
L_uvm = uvm_bag_size
cache_alg = CacheAlgorithm.LRU if cache_algorithm == "lru" else CacheAlgorithm.LFU
managed_type = (
EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED
)
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb_uvm = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
managed_type,
ComputeDevice.CUDA,
)
for d in Ds[:T_uvm]
],
weights_precision=weights_precision,
stochastic_rounding=stoc,
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
enforce_hbm=enforce_hbm,
).cuda()
if weights_precision == SparseType.INT8:
emb_uvm.init_embedding_weights_uniform(-0.0003, 0.0003)
if T_gpu > 0:
emb_gpu = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
EmbeddingLocation.DEVICE,
ComputeDevice.CUDA,
)
for d in Ds[T_uvm:]
],
weights_precision=weights_precision,
stochastic_rounding=stoc,
).cuda()
if weights_precision == SparseType.INT8:
emb_gpu.init_embedding_weights_uniform(-0.0003, 0.0003)
emb_mixed = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
managed_option,
ComputeDevice.CUDA,
)
for (d, managed_option) in zip(
Ds,
[managed_type] * T_uvm + [EmbeddingLocation.DEVICE] * T_gpu,
)
],
weights_precision=weights_precision,
stochastic_rounding=stoc,
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
enforce_hbm=enforce_hbm,
).cuda()
if weights_precision == SparseType.INT8:
emb_mixed.init_embedding_weights_uniform(-0.0003, 0.0003)
requests_uvm = generate_requests(
iters,
B,
T_uvm,
L_uvm,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
)
requests_gpu = None
if T_gpu > 0:
requests_gpu = generate_requests(
iters,
B,
T_gpu,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=False,
requests_data_file=requests_data_file,
tables=tables,
)
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes_uvm = (
output_size_multiplier * B * sum(Ds[:T_uvm])
+ param_size_multiplier * B * sum(Ds[:T_uvm]) * L_uvm
)
time_per_iter = benchmark_requests(
requests_uvm,
lambda indices, offsets, per_sample_weights: emb_uvm.forward(
indices.long(),
offsets.long(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"UVM Forward, B: {B}, "
f"E: {E}, T: {T_uvm}, D: {D}, L: {L_uvm}, W: {weighted}, "
f"BW: {read_write_bytes_uvm / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
if T_gpu > 0:
requests = []
assert requests_gpu is not None
for rs_uvm, rs_gpu in zip(requests_uvm, requests_gpu):
indices = torch.cat([rs_uvm[0], rs_gpu[0]])
lengths = [L_uvm] * (T_uvm * B) + [L] * (T_gpu * B)
offsets = torch.tensor(([0] + np.cumsum(lengths).tolist())).int().cuda()
per_sample_weights = None
if weighted:
this_rs_uvm_weights = rs_uvm[2]
assert this_rs_uvm_weights is not None
this_rs_gpu_weights = rs_gpu[2]
assert this_rs_gpu_weights is not None
per_sample_weights = torch.cat(
[this_rs_uvm_weights, this_rs_gpu_weights]
)
requests.append((indices, offsets, per_sample_weights))
# forward
time_per_iter = benchmark_requests(
requests_gpu,
lambda indices, offsets, per_sample_weights: emb_gpu.forward(
indices.long(),
offsets.long(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
read_write_bytes_hbm = (
output_size_multiplier * B * sum(Ds[T_uvm:])
+ param_size_multiplier * B * sum(Ds[T_uvm:]) * L
)
logging.info(
f"GPU Forward, B: {B}, "
f"E: {E}, T: {T_gpu}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes_hbm / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb_mixed.forward(
indices.long(),
offsets.long(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
read_write_bytes_total = read_write_bytes_uvm + read_write_bytes_hbm
logging.info(
f"Mixed Forward, B: {B}, "
f"E: {E}, T_GPU: {T_gpu}, T_UVM: {T_uvm}, D: {D}, L_GPU: {L}, L_UVM: {L_uvm}, W: {weighted}, "
f"BW: {read_write_bytes_total / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.FP32)
@click.option("--stoc", is_flag=True, default=False)
@click.option("--long-index", is_flag=True, default=False)
@click.option("--iters", default=100)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
def cache( # noqa C901
alpha: float,
bag_size: int,
batch_size: int,
cache_algorithm: str,
cache_load_factor: float,
embedding_dim: int,
weights_precision: SparseType,
stoc: bool,
iters: int,
long_index: bool,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
weighted: bool,
flush_gpu_cache_size_mb: int,
requests_data_file: Optional[str],
tables: Optional[str],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
optimizer = OptimType.EXACT_ROWWISE_ADAGRAD
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
cache_alg = CacheAlgorithm.LRU if cache_algorithm == "lru" else CacheAlgorithm.LFU
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb_nc = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
EmbeddingLocation.MANAGED,
ComputeDevice.CUDA,
)
for d in Ds
],
optimizer=optimizer,
weights_precision=weights_precision,
stochastic_rounding=stoc,
).cuda()
if weights_precision == SparseType.INT8:
emb_nc.init_embedding_weights_uniform(-0.0003, 0.0003)
emb = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
EmbeddingLocation.MANAGED_CACHING,
ComputeDevice.CUDA,
)
for d in Ds
],
optimizer=optimizer,
weights_precision=weights_precision,
stochastic_rounding=stoc,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
).cuda()
if weights_precision == SparseType.INT8:
emb.init_embedding_weights_uniform(-0.0003, 0.0003)
nparams = sum(w.numel() for w in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
logging.info(
f"Embedding tables: {E * T} rows, {nparams / 1.0e9: .2f} GParam, "
f"{nparams * param_size_multiplier / 1.0e9: .2f} GB"
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)
requests = generate_requests(
2 * iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
)
warmup_requests, requests = requests[:iters], requests[iters:]
grad_output = torch.randn(B, sum(Ds)).cuda()
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb_nc(
indices.long(), offsets.long(), per_sample_weights
).backward(grad_output),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"ForwardBackward (UVM), B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, "
f"BW: {3 * param_size_multiplier * B * sum(Ds) * L / time_per_iter / 1.0e9: .2f} GB/s, "
f"T: {time_per_iter * 1.0e6:.0f}us"
)
# warm up
for indices, offsets, _ in warmup_requests:
emb.forward(indices.long(), offsets.long())
# get cache miss rate (forward and backward) and exchanged cache lines (prefetch)
cache_misses = []
exchanged_cache_lines = []
NOT_FOUND = -1
for indices, offsets, _ in requests:
old_lxu_cache_state = emb.lxu_cache_state.clone()
emb.prefetch(indices.long(), offsets.long())
exchanged_cache_lines.append(
(emb.lxu_cache_state != old_lxu_cache_state).sum().item()
)
cache_misses.append((emb.lxu_cache_locations_list[0] == NOT_FOUND).sum().item())
emb.forward(indices.long(), offsets.long())
logging.info(
f"Exchanged cache lines -- mean: {sum(exchanged_cache_lines)/len(requests): .2f}, "
f"max: {max(exchanged_cache_lines)}, min: {min(exchanged_cache_lines)}"
)
logging.info(
f"Cache miss -- mean: {sum(cache_misses)/len(requests)}, "
f"max: {max(cache_misses)}, min: {min(cache_misses)}"
)
# benchmark prefetch
emb.reset_cache_states()
for indices, offsets, _ in warmup_requests:
emb.forward(indices, offsets)
prefetch_time, forward_backward_time = benchmark_pipelined_requests(
requests,
lambda indices, offsets, indices_weights: emb.prefetch(indices, offsets),
lambda indices, offsets, indices_weights: emb.forward(
indices, offsets, indices_weights
).backward(grad_output),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
e2e_time = prefetch_time + forward_backward_time
logging.info(
f"ForwardBackward (LXU), reuse: {reuse}, alpha: {alpha}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, "
f"BW: {3 * param_size_multiplier * B * sum(Ds) * L / e2e_time / 1.0e9: .2f} GB/s, "
f"Tprefetch: {prefetch_time * 1.0e6:.0f}us, "
f"{2 * sum(exchanged_cache_lines) * param_size_multiplier * D / prefetch_time / len(requests) / 1.0e9: .2f} GB/s, "
f"Tfwdbwd: {forward_backward_time * 1.0e6:.0f}us, "
f"{3 * param_size_multiplier * B * sum(Ds) * L / forward_backward_time / 1.0e9: .2f} GB/s, "
f"Te2e: {e2e_time * 1.0e6:.0f}us, "
)
def benchmark_cpu_requests(
requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]],
func: Callable[[Tensor, Tensor, Optional[Tensor]], Tensor],
) -> float:
import time
start_time = time.perf_counter()
for indices, offsets, weights in requests:
func(indices, offsets, weights)
end_time = time.perf_counter()
return (end_time - start_time) / len(requests)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--stoc", is_flag=True, default=False)
@click.option("--iters", default=100)
@click.option("--managed", default="device")
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.0)
@click.option("--row-wise/--no-row-wise", default=True)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--index-remapping", is_flag=True, default=False)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
def nbit_cpu( # noqa C901
alpha: float,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
stoc: bool,
iters: int,
managed: str,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
row_wise: bool,
weighted: bool,
index_remapping: bool,
requests_data_file: Optional[str],
tables: Optional[str],
output_dtype: SparseType,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
if mixed:
Ds = [
# int4 table batched emb op can only handle mixed D where D is multiple of 8
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 8)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, d, weights_precision, EmbeddingLocation.HOST) for d in Ds],
device="cpu",
index_remapping=[torch.arange(E) for _ in Ds] if index_remapping else None,
output_dtype=output_dtype,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
).cpu()
emb.fill_random_weights()
nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes = (
output_size_multiplier * B * T * D + param_size_multiplier * B * T * L * D
)
logging.info(
f"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)
requests = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
use_cpu=True,
)
requests = [
        # `c` may be a multi-element weights tensor; compare against None explicitly.
        (a.cpu().int(), b.cpu().int(), c.cpu() if c is not None else None)
        for (a, b, c) in requests
]
time_per_iter = benchmark_cpu_requests(
# pyre-fixme[6]: For 1st param expected `List[Tuple[IntTensor, IntTensor,
# Optional[Tensor]]]` but got `List[Tuple[Tensor, Tensor, Optional[Tensor]]]`.
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices,
offsets,
per_sample_weights,
),
)
logging.info(
f"{weights_precision} Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--managed", default="device")
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.0)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--pooling", type=str, default="sum")
@click.option("--bounds-check-mode", type=int, default=BoundsCheckMode.NONE.value)
@click.option("--pruning-ratio", type=float, default=None)
@click.option("--pruning-hash-load-factor", default=0.75)
@click.option("--use-array-for-index-remapping", is_flag=True, default=True)
@click.option("--check-median", is_flag=True, default=True)
@click.option("--iters", default=100)
@click.option("--runs-of-iters", default=5)
@click.option("--warmup-runs", default=2)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--report-aibench", is_flag=True)
@click.option("--run-reference", is_flag=True, default=False)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
def nbit_device( # noqa C901
alpha: float,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
managed: str,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
weighted: bool,
pooling: str,
bounds_check_mode: int,
pruning_ratio: Optional[float],
pruning_hash_load_factor: float,
use_array_for_index_remapping: bool,
check_median: bool,
iters: int,
runs_of_iters: int,
warmup_runs: int,
output_dtype: SparseType,
report_aibench: bool,
run_reference: bool,
requests_data_file: Optional[str],
tables: Optional[str],
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
original_E = E
T = num_tables
index_remapping = None
if mixed:
# int4 table batched emb op can only handle mixed D where D is multiple of 8
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 8)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
mem_for_pruning = 0
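# Simulate pruning: keep ceil(E * (1 - pruning_ratio)) rows per table and build a
# per-table remapping from original row ids to surviving row ids (-1 for pruned rows).
# The bookkeeping below also estimates the memory needed to store that remapping,
# either as a dense int32 array or as a hash map sized by the load factor.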
if pruning_ratio:
assert 0 <= pruning_ratio < 1
E = math.ceil(E * (1.0 - pruning_ratio))
index_remapping = []
for _ in range(T):
mapping = torch.tensor([-1] * original_E, dtype=torch.int32)
selected_indices = random.sample(range(original_E), E)
for i, idx in enumerate(selected_indices):
mapping[idx] = i
index_remapping.append(mapping)
if use_array_for_index_remapping:
mem_for_pruning += mapping.numel() * 4
else:
mem_for_pruning += E / pruning_hash_load_factor * 2 * 4
if managed == "device":
managed_option = EmbeddingLocation.DEVICE
else:
managed_option = EmbeddingLocation.MANAGED
if pooling is None or pooling == "sum":
pooling = "sum"
pooling_mode = PoolingMode.SUM
do_pooling = True
elif pooling == "mean":
pooling_mode = PoolingMode.MEAN
do_pooling = True
else: # "none"
pooling_mode = PoolingMode.NONE
do_pooling = False
emb = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, d, weights_precision, managed_option) for d in Ds],
bounds_check_mode=BoundsCheckMode(bounds_check_mode),
index_remapping=index_remapping,
pruning_hash_load_factor=pruning_hash_load_factor,
use_array_for_index_remapping=use_array_for_index_remapping,
output_dtype=output_dtype,
pooling_mode=pooling_mode,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
).cuda()
emb.fill_random_weights()
nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
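# Pooled output writes one D-element row per bag, while the no-pooling (sequence) case
# writes one row per looked-up index, hence the extra factor of L on the output term.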
if do_pooling:
read_write_bytes = (
output_size_multiplier * B * T * D + param_size_multiplier * B * T * L * D
)
else:
read_write_bytes = (
output_size_multiplier * B * T * L * D
+ param_size_multiplier * B * T * L * D
)
logging.info(
f"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)
times = []
for i in range(runs_of_iters):
requests = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
)
requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
# forward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
check_median=check_median,
)
# free up GPU memory
del requests
logging.info(
f"Iteration {i}: "
f"{weights_precision} Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us, "
f"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB"
)
if i >= warmup_runs:
times.append(time_per_iter)
time_per_iter = statistics.mean(times)
bandwidth = read_write_bytes / time_per_iter / 1.0e9
logging.info(
f"Average of all iterations: "
f"{weights_precision} Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {bandwidth: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us, "
f"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB"
)
if report_aibench and haveAIBench:
print(
emitMetric(
type="NET",
metric=f"bandwidth_{weights_precision}",
unit="scalar",
value=str(bandwidth),
)
)
print(
emitMetric(
type="NET",
metric=f"time_per_iter_{weights_precision}",
unit="scalar",
value=str(time_per_iter * 1.0e6),
)
)
if run_reference:
times = []
for i in range(runs_of_iters):
requests = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
)
requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
# forward
time_per_iter_refer = benchmark_requests_refer(
requests,
T,
B,
L,
E,
D,
pooling,
weighted,
check_median=check_median,
)
# free up GPU memory
del requests
logging.info(
f"Reference (nn.Embedding(Bag)) Iteration {i}: "
f"Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter_refer / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter_refer * 1.0e6:.0f}us "
)
if i >= warmup_runs:
times.append(time_per_iter_refer)
time_per_iter_refer = statistics.mean(times)
bandwidth = read_write_bytes / time_per_iter_refer / 1.0e9
logging.info(
f"Average of all iterations: "
f"Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"Effective BW: {bandwidth: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter_refer * 1.0e6:.0f}us "
)
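# Same idea as nbit_device, but each table gets its own spec: bag size, embedding
# dimension, and row count come from comma-separated lists so heterogeneous table mixes
# can be benchmarked, optionally on CPU via --use-cpu.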
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size-list", type=str, default="20")
@click.option("--batch-size", default=512)
@click.option("--embedding-dim-list", type=str, default="128")
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--managed", default="device")
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings-list", type=str, default="100000")
@click.option("--reuse", default=0.0)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--pooling", type=str, default="sum")
@click.option("--bounds-check-mode", type=int, default=BoundsCheckMode.NONE.value)
@click.option("--pruning-ratio", type=float, default=None)
@click.option("--pruning-hash-load-factor", default=0.75)
@click.option("--use-array-for-index-remapping", is_flag=True, default=True)
@click.option("--check-median", is_flag=True, default=True)
@click.option("--iters", default=100)
@click.option("--runs-of-iters", default=5)
@click.option("--warmup-runs", default=2)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--report-aibench", is_flag=True)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
@click.option("--use-cpu", is_flag=True, default=False)
def nbit_device_with_spec( # noqa C901
alpha: float,
bag_size_list: str,
batch_size: int,
embedding_dim_list: str,
weights_precision: SparseType,
managed: str,
mixed: bool,
num_embeddings_list: str,
reuse: float,
weighted: bool,
pooling: str,
bounds_check_mode: int,
pruning_ratio: Optional[float],
pruning_hash_load_factor: float,
use_array_for_index_remapping: bool,
check_median: bool,
iters: int,
runs_of_iters: int,
warmup_runs: int,
output_dtype: SparseType,
report_aibench: bool,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
use_cpu: bool,
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
Ds = [int(D) for D in embedding_dim_list.split(",")]
Ls = [int(L) for L in bag_size_list.split(",")]
Es = [int(E) for E in num_embeddings_list.split(",")]
E = np.mean(Es)
D = np.mean(Ds)
L = np.mean(Ls)
T = len(Ds)
logging.info("TBE Spec:")
logging.info("#, E, D, L")
for i, (e, d, bag_size) in enumerate(zip(Es, Ds, Ls)):
logging.info(f"{i}, {e}, {d}, {bag_size}")
logging.info(f"Mean(Es) = {E}, Mean(Ds) = {D}, Mean(Ls) = {L}")
index_remapping = None
mem_for_pruning = 0
if pruning_ratio:
original_Es = Es
assert 0 <= pruning_ratio < 1
index_remapping = []
new_Es = []
for original_E in original_Es:
E = math.ceil(original_E * (1.0 - pruning_ratio))
mapping = torch.tensor([-1] * original_E, dtype=torch.int32)
selected_indices = random.sample(range(original_E), E)
for i, idx in enumerate(selected_indices):
mapping[idx] = i
index_remapping.append(mapping)
if use_array_for_index_remapping:
mem_for_pruning += mapping.numel() * 4
else:
mem_for_pruning += E / pruning_hash_load_factor * 2 * 4
new_Es.append(E)
Es = new_Es
E = np.mean(Es)
logging.info(f"After prunnig (pruning_ratio={pruning_ratio}")
logging.info("#, E, D, L")
for i, (e, d, bag_size) in enumerate(zip(Es, Ds, Ls)):
logging.info(f"{i}, {e}, {d}, {bag_size}")
logging.info(f"Mean(Es) = {E}, Mean(Ds) = {D}, Mean(Ls) = {L}")
if managed == "device":
managed_option = EmbeddingLocation.DEVICE
else:
managed_option = EmbeddingLocation.MANAGED
# Override managed_option to HOST if using CPU
if use_cpu:
managed_option = EmbeddingLocation.HOST
if pooling is None or pooling == "sum":
pooling = "sum"
pooling_mode = PoolingMode.SUM
do_pooling = True
elif pooling == "mean":
pooling_mode = PoolingMode.MEAN
do_pooling = True
else: # "none"
pooling_mode = PoolingMode.NONE
do_pooling = False
emb = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", e, d, weights_precision, managed_option) for d, e in zip(Ds, Es)],
device="cpu" if use_cpu else None,
bounds_check_mode=BoundsCheckMode(bounds_check_mode),
index_remapping=index_remapping,
pruning_hash_load_factor=pruning_hash_load_factor,
use_array_for_index_remapping=use_array_for_index_remapping,
output_dtype=output_dtype,
pooling_mode=pooling_mode,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
)
if use_cpu:
emb = emb.cpu()
else:
emb = emb.cuda()
emb.fill_random_weights()
nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
if do_pooling:
read_write_bytes = sum(
[
output_size_multiplier * B * d
+ param_size_multiplier * B * bag_size * d
for bag_size, d in zip(Ls, Ds)
]
)
else:
read_write_bytes = sum(
[
output_size_multiplier * B * bag_size * d
+ param_size_multiplier * B * bag_size * d
for bag_size, d in zip(Ls, Ds)
]
)
logging.info(
f"{weights_precision} Embedding tables: {sum(Es)} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * sum(Ls)} rows, "
f"{B * sum([bag_size * d for bag_size, d in zip(Ls, Ds)]) * param_size_multiplier / 1.0e9: .2f} GB"
)
times = []
for i in range(runs_of_iters):
# Generate a request for each table then combine
all_requests = {
"indices": [[] for _ in range(iters)],
"offsets": [[] for _ in range(iters)],
"weights": [[] for _ in range(iters)],
}
# row = iter, column = tensor
for t, (bag_size, e) in enumerate(zip(Ls, Es)):
requests = generate_requests(
iters,
B,
1,
bag_size,
e,
reuse=reuse,
# don't use zipf if e isn't large enough compared to bag_size.
alpha=alpha if (e / bag_size) > 2.0 else 1.0,
# need many more samples for zipf if bag_size is very small.
zipf_oversample_ratio=3 if bag_size > 5 else 10,
weighted=weighted,
use_cpu=use_cpu,
)
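# Requests are generated per table (T=1 above) and then merged per iteration:
# each table's offsets start at 0, so drop the leading 0 and shift by the last offset
# of the previous table before concatenating.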
for it, (indices, offsets, weights) in enumerate(requests):
all_requests["indices"][it].append(indices)
if t > 0:
offsets = offsets[1:] # remove the first element
offsets += all_requests["offsets"][it][t - 1][-1]
all_requests["offsets"][it].append(offsets)
all_requests["weights"][it].append(weights)
requests = []
for it in range(iters):
indices = torch.concat(all_requests["indices"][it])
offsets = torch.concat(all_requests["offsets"][it])
if weighted:
weights = torch.concat(all_requests["weights"][it])
else:
weights = None
requests.append((indices, offsets, weights))
if use_cpu:
requests = [
(a.cpu().int(), b.cpu().int(), c.cpu() if c else None)
for (a, b, c) in requests
]
else:
requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
del all_requests
assert len(requests) == iters
# forward
if use_cpu:
time_per_iter = benchmark_cpu_requests(
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
)
else:
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
check_median=check_median,
)
# free up memory
del requests
logging.info(
f"Iteration {i}: "
f"{weights_precision} Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us, "
f"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB"
)
if i >= warmup_runs:
times.append(time_per_iter)
time_per_iter = statistics.mean(times)
bandwidth = read_write_bytes / time_per_iter / 1.0e9
logging.info(
f"Average of all iterations: "
f"{weights_precision} Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {bandwidth: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us, "
f"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB"
)
if report_aibench and haveAIBench:
print(
emitMetric(
type="NET",
metric=f"bandwidth_{weights_precision}",
unit="scalar",
value=str(bandwidth),
)
)
print(
emitMetric(
type="NET",
metric=f"time_per_iter_{weights_precision}",
unit="scalar",
value=str(time_per_iter * 1.0e6),
)
)
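# Benchmarks IntN TBE forward with tables split between UVM-managed memory and device
# HBM: a UVM-only op, an HBM-only op, and a mixed op are measured, plus a
# prefetch/forward pipeline for the mixed case.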
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--iters", default=100)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--uvm-num-embeddings", default=int(1e5))
@click.option("--uvm-tables", default=1)
@click.option("--uvm-bag-size", default=1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--use-cache", is_flag=True, default=False)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--enforce-hbm", is_flag=True, default=False)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
def nbit_uvm(
alpha: float,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
iters: int,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
uvm_num_embeddings: int,
uvm_tables: int,
uvm_bag_size: int,
weighted: bool,
flush_gpu_cache_size_mb: int,
output_dtype: SparseType,
use_cache: bool,
cache_algorithm: str,
cache_load_factor: float,
enforce_hbm: bool,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
E_uvm = uvm_num_embeddings
T = num_tables
T_uvm = uvm_tables
assert T_uvm <= T
assert (
T_uvm > 0
), f"T_uvm specified {T_uvm} <= 0. If not testing UVM, please use device benchmark."
T_gpu = T - T_uvm
L_uvm = uvm_bag_size
cache_alg = CacheAlgorithm.LRU if cache_algorithm == "lru" else CacheAlgorithm.LFU
managed_type = (
EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED
)
logging.info(f"T: {T}, T_uvm: {T_uvm}, T_gpu: {T_gpu}")
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb_uvm = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E_uvm,
d,
weights_precision,
managed_type,
)
for d in Ds[:T_uvm]
],
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
enforce_hbm=enforce_hbm,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
).cuda()
emb_uvm.fill_random_weights()
if T_gpu > 0:
emb_gpu = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
EmbeddingLocation.DEVICE,
)
for d in Ds[T_uvm:]
],
output_dtype=output_dtype,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
).cuda()
emb_gpu.fill_random_weights()
emb_mixed = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
e,
d,
weights_precision,
managed_option,
)
for (e, d, managed_option) in zip(
[E_uvm] * T_uvm + [E] * T_gpu,
Ds,
[managed_type] * T_uvm + [EmbeddingLocation.DEVICE] * T_gpu,
)
],
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
enforce_hbm=enforce_hbm,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
).cuda()
emb_mixed.fill_random_weights()
requests_uvm = generate_requests(
iters,
B,
T_uvm,
L_uvm,
E_uvm,
reuse=reuse,
alpha=alpha,
weighted=weighted,
)
requests_uvm = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests_uvm]
requests_gpu = None
if T_gpu > 0:
requests_gpu = generate_requests(
iters,
B,
T_gpu,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=False,
)
requests_gpu = [
(a.int(), b.int(), c if c else None) for (a, b, c) in requests_gpu
]
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes_uvm = (
output_size_multiplier * B * sum(Ds[:T_uvm])
+ param_size_multiplier * B * sum(Ds[:T_uvm]) * L_uvm
)
if T_gpu > 0:
nparams_byte = sum(w.numel() for (w, _) in emb_mixed.split_embedding_weights())
logging.info(
f"{weights_precision} Embedding tables: {E * T_gpu + E_uvm * T_uvm} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * (T_gpu * L + T_uvm * L_uvm)} rows, "
f"{B * (L * sum(Ds[T_uvm:]) + L_uvm * sum(Ds[:T_uvm])) * param_size_multiplier / 1.0e9: .2f} GB"
)
torch.cuda.cudart().cudaProfilerStart()
torch.cuda.nvtx.range_push("uvm forward")
time_per_iter = benchmark_requests(
requests_uvm,
lambda indices, offsets, per_sample_weights: emb_uvm.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"UVM NBit Forward, {weights_precision}, B: {B}, "
f"E_uvm: {E_uvm}, T: {T_uvm}, D: {D}, L: {L_uvm}, W: {weighted}, "
f"BW: {read_write_bytes_uvm / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
torch.cuda.nvtx.range_pop()
torch.cuda.cudart().cudaProfilerStop()
if T_gpu > 0:
requests = []
assert requests_gpu is not None
for rs_uvm, rs_gpu in zip(requests_uvm, requests_gpu):
indices = torch.cat([rs_uvm[0], rs_gpu[0]])
lengths = [L_uvm] * (T_uvm * B) + [L] * (T_gpu * B)
offsets = torch.tensor(([0] + np.cumsum(lengths).tolist())).int().cuda()
per_sample_weights = None
if weighted:
this_rs_uvm_weights = rs_uvm[2]
assert this_rs_uvm_weights is not None
this_rs_gpu_weights = rs_gpu[2]
assert this_rs_gpu_weights is not None
per_sample_weights = torch.cat(
[this_rs_uvm_weights, this_rs_gpu_weights]
)
requests.append((indices, offsets, per_sample_weights))
# forward
time_per_iter = benchmark_requests(
requests_gpu,
lambda indices, offsets, per_sample_weights: emb_gpu.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
read_write_bytes_hbm = (
output_size_multiplier * B * sum(Ds[T_uvm:])
+ param_size_multiplier * B * sum(Ds[T_uvm:]) * L
)
logging.info(
f"GPU NBit Forward, {weights_precision}, B: {B}, "
f"E: {E}, T: {T_gpu}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes_hbm / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb_mixed.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
read_write_bytes_total = read_write_bytes_uvm + read_write_bytes_hbm
logging.info(
f"Mixed NBit Forward, {weights_precision}, B: {B}, "
f"E_GPU: {E}, E_UVM: {E_uvm}, T_GPU: {T_gpu}, T_UVM: {T_uvm}, D: {D}, L_GPU: {L}, L_UVM: {L_uvm}, W: {weighted}, "
f"BW: {read_write_bytes_total / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
# benchmark prefetch
emb_mixed.reset_cache_states()
for indices, offsets, _ in requests:
emb_mixed.forward(indices, offsets)
prefetch_time, forward_time = benchmark_pipelined_requests(
requests,
lambda indices, offsets, indices_weights: emb_mixed.prefetch(
indices,
offsets,
),
lambda indices, offsets, indices_weights: emb_mixed.forward(
indices,
offsets,
indices_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
e2e_time = prefetch_time + forward_time
logging.info(
f"Forward(LXU) {weights_precision}, reuse: {reuse}, alpha: {alpha}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, "
f"Te2e: {e2e_time * 1.0e6:.0f}us, "
f"e2e BW: {read_write_bytes_total / e2e_time / 1.0e9: .2f} GB/s, "
f"Tprefetch: {prefetch_time * 1.0e6:.0f}us, "
f"TfwdTime: {forward_time * 1.0e6:.0f}us, "
f"{read_write_bytes_total / forward_time / 1.0e9: .2f} GB/s"
)
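# Compares UVM cache configurations for IntN TBE forward: pure HBM, a 32-way
# set-associative cache, and a direct-mapped (1-way) cache, optionally recording UVM
# cache statistics and dumping stats/requests under $HOME/<test-name>.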
@cli.command()
@click.option("--test-name", type=str, default="")
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--iters", default=100)
@click.option("--warmup", default=10)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--use-cache", is_flag=True, default=False)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--enforce-hbm", is_flag=True, default=False)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
@click.option("--record-cache", is_flag=True, default=False)
@click.option("--uvm-host-mapped", is_flag=True, default=False)
@click.option(
"--dump-requests", type=int, default=0, help="number of reqs to dump (0=no dump)"
)
def nbit_uvm_compare_direct_mapped(
test_name: str,
alpha: float,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
iters: int,
warmup: int,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
weighted: bool,
flush_gpu_cache_size_mb: int,
output_dtype: SparseType,
use_cache: bool,
cache_algorithm: str,
cache_load_factor: float,
enforce_hbm: bool,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
record_cache: bool,
uvm_host_mapped: bool,
dump_requests: int,
) -> None:
logging.info(json.dumps({k: str(v) for k, v in locals().items()}, indent=2))
np.random.seed(42)
torch.manual_seed(42)
B: int = batch_size
D: int = embedding_dim
L: int = bag_size
E: int = num_embeddings
T: int = num_tables
cache_alg: CacheAlgorithm = (
CacheAlgorithm.LRU if cache_algorithm == "lru" else CacheAlgorithm.LFU
)
managed_type: EmbeddingLocation = (
EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED
)
if mixed:
Ds: List[int] = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds: List[int] = [D] * T
_requests_uvm = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
)
# pyre-fixme[9]: requests_uvm has type `List[Tuple[IntTensor, IntTensor,
# Optional[Tensor]]]`; used as `List[Tuple[Tensor, Tensor, Optional[Tensor]]]`.
requests_uvm: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]] = [
(a.int(), b.int(), c if c else None) for (a, b, c) in _requests_uvm
]
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes_uvm: float = (
output_size_multiplier * B * sum(Ds[:T])
+ param_size_multiplier * B * sum(Ds[:T]) * L
)
stats: Dict[str, Any] = {
"B": B,
"T": T,
"E": E,
"L": L,
"D": D,
"reuse": reuse,
}
def bench_uvm_cls(
name: str = "32way",
cache_assoc: int = 32,
record_cache: bool = False,
hbm: bool = False,
) -> None:
loc = managed_type if not hbm else EmbeddingLocation.DEVICE
emb = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
loc,
)
for d in Ds[:T]
],
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
cache_assoc=cache_assoc,
enforce_hbm=enforce_hbm,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
gather_uvm_cache_stats=record_cache,
uvm_host_mapped=uvm_host_mapped,
).cuda()
emb.fill_random_weights()
nvtx_range = (
f"UVM-RECORD-CACHE-{name.upper()}"
if record_cache
else f"UVM-{name.upper()}"
)
callback_after_warmup = emb.reset_uvm_cache_stats if record_cache else None
torch.cuda.cudart().cudaProfilerStart()
time_per_iter = benchmark_requests(
requests_uvm,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
num_warmups=warmup,
nvtx_range=nvtx_range,
callback_after_warmup=callback_after_warmup,
)
torch.cuda.cudart().cudaProfilerStop()
nonlocal stats
if name not in stats:
stats[name] = {}
if not record_cache:
# Only record timing when cache-stat collection is off (collecting the stats adds
# serious overhead); stats[name] was already initialized above.
stats[name]["bytes"] = read_write_bytes_uvm
stats[name]["time_per_iter"] = time_per_iter * 1e6
logging.info(
f"[{name.center(8)}] "
f"UVM NBit Forward, {weights_precision}, B: {B}, "
f"E_uvm: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes_uvm / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
if record_cache:
ucs = emb.uvm_cache_stats.detach().cpu().numpy().tolist()
cache_stats = {
"num_calls": ucs[0],
"num_requested_indices": ucs[1],
"num_unique_indices": ucs[2],
"num_unique_misses": ucs[3],
"num_conflict_unique_misses": ucs[4],
"num_conflict_misses": ucs[5],
}
stats[name]["cache_stats"] = cache_stats
logging.info(f"[{name:>8s}] cache stats {cache_stats}")
bench_uvm_cls(name="HBM", hbm=True)
bench_uvm_cls(name="32way", cache_assoc=32)
bench_uvm_cls(name="1way", cache_assoc=1)
if record_cache:
bench_uvm_cls(
name="32way",
cache_assoc=32,
record_cache=True,
)
bench_uvm_cls(
name="1way",
cache_assoc=1,
record_cache=True,
)
if test_name:
folder = Path(os.getenv("HOME", ".")) / test_name
if not folder.is_dir():
logging.info(f"MAKING FOLDER {folder}")
folder.mkdir(parents=True, mode=0o755)
with (folder / "uvm_stats.txt").open("w") as f:
logging.info(f"Dumping stats at {folder}")
print(stats, file=f)
if dump_requests:
with (folder / "requests.txt").open("w") as f:
for req in requests_uvm[:dump_requests]:
ind, off, _ = req
print(ind.cpu().numpy().tolist(), file=f)
print(off.cpu().numpy().tolist(), file=f)
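# Benchmarks the software-managed cache path (MANAGED_CACHING) for IntN TBE: measures a
# plain UVM forward, per-request cache-miss statistics, and a prefetch + forward
# pipeline.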
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--cache-assoc", default=32)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--iters", default=100)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--enforce-hbm", is_flag=True, default=False)
@click.option("--record-cache-miss-counter", is_flag=True, default=False)
@click.option("--record-tablewise-cache-miss", is_flag=True, default=False)
@click.option("--gather-uvm-cache-stats", is_flag=True, default=False)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
def nbit_cache( # noqa C901
alpha: float,
bag_size: int,
batch_size: int,
cache_algorithm: str,
cache_load_factor: float,
cache_assoc: int,
embedding_dim: int,
weights_precision: SparseType,
iters: int,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
weighted: bool,
flush_gpu_cache_size_mb: int,
output_dtype: SparseType,
enforce_hbm: bool,
record_cache_miss_counter: bool,
record_tablewise_cache_miss: bool,
gather_uvm_cache_stats: bool,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
cache_alg = CacheAlgorithm.LRU if cache_algorithm == "lru" else CacheAlgorithm.LFU
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb_nc = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
EmbeddingLocation.MANAGED,
)
for d in Ds
],
output_dtype=output_dtype,
enforce_hbm=enforce_hbm,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
cache_assoc=cache_assoc,
).cuda()
emb_nc.fill_random_weights()
emb = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
EmbeddingLocation.MANAGED_CACHING,
)
for d in Ds
],
record_cache_metrics=RecordCacheMetrics(
record_cache_miss_counter, record_tablewise_cache_miss
),
gather_uvm_cache_stats=gather_uvm_cache_stats,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
output_dtype=output_dtype,
enforce_hbm=enforce_hbm,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
cache_assoc=cache_assoc,
).cuda()
emb.fill_random_weights()
nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
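# Read/write estimate; note the parameter-read term appears twice, presumably to
# account for both the UVM-side read and the cache-side traffic during fills.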
read_write_bytes = (
param_size_multiplier * B * sum(Ds) * L
+ output_size_multiplier * B * sum(Ds)
+ param_size_multiplier * B * sum(Ds) * L
)
logging.info(
f"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)
requests = generate_requests(
2 * iters, B, T, L, E, reuse=reuse, alpha=alpha, weighted=weighted
)
requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
warmup_requests, requests = requests[:iters], requests[iters:]
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb_nc(
indices.int(), offsets.int(), per_sample_weights
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"Forward (UVM) {weights_precision}, B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
# warm up
for indices, offsets, _ in warmup_requests:
emb.forward(indices.int(), offsets.int())
# get cache miss rate (forward only) and exchanged cache lines (prefetch)
cache_misses = []
exchanged_cache_lines = []
unique_indices = []
input_indices = []
NOT_FOUND = -1
# reset the cache miss counters after warmup
if record_cache_miss_counter or record_tablewise_cache_miss:
emb.reset_cache_miss_counter()
if gather_uvm_cache_stats:
emb.reset_uvm_cache_stats()
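# For each request: prefetch, then count how many cache-state slots changed (a proxy
# for exchanged cache lines / unique misses) and how many lookups resolved to NOT_FOUND
# (total misses), while also tracking unique vs. total input indices.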
for indices, offsets, _ in requests:
old_lxu_cache_state = emb.lxu_cache_state.clone()
emb.prefetch(indices, offsets)
exchanged_cache_lines.append(
(emb.lxu_cache_state != old_lxu_cache_state).sum().item()
)
cache_misses.append(
(emb.lxu_cache_locations_list.top() == NOT_FOUND).sum().item()
)
emb.forward(indices, offsets)
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
emb.cache_hash_size_cumsum,
indices,
offsets,
)
unique_indices.append(len(torch.unique(linear_cache_indices, sorted=False)))
input_indices.append(len(indices))
logging.info(
f"Exchanged cache lines -- mean: {sum(exchanged_cache_lines)/len(requests): .2f}, "
f"max: {max(exchanged_cache_lines)}, min: {min(exchanged_cache_lines)}"
)
logging.info(
f"Cache miss -- mean: {sum(cache_misses)/len(requests)}, "
f"max: {max(cache_misses)}, min: {min(cache_misses)}"
)
logging.info(
f"input_indices -- mean: {sum(input_indices)/len(requests)}, "
f"max: {max(input_indices)}, min: {min(input_indices)}"
)
logging.info(
f"unique_indices -- mean: {sum(unique_indices)/len(requests)}, "
f"max: {max(unique_indices)}, min: {min(unique_indices)}"
)
unique_miss_rate = [a / b for (a, b) in zip(exchanged_cache_lines, unique_indices)]
logging.info(
f"unique_miss_rate -- mean: {sum(unique_miss_rate)/len(requests)}, "
f"max: {max(unique_miss_rate)}, min: {min(unique_miss_rate)}"
)
if record_cache_miss_counter or record_tablewise_cache_miss:
emb.print_cache_miss_counter()
if gather_uvm_cache_stats:
emb.print_uvm_cache_stats()
# benchmark prefetch
if record_cache_miss_counter or record_tablewise_cache_miss:
emb.reset_cache_states()
if gather_uvm_cache_stats:
emb.reset_uvm_cache_stats()
for indices, offsets, _ in warmup_requests:
emb.forward(indices, offsets)
torch.cuda.cudart().cudaProfilerStart()
torch.cuda.nvtx.range_push("pipeline")
prefetch_time, forward_time = benchmark_pipelined_requests(
requests,
lambda indices, offsets, indices_weights: emb.prefetch(
indices,
offsets,
),
lambda indices, offsets, indices_weights: emb.forward(
indices,
offsets,
indices_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
e2e_time = prefetch_time + forward_time
torch.cuda.nvtx.range_pop()
logging.info(
f"Forward(LXU) {weights_precision}, reuse: {reuse}, alpha: {alpha}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, "
f"Te2e: {e2e_time * 1.0e6:.0f}us, "
f"e2e BW: {read_write_bytes / e2e_time / 1.0e9: .2f} GB/s, "
f"Tprefetch: {prefetch_time * 1.0e6:.0f}us, "
f"{2 * sum(exchanged_cache_lines) * param_size_multiplier * D / prefetch_time / len(requests) / 1.0e9: .2f} GB/s, "
f"TfwdTime: {forward_time * 1.0e6:.0f}us, "
f"{read_write_bytes / forward_time / 1.0e9: .2f} GB/s"
)
torch.cuda.cudart().cudaProfilerStop()
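# Benchmarks pruned-index lookups backed by a linearized hash table
# (torch.ops.fbgemm.pruned_hashmap_lookup); on CPU it also compares against the
# PrunedMapCPU class.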
@cli.command()
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=2048)
@click.option("--iters", default=10)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=100)
@click.option("--pruning-hash-load-factor", default=0.75)
@click.option("--hit-rate", default=0.9)
@click.option("--use-cpu", is_flag=True, default=False)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
def hashtable( # noqa C901
bag_size: int,
batch_size: int,
iters: int,
num_embeddings: int,
num_tables: int,
pruning_hash_load_factor: float,
hit_rate: float,
use_cpu: bool,
requests_data_file: Optional[str],
tables: Optional[str],
) -> None:
B = batch_size
T = num_tables
L = bag_size
E = num_embeddings
np.random.seed(42)
torch.manual_seed(42)
if hit_rate == 1.0:
chosen_indices = torch.cat([torch.arange(E) for _ in range(T)], dim=0).int()
else:
chosen_indices = (
torch.randint(low=0, high=int(E * 1.0 / hit_rate), size=(E * T,))
.view(-1)
.int()
)
dense_indices = torch.cat([torch.arange(E) for _ in range(T)], dim=0).int()
offsets = torch.tensor([E * t for t in range(T + 1)]).int()
assert offsets[-1] == chosen_indices.numel()
assert offsets.numel() == T + 1
assert (offsets.numel() - 1) // T == 1
capacities = [round_up(int(E / pruning_hash_load_factor), 32) for _ in range(T)]
hash_table = torch.zeros(
(sum(capacities), 2),
dtype=torch.int32,
)
hash_table_offsets = torch.tensor([0] + np.cumsum(capacities).tolist()).long()
assert hash_table.numel() * 4 < 2**32
# initialize
hash_table[:, :] = -1
torch.ops.fbgemm.pruned_hashmap_insert(
chosen_indices, dense_indices, offsets, hash_table, hash_table_offsets
)
requests = generate_requests(
iters,
B,
T,
L,
E,
requests_data_file=requests_data_file,
tables=tables,
)
if not use_cpu:
hash_table = hash_table.cuda()
hash_table_offsets = hash_table_offsets.cuda()
requests = [(a.cuda().int(), b.cuda().int(), c) for (a, b, c) in requests]
else:
requests = [(a.int().cpu(), b.int().cpu(), c) for (a, b, c) in requests]
empirical_hit_rate = np.mean(
[
torch.ops.fbgemm.pruned_hashmap_lookup(
indices, offsets, hash_table, hash_table_offsets
)
.ne(-1)
.sum()
.item()
/ indices.numel()
for indices, offsets, _ in requests
]
)
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, _: torch.ops.fbgemm.pruned_hashmap_lookup(
indices, offsets, hash_table, hash_table_offsets
),
)
logging.info(
f"LinearTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, "
f"T: {time_per_iter * 1.0e6:.0f}us, pruning load factor: {E * T / hash_table.shape[0] * 100:.1f}%, hit rate: {empirical_hit_rate * 100:.2f}%, Table size: {hash_table.numel() * 4 / 1.0e9:.0f} GB"
)
if use_cpu:
ht = torch.classes.fbgemm.PrunedMapCPU()
ht.insert(chosen_indices, dense_indices, offsets, T)
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, _: ht.lookup(indices, offsets),
)
logging.info(
f"HashTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, "
f"T: {time_per_iter * 1.0e6:.0f}us, pruning load factor: {E * T / hash_table.shape[0] * 100:.1f}%, hit rate: {empirical_hit_rate * 100:.2f}%, Table size: {hash_table.numel() * 4 / 1.0e9:.0f} GB"
)
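# Benchmarks pruned-index lookups backed by a dense remapping array
# (torch.ops.fbgemm.pruned_array_lookup) for a given pruning ratio.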
@cli.command()
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=2048)
@click.option("--iters", default=100)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=100)
@click.option("--pruning-ratio", default=0.9)
@click.option("--device", default="cuda")
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
def pruned_array( # noqa C901
bag_size: int,
batch_size: int,
iters: int,
num_embeddings: int,
num_tables: int,
pruning_ratio: float,
device: str,
requests_data_file: Optional[str],
tables: Optional[str],
) -> None:
B = batch_size
T = num_tables
L = bag_size
E = num_embeddings
np.random.seed(42)
torch.manual_seed(42)
assert 0 < pruning_ratio <= 1
original_E = int(E / (1.0 - pruning_ratio))
index_remappings = torch.tensor(
[-1] * original_E * T, dtype=torch.int32, device=device
)
index_remappings_offsets = torch.empty(T + 1, dtype=torch.int64, device=device)
index_remappings_offsets[0] = 0
dense_indices = torch.tensor(range(E), dtype=torch.int32, device=device)
for t in range(T):
selected_indices = torch.add(
torch.randperm(original_E, device=device), t * original_E
)[:E]
index_remappings[selected_indices] = dense_indices
index_remappings_offsets[t + 1] = index_remappings_offsets[t] + original_E
requests = generate_requests(
iters,
B,
T,
L,
E,
requests_data_file=requests_data_file,
tables=tables,
use_cpu=True if device == "cpu" else False,
)
requests = [(a.int().to(device), b.int().to(device), c) for (a, b, c) in requests]
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, _: torch.ops.fbgemm.pruned_array_lookup(
indices,
offsets,
index_remappings,
index_remappings_offsets,
),
)
logging.info(
f"LinearTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, "
f"T: {time_per_iter * 1.0e6:.0f}us, Pruning Ratio: {pruning_ratio * 100:.2f}%, Table size: {original_E * T * 4 / 1.0e9:.0f} GB"
)
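# Benchmarks torch.ops.fbgemm.bounds_check_indices, which validates indices and offsets
# against per-table row counts under the selected BoundsCheckMode.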
@cli.command()
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--iters", default=100)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--bounds-check-mode", type=int, default=BoundsCheckMode.WARNING.value)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
def bounds_check_indices( # noqa C901
bag_size: int,
batch_size: int,
iters: int,
num_embeddings: int,
num_tables: int,
bounds_check_mode: int,
requests_data_file: Optional[str],
tables: Optional[str],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
L = bag_size
E = num_embeddings
T = num_tables
requests = generate_requests(
iters,
B,
T,
L,
E,
requests_data_file=requests_data_file,
tables=tables,
)
# requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
warning = torch.tensor([0]).long().to(get_device())
rows_per_table = torch.tensor([E for _ in range(T)]).long().to(get_device())
# forward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, _: torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
BoundsCheckMode(bounds_check_mode),
warning,
),
)
logging.info(
f"Bounds Check Indices: B: {B}, "
f"E: {E}, T: {T}, L: {L}, "
f"BW: {(8 * B * T * L + 8 * (B * T + 1)) / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
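# Benchmarks in-place row updates of a quantized TBE op: once through the module-level
# helper (including host-to-device metadata transfer) and once through the raw
# torch.ops.fbgemm.emb_inplace_update op.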
@cli.command()
@click.option("--num-tables", type=int, default=32)
@click.option("--embedding-dim", type=int, default=248)
@click.option("--num-embeddings", type=int, default=int(1e5))
@click.option("--update-row-num", type=int, default=1e4)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--iters", type=int, default=100)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
def emb_inplace_update( # noqa C901
num_tables: int,
embedding_dim: int,
num_embeddings: int,
update_row_num: int,
weights_precision: SparseType,
output_dtype: SparseType,
iters: int,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
) -> None:
if open_source:
logging.warning(
"emb_inplace_update op benchmark doesn't support open source now!"
)
return
np.random.seed(42)
torch.manual_seed(42)
T = num_tables
D = embedding_dim
E = num_embeddings
N = update_row_num
D_alignment = max(weights_precision.align_size() for t in range(T))
D_alignment = max(D_alignment, output_dtype.align_size())
D = round_up(D, D_alignment)
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
Es = [E] * T
row_alignment = 16 # use_cpu = False -> only test CUDA function now
weights_ty_list = [weights_precision] * T
managed = [EmbeddingLocation.DEVICE] * T
embedding_specs = [
(
"",
E,
D,
W_TY,
EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
]
op = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=embedding_specs,
output_dtype=output_dtype,
device=torch.cuda.current_device(),
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
)
# Initialize random weights for the int-nbit table-batched split embedding bag
op.fill_random_weights()
update_table_idx = [np.random.randint(low=0, high=T) for _ in range(N)]
# Generate non-dup indices
table_map = {}
update_row_idx = []
for t in update_table_idx:
while True:
row_idx = np.random.randint(low=0, high=Es[t])
if t not in table_map or row_idx not in table_map[t]:
break
if t in table_map:
table_map[t].append(row_idx)
else:
table_map[t] = []
table_map[t].append(row_idx)
update_row_idx.append(row_idx)
update_weight_size = sum(
[
rounded_row_size_in_bytes(
Ds[t],
weights_ty_list[t],
row_alignment,
)
for t in update_table_idx
]
)
update_weights = torch.randint(
low=0,
high=255,
size=(update_weight_size,),
dtype=torch.uint8,
device=torch.cuda.current_device(),
)
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes = output_size_multiplier * N * D + param_size_multiplier * N * D
# Update op weights with the customized ops
op.embedding_inplace_update_internal(
update_table_idx,
update_row_idx,
update_weights,
)
time_per_iter, _ = benchmark_torch_function(
op.embedding_inplace_update_internal,
(update_table_idx, update_row_idx, update_weights),
iters=iters,
)
logging.info(
f"Emb inplace update (including H2D for metadata): "
f"T: {T}, D: {D}, E: {E}, N: {N}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9:.2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
update_offsets = []
update_offset = 0
for table_idx in update_table_idx:
D_bytes = rounded_row_size_in_bytes(
Ds[table_idx],
weights_ty_list[table_idx],
row_alignment,
)
update_offsets.append(update_offset)
update_offset += D_bytes
update_offsets.append(update_offset)
update_table_idx = torch.tensor(
update_table_idx,
device=torch.cuda.current_device(),
dtype=torch.int32,
)
update_row_idx = torch.tensor(
update_row_idx,
device=torch.cuda.current_device(),
dtype=torch.int32,
)
update_offsets = torch.tensor(
update_offsets,
device=torch.cuda.current_device(),
dtype=torch.int64,
)
time_per_iter, _ = benchmark_torch_function(
torch.ops.fbgemm.emb_inplace_update,
(
op.weights_dev,
op.weights_uvm,
op.weights_placements,
op.weights_offsets,
op.weights_tys,
op.D_offsets,
update_weights,
update_table_idx,
update_row_idx,
update_offsets,
16, # row_alignment
),
iters=iters,
)
logging.info(
f"Emb inplace update (pure device update op): "
f"T: {T}, D: {D}, E: {E}, N: {N}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9:.2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
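# Full-precision (SplitTableBatchedEmbeddingBagsCodegen) forward/backward benchmark
# with per-table specs; bag sizes can be fixed or drawn per table from a normal
# distribution when --bag-size-sigma-list is given.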
@cli.command()
@click.option("--alpha", default=1.0)
@click.option(
"--bag-size-list",
type=str,
default="20",
)
@click.option(
"--bag-size-sigma-list",
type=str,
default="None",
help="A list of bag size standard deviations for generating bag sizes "
"(one std per table). If set, the benchmark will treat --bag-size-list as a "
"list of bag size means.",
)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim-list", type=str, default="128")
@click.option("--weights-precision", type=SparseType, default=SparseType.FP32)
@click.option("--stoc", is_flag=True, default=False)
@click.option("--iters", default=100)
@click.option("--warmup-runs", default=0)
@click.option("--managed", default="device")
@click.option("--num-embeddings-list", type=str, default="100000")
@click.option("--reuse", default=0.0)
@click.option("--row-wise/--no-row-wise", default=True)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--pooling", type=str, default="sum")
@click.option("--bounds-check-mode", type=int, default=BoundsCheckMode.NONE.value)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP32)
def device_with_spec( # noqa C901
alpha: float,
bag_size_list: str,
bag_size_sigma_list: str,
batch_size: int,
embedding_dim_list: str,
weights_precision: SparseType,
stoc: bool,
iters: int,
warmup_runs: int,
managed: str,
num_embeddings_list: str,
reuse: float,
row_wise: bool,
weighted: bool,
pooling: str,
bounds_check_mode: int,
flush_gpu_cache_size_mb: int,
output_dtype: SparseType,
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
Ds = [int(D) for D in embedding_dim_list.split(",")]
Es = [int(E) for E in num_embeddings_list.split(",")]
T = len(Ds)
use_variable_bag_sizes = bag_size_sigma_list != "None"
if use_variable_bag_sizes:
Ls = [int(mu) for mu in bag_size_list.split(",")]
sigma_Ls = [int(sigma) for sigma in bag_size_sigma_list.split(",")]
assert T == len(Ls) and T == len(sigma_Ls), (
f"bag-size-list (length: {len(Ls)}) and bag-size-sigma-list "
f"(length: {len(sigma_Ls)}) must have the same length as "
f"embedding-dim-list (length: {T})"
)
else:
Ls = [int(L) for L in bag_size_list.split(",")]
assert T == len(Ls), (
f"bag-size-list (length: {len(Ls)}) must have the same length as "
f"embedding-dim-list (length: {T})"
)
assert T == len(Es), (
f"num-embeddings-list (length: {len(Es)}) must have the same length as "
f"embedding-dim-list (length: {T})"
)
assert T >= 1, "There must be at least one table"
feature_requires_grad = None
optimizer = OptimType.EXACT_ROWWISE_ADAGRAD if row_wise else OptimType.EXACT_ADAGRAD
if managed == "device":
managed_option = (
EmbeddingLocation.DEVICE
if torch.cuda.is_available()
else EmbeddingLocation.HOST
)
else:
managed_option = EmbeddingLocation.MANAGED
if pooling is None or pooling == "sum":
pooling = "sum"
pooling_mode = PoolingMode.SUM
do_pooling = True
elif pooling == "mean":
pooling_mode = PoolingMode.MEAN
do_pooling = True
else: # "none"
pooling_mode = PoolingMode.NONE
do_pooling = False
if not do_pooling:
ref_D = Ds[0]
for D in Ds:
assert (
D == ref_D
), "All embedding dimensions must be the same for sequence TBE"
emb = SplitTableBatchedEmbeddingBagsCodegen(
[
(
e,
d,
managed_option,
ComputeDevice.CUDA if torch.cuda.is_available() else ComputeDevice.CPU,
)
for d, e in zip(Ds, Es)
],
optimizer=optimizer,
learning_rate=0.1,
eps=0.1,
weights_precision=weights_precision,
stochastic_rounding=stoc,
output_dtype=output_dtype,
pooling_mode=pooling_mode,
bounds_check_mode=BoundsCheckMode(bounds_check_mode),
)
emb = emb.to(get_device())
if weights_precision == SparseType.INT8:
emb.init_embedding_weights_uniform(-0.0003, 0.0003)
nparams = sum(w.numel() for w in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
# Generate a request for each table then combine
all_requests = {
"indices": [[] for _ in range(iters)],
"offsets": [[] for _ in range(iters)],
"weights": [[] for _ in range(iters)],
}
# row = iter, column = tensor
for t, e in enumerate(Es):
# (indices, offsets, weights)
requests = generate_requests(
iters,
B,
1,
Ls[t],
e,
reuse=reuse,
alpha=alpha,
weighted=weighted,
sigma_L=sigma_Ls[t] if use_variable_bag_sizes else None,
zipf_oversample_ratio=3 if Ls[t] > 5 else 5,
)
for i, (indices, offsets, weights) in enumerate(requests):
all_requests["indices"][i].append(indices)
if t > 0:
offsets = offsets[1:] # remove the first element
offsets += all_requests["offsets"][i][t - 1][-1]
all_requests["offsets"][i].append(offsets)
all_requests["weights"][i].append(weights)
prev_indices_len = -1
requests = []
for i in range(iters):
indices = torch.concat(all_requests["indices"][i])
if prev_indices_len == -1:
prev_indices_len = indices.numel()
assert (
prev_indices_len == indices.numel()
), "Number of indices for every iteration must be the same"
offsets = torch.concat(all_requests["offsets"][i])
if weighted:
weights = torch.concat(all_requests["weights"][i])
else:
weights = None
requests.append((indices, offsets, weights))
del all_requests
assert len(requests) == iters
sum_DLs = sum([d * l for d, l in zip(Ds, Ls)])
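# NOTE: both branches below currently compute the same estimate; for the no-pooling
# case the output term arguably should also scale with bag size, as in the nbit
# benchmarks above.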
if do_pooling:
read_write_bytes = (
output_size_multiplier * B * sum(Ds) + param_size_multiplier * B * sum_DLs
)
else:
read_write_bytes = (
output_size_multiplier * B * sum(Ds) + param_size_multiplier * B * sum_DLs
)
if use_variable_bag_sizes:
# pyre-ignore [61]
Ls_str = f"mu {Ls} sigma {sigma_Ls}"
else:
Ls_str = f"{Ls}"
logging.info(
f"Embedding parameters: {nparams / 1.0e9: .2f} GParam, "
f"{nparams * param_size_multiplier / 1.0e9: .2f} GB"
)
logging.info(
f"Accessed weights per batch: {B * sum_DLs * param_size_multiplier / 1.0e9: .2f} GB"
)
# forward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.long(),
offsets.long(),
per_sample_weights,
feature_requires_grad=feature_requires_grad,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
num_warmups=warmup_runs,
)
logging.info(
f"Forward, B: {B}, "
f"Es: {Es}, T: {T}, Ds: {Ds}, Ls: {Ls_str}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
if output_dtype == SparseType.INT8:
# backward bench not representative
return
if do_pooling:
grad_output = torch.randn(B, sum(Ds)).to(get_device())
else:
# Obtain B * L from indices len
# pyre-ignore[19]
grad_output = torch.randn(requests[0][0].numel(), D).to(get_device())
# backward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb(
indices.long(),
offsets.long(),
per_sample_weights,
feature_requires_grad=feature_requires_grad,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
bwd_only=True,
grad=grad_output,
)
logging.info(
f"Backward, B: {B}, Es: {Es}, T: {T}, Ds: {Ds}, Ls: {Ls_str}, "
f"BW: {2 * read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, "
f"T: {time_per_iter * 1.0e6:.0f}us"
)
def _to_offsets(lengths: torch.Tensor) -> torch.Tensor:
return torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
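# Compares a padded ("uncompressed") batch against a variable-batch-size (VBE)
# forward/backward pass, where some tables use a smaller compressed batch and the full
# batch is reconstructed via a reindex step.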
@cli.command()
@click.option("--batch-size", default=128000)
@click.option("--compressed-batch-size", default=12800)
@click.option("--embedding-dim", default=128)
@click.option("--bag-size", default=5)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=20)
@click.option("--compressed-tables", default=10)
@click.option("--iters", default=100)
def vbe(
batch_size: int,
compressed_batch_size: int,
embedding_dim: int,
bag_size: int,
num_embeddings: int,
num_tables: int,
compressed_tables: int,
iters: int,
) -> None:
torch.manual_seed(42)
B = batch_size
cB = compressed_batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
cT = compressed_tables
Ds = [D] * T
optimizer = OptimType.EXACT_ROWWISE_ADAGRAD
managed_option = (
EmbeddingLocation.DEVICE
if torch.cuda.is_available()
else EmbeddingLocation.HOST
)
pooling_mode = PoolingMode.SUM
emb = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
managed_option,
ComputeDevice.CUDA,
)
for d in Ds
],
optimizer=optimizer,
learning_rate=0.1,
eps=0.1,
weights_precision=SparseType.FP32,
stochastic_rounding=False,
output_dtype=SparseType.FP32,
pooling_mode=pooling_mode,
bounds_check_mode=BoundsCheckMode(BoundsCheckMode.NONE.value),
).to(get_device())
compressed_batch_sizes = ([cB] * cT) + ([B] * (T - cT))
compressed_lengths = [L] * sum(compressed_batch_sizes)
compressed_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
torch.tensor(compressed_lengths, device=get_device())
)
compressed_values = torch.randint(
low=0,
high=E,
size=(sum(compressed_lengths),),
device=get_device(),
dtype=torch.int32,
)
batch_sizes = [B] * T
lengths = [L] * sum(batch_sizes)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
torch.tensor(lengths, device=get_device())
)
reindex = []
for t in range(cT):
start = t * cB
end = cB * (t + 1)
reindex.extend(range(start, end))
for _ in range(B - cB):
i = random.randint(t * cB, cB * (t + 1) - 1) # randint is inclusive on both ends; stay within table t's compressed block
reindex.append(i)
reindex.extend(range(cB * cT, (cB * cT) + (B * cT)))
reindex = torch.tensor(reindex, device=get_device())
values = torch.index_select(compressed_values.reshape(-1, L), 0, reindex).flatten()
requests = [
(
values,
offsets,
)
for _ in range(iters)
]
compressed_requests = [
(
compressed_values,
compressed_offsets,
)
for _ in range(iters)
]
out = benchmark_vbe(
requests,
compressed_requests,
baseline_func=lambda indices, offsets: emb.forward(
indices.long(),
offsets.long(),
),
compressed_func=lambda indices, offsets: emb.forward(
indices.long(),
offsets.long(),
batch_size_per_feature_per_rank=[[bs] for bs in compressed_batch_sizes],
),
reindex=reindex,
embedding_dim=D,
)
logging.info(
f"Uncompressed, B: {B}, T: {T}, D: {D}, L: {L}, "
f"T: {out.avg * 1.0e6:.0f}us, fwd: {out.fwd * 1.0e6:.0f}us, bwd: {out.bwd * 1.0e6:.0f}us\n"
f"Compressed, B: {B}, cB: {cB}, T: {T - cT}, cT: {cT}, D: {D}, L: {L}, "
f"T: {out.compressed_avg * 1.0e6:.0f}us, fwd: {out.compressed_fwd * 1.0e6:.0f}us, reindex: {out.reindex * 1.0e6:.0f}us, bwd: {out.compressed_bwd * 1.0e6:.0f}us"
)
if __name__ == "__main__":
cli()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import functools
import logging
import random
from typing import List
import click
import fbgemm_gpu
import numpy as np
import torch
from torch.profiler import profile
logging.basicConfig(level=logging.DEBUG)
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import benchmark_torch_function
else:
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:index_select_ops")
@click.group()
def cli() -> None:
pass
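# Benchmarks torch.ops.fbgemm.expand_into_jagged_permute: expands a permutation over
# the (table, rank) segments into an element-level permutation of the jagged values.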
@cli.command()
@click.option("--world-size", default=128)
@click.option("--num-tables", default=10)
@click.option("--min-len", default=10000)
@click.option("--max-len", default=20000)
def device(
world_size: int,
num_tables: int,
min_len: int,
max_len: int,
) -> None:
lengths = torch.randint(min_len, max_len, size=(num_tables * world_size,))
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
permute = list(range(num_tables * world_size))
random.shuffle(permute)
permute_tensor = torch.tensor(permute)
permuted_length = torch.index_select(lengths, 0, permute_tensor)
permuted_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(permuted_length)
jagged_size = offsets[-1]
if torch.cuda.is_available():
permute_tensor = permute_tensor.cuda()
offsets = offsets.cuda()
permuted_offsets = permuted_offsets.cuda()
time, output = benchmark_torch_function(
torch.ops.fbgemm.expand_into_jagged_permute,
(permute_tensor, offsets, permuted_offsets, jagged_size),
)
num_bytes = (
permute_tensor.numel() * permute_tensor.element_size()
+ offsets.numel() * offsets.element_size()
+ permuted_offsets.numel() * permuted_offsets.element_size()
+ output.numel() * output.element_size()
)
logging.info(f"expand_into_jagged_permute {time} sec {num_bytes / time / 1e9} GB/s")
@cli.command()
@click.option("--row-size", default=25600)
@click.option("--batch-size", default=4096)
@click.option("--unique-batch-size", default=1024)
@click.option("--input-precision", type=str, default="fp32")
def batch_reuse_index_select_device(
row_size: int, batch_size: int, unique_batch_size: int, input_precision: str
) -> None:
# A function for generating indices in batch_reuse
def gen_inverse_index(curr_size: int, final_size: int) -> np.ndarray:
inverse_index = list(range(curr_size))
for _ in range(final_size - curr_size):
inverse_index.append(np.random.randint(0, curr_size))
np_arr = np.array(inverse_index)
np.random.shuffle(np_arr)
return np_arr
dtype = torch.float
if input_precision == "fp32":
dtype = torch.float
elif input_precision == "fp16":
dtype = torch.half
else:
raise RuntimeError(f"Does not support data type {input_precision}")
indices = torch.cuda.IntTensor(gen_inverse_index(unique_batch_size, batch_size))
input = torch.rand(unique_batch_size, row_size, dtype=dtype, device="cuda")
input.requires_grad = True
num_bytes = 2 * batch_size * row_size * input.element_size()
time, output = benchmark_torch_function(
torch.ops.fbgemm.index_select_dim0, (input, indices, 0, unique_batch_size)
)
logging.info(
f"index_select_dim0 forward: {dtype}, {num_bytes} bytes read/write, {time * 1e3} ms, {num_bytes / time / 1e9} GB/s"
)
grad = torch.rand_like(output, dtype=dtype, device="cuda")
num_bytes = (input.numel() + output.numel()) * input.element_size()
time, _ = benchmark_torch_function(
functools.partial(output.backward, retain_graph=True), (grad,)
)
logging.info(
f"index_select_dim0 backward: {dtype}, {num_bytes} bytes read/write, {time * 1e3} ms, {num_bytes / time / 1e9} GB/s"
)
@cli.command()
@click.option("--max-seq-length", default=500)
@click.option("--batch-size", default=4096)
@click.option("--num-cols", default=256)
@click.option("--num-jagged-tensor-rows", default=4096)
@click.option("--num-zero-padding", default=1024)
@click.option("--index-dtype", type=click.Choice(["int", "long"]), default="int")
@click.option(
"--jagged-tensor-dtype", type=click.Choice(["float", "half"]), default="float"
)
def jagged_index_select_2d_bench(
max_seq_length: int,
batch_size: int,
num_cols: int,
num_jagged_tensor_rows: int,
num_zero_padding: int,
index_dtype: str,
jagged_tensor_dtype: str,
) -> None:
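# Reference implementation: expand each selected row's [start, end) offset
# range into explicit element indices, then gather them with torch.index_select.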
def jagged_index_select_2d_ref(
values: torch.Tensor, lengths: torch.Tensor, inverse_lookup: torch.Tensor
) -> torch.Tensor:
offsets = torch.ops.fbgemm.asynchronous_exclusive_cumsum(lengths)
end_offsets = offsets + lengths
full_start_offset = torch.index_select(offsets, 0, inverse_lookup)
full_end_offset = torch.index_select(end_offsets, 0, inverse_lookup)
index_ranges = torch.stack(
(full_start_offset, full_end_offset), dim=0
).transpose(0, 1)
to_be_merged_tensors = []
for row in index_ranges:
to_be_merged_tensors.append(torch.arange(row[0], row[1], device="cuda"))
all_indices = torch.cat(to_be_merged_tensors, dim=0)
new_embeddings = torch.index_select(values, 0, all_indices)
return new_embeddings
index_t = {"int": torch.int, "long": torch.long}[index_dtype]
scalar_t = {"float": torch.float, "half": torch.half}[jagged_tensor_dtype]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(num_jagged_tensor_rows,),
dtype=index_t,
device="cuda",
)
indices, _ = torch.sort(
torch.randint(
low=0,
high=num_jagged_tensor_rows,
size=(batch_size,),
dtype=index_t,
device="cuda",
)
)
values = torch.rand(
int(lengths.sum().item()), num_cols, dtype=scalar_t, device="cuda"
)
values.requires_grad = True
indices[batch_size - num_zero_padding :] = 0
time, (output, _) = benchmark_torch_function(
torch.ops.fbgemm.jagged_index_select,
(values, lengths, indices),
num_warmups=10,
iters=100,
)
time_ref, output_ref = benchmark_torch_function(
jagged_index_select_2d_ref,
(values, lengths, indices),
num_warmups=10,
iters=100,
)
logging.info(
f"jagged_index_select_2d_bench "
f"(max_seq_length={max_seq_length}, "
f"batch_size={batch_size}, "
f"num_cols={num_cols}, "
f"num_jagged_tensor_rows={num_jagged_tensor_rows}, "
f"num_zero_padding={num_zero_padding}, "
f"index_dtype={index_dtype}, "
f"jagged_tensor_dtype={jagged_tensor_dtype})"
)
logging.info(f"forward: fbgemm {time * 1e3:.3f} ms, ref {time_ref * 1e3:.3f} ms")
grad = torch.rand_like(output)
time, _ = benchmark_torch_function(
functools.partial(output.backward, retain_graph=True),
(grad,),
num_warmups=10,
iters=100,
)
time_ref, _ = benchmark_torch_function(
functools.partial(output_ref.backward, retain_graph=True),
(grad,),
num_warmups=10,
iters=100,
)
logging.info(f"backward: fbgemm {time * 1e3:.3f} ms, ref {time_ref * 1e3:.3f} ms")
@cli.command()
@click.option("--row-size", default=512)
@click.option("--batch-size", default=4096)
@click.option("--unique-batch-size", default=1024)
@click.option("--input-precision", type=str, default="fp32")
@click.option("--sort-indices", type=bool, default=True)
@click.option("--num-groups", default=32)
def group_index_select_2d_bench(
row_size: int,
batch_size: int,
unique_batch_size: int,
input_precision: str,
sort_indices: bool,
num_groups: int,
) -> None:
def gen_inverse_index(curr_size: int, final_size: int) -> np.ndarray:
inverse_index = list(range(curr_size))
for _ in range(final_size - curr_size):
inverse_index.append(np.random.randint(0, curr_size))
np_arr = np.array(inverse_index)
np.random.shuffle(np_arr)
return np_arr
dtype = torch.float
if input_precision == "fp32":
dtype = torch.float
elif input_precision == "fp16":
dtype = torch.half
else:
raise RuntimeError(f"Does not support data type {input_precision}")
offset_indices_group = []
indices_group = []
for i in range(num_groups):
indices = torch.cuda.IntTensor(gen_inverse_index(unique_batch_size, batch_size))
if sort_indices:
indices, _ = indices.sort()
indices_group.append(indices)
indices = torch.add(indices, batch_size * i)
offset_indices_group.append(indices)
offset_indices = torch.concat(offset_indices_group)
input = torch.rand(num_groups * batch_size, row_size, dtype=dtype, device="cuda")
input.requires_grad = True
num_bytes = 2 * batch_size * row_size * input.element_size() * num_groups
bench_kwargs = {"num_warmups": 10, "iters": 100}
# Benchmark forward
time_ref, output_ref = benchmark_torch_function(
torch.index_select, (input, 0, offset_indices), **bench_kwargs
)
input_group = input.split(batch_size, 0)
time, output_group = benchmark_torch_function(
torch.ops.fbgemm.group_index_select_dim0,
(input_group, indices_group),
**bench_kwargs,
)
logging.info(
f"forward: PyTorch batch {time_ref:.5f} sec ({num_bytes / time_ref / 1e9:.5f} GB/s), "
f"fbgemm group {time:5f} sec ({num_bytes / time / 1e9:.5f} GB/s)"
)
# Benchmark backward
grad = torch.rand_like(output_ref)
time_ref, _ = benchmark_torch_function(
functools.partial(output_ref.backward, retain_graph=True),
(grad,),
**bench_kwargs,
)
cat_output = torch.cat(output_group)
time, _ = benchmark_torch_function(
functools.partial(cat_output.backward, retain_graph=True),
(grad,),
**bench_kwargs,
)
logging.info(
f"backward: PyTorch batch {time_ref:.5f} sec ({num_bytes / time_ref / 1e9:.5f} GB/s), "
f"fbgemm group {time:.5f} sec ({num_bytes / time / 1e9:.5f} GB/s)"
)
@cli.command()
@click.option("--num-vecs", default=2048)
@click.option("--num-entries-per-vec", default=1024)
@click.option("--dtype", type=str, default="long")
def asynchronous_complete_cumsum_2d_bench(
num_vecs: int,
num_entries_per_vec: int,
dtype: str,
) -> None:
# Reference code from TorchRec https://github.com/pytorch/torchrec/pull/332
@torch.jit.script
def asynchronous_complete_cumsum_2d_ref(lengths: torch.Tensor) -> torch.Tensor:
(f, b) = lengths.shape
offsets_0 = lengths.new_zeros((f, 1))
offsets_1 = torch.cumsum(lengths, dim=-1).to(lengths.dtype)
offsets = torch.cat([offsets_0, offsets_1], dim=-1)
return offsets
assert dtype == "int" or dtype == "long", "Only int and long are supported"
index_dtype = torch.int64 if dtype == "long" else torch.int32
x = torch.randint(low=0, high=100, size=(num_vecs, num_entries_per_vec)).type(
index_dtype
)
x = x.cuda()
time_ref, _ = benchmark_torch_function(
asynchronous_complete_cumsum_2d_ref, (x,), num_warmups=100, iters=1000
)
time, _ = benchmark_torch_function(
torch.ops.fbgemm.asynchronous_complete_cumsum, (x,), num_warmups=100, iters=1000
)
logging.info(
f"asynchronous_complete_cumsum_2d_bench: input shape {x.shape}, dtype {dtype}"
)
logging.info(f"ref time: {time_ref:.5f} sec")
logging.info(f"fbgemm_gpu time: {time:.5f} sec")
@cli.command()
@click.option("--batch-size", default=8192)
@click.option("--table-size", default=20)
@click.option("--length", default=50)
@click.option("--num-ads", default=100)
@click.option("--dtype", type=click.Choice(["float", "long"]), default="long")
@click.option("--itype", type=click.Choice(["int", "long"]), default="int")
@click.option("--broadcast-indices", type=bool, default=True)
@click.option("--device", type=str, default="cpu")
def reorder_batched_ad_indices_bench(
batch_size: int,
table_size: int,
length: int,
num_ads: int,
dtype: str,
itype: str,
broadcast_indices: bool,
device: str,
) -> None:
assert dtype == "float" or dtype == "long", "Only int and long are supported"
data_type = torch.int64 if dtype == "long" else torch.float
data_size = 8 if dtype == "long" else 4
assert itype == "int" or itype == "long", "Only int and long are supported"
index_type = torch.int64 if itype == "long" else torch.int32
if broadcast_indices:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(batch_size * table_size * length,),
)
.int()
.to(device)
.to(data_type)
)
cat_ad_lengths = (
torch.cat(
[
torch.tensor([length for _ in range(table_size)])
for _ in range(batch_size)
],
0,
)
.int()
.to(device)
)
else:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(batch_size * table_size * num_ads * length,),
)
.int()
.to(device)
.to(data_type)
)
cat_ad_lengths = (
torch.cat(
[
torch.tensor([length for _ in range(table_size * num_ads)])
for _ in range(batch_size)
],
0,
)
.int()
.to(device)
)
batch_offsets = (
torch.tensor([num_ads * b for b in range(batch_size + 1)]).int().to(device)
)
num_ads_in_batch = batch_size * num_ads
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
).to(device)
cat_ad_offsets = (
torch.ops.fbgemm.asynchronous_complete_cumsum(cat_ad_lengths)
.to(index_type)
.to(device)
)
reordered_cat_ad_offsets = (
torch.ops.fbgemm.asynchronous_complete_cumsum(reordered_cat_ad_lengths)
.to(index_type)
.to(device)
)
time, _ = benchmark_torch_function(
torch.ops.fbgemm.reorder_batched_ad_indices,
(
cat_ad_offsets,
cat_ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
batch_size * table_size * num_ads * length,
),
num_warmups=100,
iters=1000,
)
num_bytes = batch_size * table_size * (num_ads + 1) * length * data_size
logging.info(
f"fbgemm_gpu time: {time * 1000:.5f} ms ({num_bytes / time / 1e9:.5f} GB/s)"
)
@cli.command()
@click.option("--batch-size", default=8192)
@click.option("--table-size", default=20)
@click.option("--length", default=50)
@click.option("--num-ads", default=100)
@click.option("--broadcast-indices", type=bool, default=True)
@click.option("--device", type=str, default="cpu")
def reorder_batched_ad_lengths_bench(
batch_size: int,
table_size: int,
length: int,
num_ads: int,
broadcast_indices: bool,
device: str,
) -> None:
if broadcast_indices:
cat_ad_lengths = (
torch.cat(
[
torch.tensor([length for _ in range(table_size)])
for _ in range(batch_size)
],
0,
)
.int()
.to(device)
)
else:
cat_ad_lengths = (
torch.cat(
[
torch.tensor([length for _ in range(table_size * num_ads)])
for _ in range(batch_size)
],
0,
)
.int()
.to(device)
)
batch_offsets = (
torch.tensor([num_ads * b for b in range(batch_size + 1)]).int().to(device)
)
num_ads_in_batch = batch_size * num_ads
time, _ = benchmark_torch_function(
torch.ops.fbgemm.reorder_batched_ad_lengths,
(
cat_ad_lengths,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
),
num_warmups=100,
iters=1000,
)
num_bytes = batch_size * table_size * (num_ads + 1) * length * 4
logging.info(
f"fbgemm_gpu time: {time * 1000:.5f} ms ({num_bytes / time / 1e9:.5f} GB/s)"
)
@cli.command()
@click.option("--num-inputs", default=1024)
@click.option("--rows", default=100)
@click.option("--columns", default=128)
@click.option("--num-indices", default=2048)
@click.option("--timeline", is_flag=True, default=False)
def index_select_bench(
num_inputs: int, rows: int, columns: int, num_indices: int, timeline: bool
) -> None:
input_rows = [rows] * num_inputs
input_columns = [columns] * num_inputs
input_num_indices = [num_indices] * num_inputs
inputs = [
torch.rand(rows, cols, dtype=torch.float, device="cuda")
for rows, cols in zip(input_rows, input_columns)
]
for i in range(len(inputs)):
inputs[i].requires_grad = True
indices = [
torch.randint(low=0, high=rows, size=(num,), dtype=torch.long, device="cuda")
for num, rows in zip(input_num_indices, input_rows)
]
concat_inputs = torch.concat([input.flatten().clone().detach() for input in inputs])
concat_inputs.requires_grad = True
concat_indices = torch.concat(indices)
gis_inputs = [input.clone().detach() for input in inputs]
for i in range(len(gis_inputs)):
gis_inputs[i].requires_grad = True
# Add optimizer to perform zero grad in order to reset gradients
# before the accumulation phase
optim_index: torch.optim.Optimizer = torch.optim.SGD(inputs, lr=0.1)
optim_batch: torch.optim.Optimizer = torch.optim.SGD([concat_inputs], lr=0.1)
optim_group: torch.optim.Optimizer = torch.optim.SGD(gis_inputs, lr=0.1)
def index_select_fwd_ref(
inputs: List[torch.Tensor], indices: List[torch.Tensor]
) -> List[torch.Tensor]:
outputs = []
for input, index in zip(inputs, indices):
optim_index.zero_grad()
outputs.append(torch.index_select(input, 0, index))
return outputs
def index_select_bwd_ref(
outputs: List[torch.Tensor], grads: List[torch.Tensor]
) -> None:
for output, grad in zip(outputs, grads):
optim_index.zero_grad()
output.backward(grad, retain_graph=True)
def batch_index_select_fwd(
concat_inputs: List[torch.Tensor],
concat_indices: List[int],
input_num_indices: List[int],
input_rows: List[int],
input_columns: List[int],
) -> torch.autograd.Variable:
optim_batch.zero_grad()
return torch.ops.fbgemm.batch_index_select_dim0(
concat_inputs, concat_indices, input_num_indices, input_rows, input_columns
)
def group_index_select_fwd(
gis_inputs: List[torch.Tensor], indices: List[int]
) -> torch.autograd.Variable:
optim_group.zero_grad()
return torch.ops.fbgemm.group_index_select_dim0(gis_inputs, indices)
def batch_group_index_select_bwd(
output: torch.autograd.Variable,
grads: List[torch.Tensor],
optim: torch.optim.Optimizer,
) -> torch.autograd.Variable:
optim.zero_grad()
return output.backward(grads, retain_graph=True)
bench_kwargs = {"num_warmups": 10, "iters": 10 if timeline else 100}
profile_ctx = profile if timeline else contextlib.nullcontext
with profile_ctx() as prof:
time_pyt, out_pyt = benchmark_torch_function(
index_select_fwd_ref,
(inputs, indices),
**bench_kwargs,
)
time_bis, out_bis = benchmark_torch_function(
batch_index_select_fwd,
(
concat_inputs,
concat_indices,
input_num_indices,
input_rows,
input_columns,
),
**bench_kwargs,
)
time_gis, out_gis = benchmark_torch_function(
group_index_select_fwd,
(gis_inputs, indices),
**bench_kwargs,
)
if timeline:
prof.export_chrome_trace("index_select_fwd_trace.json")
grads = [torch.rand_like(out) for out in out_pyt]
concat_grads = torch.concat([grad.flatten() for grad in grads])
concat_out_gis = torch.concat([out.flatten() for out in out_gis])
with profile_ctx() as prof:
time_bwd_pyt, _ = benchmark_torch_function(
index_select_bwd_ref,
(out_pyt, grads),
**bench_kwargs,
)
time_bwd_bis, _ = benchmark_torch_function(
batch_group_index_select_bwd,
(
out_bis,
concat_grads,
optim_batch,
),
**bench_kwargs,
)
time_bwd_gis, _ = benchmark_torch_function(
batch_group_index_select_bwd,
(
concat_out_gis,
concat_grads,
optim_group,
),
**bench_kwargs,
)
if timeline:
prof.export_chrome_trace("index_select_bwd_trace.json")
logging.info(
f"torch.index_select forward {time_pyt * 1e6:.2f} us, backward {time_bwd_pyt * 1e6:.2f} us\n"
f"torch.ops.fbgemm.batch_index_select forward {time_bis * 1e6:.2f} us, backward {time_bwd_bis * 1e6:.2f} us\n"
f"torch.ops.fbgemm.group_index_select_dim0 forward {time_gis * 1e6:.2f} us, backward {time_bwd_gis * 1e6:.2f} us"
)
@cli.command()
@click.option("--batch-size", default=8192)
@click.option("--table-size", default=20)
@click.option("--length", default=50)
@click.option("--num-ads", default=100)
@click.option("--dtype", type=click.Choice(["float", "long"]), default="long")
@click.option("--itype", type=click.Choice(["int", "long"]), default="int")
@click.option("--broadcast-indices", type=bool, default=True)
def cat_reorder_batched_ad_indices_bench(
batch_size: int,
table_size: int,
length: int,
num_ads: int,
dtype: str,
itype: str,
broadcast_indices: bool,
) -> None:
assert dtype == "float" or dtype == "long", "Only int and long are supported"
data_type = torch.int64 if dtype == "long" else torch.float
data_size = 8 if dtype == "long" else 4
assert itype == "int" or itype == "long", "Only int and long are supported"
if broadcast_indices:
ad_indices = [
(
torch.randint(
low=0,
high=100,
size=(table_size * length,),
)
.int()
.to(data_type)
)
for _ in range(batch_size)
]
ad_lengths = [
torch.tensor([length for _ in range(table_size)]).int()
for _ in range(batch_size)
]
else:
ad_indices = [
(
torch.randint(
low=0,
high=100,
size=(table_size * num_ads * length,),
)
.int()
.to(data_type)
)
for _ in range(batch_size)
]
ad_lengths = [
torch.tensor([length for _ in range(table_size * num_ads)]).int()
for _ in range(batch_size)
]
batch_offsets = torch.tensor([num_ads * b for b in range(batch_size + 1)]).int()
num_ads_in_batch = batch_size * num_ads
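# pass_1: concatenate on the host, copy lengths, indices, and offsets to the
# device, and run all reorder work on the GPU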
# pyre-ignore
def pass_1(ad_indices, ad_lengths, batch_offsets, num_ads_in_batch):
cat_ad_lengths = torch.cat(ad_lengths, 0).to("cuda", non_blocking=True)
cat_ad_indices = torch.cat(ad_indices, 0).to("cuda", non_blocking=True)
batch_offsets = batch_offsets.to("cuda", non_blocking=True)
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(cat_ad_lengths)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
)
reordered_cat_ad_indices = torch.ops.fbgemm.reorder_batched_ad_indices(
cat_ad_offsets,
cat_ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
batch_size * table_size * num_ads * length,
)
return reordered_cat_ad_indices, reordered_cat_ad_lengths
# process lengths on the host and reorder indices on the device
# pyre-ignore
def pass_2(ad_indices, ad_lengths, batch_offsets, num_ads_in_batch):
cat_ad_lengths = torch.cat(ad_lengths, 0)
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(cat_ad_lengths)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
)
cat_ad_indices = torch.cat(ad_indices, 0)
reordered_cat_ad_indices = torch.ops.fbgemm.reorder_batched_ad_indices(
cat_ad_offsets.to("cuda", non_blocking=True),
cat_ad_indices.to("cuda", non_blocking=True),
reordered_cat_ad_offsets.to("cuda", non_blocking=True),
batch_offsets.to("cuda", non_blocking=True),
num_ads_in_batch,
broadcast_indices,
batch_size * table_size * num_ads * length,
)
return reordered_cat_ad_indices, reordered_cat_ad_lengths.to(
"cuda", non_blocking=True
)
# minimize GPU workload + unfused cat + reorder
# pyre-ignore
def pass_3(ad_indices, ad_lengths, batch_offsets, num_ads_in_batch):
cat_ad_lengths = torch.cat(ad_lengths, 0)
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(cat_ad_lengths)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
)
cat_ad_indices = torch.cat(ad_indices, 0)
reordered_cat_ad_indices = torch.ops.fbgemm.reorder_batched_ad_indices(
cat_ad_offsets,
cat_ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
batch_size * table_size * num_ads * length,
)
return reordered_cat_ad_indices.to(
"cuda", non_blocking=True
), reordered_cat_ad_lengths.to("cuda", non_blocking=True)
# minimize GPU workload + fuse cat + reorder
# pyre-ignore
def pass_4(ad_indices, ad_lengths, batch_offsets, num_ads_in_batch):
cat_ad_lengths = torch.cat(ad_lengths, 0)
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(cat_ad_lengths)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
)
reordered_cat_ad_indices = torch.ops.fbgemm.cat_reorder_batched_ad_indices(
cat_ad_offsets,
ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
batch_size * table_size * num_ads * length,
)
return reordered_cat_ad_indices.to(
"cuda", non_blocking=True
), reordered_cat_ad_lengths.to("cuda", non_blocking=True)
num_bytes = batch_size * table_size * (num_ads + 1) * length * data_size
# pyre-ignore
def ben(fn, name, ad_indices, ad_lengths, batch_offsets, num_ads_in_batch):
time, _ = benchmark_torch_function(
fn,
(ad_indices, ad_lengths, batch_offsets, num_ads_in_batch),
num_warmups=50,
iters=500,
)
logging.info(
f"{name} fbgemm_gpu time: {time * 1000:.5f} ms ({num_bytes / time / 1e9:.5f} GB/s)"
)
ben(pass_1, "pass_1", ad_indices, ad_lengths, batch_offsets, num_ads_in_batch)
ben(pass_2, "pass_2", ad_indices, ad_lengths, batch_offsets, num_ads_in_batch)
ben(pass_3, "pass_3", ad_indices, ad_lengths, batch_offsets, num_ads_in_batch)
ben(pass_4, "pass_4", ad_indices, ad_lengths, batch_offsets, num_ads_in_batch)
if __name__ == "__main__":
cli()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import random
import click
import fbgemm_gpu
import hypothesis.strategies as st
import torch
from hypothesis import given, settings
logging.basicConfig(level=logging.DEBUG)
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import benchmark_torch_function
else:
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
@click.group()
def cli() -> None:
pass
def bench_impl(
flush_gpu_cache_size_mb: int,
iters: int,
num_columns: int,
num_rows: int,
warmup_runs: int,
) -> None:
average_time = {
"int8_quant": 0.0,
"int4_quant": 0.0,
"int2_quant": 0.0,
"fp8_143_quant": 0.0,
"fp8_152_quant": 0.0,
"fp16_quant": 0.0,
"bf16_quant_fbgemm": 0.0,
"bf16_quant_pytorch": 0.0,
"int8_dequant": 0.0,
"int4_dequant": 0.0,
"int2_dequant": 0.0,
"fp8_143_dequant": 0.0,
"fp8_152_dequant": 0.0,
"fp16_dequant": 0.0,
"bf16_dequant_fbgemm": 0.0,
"bf16_dequant_pytorch": 0.0,
}
benchmark = functools.partial(
benchmark_torch_function,
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
iters=iters,
num_warmups=warmup_runs,
)
input_data = torch.rand(num_rows, num_columns).float()
if torch.cuda.is_available():
input_data = input_data.cuda()
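# Pre-quantize the input once per format so that the dequantization benchmarks
# below time only the conversion kernels rather than the setup.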
quant_data_8bit = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(input_data)
quant_data_4bit = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data, 4
)
quant_data_2bit = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data, 2
)
quant_data_fp8_143 = torch.ops.fbgemm.FloatToHFP8Quantized(
input_data.contiguous(), 4, 14, (2 - 2 ** (-3))
)
quant_data_fp8_152 = torch.ops.fbgemm.FloatToHFP8Quantized(
input_data, 5, 30, (2 - 2 ** (-2))
)
quant_data_fp16 = input_data.half()
quant_data_bf16_fbgemm = torch.ops.fbgemm.FloatToBfloat16Quantized(
input_data.contiguous()
)
quant_data_bf16_pytorch = input_data.bfloat16().view(torch.half)
average_time["int8_quant"], _ = benchmark(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized,
(input_data,),
)
average_time["int4_quant"], _ = benchmark(
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf,
(input_data, 4),
)
average_time["int2_quant"], _ = benchmark(
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf,
(input_data, 2),
)
average_time["fp8_143_quant"], _ = benchmark(
torch.ops.fbgemm.FloatToHFP8Quantized,
(input_data, 4, 14, (2 - 2 ** (-3))),
)
average_time["fp8_152_quant"], _ = benchmark(
torch.ops.fbgemm.FloatToHFP8Quantized,
(input_data, 5, 30, (2 - 2 ** (-2))),
)
average_time["fp16_quant"], _ = benchmark(
lambda tensor: tensor.half(),
(input_data,),
)
average_time["bf16_quant_fbgemm"], _ = benchmark(
torch.ops.fbgemm.FloatToBfloat16Quantized,
(input_data,),
)
average_time["bf16_quant_pytorch"], _ = benchmark(
lambda tensor: tensor.bfloat16().view(torch.half),
(input_data,),
)
average_time["int8_dequant"], _ = benchmark(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat,
(quant_data_8bit,),
)
average_time["int4_dequant"], _ = benchmark(
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat,
(quant_data_4bit, 4),
)
average_time["int2_dequant"], _ = benchmark(
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat,
(quant_data_2bit, 2),
)
average_time["fp8_143_dequant"], _ = benchmark(
torch.ops.fbgemm.HFP8QuantizedToFloat,
(quant_data_fp8_143, 4, 14),
)
average_time["fp8_152_dequant"], _ = benchmark(
torch.ops.fbgemm.HFP8QuantizedToFloat,
(quant_data_fp8_152, 5, 30),
)
average_time["fp16_dequant"], _ = benchmark(
lambda tensor: tensor.float(),
(quant_data_fp16,),
)
average_time["bf16_dequant_fbgemm"], _ = benchmark(
torch.ops.fbgemm.Bfloat16QuantizedToFloat,
(quant_data_bf16_fbgemm,),
)
average_time["bf16_dequant_pytorch"], _ = benchmark(
lambda tensor: tensor.view(torch.bfloat16).float(),
(quant_data_bf16_pytorch,),
)
logging.info(f"-------------- ncols={num_columns}, nrows={num_rows}-------------")
for k, t_time in average_time.items():
logging.info(f"{k} time per iter: {t_time * 1.0e6:.0f}us")
@settings(max_examples=10, deadline=None)
# pyre-ignore
@given(
num_columns=st.sampled_from([2**n for n in range(4, 10)]),
num_rows=st.sampled_from([2**n for n in range(4, 10)]),
)
def bench_spectrum(
flush_gpu_cache_size_mb: int,
iters: int,
num_columns: int,
num_rows: int,
warmup_runs: int,
) -> None:
bench_impl(
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
iters=iters,
num_columns=num_columns,
num_rows=num_rows,
warmup_runs=warmup_runs,
)
@cli.command()
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--iters", default=100)
@click.option("--num-columns", default=-1)
@click.option("--num-rows", default=-1)
@click.option("--warmup-runs", default=2)
def bench(
flush_gpu_cache_size_mb: int,
iters: int,
num_columns: int,
num_rows: int,
warmup_runs: int,
) -> None:
if num_columns == -1 or num_rows == -1:
bench_spectrum(
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
iters=iters,
warmup_runs=warmup_runs,
)
else:
bench_impl(
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
iters=iters,
num_columns=num_columns,
num_rows=num_rows,
warmup_runs=warmup_runs,
)
@cli.command()
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--iters", default=100)
@click.option("--batch_size", default=512)
@click.option("--num_tables", default=256)
@click.option("--min_dim", default=1)
@click.option("--max_dim", default=128)
@click.option("--warmup-runs", default=2)
def mixdim(
flush_gpu_cache_size_mb: int,
iters: int,
batch_size: int,
num_tables: int,
min_dim: int,
max_dim: int,
warmup_runs: int,
) -> None:
if not torch.cuda.is_available():
raise RuntimeError("CUDA is not available.")
random.seed(0)
table_dims = [
random.randint(min_dim, max_dim) * 8 for _ in range(num_tables)
] # assume table dimensions are multiples of 8
table_dims_with_qparams = [d + 8 for d in table_dims]
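# +8 columns per table account for the fp32 scale and bias that fused int8
# rowwise quantization appends to every row.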
D_offsets = (
torch.cumsum(torch.tensor([0] + table_dims_with_qparams), dim=0)
.to(torch.int)
.cuda()
)
input_refs = [torch.randn((batch_size, d)).cuda() for d in table_dims]
input_refs_int8 = [
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(t) for t in input_refs
]
input_data = torch.concat(input_refs_int8, dim=1).contiguous()
benchmark = functools.partial(
benchmark_torch_function,
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
iters=iters,
num_warmups=warmup_runs,
)
average_time_mixed_dim_fp32, _ = benchmark(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatMixedDim,
(
input_data,
D_offsets,
0,
),
) # output is FP32
average_time_mixed_dim_fp16, _ = benchmark(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatMixedDim,
(
input_data,
D_offsets,
1,
),
) # output is FP16
average_time_single_dim, _ = benchmark(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat,
(input_data,),
) # output is FP32
print(
f"Input tensor batch_size: {batch_size}, num_tables: {num_tables}, tensor_size: {input_data.numel() / (1 << 30)} GB, average table dimension: {sum(table_dims) * 1.0/num_tables}."
)
print(
f"Mixed dim dequantize average time per iter FP32: {average_time_mixed_dim_fp32} s, bandwidth : {input_data.numel() / (1 << 30) / average_time_mixed_dim_fp32} GB/s."
)
print(
f"Mixed dim dequantize average time per iter FP16: {average_time_mixed_dim_fp16} s, bandwidth : {input_data.numel() / (1 << 30) / average_time_mixed_dim_fp16} GB/s."
)
print(
f"Single dim dequantize average time per iter FP32: {average_time_single_dim} s, bandwidth: {input_data.numel() / (1 << 30) / average_time_single_dim} GB/s."
)
if __name__ == "__main__":
cli()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import time
from typing import Callable, Tuple
import click
import torch
from torch import Tensor
logging.basicConfig(level=logging.DEBUG)
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
def benchmark_hbc_function(
func: Callable[[Tensor], Tuple[Tensor, Tensor]],
input: Tensor,
) -> Tuple[float, Tensor]:
if input.is_cuda:
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
# Benchmark code
output, _ = func(input)
# Accumulate the time for iters iteration
end_event.record()
torch.cuda.synchronize()
elapsed_time = start_event.elapsed_time(end_event) * 1.0e-3
else:
start_time = time.time()
output, _ = func(input)
elapsed_time = time.time() - start_time
return float(elapsed_time), output
@click.command()
@click.option("--iters", default=100)
@click.option("--warmup-runs", default=2)
def main(
iters: int,
warmup_runs: int,
) -> None:
data_types = [torch.half, torch.float, torch.double]
total_time = {
"hbc": {
"cpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
"gpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
},
"hbc_by_feature": {
"cpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
"gpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
},
"generic_hbc_by_feature": {
"cpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
"gpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
},
}
num_bins: int = 5000
num_segments: int = 42
num_logits = 5000
input_data_cpu = torch.rand(num_logits, dtype=torch.float)
segment_lengths: Tensor = torch.randint(0, 2, (num_logits,))
num_values: int = int(torch.sum(segment_lengths).item())
segment_values: Tensor = torch.randint(
0,
num_segments,
(num_values,),
)
lower_bound: float = 0.0
upper_bound: float = 1.0
w: float = (upper_bound - lower_bound) / num_bins
bin_num_examples: Tensor = torch.empty([num_bins], dtype=torch.float64).fill_(0.0)
bin_num_positives: Tensor = torch.empty([num_bins], dtype=torch.float64).fill_(0.0)
bin_boundaries: Tensor = torch.arange(
lower_bound + w, upper_bound - w / 2, w, dtype=torch.float64
)
by_feature_bin_num_examples: Tensor = torch.empty(
[num_bins * (num_segments + 1)], dtype=torch.float64
).fill_(0.0)
by_feature_bin_num_positives: Tensor = torch.empty(
[num_bins * (num_segments + 1)], dtype=torch.float64
).fill_(0.0)
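# The closures below bind the calibration state (bin counts, boundaries, and
# bounds) so that benchmark_hbc_function only needs to pass in the input tensor.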
def fbgemm_hbc_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
return torch.ops.fbgemm.histogram_binning_calibration(
input,
bin_num_examples,
bin_num_positives,
0.4,
lower_bound,
upper_bound,
0,
0.9995,
)
def fbgemm_hbc_by_feature_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
return torch.ops.fbgemm.histogram_binning_calibration_by_feature(
input,
segment_values,
segment_lengths,
num_segments,
by_feature_bin_num_examples,
by_feature_bin_num_positives,
num_bins,
0.4,
lower_bound,
upper_bound,
0,
0.9995,
)
def fbgemm_generic_hbc_by_feature_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
return torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
input,
segment_values,
segment_lengths,
num_segments,
by_feature_bin_num_examples,
by_feature_bin_num_positives,
bin_boundaries,
0.4,
0,
0.9995,
)
for step in range(iters + warmup_runs):
for data_type in data_types:
curr_input = input_data_cpu.to(data_type)
hbc_time, _ = benchmark_hbc_function(
fbgemm_hbc_cpu,
curr_input,
)
hbc_by_feature_time, _ = benchmark_hbc_function(
fbgemm_hbc_by_feature_cpu, curr_input
)
generic_hbc_by_feature_time, _ = benchmark_hbc_function(
fbgemm_generic_hbc_by_feature_cpu, curr_input
)
if step >= warmup_runs:
total_time["hbc"]["cpu"][data_type] += hbc_time
total_time["hbc_by_feature"]["cpu"][data_type] += hbc_by_feature_time
total_time["generic_hbc_by_feature"]["cpu"][
data_type
] += generic_hbc_by_feature_time
if torch.cuda.is_available():
bin_num_examples_gpu: Tensor = bin_num_examples.cuda()
bin_num_positives_gpu: Tensor = bin_num_positives.cuda()
def fbgemm_hbc_gpu(input: Tensor) -> Tuple[Tensor, Tensor]:
return torch.ops.fbgemm.histogram_binning_calibration(
input,
bin_num_examples_gpu,
bin_num_positives_gpu,
0.4,
lower_bound,
upper_bound,
0,
0.9995,
)
segment_values_gpu: Tensor = segment_values.cuda()
segment_lengths_gpu: Tensor = segment_lengths.cuda()
by_feature_bin_num_examples_gpu: Tensor = by_feature_bin_num_examples.cuda()
by_feature_bin_num_positives_gpu: Tensor = (
by_feature_bin_num_positives.cuda()
)
def fbgemm_hbc_by_feature_gpu(input: Tensor) -> Tuple[Tensor, Tensor]:
return torch.ops.fbgemm.histogram_binning_calibration_by_feature(
input,
segment_values_gpu,
segment_lengths_gpu,
num_segments,
by_feature_bin_num_examples_gpu,
by_feature_bin_num_positives_gpu,
num_bins,
0.4,
lower_bound,
upper_bound,
0,
0.9995,
)
bin_boundaries_gpu: Tensor = bin_boundaries.cuda()
def fbgemm_generic_hbc_by_feature_gpu(
input: Tensor,
) -> Tuple[Tensor, Tensor]:
return (
torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
input,
segment_values_gpu,
segment_lengths_gpu,
num_segments,
by_feature_bin_num_examples_gpu,
by_feature_bin_num_positives_gpu,
bin_boundaries_gpu,
0.4,
0,
0.9995,
)
)
for data_type in data_types:
curr_input_gpu = input_data_cpu.cuda().to(data_type)
hbc_time, _ = benchmark_hbc_function(
fbgemm_hbc_gpu,
curr_input_gpu,
)
hbc_by_feature_time, _ = benchmark_hbc_function(
fbgemm_hbc_by_feature_gpu,
curr_input_gpu,
)
generic_hbc_by_feature_time, _ = benchmark_hbc_function(
fbgemm_generic_hbc_by_feature_gpu,
curr_input_gpu,
)
if step >= warmup_runs:
total_time["hbc"]["gpu"][data_type] += hbc_time
total_time["hbc_by_feature"]["gpu"][
data_type
] += hbc_by_feature_time
total_time["generic_hbc_by_feature"]["gpu"][
data_type
] += generic_hbc_by_feature_time
for op, curr_items in total_time.items():
for platform, data_items in curr_items.items():
for dtype, t_time in data_items.items():
logging.info(
f"{op}_{platform}_{dtype} time per iter: {t_time / iters * 1.0e6:.0f}us"
)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import re
import subprocess
import click
logging.basicConfig(level=logging.DEBUG)
@click.command()
@click.option(
"--benchmark-command",
default="python split_table_batched_embeddings_benchmark.py",
help="Benchmark command to run",
)
@click.option(
"--command-file",
default="batch_input.txt",
help="File containing input commands to evaluate",
)
def batch_benchmark(
benchmark_command: str,
command_file: str,
) -> None:
assert (
"split_table_batched_embeddings_benchmark" in benchmark_command
), "split_table_batched_embeddings benchmark required for execution"
benchmark_cmd = benchmark_command.strip().split()
cmds_run = 0
failed_runs = []
total_fwd_bytes_read_gb = 0
total_fwdbwd_bytes_read_gb = 0
total_fwd_time_us = 0
total_fwdbwd_time_us = 0
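# Each line of the command file holds one set of CLI options; run the benchmark
# once per line and accumulate forward and forward+backward bandwidth parsed
# from its log output.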
with open(command_file) as cmd_file:
for line in cmd_file:
options = line.replace('"', "").strip().split()
cmd = benchmark_cmd + options
logging.info(f"Running command {cmds_run}: {cmd}")
result = subprocess.run(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
logging.info(result.stdout.decode("utf-8"))
# Parse results
found_fwd_results = False
found_fwdbwd_results = False
for line in result.stdout.decode("utf-8").splitlines():
re_match = re.search(r"BW: ([\.\d]+) GB/s, T: ([\.\d]+)us", line)
if re_match:
bw_gb = float(re_match.groups()[0])
time_us = int(re_match.groups()[1])
total_bytes_read_gb = bw_gb * time_us / 1e6
if "Forward, " in line:
total_fwd_bytes_read_gb += total_bytes_read_gb
total_fwd_time_us += time_us
found_fwd_results = True
elif "ForwardBackward, " in line:
total_fwdbwd_bytes_read_gb += total_bytes_read_gb
total_fwdbwd_time_us += time_us
found_fwdbwd_results = True
else:
raise Exception(
f"Unexpected reported metric for line: '{line}'"
)
if not (found_fwd_results and found_fwdbwd_results):
failed_runs.append(cmds_run)
cmds_run += 1
logging.info(f"Number of commands run: {cmds_run}")
if failed_runs:
logging.info(f"Failed runs: {failed_runs}")
logging.info(
f"Average FWD BW: {total_fwd_bytes_read_gb / total_fwd_time_us * 1e6} GB/s"
)
logging.info(
f" FWDBWD BW: {total_fwdbwd_bytes_read_gb / total_fwdbwd_time_us * 1e6} GB/s"
)
if __name__ == "__main__":
batch_benchmark()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import enum
from typing import Any, Dict # noqa: F401
import torch
@enum.unique
class EmbOptimType(enum.Enum):
SGD = "sgd" # uses non-deterministic updates (atomicAdd(..)) with duplicate ids
EXACT_SGD = (
"exact_sgd" # uses deterministic updates (via sorting + segment reduction)
)
LAMB = "lamb"
ADAM = "adam"
# exact/dedup: gradients to the same row are applied with coalesce then apply
# together, instead of applied in sequence (approx).
EXACT_ADAGRAD = "exact_adagrad"
EXACT_ROWWISE_ADAGRAD = "exact_row_wise_adagrad"
LARS_SGD = "lars_sgd"
PARTIAL_ROWWISE_ADAM = "partial_row_wise_adam"
PARTIAL_ROWWISE_LAMB = "partial_row_wise_lamb"
ROWWISE_ADAGRAD = "row_wise_adagrad"
SHAMPOO = "shampoo" # not currently supported for sparse embedding tables
MADGRAD = "madgrad"
EXACT_ROWWISE_WEIGHTED_ADAGRAD = "exact_row_wise_weighted_adagrad"
NONE = "none"
def __str__(self) -> str:
return self.value
# Base class for quantization configuration (in case other numeric types have
# configs)
class QuantizationConfig:
def __init__(self) -> None:
self.config = {} # type: Dict[str, Any]
def get(self, name: str) -> int:
return -1
# FP8 quantization configuration
# Compute necessary parameters in the constructor
class FP8QuantizationConfig(QuantizationConfig):
def __init__(self, exponent_bits: int, exponent_bias: int) -> None:
super(FP8QuantizationConfig, self).__init__()
self.config = {
"exponent_bits": exponent_bits,
"exponent_bias": exponent_bias,
"max_position": (1 << ((1 << exponent_bits) - 2 - exponent_bias))
* (2 - 2 ** (exponent_bits - 7)),
} # type: Dict[str, Any]
def get(self, name: str) -> int:
if name not in self.config:
raise RuntimeError("{} must be set in config".format(name))
return self.config[name]
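# Worked example (derived from the formula above): FP8QuantizationConfig(4, 7)
# yields max_position = (1 << (16 - 2 - 7)) * (2 - 2 ** (4 - 7)) = 128 * 1.875 = 240.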
@enum.unique
class SparseType(enum.Enum):
FP32 = "fp32"
FP16 = "fp16"
FP8 = "fp8"
INT8 = "int8"
INT4 = "int4"
INT2 = "int2"
BF16 = "bf16"
def __str__(self) -> str:
return self.value
@staticmethod
def from_int(ty: int) -> "SparseType":
if ty == 0:
return SparseType("fp32")
elif ty == 1:
return SparseType("fp16")
elif ty == 2:
return SparseType("int8")
elif ty == 3:
return SparseType("int4")
elif ty == 4:
return SparseType("int2")
elif ty == 5:
return SparseType("bf16")
elif ty == 6:
return SparseType("fp8")
else:
raise ValueError(f"Unsupported sparse type: {ty}")
def as_int(self) -> int:
return {
SparseType.FP32.value: 0,
SparseType.FP16.value: 1,
SparseType.INT8.value: 2,
SparseType.INT4.value: 3,
SparseType.INT2.value: 4,
SparseType.BF16.value: 5,
SparseType.FP8.value: 6,
}[self.value]
@staticmethod
def from_dtype(dtype: torch.dtype) -> "SparseType":
if dtype == torch.float32:
return SparseType("fp32")
elif dtype == torch.float16:
return SparseType("fp16")
elif dtype == torch.int8 or dtype == torch.uint8:
return SparseType("int8")
elif dtype == torch.quint4x2:
return SparseType("int4")
elif dtype == torch.quint2x4:
return SparseType("int2")
elif dtype == torch.bfloat16:
return SparseType("bf16")
else:
raise ValueError(f"Unsupported sparse dtype: {dtype}")
def as_dtype(self) -> torch.dtype:
return {
SparseType.FP32.value: torch.float32,
SparseType.FP16.value: torch.float16,
SparseType.FP8.value: torch.uint8,
SparseType.INT8.value: torch.uint8,
SparseType.INT4.value: torch.quint4x2,
SparseType.INT2.value: torch.quint2x4,
SparseType.BF16.value: torch.bfloat16,
}[self.value]
def bit_rate(self) -> int:
return {
SparseType.FP32.value: 32,
SparseType.FP16.value: 16,
SparseType.FP8.value: 8,
SparseType.INT8.value: 8,
SparseType.INT4.value: 4,
SparseType.INT2.value: 2,
SparseType.BF16.value: 16,
}[self.value]
def align_size(self) -> int:
return {
SparseType.FP32.value: 1,
SparseType.FP16.value: 2,
SparseType.FP8.value: 4,
SparseType.INT8.value: 4,
SparseType.INT4.value: 8,
SparseType.INT2.value: 16,
SparseType.BF16.value: 2,
}[self.value]
def is_float(self) -> bool:
if (
self.value == SparseType.FP32.value
or self.value == SparseType.FP16.value
or self.value == SparseType.FP8.value
or self.value == SparseType.BF16.value
):
return True
else:
return False
def default_config(self) -> QuantizationConfig:
if self.value == SparseType.FP8.value:
return FP8QuantizationConfig(4, 7)
else:
return QuantizationConfig()
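# Example round trips (sketch): SparseType.from_dtype(torch.float16).bit_rate()
# == 16, and SparseType("int4").as_int() == 3 with align_size() == 8.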
ELEMENT_SIZE: Dict[SparseType, int] = {
SparseType.FP32: 4,
SparseType.FP16: 2,
SparseType.FP8: 1,
SparseType.INT8: 1,
SparseType.BF16: 2,
# SparseType.INT4: 0.5,
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable
import torch
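# The AUC modules below sort predictions in descending order, accumulate
# weighted false/true positives, and integrate TP over FP with torch.trapz;
# tasks with no positives or no negatives fall back to the no-signal value 0.5.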
class BatchAuc(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(
self,
n_tasks: int,
predictions: torch.Tensor,
labels: torch.Tensor,
weights: torch.Tensor,
) -> torch.Tensor:
_, sorted_indices = torch.sort(predictions, descending=True, dim=-1)
sorted_labels = torch.gather(labels, 1, sorted_indices)
sorted_weights = torch.gather(weights, 1, sorted_indices)
cum_fp = torch.cumsum(sorted_weights * (1.0 - sorted_labels), dim=-1)
cum_tp = torch.cumsum(sorted_weights * sorted_labels, dim=-1)
fac = cum_fp[:, -1] * cum_tp[:, -1]
auc = torch.where(fac == 0, 0.5, torch.trapz(cum_tp, cum_fp, dim=-1) / fac)
return auc
class Auc(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(
self,
n_tasks: int,
predictions: torch.Tensor,
labels: torch.Tensor,
weights: torch.Tensor,
) -> torch.Tensor:
_, sorted_indices = torch.sort(predictions, descending=True, dim=-1)
aucs = []
for sorted_indices_i, labels_i, weights_i in zip(
sorted_indices, labels, weights
):
sorted_labels = torch.index_select(labels_i, dim=0, index=sorted_indices_i)
sorted_weights = torch.index_select(
weights_i, dim=0, index=sorted_indices_i
)
cum_fp = torch.cumsum(sorted_weights * (1.0 - sorted_labels), dim=0)
cum_tp = torch.cumsum(sorted_weights * sorted_labels, dim=0)
auc = torch.where(
cum_fp[-1] * cum_tp[-1] == 0,
0.5, # 0.5 is the no-signal default value for auc.
torch.trapz(cum_tp, cum_fp) / cum_fp[-1] / cum_tp[-1],
)
aucs.append(auc.view(1))
return torch.cat(aucs)
class AucJiterator(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
# Jiterator only works with elementwise kernels
fp_code_string = """
template <typename T> T fp(T weights, T labels) {
return weights * (1.0 - labels);
}"""
tp_code_string = """
template <typename T> T tp(T weights, T labels) {
return weights * labels;
}"""
# pyre-ignore [4]
self.jitted_fp: Callable[..., Any] = torch.cuda.jiterator._create_jit_fn(
fp_code_string
)
# pyre-ignore [4]
self.jitted_tp: Callable[..., Any] = torch.cuda.jiterator._create_jit_fn(
tp_code_string
)
def forward(
self,
n_tasks: int,
predictions: torch.Tensor,
labels: torch.Tensor,
weights: torch.Tensor,
) -> torch.Tensor:
_, sorted_indices = torch.sort(predictions, descending=True, dim=-1)
aucs = []
for sorted_indices_i, labels_i, weights_i in zip(
sorted_indices, labels, weights
):
sorted_labels = torch.index_select(labels_i, dim=0, index=sorted_indices_i)
sorted_weights = torch.index_select(
weights_i, dim=0, index=sorted_indices_i
)
cum_fp = torch.cumsum(self.jitted_fp(sorted_weights, sorted_labels), dim=0)
cum_tp = torch.cumsum(self.jitted_tp(sorted_weights, sorted_labels), dim=0)
auc = torch.where(
cum_fp[-1] * cum_tp[-1] == 0,
0.5, # 0.5 is the no-signal default value for auc.
torch.trapz(cum_tp, cum_fp) / cum_fp[-1] / cum_tp[-1],
)
aucs.append(auc.view(1))
return torch.cat(aucs)
class BatchAucJiterator(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
# Jiterator only works with elementwise kernels
fp_code_string = """
template <typename T> T fp(T weights, T labels) {
return weights * (1.0 - labels);
}"""
tp_code_string = """
template <typename T> T tp(T weights, T labels) {
return weights * labels;
}"""
# pyre-ignore [4]
self.jitted_fp: Callable[..., Any] = torch.cuda.jiterator._create_jit_fn(
fp_code_string
)
# pyre-ignore [4]
self.jitted_tp: Callable[..., Any] = torch.cuda.jiterator._create_jit_fn(
tp_code_string
)
def forward(
self,
n_tasks: int,
predictions: torch.Tensor,
labels: torch.Tensor,
weights: torch.Tensor,
) -> torch.Tensor:
_, sorted_indices = torch.sort(predictions, descending=True, dim=-1)
sorted_labels = torch.gather(labels, 1, sorted_indices)
sorted_weights = torch.gather(weights, 1, sorted_indices)
cum_fp = torch.cumsum(self.jitted_fp(sorted_weights, sorted_labels), dim=-1)
cum_tp = torch.cumsum(self.jitted_tp(sorted_weights, sorted_labels), dim=-1)
fac = cum_fp[:, -1] * cum_tp[:, -1]
auc = torch.where(fac == 0, 0.5, torch.trapz(cum_tp, cum_fp, dim=-1) / fac)
return auc
def auc(
n_tasks: int, predictions: torch.Tensor, labels: torch.Tensor, weights: torch.Tensor
) -> torch.Tensor:
_, sorted_indices = torch.sort(predictions, descending=True, dim=-1)
return torch.ops.fbgemm.batch_auc(n_tasks, sorted_indices, labels, weights)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from itertools import accumulate
from typing import List, Optional
import torch
from torch import nn
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_gpu"
)
class PermutePooledEmbeddings(nn.Module):
def __init__(
self,
embs_dims: List[int],
permute: List[int],
device: Optional[torch.device] = None,
) -> None:
super(PermutePooledEmbeddings, self).__init__()
logging.info("Using Permute Pooled Embeddings")
self.register_buffer(
"_offset_dim_list",
torch.tensor(
[0] + list(accumulate(embs_dims)), device=device, dtype=torch.int64
),
)
self.register_buffer(
"_permute", torch.tensor(permute, device=device, dtype=torch.int64)
)
inv_permute: List[int] = [0] * len(permute)
for i, p in enumerate(permute):
inv_permute[p] = i
self.register_buffer(
"_inv_permute", torch.tensor(inv_permute, device=device, dtype=torch.int64)
)
inv_embs_dims = [embs_dims[i] for i in permute]
self.register_buffer(
"_inv_offset_dim_list",
torch.tensor(
[0] + list(accumulate(inv_embs_dims)), device=device, dtype=torch.int64
),
)
def forward(self, pooled_embs: torch.Tensor) -> torch.Tensor:
result = torch.ops.fbgemm.permute_pooled_embs_auto_grad(
pooled_embs,
self._offset_dim_list.to(device=pooled_embs.device),
self._permute.to(device=pooled_embs.device),
self._inv_offset_dim_list.to(device=pooled_embs.device),
self._inv_permute.to(device=pooled_embs.device),
)
return result
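# Minimal usage sketch (hypothetical dims): with embs_dims=[4, 8] and
# permute=[1, 0], a (B, 12) pooled tensor comes back as (B, 12) with the
# 8-wide block placed ahead of the 4-wide block, e.g.
#   perm = PermutePooledEmbeddings([4, 8], [1, 0])
#   out = perm(torch.randn(2, 12, device="cuda"))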
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa F401
from fbgemm_gpu.split_embedding_optimizer_codegen.optimizer_args import (
SplitEmbeddingArgs,
SplitEmbeddingOptimizerParams,
)
from fbgemm_gpu.split_embedding_optimizer_codegen.split_embedding_optimizer_rowwise_adagrad import (
SplitEmbeddingRowwiseAdagrad,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import enum
import typing
from typing import Any, Callable, List, Tuple
# Create enums in given namespace with information from query_op
def create_enums(
namespace: typing.Dict[str, Any],
query_op: Callable[[], List[Tuple[str, List[Tuple[str, int]]]]],
) -> None:
for enum_name, items in query_op():
# Create matching python enumeration
# pyre-fixme[6]: For 2nd argument expected `None` but got `List[Tuple[str,
# int]]`.
new_enum = enum.Enum(enum_name, items)
# and store it in the module
namespace[enum_name] = new_enum
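# Sketch of the expected query_op payload (hypothetical names): a callable
# returning [("CacheState", [("clean", 0), ("dirty", 1)])] would install
# namespace["CacheState"] as an enum.Enum with members clean=0 and dirty=1.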
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Callable, List, Optional, Tuple, TypeVar
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import (
FP8QuantizationConfig,
SparseType,
) # usort:skip
# pyre-fixme[21]: Could not find name `default_rng` in `numpy.random` (stubbed).
from numpy.random import default_rng
logging.basicConfig(level=logging.DEBUG)
Deviceable = TypeVar(
"Deviceable", torch.nn.EmbeddingBag, torch.nn.Embedding, torch.Tensor
)
def round_up(a: int, b: int) -> int:
return int((a + b - 1) // b) * b
def get_device() -> torch.device:
# pyre-fixme[7]: Expected `device` but got `Union[int, device]`.
return (
torch.cuda.current_device()
if torch.cuda.is_available()
else torch.device("cpu")
)
def to_device(t: Deviceable, use_cpu: bool) -> Deviceable:
# pyre-fixme[7]: Expected `Deviceable` but got `Union[Tensor,
# torch.nn.EmbeddingBag]`.
return t.cpu() if use_cpu else t.cuda()
# Merged indices with shape (T, B, L) -> (flattened indices with shape
# (T * B * L), offsets with shape (T * B + 1))
def get_table_batched_offsets_from_dense(
merged_indices: torch.Tensor,
L: Optional[int] = None,
total_B: Optional[int] = None,
use_cpu: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
if L is None and total_B is None:
(T, B, L) = merged_indices.size()
total_B = T * B
lengths = np.ones(total_B) * L
return (
to_device(merged_indices.contiguous().view(-1), use_cpu),
to_device(
torch.tensor(([0] + np.cumsum(lengths).tolist())).long(),
use_cpu,
),
)
def get_offsets_from_dense(indices: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
(B, L) = indices.size()
return (
indices.contiguous().view(-1),
torch.tensor(
np.cumsum(np.asarray([0] + [L for _ in range(B)])[:-1]).astype(np.int64)
),
)
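# For example, a dense (B=2, L=3) index tensor flattens to 6 indices with
# offsets [0, 3] (an exclusive cumsum without the trailing total).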
def b_indices(
b: Callable[..., torch.Tensor],
x: torch.Tensor,
per_sample_weights: Optional[torch.Tensor] = None,
use_cpu: bool = False,
do_pooling: bool = True,
) -> torch.Tensor:
(indices, offsets) = get_offsets_from_dense(x)
if do_pooling:
return b(
to_device(indices, use_cpu),
to_device(offsets, use_cpu),
per_sample_weights=per_sample_weights,
)
else:
return b(to_device(indices, use_cpu))
def generate_requests( # noqa C901
iters: int,
B: int,
T: int,
L: int,
E: int,
# inter-batch indices reuse rate
reuse: float = 0.0,
# alpha <= 1.0: use uniform distribution
# alpha > 1.0: use zipf distribution
alpha: float = 1.0,
zipf_oversample_ratio: int = 3,
weighted: bool = False,
requests_data_file: Optional[str] = None,
# Comma-separated list of table numbers
tables: Optional[str] = None,
# If sigma_L is not None, treat L as mu_L and generate Ls from sigma_L
# and mu_L
sigma_L: Optional[int] = None,
emulate_pruning: bool = False,
use_cpu: bool = False,
deterministic_output: bool = False,  # generate_requests uses numpy.random.default_rng without a fixed seed by default, so the indices tensor varies with each call; set deterministic_output to use a fixed seed instead for repeatable outputs
length_dist: str = "normal", # distribution of embedding sequence lengths
) -> List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]]:
# TODO: refactor and split into helper functions to separate load from file,
# generate from distribution, and other future methods of generating data
if requests_data_file is not None:
indices_tensor, offsets_tensor, lengths_tensor = torch.load(requests_data_file)
average_L = 0
if tables is not None:
emb_tables = tuple(int(x) for x in tables.split(","))
indices = torch.zeros(0, dtype=indices_tensor.dtype)
offsets = torch.zeros(1, dtype=offsets_tensor.dtype)
total_L = 0
for t in emb_tables:
t_offsets = offsets_tensor[B * t : B * (t + 1) + 1]
total_L += t_offsets[-1] - t_offsets[0]
indices = torch.cat(
(indices, indices_tensor[t_offsets[0] : t_offsets[-1]])
)
offsets = torch.cat(
(
offsets,
t_offsets[1:] - t_offsets[0] + offsets[-1],
)
)
indices_tensor = indices
offsets_tensor = offsets
average_L = int(total_L / B)
assert np.prod(offsets_tensor.size()) - 1 == np.prod((T, B)), (
f"Requested tables: {emb_tables} "
f"does not conform to inputs (T, B) = ({T}, {B})."
)
logging.warning(
f"Using (indices = {indices_tensor.size()}, offsets = {offsets_tensor.size()}) based "
f"on tables: {emb_tables}"
)
else:
average_L = int((offsets_tensor[-1] - offsets_tensor[0]) / B)
assert (np.prod(offsets_tensor.size()) - 1) == np.prod((T, B)), (
f"Data file (indices = {indices_tensor.size()}, "
f"offsets = {offsets_tensor.size()}, lengths = {lengths_tensor.size()}) "
f"does not conform to inputs (T, B) = ({T}, {B})."
)
assert (
L == average_L
), f"Requested L does not align with provided data file ({L} vs. {average_L})"
assert E > max(indices_tensor), (
f"Number of embeddings is not enough to support maximum index "
f"provided by data file {E} vs. {max(indices_tensor)}"
)
weights_tensor = (
None
if not weighted
else torch.randn(indices_tensor.size(), device=get_device())
)
rs = []
for _ in range(iters):
rs.append(
(
indices_tensor.to(get_device()),
offsets_tensor.to(get_device()),
weights_tensor,
)
)
return rs
# Generate L from stats
if sigma_L is not None:
use_variable_L = True
if length_dist == "uniform":
# TODO: either make these separate parameters or make a separate version of
# generate_requests to handle the uniform dist case once whole
# generate_requests function is refactored to split into helper functions
# for each use case.
# L represents the lower bound when the uniform distribution is used
lower_bound = L
            # sigma_L represents the upper bound when the uniform distribution is used
upper_bound = sigma_L + 1
Ls = np.random.randint(
lower_bound,
upper_bound,
(T, B),
dtype=np.int32,
)
else: # normal dist
Ls = np.random.normal(loc=L, scale=sigma_L, size=T * B).astype(int)
# Make sure that Ls are positive
Ls[Ls < 0] = 0
# Use the same L distribution across iters
Ls = np.tile(Ls, iters)
L = Ls.max()
# Make it exclusive cumsum
L_offsets = torch.from_numpy(np.insert(Ls.cumsum(), 0, 0)).to(torch.long)
else:
use_variable_L = False
# Init to suppress the pyre error
L_offsets = torch.empty(1)
if alpha <= 1.0:
all_indices = torch.randint(
low=0,
high=E,
size=(iters, T, B, L),
device="cpu" if use_variable_L else get_device(),
dtype=torch.int32,
)
# each bag is usually sorted
(all_indices, _) = torch.sort(all_indices)
if use_variable_L:
all_indices = torch.ops.fbgemm.bottom_k_per_row(
all_indices.to(torch.long), L_offsets, False
)
all_indices = all_indices.to(get_device()).int()
else:
all_indices = all_indices.reshape(iters, T, B * L)
else:
        assert E >= L, "num-embeddings must be greater than or equal to bag-size"
# oversample and then remove duplicates to obtain sampling without
# replacement
zipf_shape = (iters, T, B, zipf_oversample_ratio * L)
if torch.cuda.is_available():
zipf_shape_total_len = np.prod(zipf_shape)
all_indices_list = []
# process 8 GB at a time on GPU
chunk_len = int(1e9)
for chunk_begin in range(0, zipf_shape_total_len, chunk_len):
all_indices_gpu = torch.ops.fbgemm.zipf_cuda(
alpha,
min(zipf_shape_total_len - chunk_begin, chunk_len),
seed=torch.randint(2**31 - 1, (1,))[0],
)
all_indices_list.append(all_indices_gpu.cpu())
all_indices = torch.cat(all_indices_list).reshape(zipf_shape)
else:
all_indices = torch.as_tensor(np.random.zipf(a=alpha, size=zipf_shape))
all_indices = (all_indices - 1) % E
if use_variable_L:
all_indices = torch.ops.fbgemm.bottom_k_per_row(
all_indices, L_offsets, True
)
else:
all_indices = torch.ops.fbgemm.bottom_k_per_row(
all_indices, torch.tensor([0, L], dtype=torch.long), True
)
if deterministic_output:
rng = default_rng(12345)
else:
rng = default_rng()
permutation = torch.as_tensor(
rng.choice(E, size=all_indices.max().item() + 1, replace=False)
)
all_indices = permutation.gather(0, all_indices.flatten())
all_indices = all_indices.to(get_device()).int()
if not use_variable_L:
all_indices = all_indices.reshape(iters, T, B * L)
if reuse > 0.0:
assert (
not use_variable_L
), "Does not support generating Ls from stats for reuse > 0.0"
for it in range(iters - 1):
for t in range(T):
reused_indices = torch.randperm(B * L, device=get_device())[
: int(B * L * reuse)
]
all_indices[it + 1, t, reused_indices] = all_indices[
it, t, reused_indices
]
# Some indices are set to -1 for emulating pruned rows.
if emulate_pruning:
for it in range(iters):
for t in range(T):
num_negative_indices = B // 2
random_locations = torch.randint(
low=0,
high=(B * L),
size=(num_negative_indices,),
device=torch.cuda.current_device(),
dtype=torch.int32,
)
all_indices[it, t, random_locations] = -1
rs = []
for it in range(iters):
if use_variable_L:
start_offset = L_offsets[it * T * B]
it_L_offsets = torch.concat(
[
torch.zeros(1),
L_offsets[it * T * B + 1 : (it + 1) * T * B + 1] - start_offset,
]
)
weights_tensor = (
None
if not weighted
else torch.randn(
int(it_L_offsets[-1].item()), device=get_device()
) # per sample weights will always be FP32
)
rs.append(
(
all_indices[start_offset : L_offsets[(it + 1) * T * B]],
it_L_offsets.to(get_device()),
weights_tensor,
)
)
else:
weights_tensor = (
None
if not weighted
else torch.randn(
T * B * L, device=get_device()
) # per sample weights will always be FP32
)
rs.append(
get_table_batched_offsets_from_dense(
all_indices[it].view(T, B, L), use_cpu=use_cpu
)
+ (weights_tensor,)
)
return rs
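# Illustrative sketch (hypothetical helper, not part of the original module):
# generating a small batch of synthetic TBE requests with the default uniform
# index distribution; use_cpu=True keeps the generated tensors on the host.
def _example_generate_requests() -> None:
    requests = generate_requests(iters=2, B=4, T=3, L=5, E=100, use_cpu=True)
    assert len(requests) == 2
    indices, offsets, per_sample_weights = requests[0]
    assert indices.numel() == 3 * 4 * 5  # T * B * L
    assert offsets.numel() == 3 * 4 + 1  # T * B + 1
    assert per_sample_weights is None  # weighted=False by default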
def quantize_embs(
weight: torch.Tensor,
weight_ty: SparseType,
fp8_config: Optional[FP8QuantizationConfig] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
weight = weight.detach()
if weight_ty == SparseType.FP32:
q_weight = weight.float()
res_weight = q_weight.view(torch.uint8)
return (res_weight, None)
elif weight_ty == SparseType.FP16:
q_weight = weight.half()
res_weight = q_weight.view(torch.uint8)
return (res_weight, None)
elif weight_ty == SparseType.FP8:
assert fp8_config is not None
        # Quantize FP32 to HFP8
res_weight = torch.ops.fbgemm.FloatToHFP8Quantized(
weight.float(),
fp8_config.get("exponent_bits"),
fp8_config.get("exponent_bias"),
fp8_config.get("max_position"),
)
return (res_weight, None)
elif weight_ty == SparseType.INT8:
# Note that FloatToFused8BitRowwiseQuantized might have additional padding
# for alignment if embedding dimension is not a multiple of 4:
# https://fburl.com/code/z009xsy6
q_weight = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(weight)
res_weight = q_weight[:, :-8].view(torch.uint8)
res_scale_shift = torch.tensor(
q_weight[:, -8:].view(torch.float32).to(torch.float16).view(torch.uint8)
        ) # [-4:-2]: scale; [-2:]: bias
return (res_weight, res_scale_shift)
elif weight_ty == SparseType.INT4 or weight_ty == SparseType.INT2:
        # Note that FP32 -> INT4/INT2 conversion op below might have additional padding
# for alignment: https://fburl.com/code/xx9kkduf
q_weight = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
weight,
bit_rate=weight_ty.bit_rate(),
)
res_weight = q_weight[:, :-4].view(torch.uint8)
res_scale_shift = torch.tensor(
q_weight[:, -4:].view(torch.uint8)
        ) # [-4:-2]: scale; [-2:]: bias
return (res_weight, res_scale_shift)
else:
raise RuntimeError("Unsupported SparseType: {}".format(weight_ty))
def dequantize_embs(
weights: torch.Tensor,
scale_shift: torch.Tensor,
weight_ty: SparseType,
use_cpu: bool,
fp8_config: Optional[FP8QuantizationConfig] = None,
) -> torch.Tensor:
print(f"weight_ty: {weight_ty}")
    assert (
        weights.dtype == torch.uint8
    ), "The input tensor for the dequantize_embs function needs to be a byte tensor"
th_weights = weights
if scale_shift is not None:
th_scale_shift: torch.Tensor = scale_shift.view(torch.float16).to(torch.float32)
if weight_ty == SparseType.INT4:
(E, D_2) = th_weights.shape
D = D_2 * 2
def comp(i: int) -> torch.Tensor:
subs = th_weights.view(torch.uint8) >> (i * 4)
sub_mask = subs & 0xF
result = sub_mask.to(torch.float32) * th_scale_shift[:, 0].reshape(
-1, 1
).to(torch.float32) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
return result.to(torch.float32)
comps = [comp(i) for i in range(2)]
comps = torch.stack(comps)
comps = comps.permute(1, 2, 0)
comps = comps.reshape(E, D)
return to_device(torch.tensor(comps), use_cpu)
elif weight_ty == SparseType.INT2:
(E, D_4) = th_weights.shape
D = D_4 * 4
# pyre-fixme[53]: Captured variable `scale_shift` is not annotated.
# pyre-fixme[53]: Captured variable `weights` is not annotated.
def comp(i: int) -> torch.Tensor:
subs = th_weights.view(torch.uint8) >> (i * 2)
sub_mask = subs & 0x3
result = sub_mask.to(torch.float32) * th_scale_shift[:, 0].reshape(
-1, 1
).to(torch.float32) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
return result.to(torch.float32)
comps = [comp(i) for i in range(4)]
comps = torch.stack(comps)
comps = comps.permute(1, 2, 0)
comps = comps.reshape(E, D)
return to_device(torch.tensor(comps), use_cpu)
elif weight_ty == SparseType.INT8:
(E, D) = th_weights.shape
comps = th_weights.to(torch.float32) * th_scale_shift[:, 0].reshape(-1, 1).to(
torch.float32
) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
return to_device(torch.tensor(comps), use_cpu)
elif weight_ty == SparseType.FP8:
assert fp8_config is not None
assert scale_shift is None
        # Dequantize HFP8 to FP32
comps = torch.ops.fbgemm.HFP8QuantizedToFloat(
weights,
fp8_config.get("exponent_bits"),
fp8_config.get("exponent_bias"),
)
return to_device(comps, use_cpu)
elif weight_ty == SparseType.FP16:
assert scale_shift is None
comps = th_weights.view(torch.half)
return to_device(torch.tensor(comps), use_cpu)
elif weight_ty == SparseType.FP32:
assert scale_shift is None
comps = th_weights.view(torch.float32)
# pyre-fixme[7]: Expected `Tensor` but got implicit return value of `None`.
return to_device(torch.tensor(comps), use_cpu)
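# Illustrative sketch (hypothetical helper, not part of the original module):
# an INT8 rowwise quantize/dequantize round trip with the helpers above. The
# reconstruction is lossy, so only the shape is checked here.
def _example_int8_roundtrip() -> None:
    weight = torch.randn(8, 16)  # (E, D); D must be a multiple of 4
    q_weight, scale_shift = quantize_embs(weight, SparseType.INT8)
    deq = dequantize_embs(q_weight, scale_shift, SparseType.INT8, use_cpu=True)
    assert deq.shape == weight.shape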
def fake_quantize_embs(
weights: torch.Tensor,
scale_shift: Optional[torch.Tensor],
dequant_weights: torch.Tensor,
weight_ty: SparseType,
use_cpu: bool,
fp8_config: Optional[FP8QuantizationConfig] = None,
) -> None:
    assert (
        weights.dtype == torch.uint8
    ), "The input tensor for the fake_quantize_embs function needs to be a byte tensor"
th_weights = weights
if scale_shift is not None:
th_scale_shift: torch.Tensor = (
scale_shift.contiguous().view(torch.float16).to(torch.float32)
)
if weight_ty == SparseType.INT4:
(E, D_2) = th_weights.shape
D = D_2 * 2
def comp(i: int) -> torch.Tensor:
subs = th_weights.view(torch.uint8) >> (i * 4)
sub_mask = subs & 0xF
result = sub_mask.to(torch.float32) * th_scale_shift[:, 0].reshape(
-1, 1
).to(torch.float32) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
return result.to(torch.float32)
comps = [comp(i) for i in range(2)]
comps = torch.stack(comps)
comps = comps.permute(1, 2, 0)
comps = comps.reshape(E, D)
dequant_weights.copy_(to_device(comps, use_cpu))
elif weight_ty == SparseType.INT2:
(E, D_4) = th_weights.shape
D = D_4 * 4
# pyre-fixme[53]: Captured variable `scale_shift` is not annotated.
# pyre-fixme[53]: Captured variable `weights` is not annotated.
def comp(i: int) -> torch.Tensor:
subs = th_weights.view(torch.uint8) >> (i * 2)
sub_mask = subs & 0x3
result = sub_mask.to(torch.float32) * th_scale_shift[:, 0].reshape(
-1, 1
).to(torch.float32) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
return result.to(torch.float32)
comps = [comp(i) for i in range(4)]
comps = torch.stack(comps)
comps = comps.permute(1, 2, 0)
comps = comps.reshape(E, D)
dequant_weights.copy_(to_device(comps, use_cpu))
elif weight_ty == SparseType.INT8:
(E, D) = th_weights.shape
comps = th_weights.to(torch.float32) * th_scale_shift[:, 0].reshape(-1, 1).to(
torch.float32
) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
dequant_weights.copy_(to_device(comps, use_cpu))
elif weight_ty == SparseType.FP8:
assert fp8_config is not None
assert scale_shift is None
        # Quantize FP32 to HFP8
comps = torch.ops.fbgemm.FloatToHFP8Quantized(
dequant_weights.detach().float(),
fp8_config.get("exponent_bits"),
fp8_config.get("exponent_bias"),
fp8_config.get("max_position"),
)
weights.copy_(comps)
        # Dequantize HFP8 to FP32
comps = torch.ops.fbgemm.HFP8QuantizedToFloat(
comps,
fp8_config.get("exponent_bits"),
fp8_config.get("exponent_bias"),
)
dequant_weights.copy_(to_device(comps, use_cpu))
elif weight_ty == SparseType.FP16:
assert scale_shift is None
comps = dequant_weights.detach().half().view(torch.uint8)
weights.copy_(comps)
elif weight_ty == SparseType.FP32:
assert scale_shift is None
comps = dequant_weights.detach().float().view(torch.uint8)
weights.copy_(comps)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import logging
import math
from typing import cast, Optional, Tuple
import torch
from fbgemm_gpu.split_embedding_configs import QuantizationConfig, SparseType
from fbgemm_gpu.split_embedding_utils import FP8QuantizationConfig, quantize_embs
from fbgemm_gpu.split_table_batched_embeddings_ops_common import EmbeddingLocation
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
SplitTableBatchedEmbeddingBagsCodegen,
)
from torch import Tensor # usort:skip
# TODO: add per-feature based converter option (based on embedding_specs during inference)
# TODO: optimize embedding pruning and quantization latency.
class SplitEmbInferenceConverter:
def __init__(
self,
quantize_type: SparseType,
pruning_ratio: Optional[float],
use_array_for_index_remapping: bool = True,
quantization_config: Optional[QuantizationConfig] = None,
):
self.quantize_type = quantize_type
# TODO(yingz): Change the pruning ratio to per-table settings.
self.pruning_ratio = pruning_ratio
self.use_array_for_index_remapping = use_array_for_index_remapping
self.quantization_config = quantization_config
def convert_model(self, model: torch.nn.Module) -> torch.nn.Module:
self._process_split_embs(model)
return model
def _prune_by_weights_l2_norm(self, new_num_rows, weights) -> Tuple[Tensor, float]:
assert new_num_rows > 0
from numpy.linalg import norm
indicators = []
for row in weights:
indicators.append(norm(row.cpu().numpy(), ord=2))
sorted_indicators = sorted(indicators, reverse=True)
threshold = None
for i in range(new_num_rows, len(sorted_indicators)):
if sorted_indicators[i] < sorted_indicators[new_num_rows - 1]:
threshold = sorted_indicators[i]
break
if threshold is None:
threshold = sorted_indicators[-1] - 1
return (torch.tensor(indicators), threshold)
def _prune_embs(
self,
idx: int,
num_rows: int,
module: SplitTableBatchedEmbeddingBagsCodegen,
) -> Tuple[Tensor, Optional[Tensor]]:
# TODO(yingz): Avoid DtoH / HtoD overhead.
weights = module.split_embedding_weights()[idx].cpu()
if self.pruning_ratio is None:
return (weights, None)
new_num_rows = int(math.ceil(num_rows * (1.0 - self.pruning_ratio))) # type: ignore
if new_num_rows == num_rows:
return (weights, None)
(indicators, threshold) = self._prune_by_weights_l2_norm(new_num_rows, weights)
return torch.ops.fbgemm.embedding_bag_rowwise_prune(
weights, indicators, threshold, torch.int32
)
def _get_quantization_config(self, name):
quantization_config = self.quantization_config
if quantization_config is None:
raise RuntimeError("quantization_config must be set for FP8 weight")
return quantization_config.get(name)
def _quantize_embs(
self, weight: Tensor, weight_ty: SparseType
) -> Tuple[Tensor, Optional[Tensor]]:
fp8_quant_config = cast(FP8QuantizationConfig, self.quantization_config)
return quantize_embs(weight, weight_ty, fp8_quant_config)
def _process_split_embs(self, model: torch.nn.Module) -> None:
for name, child in model.named_children():
if isinstance(
child,
SplitTableBatchedEmbeddingBagsCodegen,
):
embedding_specs = []
use_cpu = child.embedding_specs[0][3] == ComputeDevice.CPU
for E, D, _, _ in child.embedding_specs:
weights_ty = self.quantize_type
if D % weights_ty.align_size() != 0:
logging.warning(
f"Embedding dim {D} couldn't be divided by align size {weights_ty.align_size()}!"
)
assert D % 4 == 0
weights_ty = (
SparseType.FP16
) # fall back to FP16 if dimension couldn't be aligned with the required size
embedding_specs.append(("", E, D, weights_ty))
weight_lists = []
new_embedding_specs = []
index_remapping_list = []
for t, (_, E, D, weight_ty) in enumerate(embedding_specs):
# Try to prune embeddings.
(pruned_weight, index_remapping) = self._prune_embs(t, E, child)
new_embedding_specs.append(
(
"",
pruned_weight.size()[0],
D,
weight_ty,
EmbeddingLocation.HOST
if use_cpu
else EmbeddingLocation.DEVICE,
)
)
index_remapping_list.append(index_remapping)
# Try to quantize embeddings.
weight_lists.append(self._quantize_embs(pruned_weight, weight_ty))
is_fp8_weight = self.quantize_type == SparseType.FP8
q_child = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=new_embedding_specs,
index_remapping=index_remapping_list
if self.pruning_ratio is not None
else None,
pooling_mode=child.pooling_mode,
device="cpu" if use_cpu else torch.cuda.current_device(),
weight_lists=weight_lists,
use_array_for_index_remapping=self.use_array_for_index_remapping,
fp8_exponent_bits=self._get_quantization_config("exponent_bits")
if is_fp8_weight
else None,
fp8_exponent_bias=self._get_quantization_config("exponent_bias")
if is_fp8_weight
else None,
)
setattr(model, name, q_child)
else:
self._process_split_embs(child)
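# Illustrative sketch (hypothetical helper, not part of the original module):
# converting every SplitTableBatchedEmbeddingBagsCodegen submodule of a model
# to its INT8 inference counterpart, with pruning disabled.
def _example_convert_model(model: torch.nn.Module) -> torch.nn.Module:
    converter = SplitEmbInferenceConverter(
        quantize_type=SparseType.INT8,
        pruning_ratio=None,  # keep all rows
    )
    return converter.convert_model(model)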
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
try:
torch.ops.load_library(os.path.join(os.path.dirname(__file__), "fbgemm_gpu_py.so"))
except Exception as e:
print(e)
# __init__.py is only used in OSS
# Use existence to check if fbgemm_gpu_py.so has already been loaded
open_source: bool = True
# Re-export docs
from . import _fbgemm_gpu_docs # noqa: F401, E402
# Re-export the version string from the auto-generated version file
from ._fbgemm_gpu_version import __version__ # noqa: F401, E402
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from itertools import accumulate
from typing import List, Optional
import torch
from torch import nn
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_split_gpu"
)
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_split_cpu"
)
@torch.fx.wrap
def _fx_wrap_tensor_to_device(t: torch.Tensor, device: torch.device) -> torch.Tensor:
return t.to(device=device)
class PermutePooledEmbeddingsSplit(nn.Module):
def __init__(
self,
embs_dims: List[int],
permute: List[int],
device: Optional[torch.device] = None,
) -> None:
super(PermutePooledEmbeddingsSplit, self).__init__()
logging.info("Using Permute Pooled Embeddings")
self.register_buffer(
"_offset_dim_list",
torch.tensor(
[0] + list(accumulate(embs_dims)), device=device, dtype=torch.int64
),
)
self.register_buffer(
"_permute", torch.tensor(permute, device=device, dtype=torch.int64)
)
inv_permute: List[int] = [0] * len(permute)
for i, p in enumerate(permute):
inv_permute[p] = i
self.register_buffer(
"_inv_permute", torch.tensor(inv_permute, device=device, dtype=torch.int64)
)
inv_embs_dims = [embs_dims[i] for i in permute]
self.register_buffer(
"_inv_offset_dim_list",
torch.tensor(
[0] + list(accumulate(inv_embs_dims)), device=device, dtype=torch.int64
),
)
def forward(self, pooled_embs: torch.Tensor) -> torch.Tensor:
result = torch.ops.fbgemm.permute_pooled_embs_auto_grad_split(
pooled_embs,
_fx_wrap_tensor_to_device(self._offset_dim_list, device=pooled_embs.device),
_fx_wrap_tensor_to_device(self._permute, device=pooled_embs.device),
_fx_wrap_tensor_to_device(
self._inv_offset_dim_list, device=pooled_embs.device
),
_fx_wrap_tensor_to_device(self._inv_permute, device=pooled_embs.device),
)
return result
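# Illustrative sketch (hypothetical helper, not part of the original module):
# permuting the concatenated pooled outputs of three tables with dims 2, 3, 4;
# assumes the fbgemm permute_pooled_embs ops loaded above are available.
def _example_permute_pooled() -> None:
    permute_module = PermutePooledEmbeddingsSplit(
        embs_dims=[2, 3, 4],
        permute=[2, 0, 1],  # move the last table's output to the front
    )
    pooled = torch.randn(5, 2 + 3 + 4)  # (batch, sum of embedding dims)
    out = permute_module(pooled)
    assert out.shape == pooled.shape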
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# The code in this file is refactored from https://fburl.com/code/p2gy2gxb
# based on "Amy Yang et al., Training Deep Learning Recommendation Model with
# Quantized Collective Communications", DLP-KDD 2020.
import logging
from typing import Optional, TypeVar
import torch
from fbgemm_gpu.quantize_utils import (
bf16_to_fp32,
fp16_to_fp32,
fp32_to_bf16_with_clamp,
fp32_to_fp16_with_clamp,
fp32_to_hfp8_with_clamp,
hfp8_to_fp32,
)
from fbgemm_gpu.split_embedding_configs import SparseType
from torch.autograd.profiler import record_function # usort:skip
logger: logging.Logger = logging.getLogger()
# FP8 configurations
ebits, mbits, bias = 4, 3, 15
max_pos: float = (2 ** ((1 << ebits) - 2 - bias)) * (2 - 2 ** (-mbits))
# INT8 configurations
ROW_DIM_DEFAULT = 32
def none_throws(
optional: Optional[TypeVar("_T")], message: str = "Unexpected `None`"
) -> TypeVar("_T"):
if optional is None:
raise AssertionError(message)
return optional
class QuantizationContext:
def __init__(self, row_dim: int = ROW_DIM_DEFAULT) -> None:
self.row_dim = row_dim
self.row_dim_quant: int = -1
def _quantize_tensor(
input_tensor: torch.Tensor,
comm_precision: SparseType,
ctx: Optional[QuantizationContext] = None,
is_fwd: bool = True,
) -> torch.Tensor:
if comm_precision == SparseType.FP32:
return input_tensor
elif comm_precision == SparseType.FP16:
return fp32_to_fp16_with_clamp(input_tensor)
elif comm_precision == SparseType.BF16:
return fp32_to_bf16_with_clamp(input_tensor)
elif comm_precision == SparseType.FP8:
# return fp32_to_hfp8_with_clamp(input_tensor, ebits, mbits, bias)
if ctx is not None and ctx.row_dim > 0:
ctx = none_throws(ctx)
row_dim = ctx.row_dim
input_2d = input_tensor.view((-1, row_dim)) if row_dim > 0 else input_tensor
input_2d_quant = torch.ops.fbgemm.FloatToFP8RowwiseQuantized(
input_2d, is_fwd
)
row_dim_quant = input_2d_quant.shape[1]
input_quant_all2all = None
input_quant_all2all = input_2d_quant.view((-1))
ctx.row_dim_quant = row_dim_quant
return input_quant_all2all
else:
return fp32_to_hfp8_with_clamp(input_tensor, ebits, mbits, bias)
elif comm_precision == SparseType.INT8:
ctx = none_throws(ctx)
row_dim = ctx.row_dim
input_2d = input_tensor.view((-1, row_dim)) if row_dim > 0 else input_tensor
input_2d_quant = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(input_2d)
row_dim_quant = input_2d_quant.shape[1]
input_quant_all2all = None
input_quant_all2all = input_2d_quant.view((-1))
ctx.row_dim_quant = row_dim_quant
return input_quant_all2all
else:
raise ValueError(f"comm_precision={comm_precision} is not supported")
def _dequantize_tensor(
quantized_tensor: torch.Tensor,
comm_precision: SparseType,
ctx: Optional[QuantizationContext] = None,
is_fwd: bool = True,
) -> torch.Tensor:
if comm_precision == SparseType.FP32:
assert quantized_tensor.dtype == torch.float
return quantized_tensor
elif comm_precision == SparseType.FP16:
assert quantized_tensor.dtype == torch.half
return fp16_to_fp32(quantized_tensor)
elif comm_precision == SparseType.BF16:
assert quantized_tensor.dtype == torch.bfloat16
return bf16_to_fp32(quantized_tensor)
elif comm_precision == SparseType.FP8:
if ctx is not None and ctx.row_dim > 0:
row_dim_quant = ctx.row_dim_quant
quantized_tensor_2d = quantized_tensor.view((-1, row_dim_quant))
dequant_tensor = torch.ops.fbgemm.FP8RowwiseQuantizedToFloat(
quantized_tensor_2d, is_fwd
)
return dequant_tensor.view(-1)
else:
assert quantized_tensor.dtype == torch.uint8
return hfp8_to_fp32(quantized_tensor, ebits, bias)
elif comm_precision == SparseType.INT8:
ctx = none_throws(ctx)
row_dim_quant = ctx.row_dim_quant
quantized_tensor_2d = quantized_tensor.view((-1, row_dim_quant))
dequant_tensor = torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
quantized_tensor_2d
)
return dequant_tensor.view(-1)
else:
raise ValueError(f"comm_precision={comm_precision} is not supported")
class QuantizedCommCodec:
# Concrete implementation of QuantizedCommCodec provided by FBGEMM functions.
def __init__(
self,
comm_precision: SparseType,
loss_scale: Optional[float] = None,
row_dim: Optional[int] = None,
is_fwd: bool = True,
) -> None:
if loss_scale is not None:
if comm_precision not in [SparseType.FP16, SparseType.BF16]:
logger.warning(
f"Setting loss scale for comm_precision={comm_precision} is not supported. Overriding to None"
)
loss_scale = None
logger.info(
f"Creating QuantizedCommsCodec comm_precision:{comm_precision}, loss_scale:{loss_scale}"
)
self._comm_precision = comm_precision
self._loss_scale = loss_scale
self._is_fwd = is_fwd
self._row_dim: int = -1 if row_dim is None else row_dim
def encode(
self, input_tensor: torch.Tensor, ctx: Optional[QuantizationContext] = None
) -> torch.Tensor:
if self._loss_scale is not None:
input_tensor = self._loss_scale * input_tensor
with record_function(
f"## encoder {self._comm_precision} {self._loss_scale} ##"
):
output = _quantize_tensor(
input_tensor,
self._comm_precision,
ctx,
self._is_fwd,
)
return output
def decode(
self, input_tensor: torch.Tensor, ctx: Optional[QuantizationContext] = None
) -> torch.Tensor:
if self._loss_scale is not None:
input_tensor = input_tensor / self._loss_scale
with record_function(
f"## decoder {self._comm_precision} {self._loss_scale} ##"
):
dequantized_tensor = _dequantize_tensor(
input_tensor, self._comm_precision, ctx, self._is_fwd
)
return dequantized_tensor
def calc_quantized_size(
self, input_len: int, ctx: Optional[QuantizationContext] = None
) -> int:
# Use the same logic in _float_to_fused8bitrowwise_gpu_t()
if self._comm_precision == SparseType.INT8 or (
self._comm_precision == SparseType.FP8 and self._row_dim > 0
):
ctx = none_throws(ctx)
assert input_len % ctx.row_dim == 0, (
f"input_len {input_len} is not a multiple of row dim {ctx.row_dim} "
"Please check your batch size (power of 2 batch size is recommended)"
)
nrows = input_len // ctx.row_dim
ncols = (ctx.row_dim + 3) // 4 * 4 + 2 * 4
return nrows * ncols
else:
return input_len
@property
def quantized_dtype(self) -> torch.dtype:
return self._comm_precision.as_dtype()
def create_context(self) -> Optional[QuantizationContext]:
# fp8 rowwise is activated when row_dim > 0
if self._comm_precision == SparseType.FP8:
return QuantizationContext(self._row_dim)
# int8 rowwise is default
return QuantizationContext()
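# Illustrative sketch (hypothetical helper, not part of the original module):
# an FP16 encode/decode round trip through the codec defined above.
def _example_comm_codec_roundtrip() -> None:
    codec = QuantizedCommCodec(comm_precision=SparseType.FP16)
    ctx = codec.create_context()
    grad = torch.randn(1024)
    encoded = codec.encode(grad, ctx)
    assert encoded.dtype == torch.half
    decoded = codec.decode(encoded, ctx)
    assert decoded.shape == grad.shape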
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
logger: logging.Logger = logging.getLogger()
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
TORCH_HALF_MIN: float = torch.finfo(torch.float16).min
TORCH_HALF_MAX: float = torch.finfo(torch.float16).max
TORCH_BFLOAT16_MIN: float = torch.finfo(torch.bfloat16).min
TORCH_BFLOAT16_MAX: float = torch.finfo(torch.bfloat16).max
def fp32_to_fp16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
return torch.clamp(tensor, TORCH_HALF_MIN, TORCH_HALF_MAX).half()
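# Illustrative sketch (hypothetical helper, not part of the original module):
# values beyond the FP16 representable range are clamped instead of
# overflowing to inf.
def _example_fp16_clamp() -> None:
    t = torch.tensor([1.0e4, -1.0e4])  # exactly representable in FP16
    assert torch.equal(fp32_to_fp16_with_clamp(t).float(), t)
    big = torch.tensor([1.0e9])  # above TORCH_HALF_MAX, clamped to the max
    assert fp32_to_fp16_with_clamp(big).item() == TORCH_HALF_MAX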
def fp32_to_bf16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
return torch.clamp(tensor, TORCH_BFLOAT16_MIN, TORCH_BFLOAT16_MAX).bfloat16()
def fp32_to_hfp8_with_clamp(
tensor: torch.Tensor, ebits: int = 4, mbits: int = 3, bias: int = 15
) -> torch.Tensor:
max_pos: float = (2 ** ((1 << ebits) - 2 - bias)) * (2 - 2 ** (-mbits))
return torch.ops.fbgemm.FloatToHFP8Quantized(
tensor.contiguous(),
ebits,
bias,
max_pos,
)
def fp16_to_fp32(tensor: torch.Tensor) -> torch.Tensor:
return tensor.float()
def bf16_to_fp32(tensor: torch.Tensor) -> torch.Tensor:
return tensor.view(torch.bfloat16).float()
def hfp8_to_fp32(tensor: torch.Tensor, ebits: int = 4, bias: int = 15) -> torch.Tensor:
return torch.ops.fbgemm.HFP8QuantizedToFloat(
tensor.contiguous().view(torch.uint8),
ebits,
bias,
)
def measure_fp16_quant_error(input_tensor: torch.Tensor) -> None:
# TODO: log to tensorboard
num_nan_fp32_tensor = torch.numel(input_tensor[torch.isnan(input_tensor)])
logger.info(
"num NaN in fp32 tensor: {}, ratio: {}.".format(
num_nan_fp32_tensor, num_nan_fp32_tensor / torch.numel(input_tensor)
)
)
logger.info(
"fp32 tensor profile: min: {}, max: {}, min abs:{}, max abs:{}.".format(
torch.min(input_tensor),
torch.max(input_tensor),
torch.min(torch.abs(input_tensor)),
torch.max(torch.abs(input_tensor)),
)
)
fp16_tensor = fp32_to_fp16_with_clamp(input_tensor)
num_nan_fp16_tensor = torch.numel(fp16_tensor[torch.isnan(fp16_tensor)])
logger.info(
"num NaN in fp16 tensor: {}, ratio: {}.".format(
num_nan_fp16_tensor, num_nan_fp16_tensor / torch.numel(input_tensor)
)
)
diff = torch.abs(input_tensor - fp16_tensor.float())
rel_diff = diff / torch.abs(input_tensor)
logger.info(
"fp32_to_fp16 abs error: min={}, max={}, avg={}.".format(
torch.min(diff), torch.max(diff), torch.mean(diff)
)
)
rel_diff_not_nan = rel_diff[torch.logical_not(torch.isnan(rel_diff))]
logger.info(
"fp32_to_fp16 rel error: min={}, max={}, avg={}.".format(
torch.min(rel_diff_not_nan),
torch.max(rel_diff_not_nan),
torch.mean(rel_diff_not_nan),
)
)
rel_diff_1_idx = torch.where(rel_diff == 1.0)
fp32_rel_err_1_vals = input_tensor[rel_diff_1_idx]
if torch.numel(fp32_rel_err_1_vals) > 0:
fp32_rel_err_1_vals = torch.abs(fp32_rel_err_1_vals)
logger.info(
"fp32_to_fp16 rel error == 1: fp32 min:{}, fp32 max:{}, fp32 avg:{}.".format(
torch.min(fp32_rel_err_1_vals),
torch.max(fp32_rel_err_1_vals),
torch.mean(fp32_rel_err_1_vals),
)
)
subrange_ratio = torch.numel(fp16_tensor[rel_diff_1_idx]) / torch.numel(
fp16_tensor
)
logger.info("sub fp16 range ratio: {}".format(subrange_ratio))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Optional
import torch
from fbgemm_gpu.enums import create_enums
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")
# Import all uvm enums from c++ library
create_enums(globals(), torch.ops.fbgemm.fbgemm_gpu_uvm_enum_query)
def cudaMemAdvise(
t: torch.Tensor,
advice: Enum,
) -> None:
torch.ops.fbgemm.cuda_mem_advise(t, advice.value)
def cudaMemPrefetchAsync(
t: torch.Tensor,
device_t: Optional[torch.Tensor] = None,
) -> None:
torch.ops.fbgemm.cuda_mem_prefetch_async(t, device_t)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from math import sqrt
from typing import List
import torch
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
def wrap_weight_to_parameter(weights: List[torch.Tensor]) -> List[torch.Tensor]:
for i, v in enumerate(weights):
if not isinstance(v, torch.nn.Parameter):
weights[i] = torch.nn.Parameter(v)
return weights
class BatchedUnaryEmbeddingBag(torch.nn.Module):
def __init__(self, num_tasks: int, hash_sizes: List[int], long_index: bool = False):
super().__init__()
self.num_tasks = num_tasks
self.hash_sizes = hash_sizes
# [N][sum(E)][1]
embedding_data = torch.randn(size=(num_tasks, sum(self.hash_sizes), 1))
self.weight = torch.nn.Parameter(embedding_data)
index_dtype = torch.int64 if long_index else torch.int32
table_offsets_tensor = torch.cat(
[
torch.tensor([0], dtype=index_dtype),
torch.cumsum(
torch.tensor(hash_sizes),
dim=0,
dtype=index_dtype,
),
]
)
self.register_buffer("table_offsets_tensor", table_offsets_tensor)
self.init_parameters()
def forward(self, offsets: torch.Tensor, input: torch.Tensor):
# output is [N][B][T]
return torch.ops.fbgemm.batched_unary_embeddings(
self.weight,
self.table_offsets_tensor,
offsets,
input,
)
@torch.jit.export
def split_embedding_weights(self):
embedding_weights = []
for n in range(self.num_tasks):
for t in range(len(self.hash_sizes)):
embedding_weights.append(
self.weight.detach()[
n,
self.table_offsets_tensor[t] : self.table_offsets_tensor[t + 1],
:,
]
)
return embedding_weights
@torch.jit.export
def init_parameters(self):
for num_emb, param in zip(
self.hash_sizes * self.num_tasks,
wrap_weight_to_parameter(self.split_embedding_weights()),
):
assert param.shape == (num_emb, 1)
param.data.uniform_(-sqrt(1 / num_emb), sqrt(1 / num_emb))
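# Illustrative sketch (hypothetical helper, not part of the original module):
# two tasks over two unary-embedding tables; each split weight slice is
# (hash_size, 1).
def _example_batched_unary_shapes() -> None:
    bag = BatchedUnaryEmbeddingBag(num_tasks=2, hash_sizes=[10, 20])
    splits = bag.split_embedding_weights()
    assert len(splits) == 2 * 2  # num_tasks * num_tables
    assert splits[0].shape == (10, 1)
    assert splits[1].shape == (20, 1)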
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import enum
from dataclasses import dataclass
from typing import List, NamedTuple
# Maximum number of times prefetch() can be called without
# a corresponding forward() call
MAX_PREFETCH_DEPTH = 100
# GPU and CPU use 16-bit scale and bias for quantized embedding bags in TBE
# The total size is 2 + 2 = 4 bytes
DEFAULT_SCALE_BIAS_SIZE_IN_BYTES = 4
class EmbeddingLocation(enum.IntEnum):
DEVICE = 0
MANAGED = 1
MANAGED_CACHING = 2
HOST = 3
class CacheAlgorithm(enum.Enum):
LRU = 0
LFU = 1
class PoolingMode(enum.IntEnum):
SUM = 0
MEAN = 1
NONE = 2
class BoundsCheckMode(enum.IntEnum):
# Raise an exception (CPU) or device-side assert (CUDA)
FATAL = 0
# Log the first out-of-bounds instance per kernel, and set to zero.
WARNING = 1
# Set to zero.
IGNORE = 2
# No bounds checks.
NONE = 3
RecordCacheMetrics: NamedTuple = NamedTuple(
"RecordCacheMetrics",
[("record_cache_miss_counter", bool), ("record_tablewise_cache_miss", bool)],
)
SplitState: NamedTuple = NamedTuple(
"SplitState",
[
("dev_size", int),
("host_size", int),
("uvm_size", int),
("placements", List[EmbeddingLocation]),
("offsets", List[int]),
],
)
@dataclass
class CacheState:
# T + 1 elements and cache_hash_size_cumsum[-1] == total_cache_hash_size
cache_hash_size_cumsum: List[int]
cache_index_table_map: List[int]
total_cache_hash_size: int
def construct_cache_state(
row_list: List[int],
location_list: List[EmbeddingLocation],
feature_table_map: List[int],
) -> CacheState:
_cache_hash_size_cumsum = [0]
total_cache_hash_size = 0
for num_embeddings, location in zip(row_list, location_list):
if location == EmbeddingLocation.MANAGED_CACHING:
total_cache_hash_size += num_embeddings
_cache_hash_size_cumsum.append(total_cache_hash_size)
# [T], -1: non-cached table
cache_hash_size_cumsum = []
# [total_cache_hash_size], linear cache index -> table index
cache_index_table_map = [-1] * total_cache_hash_size
unique_feature_table_map = {}
for t, t_ in enumerate(feature_table_map):
unique_feature_table_map[t_] = t
for t_, t in unique_feature_table_map.items():
start, end = _cache_hash_size_cumsum[t_], _cache_hash_size_cumsum[t_ + 1]
cache_index_table_map[start:end] = [t] * (end - start)
cache_hash_size_cumsum = [
_cache_hash_size_cumsum[t_]
if location_list[t_] == EmbeddingLocation.MANAGED_CACHING
else -1
for t_ in feature_table_map
]
cache_hash_size_cumsum.append(total_cache_hash_size)
s = CacheState(
cache_hash_size_cumsum=cache_hash_size_cumsum,
cache_index_table_map=cache_index_table_map,
total_cache_hash_size=total_cache_hash_size,
)
return s
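# Illustrative sketch (hypothetical helper, not part of the original module):
# only the table placed on MANAGED_CACHING contributes rows to the cache
# state; non-cached tables get a -1 entry in cache_hash_size_cumsum.
def _example_cache_state() -> None:
    state = construct_cache_state(
        row_list=[100, 200],
        location_list=[EmbeddingLocation.DEVICE, EmbeddingLocation.MANAGED_CACHING],
        feature_table_map=[0, 1],
    )
    assert state.total_cache_hash_size == 200
    assert state.cache_hash_size_cumsum == [-1, 0, 200]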
# NOTE: This is also defined in fbgemm_gpu.split_embedding_utils, but declaring
# target dependency on :split_embedding_utils will result in compatibility
# breakage with Caffe2 module_factory because it will pull in numpy
def round_up(a: int, b: int) -> int:
return int((a + b - 1) // b) * b
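# For example (illustrative note, not in the original file):
#   round_up(10, 4) == 12 and round_up(12, 4) == 12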
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import enum
import functools
import logging
import os
from dataclasses import dataclass, field
from itertools import accumulate
from math import log2
from typing import Callable, Dict, List, Optional, Tuple, Type, Union
import torch # usort:skip
from torch import nn, Tensor # usort:skip
import fbgemm_gpu.split_embedding_codegen_lookup_invokers as invokers
from fbgemm_gpu.split_embedding_configs import EmbOptimType as OptimType, SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
CacheState,
construct_cache_state,
EmbeddingLocation,
MAX_PREFETCH_DEPTH,
PoolingMode,
RecordCacheMetrics,
SplitState,
)
try:
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cuda_training"
)
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu_training"
)
except Exception:
pass
DEFAULT_ASSOC = 32 if torch.version.hip is None else 64
INT8_EMB_ROW_DIM_OFFSET = 8
class DoesNotHavePrefix(Exception):
pass
class ComputeDevice(enum.IntEnum):
CPU = 0
CUDA = 1
MTIA = 2
class WeightDecayMode(enum.IntEnum):
NONE = 0
L2 = 1
DECOUPLE = 2
COUNTER = 3
class CounterWeightDecayMode(enum.IntEnum):
NONE = 0
L2 = 1
DECOUPLE = 2
class LearningRateMode(enum.IntEnum):
EQUAL = -1
TAIL_ID_LR_INCREASE = 0
TAIL_ID_LR_DECREASE = 1
COUNTER_SGD = 2
class GradSumDecay(enum.IntEnum):
NO_DECAY = -1
CTR_DECAY = 0
@dataclass
class TailIdThreshold:
val: float = 0
is_ratio: bool = False
@dataclass
class CounterBasedRegularizationDefinition:
counter_weight_decay_mode: CounterWeightDecayMode = CounterWeightDecayMode.NONE
counter_halflife: int = -1
adjustment_iter: int = -1
adjustment_ub: float = 1.0
learning_rate_mode: LearningRateMode = LearningRateMode.EQUAL
grad_sum_decay: GradSumDecay = GradSumDecay.NO_DECAY
tail_id_threshold: TailIdThreshold = field(default_factory=TailIdThreshold)
max_counter_update_freq: int = 1000
def construct_split_state(
embedding_specs: List[Tuple[int, int, EmbeddingLocation, ComputeDevice]],
rowwise: bool,
cacheable: bool,
precision: SparseType = SparseType.FP32,
int8_emb_row_dim_offset: int = INT8_EMB_ROW_DIM_OFFSET,
placement: Optional[EmbeddingLocation] = None,
) -> SplitState:
placements: List[EmbeddingLocation] = []
offsets: List[int] = []
dev_size: int = 0
host_size: int = 0
uvm_size: int = 0
for num_embeddings, embedding_dim, location, _ in embedding_specs:
assert (
embedding_dim % 4 == 0
), f"embedding_dim must be a multiple of 4, but got {embedding_dim}"
if precision == SparseType.INT8:
embedding_dim += int8_emb_row_dim_offset
state_size = num_embeddings * embedding_dim if not rowwise else num_embeddings
location = placement if placement is not None else location
if location == EmbeddingLocation.HOST:
placements.append(EmbeddingLocation.HOST)
offsets.append(host_size)
host_size += state_size
        # If table is on device, then optimizer is on device.
# If table is managed, then if optimizer state is rowwise, optimizer is on device, otherwise optimizer is managed.
elif location == EmbeddingLocation.DEVICE or rowwise:
placements.append(EmbeddingLocation.DEVICE)
offsets.append(dev_size)
dev_size += state_size
else:
if cacheable and location == EmbeddingLocation.MANAGED_CACHING:
placements.append(EmbeddingLocation.MANAGED_CACHING)
else:
placements.append(EmbeddingLocation.MANAGED)
offsets.append(uvm_size)
uvm_size += state_size
assert len(placements) == len(offsets)
return SplitState(
dev_size=dev_size,
host_size=host_size,
uvm_size=uvm_size,
placements=placements,
offsets=offsets,
)
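# Illustrative sketch (hypothetical helper, not part of the original module):
# two HOST tables of shape (100, 4) and (10, 8) go entirely into the host
# partition of the split state.
def _example_split_state() -> None:
    specs = [
        (100, 4, EmbeddingLocation.HOST, ComputeDevice.CPU),
        (10, 8, EmbeddingLocation.HOST, ComputeDevice.CPU),
    ]
    state = construct_split_state(specs, rowwise=False, cacheable=False)
    assert state.host_size == 100 * 4 + 10 * 8
    assert state.dev_size == 0 and state.uvm_size == 0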
def apply_split_helper(
persistent_state_fn: Callable[[str, Tensor], None],
set_attr_fn: Callable[
[str, Union[Tensor, List[int], List[EmbeddingLocation]]], None
],
current_device: torch.device,
use_cpu: bool,
feature_table_map: List[int],
split: SplitState,
prefix: str,
dtype: Type[torch.dtype],
enforce_hbm: bool = False,
make_dev_param: bool = False,
dev_reshape: Optional[Tuple[int, ...]] = None,
) -> None:
set_attr_fn(f"{prefix}_physical_placements", split.placements)
set_attr_fn(f"{prefix}_physical_offsets", split.offsets)
offsets = [split.offsets[t] for t in feature_table_map]
placements = [split.placements[t] for t in feature_table_map]
persistent_state_fn(
f"{prefix}_offsets",
torch.tensor(offsets, device=current_device, dtype=torch.int64),
)
persistent_state_fn(
f"{prefix}_placements",
torch.tensor(placements, device=current_device, dtype=torch.int32),
)
if split.dev_size > 0:
dev_buffer = torch.zeros(
split.dev_size,
device=current_device,
# pyre-fixme[6]
dtype=dtype,
)
dev_buffer = (
dev_buffer.view(*dev_reshape) if dev_reshape is not None else dev_buffer
)
else:
# pyre-fixme[6]
dev_buffer = torch.empty(0, device=current_device, dtype=dtype)
if make_dev_param:
set_attr_fn(f"{prefix}_dev", nn.Parameter(dev_buffer))
else:
persistent_state_fn(f"{prefix}_dev", dev_buffer)
if split.host_size > 0:
if dtype == torch.uint8:
persistent_state_fn(
f"{prefix}_host",
torch.zeros(
split.host_size,
device=current_device,
# pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]` for
# 3rd param but got `Type[Type[torch._dtype]]`.
dtype=dtype,
),
)
else:
set_attr_fn(
f"{prefix}_host",
nn.Parameter(
torch.zeros(
split.host_size,
device=current_device,
# pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]`
# for 3rd param but got `Type[Type[torch._dtype]]`.
dtype=dtype,
)
),
)
else:
persistent_state_fn(
f"{prefix}_host",
# pyre-fixme[6]: For 3rd param expected `dtype` but got `Type[dtype]`.
torch.empty(0, device=current_device, dtype=dtype),
)
if split.uvm_size > 0:
assert not use_cpu
if enforce_hbm:
logging.info("Enforce hbm for the cache location")
persistent_state_fn(
f"{prefix}_uvm",
torch.zeros(
split.uvm_size,
device=current_device,
# pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]` for
# 3rd param but got `Type[Type[torch._dtype]]`.
dtype=dtype,
),
)
else:
persistent_state_fn(
f"{prefix}_uvm",
torch.zeros(
split.uvm_size,
out=torch.ops.fbgemm.new_managed_tensor(
# pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]`
# for 3rd param but got `Type[Type[torch._dtype]]`.
torch.zeros(1, device=current_device, dtype=dtype),
[split.uvm_size],
),
),
)
else:
persistent_state_fn(
f"{prefix}_uvm",
# pyre-fixme[6]: For 3rd param expected `dtype` but got `Type[dtype]`.
torch.empty(0, device=current_device, dtype=dtype),
)
# pyre-fixme[13]: Attribute `uvm_cache_stats` is never initialized.
# pyre-fixme[13]: Attribute `local_uvm_cache_stats` is never initialized.
class SplitTableBatchedEmbeddingBagsCodegen(nn.Module):
"""
Multiple sparse features can share one embedding table.
'feature_table_map' specifies the feature-table mapping.
T: number of logical tables
T_: number of physical tables
T >= T_
For supported optimizer hyperparams, see inline comments below
"""
embedding_specs: List[Tuple[int, int, EmbeddingLocation, ComputeDevice]]
optimizer_args: invokers.lookup_args.OptimizerArgs
lxu_cache_locations_list: List[Tensor]
lxu_cache_locations_empty: Tensor
timesteps_prefetched: List[int]
record_cache_metrics: RecordCacheMetrics
uvm_cache_stats: torch.Tensor
local_uvm_cache_stats: torch.Tensor
linear_cache_indices_list: List[Tensor]
def __init__( # noqa C901
self,
embedding_specs: List[
Tuple[int, int, EmbeddingLocation, ComputeDevice]
], # tuple of (rows, dims, placements, compute_devices)
feature_table_map: Optional[List[int]] = None, # [T]
cache_algorithm: CacheAlgorithm = CacheAlgorithm.LRU,
cache_load_factor: float = 0.2,
cache_sets: int = 0,
cache_reserved_memory: float = 0.0,
cache_precision: SparseType = SparseType.FP32,
weights_precision: SparseType = SparseType.FP32,
output_dtype: SparseType = SparseType.FP32,
enforce_hbm: bool = False, # place all weights/momentums in HBM when using cache
optimizer: OptimType = OptimType.EXACT_SGD,
record_cache_metrics: Optional[RecordCacheMetrics] = None,
gather_uvm_cache_stats: Optional[bool] = False,
# General Optimizer args
stochastic_rounding: bool = True,
gradient_clipping: bool = False,
max_gradient: float = 1.0,
learning_rate: float = 0.01,
# used by EXACT_ADAGRAD, EXACT_ROWWISE_ADAGRAD, EXACT_ROWWISE_WEIGHTED_ADAGRAD, LAMB, and ADAM only
        # NOTE that the default is different from the torch.optim.Adagrad default of 1e-10
eps: float = 1.0e-8,
momentum: float = 0.9, # used by LARS-SGD
# EXACT_ADAGRAD, SGD, EXACT_SGD do not support weight decay
# LAMB, ADAM, PARTIAL_ROWWISE_ADAM, PARTIAL_ROWWISE_LAMB, LARS_SGD support decoupled weight decay
# EXACT_ROWWISE_WEIGHTED_ADAGRAD supports L2 weight decay
        # EXACT_ROWWISE_ADAGRAD supports both L2 and decoupled weight decay (via weight_decay_mode)
weight_decay: float = 0.0,
weight_decay_mode: WeightDecayMode = WeightDecayMode.NONE,
eta: float = 0.001, # used by LARS-SGD,
beta1: float = 0.9, # used by LAMB and ADAM
beta2: float = 0.999, # used by LAMB and ADAM
counter_based_regularization: Optional[
CounterBasedRegularizationDefinition
] = None, # used by Rowwise Adagrad
pooling_mode: PoolingMode = PoolingMode.SUM,
device: Optional[Union[str, int, torch.device]] = None,
bounds_check_mode: BoundsCheckMode = BoundsCheckMode.WARNING,
uvm_non_rowwise_momentum: bool = False, # place non-rowwise momentum on UVM
        use_experimental_tbe: bool = False,  # set to True to use TBE v2 (only supports NVIDIA GPUs)
# set to True to enable prefetch pipeline, currently only supports LRU cache policy.
# If a separate stream is used for prefetch, the optional forward_stream arg of prefetch function
# should be set.
prefetch_pipeline: bool = False,
) -> None:
super(SplitTableBatchedEmbeddingBagsCodegen, self).__init__()
self.pooling_mode = pooling_mode
self.bounds_check_mode_int: int = bounds_check_mode.value
self.weights_precision = weights_precision
self.output_dtype: int = output_dtype.as_int()
assert (
not prefetch_pipeline or cache_algorithm == CacheAlgorithm.LRU
), "Only LRU cache policy supports prefetch_pipeline."
self.prefetch_pipeline: bool = prefetch_pipeline
self.lock_cache_line: bool = self.prefetch_pipeline
if record_cache_metrics is not None:
self.record_cache_metrics = record_cache_metrics
else:
self.record_cache_metrics = RecordCacheMetrics(False, False)
self.embedding_specs = embedding_specs
(rows, dims, locations, compute_devices) = zip(*embedding_specs)
T_ = len(self.embedding_specs)
self.dims: List[int] = dims
assert T_ > 0
# mixed D is not supported by no bag kernels
mixed_D = False
D = self.dims[0]
for d in self.dims:
if d != D:
mixed_D = True
break
if mixed_D:
assert (
self.pooling_mode != PoolingMode.NONE
), "Mixed dimension tables only supported for pooling tables."
assert all(
cd == compute_devices[0] for cd in compute_devices
), "Heterogenous compute_devices are NOT supported!"
# Split TBE has different function schemas for CUDA and CPU.
# For MTIA device type, it uses the CPU one.
self.use_cpu: bool = (
compute_devices[0] == ComputeDevice.CPU
or compute_devices[0] == ComputeDevice.MTIA
)
assert not self.use_cpu or all(
loc == EmbeddingLocation.HOST for loc in locations
), "ComputeDevice.CPU is only for EmbeddingLocation.HOST!"
assert self.use_cpu or all(
loc != EmbeddingLocation.HOST for loc in locations
), "EmbeddingLocation.HOST doesn't work for CUDA device!"
if self.use_cpu or self.pooling_mode == PoolingMode.NONE:
assert output_dtype in [
SparseType.FP32,
SparseType.FP16,
SparseType.BF16,
], "Fused pooled embedding quantization only supported for cuda."
if optimizer == OptimType.NONE:
assert all(
loc == EmbeddingLocation.DEVICE for loc in locations
), "OptimType.NONE supports only EmbeddingLocation.DEVICE"
assert all(
cd == ComputeDevice.CUDA for cd in compute_devices
), "OptimType.NONE supports only ComputeDevice.CUDA"
assert (
not mixed_D
), "OptimType.NONE does not support mixed embedding dimension"
if device is None:
self.current_device: torch.device = (
torch.device("cpu")
if self.use_cpu
else torch.device(torch.cuda.current_device())
)
elif isinstance(device, torch.device):
self.current_device = device
else:
self.current_device = torch.device(device)
# add placeholder require_grad param tensor to enable autograd with int8 weights
self.placeholder_autograd_tensor = nn.Parameter(
torch.zeros(0, device=self.current_device, dtype=torch.float)
)
self.gather_uvm_cache_stats = gather_uvm_cache_stats
# Define the size of uvm cache stats as class variable
# to make it work with torch jit script.
self.uvm_cache_stats_size = 6
# 0: N_calls, 1: N_requested_indices, 2: N_unique_indices, 3: N_unique_misses,
# 4: N_conflict_unique_misses, 5: N_conflict_misses
self.int8_emb_row_dim_offset: int = INT8_EMB_ROW_DIM_OFFSET
self.feature_table_map: List[int] = (
feature_table_map if feature_table_map is not None else list(range(T_))
)
T = len(self.feature_table_map)
assert T_ <= T
table_has_feature = [False] * T_
for t in self.feature_table_map:
table_has_feature[t] = True
assert all(table_has_feature), "Each table must have at least one feature!"
feature_dims = [dims[t] for t in self.feature_table_map]
D_offsets = [0] + list(accumulate(feature_dims))
self.total_D: int = D_offsets[-1]
self.max_D: int = max(dims)
cached_dims = [
embedding_spec[1]
for embedding_spec in embedding_specs
if embedding_spec[2] == EmbeddingLocation.MANAGED_CACHING
]
self.max_D_cache: int = max(cached_dims) if len(cached_dims) > 0 else 0
self.register_buffer(
"D_offsets",
torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
)
hash_size_cumsum = [0] + list(accumulate(rows))
self.total_hash_size: int = int(hash_size_cumsum[-1])
if self.total_hash_size == 0:
self.total_hash_size_bits: int = 0
else:
self.total_hash_size_bits: int = int(log2(float(self.total_hash_size)) + 1)
# The last element is to easily access # of rows of each table by
# hash_size_cumsum[t + 1] - hash_size_cumsum[t]
hash_size_cumsum = [hash_size_cumsum[t] for t in self.feature_table_map] + [
self.total_hash_size
]
self.register_buffer(
"hash_size_cumsum",
torch.tensor(
hash_size_cumsum, device=self.current_device, dtype=torch.int64
),
)
self.register_buffer(
"rows_per_table",
torch.tensor(
[rows[t] for t in self.feature_table_map],
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"bounds_check_warning",
torch.tensor([0], device=self.current_device, dtype=torch.int64),
)
# Required for VBE
self.register_buffer(
"feature_dims",
torch.tensor(feature_dims, device="cpu", dtype=torch.int64),
)
weight_split = construct_split_state(
embedding_specs,
rowwise=False,
cacheable=True,
precision=weights_precision,
)
table_embedding_dtype = weights_precision.as_dtype()
self._apply_split(
weight_split,
prefix="weights",
# pyre-fixme[6]: For 3rd param expected `Type[Type[_dtype]]` but got
# `Type[_dtype]`.
dtype=table_embedding_dtype,
enforce_hbm=enforce_hbm,
make_dev_param=optimizer == OptimType.NONE,
dev_reshape=(-1, self.max_D) if optimizer == OptimType.NONE else None,
)
assert optimizer not in (
OptimType.SGD,
OptimType.ROWWISE_ADAGRAD,
), f"Optimizer {optimizer} is deprecated in the CPU + GPU modes."
if self.use_cpu:
# Construct optimizer states
assert optimizer in (
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
OptimType.EXACT_SGD,
), f"Optimizer {optimizer} is not supported in CPU mode."
else:
assert optimizer in (
OptimType.ADAM,
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
OptimType.EXACT_SGD,
OptimType.LAMB,
OptimType.LARS_SGD,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.PARTIAL_ROWWISE_LAMB,
OptimType.NONE,
), f"Optimizer {optimizer} is not supported."
self.stochastic_rounding = stochastic_rounding
self.optimizer = optimizer
self.weight_decay_mode = weight_decay_mode
if (
weight_decay_mode == WeightDecayMode.COUNTER
and counter_based_regularization is None
):
raise AssertionError(
"weight_decay_mode is set to WeightDecayMode.COUNTER but counter_based_regularization is None"
)
if (
weight_decay_mode != WeightDecayMode.COUNTER
and counter_based_regularization is not None
):
raise AssertionError(
"Need to set weight_decay_mode to WeightDecayMode.COUNTER together with counter_based_regularization"
)
self._used_rowwise_adagrad_with_counter: bool = (
optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
and weight_decay_mode == WeightDecayMode.COUNTER
and counter_based_regularization is not None
)
if counter_based_regularization is None:
counter_based_regularization = CounterBasedRegularizationDefinition()
self._max_counter_update_freq: int = -1
if self._used_rowwise_adagrad_with_counter:
self._max_counter_update_freq = (
counter_based_regularization.max_counter_update_freq
)
opt_arg_weight_decay_mode = (
counter_based_regularization.counter_weight_decay_mode
)
else:
opt_arg_weight_decay_mode = weight_decay_mode
self.optimizer_args = invokers.lookup_args.OptimizerArgs(
stochastic_rounding=stochastic_rounding,
gradient_clipping=gradient_clipping,
max_gradient=max_gradient,
learning_rate=learning_rate,
eps=eps,
beta1=beta1,
beta2=beta2,
weight_decay=weight_decay,
weight_decay_mode=opt_arg_weight_decay_mode.value,
eta=eta,
momentum=momentum,
counter_halflife=counter_based_regularization.counter_halflife,
adjustment_iter=counter_based_regularization.adjustment_iter,
adjustment_ub=counter_based_regularization.adjustment_ub,
learning_rate_mode=counter_based_regularization.learning_rate_mode.value,
grad_sum_decay=counter_based_regularization.grad_sum_decay.value,
tail_id_threshold=counter_based_regularization.tail_id_threshold.val,
is_tail_id_thresh_ratio=int(
counter_based_regularization.tail_id_threshold.is_ratio
),
total_hash_size=self.total_hash_size,
)
if optimizer != OptimType.NONE:
if optimizer in (OptimType.EXACT_SGD,):
# NOTE: make TorchScript work!
self._register_nonpersistent_buffers("momentum1")
else:
rowwise = optimizer in [
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
]
self._apply_split(
construct_split_state(
embedding_specs,
rowwise=rowwise,
cacheable=False,
placement=EmbeddingLocation.MANAGED
if ((not rowwise) and uvm_non_rowwise_momentum)
else None,
),
prefix="momentum1",
# pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param
# but got `Type[torch.float32]`.
dtype=torch.float32,
enforce_hbm=enforce_hbm,
)
if optimizer in (
OptimType.ADAM,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_LAMB,
):
rowwise = optimizer in (
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.PARTIAL_ROWWISE_LAMB,
)
self._apply_split(
construct_split_state(
embedding_specs,
rowwise=rowwise,
cacheable=False,
placement=EmbeddingLocation.MANAGED
if ((not rowwise) and uvm_non_rowwise_momentum)
else None,
),
prefix="momentum2",
# pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param
# but got `Type[torch.float32]`.
dtype=torch.float32,
)
else:
# NOTE: make TorchScript work!
self._register_nonpersistent_buffers("momentum2")
if self._used_rowwise_adagrad_with_counter:
self._apply_split(
construct_split_state(
embedding_specs,
rowwise=True,
cacheable=False,
),
prefix="prev_iter",
# TODO: ideally we should use int64 to track iter but it failed to compile.
# It may be related to low precision training code. Currently using float32
# as a workaround while investigating the issue.
# pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param
# but got `Type[torch.float32]`.
dtype=torch.float32,
)
self._apply_split(
construct_split_state(
embedding_specs,
rowwise=True,
cacheable=False,
),
prefix="row_counter",
# pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param
# but got `Type[torch.float32]`.
dtype=torch.float32,
)
self.register_buffer(
"max_counter", torch.tensor([1], dtype=torch.float32)
)
else:
self._register_nonpersistent_buffers("prev_iter")
self._register_nonpersistent_buffers("row_counter")
self.register_buffer(
"max_counter",
torch.ones(1, dtype=torch.float32, device=self.current_device),
persistent=False,
)
if optimizer in (
OptimType.ADAM,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.PARTIAL_ROWWISE_LAMB,
):
self.register_buffer(
"iter",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
)
else:
self.register_buffer(
"iter",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
cache_state = construct_cache_state(rows, locations, self.feature_table_map)
# Add table-wise cache miss counter
if self.record_cache_metrics.record_tablewise_cache_miss:
num_tables = len(cache_state.cache_hash_size_cumsum) - 1
self.register_buffer(
"table_wise_cache_miss",
torch.zeros(
num_tables,
device=self.current_device,
dtype=torch.int64,
),
)
# NOTE: make TorchScript work!
else:
self.register_buffer(
"table_wise_cache_miss",
torch.zeros(
0,
device=self.current_device,
dtype=torch.int64,
),
)
if cache_precision == SparseType.FP32:
cache_embedding_dtype = torch.float32
elif cache_precision == SparseType.FP16:
cache_embedding_dtype = torch.float16
else:
raise AssertionError(f"cache_precision {cache_precision} not supported!")
self._apply_cache_state(
cache_state,
cache_algorithm,
cache_load_factor,
cache_sets,
cache_reserved_memory,
dtype=cache_embedding_dtype,
)
logging.info(
f"Using fused {optimizer} with optimizer_args={self.optimizer_args if optimizer != OptimType.NONE else None}\n"
f"Using rowwise_adagrad_with_counter={self._used_rowwise_adagrad_with_counter}"
)
self.step = 0
# Check whether to use TBE v2
is_experimental = False
fbgemm_exp_tbe = os.environ.get("FBGEMM_EXPERIMENTAL_TBE")
if use_experimental_tbe:
is_experimental = True
logging.info(
"use_experimental_tbe is set to True; Use experimental TBE: True"
)
elif fbgemm_exp_tbe is not None:
is_experimental = int(fbgemm_exp_tbe) == 1
logging.info(
f"FBGEMM_EXPERIMENTAL_TBE is set to {fbgemm_exp_tbe}; "
f"Use experimental TBE: {is_experimental}"
)
self.is_experimental: bool = is_experimental
def _register_nonpersistent_buffers(self, prefix: str) -> None:
# NOTE: make TorchScript work!
self.register_buffer(
f"{prefix}_dev",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
f"{prefix}_host",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
f"{prefix}_uvm",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
f"{prefix}_placements",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
f"{prefix}_offsets",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
def get_states(self, prefix: str) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
if not hasattr(self, f"{prefix}_physical_placements"):
raise DoesNotHavePrefix()
dev_param = getattr(self, f"{prefix}_dev")
host_param = getattr(self, f"{prefix}_host")
uvm_param = getattr(self, f"{prefix}_uvm")
placements = getattr(self, f"{prefix}_physical_placements")
offsets = getattr(self, f"{prefix}_physical_offsets")
return (
dev_param,
host_param,
uvm_param,
torch.tensor(placements, dtype=torch.int32),
torch.tensor(offsets, dtype=torch.int64),
)
def get_all_states(self) -> List[Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]]:
all_states = []
for prefix in ["weights", "momentum1", "momentum2", "prev_iter", "row_counter"]:
try:
all_states.append(self.get_states(prefix))
except DoesNotHavePrefix:
pass
return all_states
@torch.jit.export
def get_cache_miss_counter(self) -> Tensor:
# cache_miss_counter contains two items:
# The first one is cache_miss_forward_count, which records the total number of forward calls that had at least one cache miss
# The second one is unique_cache_miss_count, which records the total number of unique (deduplicated) cache misses
return self.cache_miss_counter
@torch.jit.export
def get_table_wise_cache_miss(self) -> Tensor:
# table_wise_cache_miss contains the cache miss count for each table in this embedding object:
return self.table_wise_cache_miss
def forward( # noqa: C901
self,
indices: Tensor,
offsets: Tensor,
per_sample_weights: Optional[Tensor] = None,
feature_requires_grad: Optional[Tensor] = None,
# 2D list of batch sizes, one entry per (feature, rank) pair.
# Shape: (number of features, number of ranks). See the worked VBE
# example after this method for how it is turned into offsets.
batch_size_per_feature_per_rank: Optional[List[List[int]]] = None,
total_unique_indices: Optional[int] = None,
) -> Tensor:
if batch_size_per_feature_per_rank is not None:
assert (
self.optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
or self.optimizer == OptimType.EXACT_SGD
), "Variable batch size TBE support is enabled for OptimType.EXACT_ROWWISE_ADAGRAD only"
assert (
self.pooling_mode != PoolingMode.NONE.value
), "Variable batch size TBE support is not enabled for PoolingMode.NONE"
# TODO: Add input check
zero_tensor = torch.zeros(1, device="cpu", dtype=torch.int32)
# Create B offsets
total_batch_size_per_feature = torch.tensor(
[sum(batch_sizes) for batch_sizes in batch_size_per_feature_per_rank],
device="cpu",
dtype=torch.int32,
)
max_B = int(total_batch_size_per_feature.max().item())
Bs = torch.concat([zero_tensor, total_batch_size_per_feature])
B_offsets = Bs.cumsum(dim=0).to(torch.int)
# Create output offsets
B_feature_rank = torch.tensor(
batch_size_per_feature_per_rank,
device="cpu",
dtype=torch.int64,
)
max_B_feature_rank = int(B_feature_rank.max().item())
# D->H only once
self.feature_dims = self.feature_dims.cpu()
output_sizes_feature_rank = B_feature_rank.transpose(
0, 1
) * self.feature_dims.view(1, -1)
output_offsets_feature_rank = torch.concat(
[
zero_tensor.to(torch.int64),
output_sizes_feature_rank.flatten().cumsum(dim=0),
]
)
output_size = int(output_offsets_feature_rank[-1].item())
# TODO: Support INT8 output
# B_offsets_rank_per_feature is for rank and (b, t) mapping
B_offsets_rank_per_feature = (
torch.tensor(
[
[0] + batch_size_per_feature
for batch_size_per_feature in batch_size_per_feature_per_rank
],
device="cpu",
dtype=torch.int32,
)
.cumsum(dim=1)
.to(torch.int)
)
B_offsets = B_offsets.to(self.current_device, non_blocking=True)
output_offsets_feature_rank = output_offsets_feature_rank.to(
self.current_device, non_blocking=True
)
B_offsets_rank_per_feature = B_offsets_rank_per_feature.to(
self.current_device, non_blocking=True
)
# TODO: Use int32 for B_offsets and int64 for output_offsets_feature_rank
vbe_metadata = invokers.lookup_args.VBEMetadata(
B_offsets=B_offsets,
output_offsets_feature_rank=output_offsets_feature_rank,
B_offsets_rank_per_feature=B_offsets_rank_per_feature,
max_B=max_B,
max_B_feature_rank=max_B_feature_rank,
output_size=output_size,
)
else:
vbe_metadata = invokers.lookup_args.VBEMetadata(
B_offsets=None,
output_offsets_feature_rank=None,
B_offsets_rank_per_feature=None,
max_B=-1,
max_B_feature_rank=-1,
output_size=-1,
)
(indices, offsets) = indices.long(), offsets.long()
if self.bounds_check_mode_int != BoundsCheckMode.NONE.value:
torch.ops.fbgemm.bounds_check_indices(
self.rows_per_table,
indices,
offsets,
self.bounds_check_mode_int,
self.bounds_check_warning,
per_sample_weights,
B_offsets=vbe_metadata.B_offsets,
max_B=vbe_metadata.max_B,
)
self.step += 1
if len(self.timesteps_prefetched) == 0:
self._prefetch(indices, offsets)
self.timesteps_prefetched.pop(0)
self.lxu_cache_locations = (
self.lxu_cache_locations_empty
if len(self.lxu_cache_locations_list) == 0
else self.lxu_cache_locations_list.pop(0)
)
common_args = invokers.lookup_args.CommonArgs(
placeholder_autograd_tensor=self.placeholder_autograd_tensor,
dev_weights=self.weights_dev,
host_weights=self.weights_host,
uvm_weights=self.weights_uvm,
lxu_cache_weights=self.lxu_cache_weights,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
D_offsets=self.D_offsets,
total_D=self.total_D,
max_D=self.max_D,
hash_size_cumsum=self.hash_size_cumsum,
total_hash_size_bits=self.total_hash_size_bits,
indices=indices,
offsets=offsets,
pooling_mode=self.pooling_mode,
indice_weights=per_sample_weights,
feature_requires_grad=feature_requires_grad,
lxu_cache_locations=self.lxu_cache_locations,
output_dtype=self.output_dtype,
vbe_metadata=vbe_metadata,
is_experimental=self.is_experimental,
)
if self.optimizer == OptimType.NONE:
assert (
total_unique_indices is not None
and total_unique_indices <= indices.numel()
), f"OptimType.NONE requires total_unique_indices. Please pass it or check the value (total_unique_indices = {total_unique_indices})"
return invokers.lookup_none.invoke(
common_args, self.optimizer_args, total_unique_indices
)
elif self.optimizer == OptimType.EXACT_SGD:
return invokers.lookup_sgd.invoke(common_args, self.optimizer_args)
momentum1 = invokers.lookup_args.Momentum(
dev=self.momentum1_dev,
host=self.momentum1_host,
uvm=self.momentum1_uvm,
offsets=self.momentum1_offsets,
placements=self.momentum1_placements,
)
if self.optimizer == OptimType.LARS_SGD:
return invokers.lookup_lars_sgd.invoke(
common_args, self.optimizer_args, momentum1
)
if self.optimizer == OptimType.EXACT_ADAGRAD:
return invokers.lookup_adagrad.invoke(
common_args, self.optimizer_args, momentum1
)
momentum2 = invokers.lookup_args.Momentum(
dev=self.momentum2_dev,
host=self.momentum2_host,
uvm=self.momentum2_uvm,
offsets=self.momentum2_offsets,
placements=self.momentum2_placements,
)
# Ensure iter is always on CPU so the increment doesn't synchronize.
if not self.iter.is_cpu:
self.iter = self.iter.cpu()
self.iter[0] += 1
if self.optimizer == OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD:
return invokers.lookup_rowwise_weighted_adagrad.invoke(
common_args,
self.optimizer_args,
momentum1,
# pyre-fixme[6]: Expected `int` for 4th param but got `Union[float,
# int]`.
self.iter.item(),
)
if self.optimizer == OptimType.ADAM:
return invokers.lookup_adam.invoke(
common_args,
self.optimizer_args,
momentum1,
momentum2,
# pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
# int]`.
self.iter.item(),
)
if self.optimizer == OptimType.PARTIAL_ROWWISE_ADAM:
return invokers.lookup_partial_rowwise_adam.invoke(
common_args,
self.optimizer_args,
momentum1,
momentum2,
# pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
# int]`.
self.iter.item(),
)
if self.optimizer == OptimType.LAMB:
return invokers.lookup_lamb.invoke(
common_args,
self.optimizer_args,
momentum1,
momentum2,
# pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
# int]`.
self.iter.item(),
)
if self.optimizer == OptimType.PARTIAL_ROWWISE_LAMB:
return invokers.lookup_partial_rowwise_lamb.invoke(
common_args,
self.optimizer_args,
momentum1,
momentum2,
# pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
# int]`.
self.iter.item(),
)
prev_iter = invokers.lookup_args.Momentum(
dev=self.prev_iter_dev,
host=self.prev_iter_host,
uvm=self.prev_iter_uvm,
offsets=self.prev_iter_offsets,
placements=self.prev_iter_placements,
)
row_counter = invokers.lookup_args.Momentum(
dev=self.row_counter_dev,
host=self.row_counter_host,
uvm=self.row_counter_uvm,
offsets=self.row_counter_offsets,
placements=self.row_counter_placements,
)
if self._used_rowwise_adagrad_with_counter:
if self.iter.item() % self._max_counter_update_freq == 0:
row_counter_dev = self.row_counter_dev.detach()
if row_counter_dev.numel() > 0:
self.max_counter[0] = torch.max(row_counter_dev).cpu().item() + 1
else:
self.max_counter[0] = 1
if self.optimizer == OptimType.EXACT_ROWWISE_ADAGRAD:
if self._used_rowwise_adagrad_with_counter:
return invokers.lookup_rowwise_adagrad_with_counter.invoke(
common_args,
self.optimizer_args,
momentum1,
prev_iter,
row_counter,
# pyre-fixme[6]: Expected `int` for 6th param but got `Union[float, int]`.
self.iter.item(),
self.max_counter.item(),
)
else:
return invokers.lookup_rowwise_adagrad.invoke(
common_args, self.optimizer_args, momentum1
)
raise ValueError(f"Invalid OptimType: {self.optimizer}")
def reset_uvm_cache_stats(self) -> None:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
self.uvm_cache_stats.zero_()
self.local_uvm_cache_stats.zero_()
def get_uvm_cache_stats(self) -> Tensor:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
return self.uvm_cache_stats
def print_uvm_cache_stats(self) -> None:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
uvm_cache_stats = self.uvm_cache_stats.tolist()
logging.info(
f"N_called: {uvm_cache_stats[0]}\n"
f"N_requested_indices: {uvm_cache_stats[1]}\n"
f"N_unique_indices: {uvm_cache_stats[2]}\n"
f"N_unique_misses: {uvm_cache_stats[3]}\n"
f"N_conflict_unique_misses: {uvm_cache_stats[4]}\n"
f"N_conflict_misses: {uvm_cache_stats[5]}\n"
)
if uvm_cache_stats[1]:
logging.info(
f"unique indices / requested indices: {uvm_cache_stats[2]/uvm_cache_stats[1]}\n"
f"unique misses / requested indices: {uvm_cache_stats[3]/uvm_cache_stats[1]}\n"
)
def prefetch(
self,
indices: Tensor,
offsets: Tensor,
forward_stream: Optional[torch.cuda.Stream] = None,
) -> None:
if self.prefetch_stream is None and forward_stream is not None:
self.prefetch_stream = torch.cuda.current_stream()
assert (
self.prefetch_stream != forward_stream
), "prefetch_stream and forward_stream should not be the same stream"
self._prefetch(indices, offsets)
if forward_stream is not None:
self._prefetch_tensors_record_stream(forward_stream)
def _prefetch(self, indices: Tensor, offsets: Tensor) -> None:
self.timestep += 1
self.timesteps_prefetched.append(self.timestep)
if not self.lxu_cache_weights.numel():
return
(indices, offsets) = indices.long(), offsets.long()
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.cache_hash_size_cumsum,
indices,
offsets,
)
if (
self.record_cache_metrics.record_cache_miss_counter
or self.record_cache_metrics.record_tablewise_cache_miss
):
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
if self.record_cache_metrics.record_cache_miss_counter:
self._update_cache_miss_counter(
lxu_cache_locations, linear_cache_indices
)
if self.record_cache_metrics.record_tablewise_cache_miss:
self._update_tablewise_cache_miss(
lxu_cache_locations, linear_cache_indices, offsets
)
if self.cache_algorithm == CacheAlgorithm.LRU:
torch.ops.fbgemm.lru_cache_populate(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.total_cache_hash_size,
self.cache_index_table_map,
self.weights_offsets,
self.D_offsets,
linear_cache_indices,
self.lxu_cache_state,
self.lxu_cache_weights,
self.timestep,
self.lxu_state,
self.stochastic_rounding,
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
self.lock_cache_line,
self.lxu_cache_locking_counter,
)
elif self.cache_algorithm == CacheAlgorithm.LFU:
torch.ops.fbgemm.lfu_cache_populate(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.total_cache_hash_size,
self.cache_index_table_map,
self.weights_offsets,
self.D_offsets,
linear_cache_indices,
self.lxu_cache_state,
self.lxu_cache_weights,
self.lxu_state,
self.stochastic_rounding,
)
assert (
len(self.lxu_cache_locations_list) < self.max_prefetch_depth
), f"self.lxu_cache_locations_list has grown to size: {len(self.lxu_cache_locations_list)}, this exceeds the maximum: {self.max_prefetch_depth}. This probably indicates an error in logic where prefetch() is being called more frequently than forward()"
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
self.lxu_cache_locations_list.append(lxu_cache_locations)
if self.prefetch_pipeline:
self.linear_cache_indices_list.append(linear_cache_indices)
if self.gather_uvm_cache_stats:
# Accumulate local_uvm_cache_stats (int32) into uvm_cache_stats (int64).
# We may want to do this accumulation atomically, but since it is only for monitoring,
# slightly inaccurate results may be acceptable.
self.uvm_cache_stats = torch.add(
self.uvm_cache_stats, self.local_uvm_cache_stats
)
self.local_uvm_cache_stats.zero_()
def _prefetch_tensors_record_stream(
self, forward_stream: torch.cuda.Stream
) -> None:
# Record the tensors created by prefetch stream and consumed by forward/backward
# to the forward stream. In PyTorch, each backward CUDA op runs on the same
# stream that was used for its corresponding forward op.
for t in self.lxu_cache_locations_list:
# pyre-fixme[6]: For 1st param expected `_C.Stream` but got `streams.Stream`
t.record_stream(forward_stream)
for t in self.linear_cache_indices_list:
# pyre-fixme[6]: For 1st param expected `_C.Stream` but got `streams.Stream`
t.record_stream(forward_stream)
def _update_cache_miss_counter(
self,
lxu_cache_locations: Tensor,
linear_cache_indices: Tensor,
) -> None:
CACHE_MISS = -1
CACHE_HIT = -2
cache_missed_locations = torch.where(
lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT
)
unique_ids_list = torch.unique(cache_missed_locations)
unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)
miss_count = torch.sum(unique_ids_count_list)
self.cache_miss_counter[0] += (miss_count > 0).to(torch.int64)
self.cache_miss_counter[1] += miss_count
def _update_tablewise_cache_miss(
self,
lxu_cache_locations: Tensor,
linear_cache_indices: Tensor,
offsets: Tensor,
) -> None:
CACHE_MISS = -1
CACHE_HIT = -2
num_tables = len(self.cache_hash_size_cumsum) - 1
num_offsets_per_table = (len(offsets) - 1) // num_tables
cache_missed_locations = torch.where(
lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT
)
for i in range(num_tables):
start = offsets[i * num_offsets_per_table]
end = offsets[(i + 1) * num_offsets_per_table]
current_cache_missed_locations = cache_missed_locations[start:end]
unique_ids_list = torch.unique(current_cache_missed_locations)
unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)
miss_count = torch.sum(unique_ids_count_list)
self.table_wise_cache_miss[i] += miss_count
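# A small, hedged example of how the per-table slices above are derived
# (hypothetical numbers, 2 tables with 3 offsets per table):
#   offsets = [0, 4, 7, 9, 12, 15, 20], num_tables = 2
#   num_offsets_per_table = (7 - 1) // 2 = 3
#   table 0 covers linearized indices offsets[0]:offsets[3] -> [0:9)
#   table 1 covers linearized indices offsets[3]:offsets[6] -> [9:20)
# Each slice is then deduplicated and compared against the CACHE_MISS sentinel
# to accumulate per-table miss counts.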
def init_embedding_weights_uniform(self, min_val: float, max_val: float) -> None:
splits = self.split_embedding_weights()
if self.weights_precision == SparseType.INT8:
# TODO: add in-place FloatToFused8BitRowwiseQuantized conversion
for emb in splits:
assert (
len(emb.shape) == 2
), "Int8 embedding only supported for 2D weight tensors."
shape = [emb.shape[0], emb.shape[1] - self.int8_emb_row_dim_offset]
tmp_emb = torch.zeros(shape, device=self.current_device)
tmp_emb.uniform_(min_val, max_val)
tmp_emb_i8 = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(tmp_emb)
emb.data.copy_(tmp_emb_i8)
else:
for param in splits:
param.uniform_(min_val, max_val)
@torch.jit.ignore
def split_embedding_weights(self) -> List[Tensor]:
"""
Returns a list of weights, split by table
"""
splits = []
for t, (rows, dim, _, _) in enumerate(self.embedding_specs):
if self.weights_precision == SparseType.INT8:
dim += self.int8_emb_row_dim_offset
placement = self.weights_physical_placements[t]
offset = self.weights_physical_offsets[t]
if placement == EmbeddingLocation.DEVICE.value:
weights = self.weights_dev
elif placement == EmbeddingLocation.HOST.value:
weights = self.weights_host
else:
weights = self.weights_uvm
if weights.dim() == 2:
weights = weights.flatten()
splits.append(
weights.detach()[offset : offset + rows * dim].view(rows, dim)
)
return splits
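# Hedged illustration of the flat-weights layout assumed above (hypothetical
# specs, not from the library): with two tables of (rows, dim) = (100, 16) and
# (50, 32), both placed on DEVICE, weights_dev holds 100*16 + 50*32 = 3200
# values and weights_physical_offsets = [0, 1600]. split_embedding_weights()
# then returns views of shape (100, 16) and (50, 32) over that flat buffer, so
# in-place edits to a split are visible to training.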
@torch.jit.ignore
def get_optimizer_buffer(self, state: str) -> torch.Tensor:
if self.optimizer == OptimType.NONE:
raise NotImplementedError(
f"Getting optimizer buffer is not supported for {self.optimizer}"
)
for name, buffer in self.named_buffers():
if name == state:
return buffer
return torch.tensor(0)
@torch.jit.export
def get_optimizer_state(self) -> List[Dict[str, torch.Tensor]]:
r"""
Get the optimizer state dict that matches the OSS Pytorch optims
TODO: populate the supported list of optimizers
"""
split_optimizer_states = self.split_optimizer_states()
if (
self.optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
or self.optimizer == OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD
or self.optimizer == OptimType.EXACT_ADAGRAD
):
list_of_state_dict = [
{"sum": states[0], "prev_iter": states[1], "row_counter": states[2]}
if self._used_rowwise_adagrad_with_counter
else {"sum": states[0]}
for states in split_optimizer_states
]
elif self.optimizer == OptimType.SGD or self.optimizer == OptimType.EXACT_SGD:
list_of_state_dict = [
{"momentum_buffer": states[0]} for states in split_optimizer_states
]
elif (
self.optimizer == OptimType.ADAM
or self.optimizer == OptimType.PARTIAL_ROWWISE_ADAM
or self.optimizer == OptimType.LAMB
or self.optimizer == OptimType.PARTIAL_ROWWISE_LAMB
):
list_of_state_dict = [
{"exp_avg": states[0], "exp_avg_sq": states[1]}
for states in split_optimizer_states
]
else:
raise NotImplementedError(
f"Getting optimizer state {self.optimizer} is not implmeneted"
)
return list_of_state_dict
@torch.jit.ignore
def split_optimizer_states(
self,
) -> List[List[torch.Tensor]]:
"""
Returns a list of states, split by table
"""
if self.optimizer == OptimType.NONE:
raise NotImplementedError(
f"Getting optimizer states is not supported for {self.optimizer}"
)
def get_optimizer_states(
state_dev: Tensor,
state_host: Tensor,
state_uvm: Tensor,
state_offsets: Tensor,
state_placements: Tensor,
rowwise: bool,
) -> List[torch.Tensor]:
splits = []
for t, (rows, dim, _, _) in enumerate(self.embedding_specs):
offset = state_offsets[t]
placement = state_placements[t]
if placement == EmbeddingLocation.DEVICE:
state = state_dev
elif placement == EmbeddingLocation.HOST:
state = state_host
else:
state = state_uvm
if not rowwise:
splits.append(
state.detach()[offset : offset + rows * dim].view(rows, dim)
)
else:
splits.append(state.detach()[offset : offset + rows].view(rows))
return splits
states: List[List[torch.Tensor]] = []
if self.optimizer not in (OptimType.EXACT_SGD,):
states.append(
get_optimizer_states(
self.momentum1_dev,
self.momentum1_host,
self.momentum1_uvm,
self.momentum1_physical_offsets,
self.momentum1_physical_placements,
rowwise=self.optimizer
in [
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
],
)
)
if self.optimizer in (
OptimType.ADAM,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_LAMB,
):
states.append(
get_optimizer_states(
self.momentum2_dev,
self.momentum2_host,
self.momentum2_uvm,
self.momentum2_physical_offsets,
self.momentum2_physical_placements,
rowwise=self.optimizer
in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.PARTIAL_ROWWISE_LAMB),
)
)
if self._used_rowwise_adagrad_with_counter:
states.append(
get_optimizer_states(
self.prev_iter_dev,
self.prev_iter_host,
self.prev_iter_uvm,
self.prev_iter_physical_offsets,
self.prev_iter_physical_placements,
rowwise=True,
)
)
states.append(
get_optimizer_states(
self.row_counter_dev,
self.row_counter_host,
self.row_counter_uvm,
self.row_counter_physical_offsets,
self.row_counter_physical_placements,
rowwise=True,
)
)
return_states = [list(s) for s in zip(*states)]
return return_states
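# Hedged sketch of the returned structure: assuming EXACT_ROWWISE_ADAGRAD with
# the counter-based variant enabled, the inner list for each table t is
# [momentum1_t, prev_iter_t, row_counter_t], each a 1-D row-wise view of
# length rows_t. For the ADAM/LAMB family the inner list would instead be
# [momentum1_t, momentum2_t], with momentum2 row-wise only for the
# PARTIAL_ROWWISE_* optimizers.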
@torch.jit.export
def set_learning_rate(self, lr: float) -> None:
"""
Sets the learning rate.
"""
if self.optimizer == OptimType.NONE:
raise NotImplementedError(
f"Setting learning rate is not supported for {self.optimizer}"
)
self._set_learning_rate(lr)
@torch.jit.ignore
def _set_learning_rate(self, lr: float) -> float:
"""
Helper function to script `set_learning_rate`.
Note that returning None does not work.
"""
self.optimizer_args = self.optimizer_args._replace(learning_rate=lr)
return 0.0
@torch.jit.export
def set_optimizer_step(self, step: int) -> None:
"""
Sets the optimizer step.
"""
if self.optimizer == OptimType.NONE:
raise NotImplementedError(
f"Setting optimizer step is not supported for {self.optimizer}"
)
self.iter[0] = step
@torch.jit.export
def flush(self) -> None:
if not self.lxu_cache_weights.numel():
return
torch.ops.fbgemm.lxu_cache_flush(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.cache_index_table_map,
self.weights_offsets,
self.D_offsets,
self.total_D,
self.lxu_cache_state,
self.lxu_cache_weights,
self.stochastic_rounding,
)
def _apply_split(
self,
split: SplitState,
prefix: str,
dtype: Type[torch.dtype],
enforce_hbm: bool = False,
make_dev_param: bool = False,
dev_reshape: Optional[Tuple[int, ...]] = None,
) -> None:
apply_split_helper(
self.register_buffer,
functools.partial(setattr, self),
self.current_device,
self.use_cpu,
self.feature_table_map,
split,
prefix,
dtype,
enforce_hbm,
make_dev_param,
dev_reshape,
)
def _apply_cache_state(
self,
cache_state: CacheState,
cache_algorithm: CacheAlgorithm,
cache_load_factor: float,
cache_sets: int,
cache_reserved_memory: float,
dtype: torch.dtype,
) -> None:
self.cache_algorithm = cache_algorithm
self.timestep = 1
self.timesteps_prefetched = []
self.max_prefetch_depth = MAX_PREFETCH_DEPTH
self.lxu_cache_locations_list = []
self.lxu_cache_locations_empty = torch.empty(
0, device=self.current_device, dtype=torch.int32
).fill_(-1)
self.lxu_cache_locations = self.lxu_cache_locations_empty
self.prefetch_stream: Optional[torch.cuda.Stream] = None
self.linear_cache_indices_list = []
self._init_uvm_cache_stats()
# NOTE: no cache for CPU mode!
if cache_state.total_cache_hash_size == 0 or self.use_cpu:
self.register_buffer(
"lxu_cache_weights",
torch.zeros(0, 0, device=self.current_device, dtype=dtype),
)
# NOTE: make TorchScript work!
self.register_buffer(
"cache_hash_size_cumsum",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"total_cache_hash_size",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"cache_index_table_map",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"lxu_state",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"cache_miss_counter",
torch.tensor([0, 0], dtype=torch.int64),
persistent=False,
)
self._init_uvm_cache_counter(cache_sets, persistent=False)
return
assert cache_load_factor > 0
element_size = 2 if dtype == torch.float16 else 4
if cache_sets <= 0:
total_memory = torch.cuda.get_device_properties(
self.current_device
).total_memory
free_memory = (
total_memory
- torch.cuda.memory_reserved(self.current_device)
- int(cache_reserved_memory)
)
assert free_memory > 0
cache_sets = (
int(cache_state.total_cache_hash_size * cache_load_factor)
+ DEFAULT_ASSOC
- 1
) // DEFAULT_ASSOC
cache_sets = 1 if cache_sets == 0 else cache_sets
cache_size = cache_sets * DEFAULT_ASSOC * element_size * self.max_D_cache
if cache_size > free_memory:
cache_sets = (
int(1.0 * free_memory / self.max_D_cache / element_size)
+ DEFAULT_ASSOC
- 1
) // DEFAULT_ASSOC
cache_load_factor = (
1.0 * cache_sets * DEFAULT_ASSOC / int(cache_state.total_cache_hash_size)
)
assert cache_sets > 0
if cache_algorithm == CacheAlgorithm.LFU:
assert cache_sets < 2**24 - 1
cache_size = cache_sets * DEFAULT_ASSOC * element_size * self.max_D_cache
logging.info(
f"Using on-device cache with admission algorithm "
f"{cache_algorithm}, {cache_sets} sets, "
f"load_factor: {cache_load_factor : .3f}, "
f"{cache_size / 1024.0 / 1024.0 / 1024.0 : .2f}GB"
)
self.total_cache_hash_size = cache_state.total_cache_hash_size
self.register_buffer(
"cache_hash_size_cumsum",
torch.tensor(
cache_state.cache_hash_size_cumsum,
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"cache_index_table_map",
torch.tensor(
cache_state.cache_index_table_map,
device=self.current_device,
dtype=torch.int32,
),
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(
cache_sets, DEFAULT_ASSOC, device=self.current_device, dtype=torch.int64
).fill_(-1),
)
self.register_buffer(
"lxu_cache_weights",
torch.zeros(
cache_sets * DEFAULT_ASSOC,
self.max_D_cache,
device=self.current_device,
dtype=dtype,
),
)
self.register_buffer(
"lxu_state",
torch.zeros(
size=(self.total_cache_hash_size + 1,)
if cache_algorithm == CacheAlgorithm.LFU
else (cache_sets, DEFAULT_ASSOC),
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"cache_miss_counter",
torch.tensor([0, 0], device=self.current_device, dtype=torch.int64),
)
self._init_uvm_cache_counter(cache_sets, persistent=True)
if self.prefetch_pipeline:
# Use the placeholder_autograd_tensor to make sure the hook is executed
# after the backward pass; register_module_full_backward_hook is not used
# due to https://github.com/pytorch/pytorch/issues/100528
self.placeholder_autograd_tensor.register_hook(
self._sync_stream_post_backward
)
self.register_full_backward_pre_hook(
self._update_cache_counter_and_locations
)
if cache_algorithm not in (CacheAlgorithm.LFU, CacheAlgorithm.LRU):
raise ValueError(
f"cache_algorithm must be {CacheAlgorithm.LRU} "
f"or {CacheAlgorithm.LFU}"
)
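# Hedged worked example of the cache sizing arithmetic above (hypothetical
# numbers, assuming DEFAULT_ASSOC = 32): with total_cache_hash_size = 1_000_000,
# cache_load_factor = 0.2, max_D_cache = 128 and an FP16 cache (element_size = 2):
#   cache_sets = ceil(1_000_000 * 0.2 / 32) = 6250
#   cache_size = 6250 * 32 * 2 * 128 bytes = 51_200_000 bytes (~48.8 MiB)
# If cache_size exceeded free GPU memory, cache_sets would be recomputed from
# the available bytes and cache_load_factor adjusted accordingly.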
def _sync_stream_post_backward(
self,
grad: Tensor,
) -> None:
"""
backward hook function when prefetch_pipeline is enabled.
With the pipeline, prefetch(batch_{i+2}) may overlap with backward(batch_{i}).
There is a race condition where backward(batch_i) writes to UVM memory while,
at the same time, prefetch(batch_{i+2}) loads UVM memory into the cache. This stream
sync forces backward(batch_i) to finish before prefetch(batch_{i+2}).
"""
if self.prefetch_stream is not None:
self.prefetch_stream.wait_stream(torch.cuda.current_stream())
def _update_cache_counter_and_locations(
self,
module: nn.Module,
grad_input: Union[Tuple[Tensor, ...], Tensor],
) -> None:
"""
Backward prehook function when prefetch_pipeline is enabled.
This function does 3 things:
1. backward stream waits for prefetch stream to finish.
Otherwise the prefetch(batch_{i+1}) might overlap with backward(batch_i).
If an idx is not in the cache in batch_i, but it is being inserted in batch_{i+1},
there is a race condition where backward(batch_i) writes to UVM memory while,
at the same time, prefetch(batch_{i+1}) loads UVM memory into the cache.
2. decrement the lxu_cache_locking_counter to indicate the current batch is finished.
The lxu_cache_locking_counter is updated in both prefetch and TBE backward.
As there is no overlap between prefetch and backward, we can decrement either before or
after backward. It's better to decrement before lxu_cache_locations gets updated.
3. update lxu_cache_locations to address the cache inconsistency issue.
In the case that the same index is not inserted into cache in batch_i,
but it is inserted in batch_{i+1}, the cache can be invalid in
the sense that the cached weight for this index does not have the
backward update of batch_i.
Example of the issue is as follows:
idx is in batch_i, batch_{i+1}
prefetch(batch_i)
- failed to insert idx into cache, cache_locations_batch_i of idx is -1 (cache miss)
forward(batch_i)
prefetch(batch_{i+1})
- insert idx into cache, cache is loaded from host memory
backward(batch_i)
- cache_locations_batch_i of idx is -1, the host memory is updated
forward(batch_{i+1})
- OUTPUT IS WRONG. the weight for idx is fetched from cache, but the cache is outdated.
The fix to this cache inconsistency is to update the cache_locations_batch_i before backward of batch_i,
so that the cache gets updated correctly by the backward pass of TBE.
"""
if self.prefetch_stream is not None:
# need to wait for the prefetch of next batch,
# so that cache states are valid
torch.cuda.current_stream().wait_stream(self.prefetch_stream)
torch.ops.fbgemm.lxu_cache_locking_counter_decrement(
self.lxu_cache_locking_counter,
self.lxu_cache_locations,
)
linear_cache_indices = self.linear_cache_indices_list.pop(0)
lxu_cache_locations_new = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
False, # not collecting cache stats
self.local_uvm_cache_stats,
)
# self.lxu_cache_locations is updated inplace
torch.ops.fbgemm.lxu_cache_locations_update(
self.lxu_cache_locations,
lxu_cache_locations_new,
)
def _init_uvm_cache_counter(self, cache_sets: int, persistent: bool) -> None:
if self.prefetch_pipeline and persistent:
self.register_buffer(
"lxu_cache_locking_counter",
torch.zeros(
cache_sets,
DEFAULT_ASSOC,
device=self.current_device,
dtype=torch.int32,
),
)
else:
self.register_buffer(
"lxu_cache_locking_counter",
torch.zeros([0, 0], dtype=torch.int32, device=self.current_device),
persistent=persistent,
)
def _init_uvm_cache_stats(self) -> None:
if not self.gather_uvm_cache_stats:
# If uvm_cache_stats is not enabled, register stub buffers so that TorchScript can JIT the module properly.
# Since we're not using these variables, we choose to minimize the tensor size to keep the state_dict small.
self.register_buffer(
"uvm_cache_stats",
torch.zeros(
1,
device=self.current_device,
dtype=torch.int64,
),
persistent=False,
)
self.register_buffer(
"local_uvm_cache_stats",
torch.zeros(
1,
device=self.current_device,
dtype=torch.int32,
),
persistent=False,
)
else:
self.register_buffer(
"uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"local_uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int32,
),
)
self.reset_uvm_cache_stats()
def reset_cache_states(self) -> None:
if not self.lxu_cache_weights.numel():
return
self.lxu_cache_state.fill_(-1)
self.lxu_state.fill_(0)
self.timestep = 1
def reset_embedding_weight_momentum(
self,
pruned_indices: Tensor,
pruned_indices_offsets: Tensor,
logical_table_ids: Tensor,
buffer_ids: Tensor,
) -> None:
if self.optimizer == OptimType.NONE:
raise NotImplementedError(
f"Resetting embedding weight momentum is not supported for {self.optimizer}"
)
total_cache_hash_size = 0
if isinstance(self.total_cache_hash_size, Tensor):
total_cache_hash_size = self.total_cache_hash_size.item()
else:
total_cache_hash_size = self.total_cache_hash_size
rowwise = self.optimizer in [
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
]
if rowwise:
torch.ops.fbgemm.reset_weight_momentum(
dev_weights=self.weights_dev,
uvm_weights=self.weights_uvm,
lxu_cache_weights=self.lxu_cache_weights,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
momentum1_dev=self.momentum1_dev,
momentum1_uvm=self.momentum1_uvm,
momentum1_placements=self.momentum1_placements,
momentum1_offsets=self.momentum1_offsets,
D_offsets=self.D_offsets,
pruned_indices=pruned_indices.to(device=self.current_device),
pruned_indices_offsets=pruned_indices_offsets.to(
device=self.current_device
),
logical_table_ids=logical_table_ids.to(device=self.current_device),
buffer_ids=buffer_ids.to(device=self.current_device),
cache_hash_size_cumsum=self.cache_hash_size_cumsum,
lxu_cache_state=self.lxu_cache_state,
total_cache_hash_size=total_cache_hash_size,
)
class DenseTableBatchedEmbeddingBagsCodegen(nn.Module):
"""
Table-batched version of nn.EmbeddingBag(sparse=False)
"""
weights: Tensor
weights_offsets: Tensor
D_offsets: Tensor
total_D: int
max_D: int
hash_size_cumsum: Tensor
total_hash_size_bits: int
embedding_specs: List[Tuple[int, int]]
def __init__(
self,
embedding_specs: List[Tuple[int, int]], # tuple of (rows, dims)
feature_table_map: Optional[List[int]] = None, # [T]
weights_precision: SparseType = SparseType.FP32,
pooling_mode: PoolingMode = PoolingMode.SUM,
use_cpu: bool = False,
output_dtype: SparseType = SparseType.FP32,
) -> None: # noqa C901 # tuple of (rows, dims,)
super(DenseTableBatchedEmbeddingBagsCodegen, self).__init__()
self.pooling_mode = pooling_mode
self.weights_precision = weights_precision
self.output_dtype: int = output_dtype.as_int()
table_embedding_dtype = weights_precision.as_dtype()
self.use_cpu = use_cpu
if self.use_cpu or self.pooling_mode == PoolingMode.NONE:
assert output_dtype in [
SparseType.FP32,
SparseType.FP16,
SparseType.BF16,
], "Fused pooled embedding quantization only supported for cuda."
# pyre-fixme[8]: Attribute has type `device`; used as `Union[int, device]`.
self.current_device: torch.device = (
torch.device("cpu") if self.use_cpu else torch.cuda.current_device()
)
self.embedding_specs = embedding_specs
(rows, dims) = zip(*embedding_specs)
T_ = len(self.embedding_specs)
assert T_ > 0
feature_table_map = (
feature_table_map if feature_table_map is not None else list(range(T_))
)
T = len(feature_table_map)
assert T_ <= T
D_offsets = [dims[t] for t in feature_table_map]
D_offsets = [0] + list(accumulate(D_offsets))
self.total_D = D_offsets[-1]
self.max_D = max(dims)
self.register_buffer(
"D_offsets",
torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
)
assert self.D_offsets.numel() == T + 1
hash_size_cumsum = [0] + list(accumulate(rows))
if hash_size_cumsum[-1] == 0:
self.total_hash_size_bits: int = 0
else:
self.total_hash_size_bits: int = int(log2(float(hash_size_cumsum[-1])) + 1)
# The last element is to easily access # of rows of each table by
# hash_size_cumsum[t + 1] - hash_size_cumsum[t]
hash_size_cumsum = [hash_size_cumsum[t] for t in feature_table_map] + [
hash_size_cumsum[-1]
]
self.register_buffer(
"hash_size_cumsum",
torch.tensor(
hash_size_cumsum, device=self.current_device, dtype=torch.int64
),
)
weights_offsets = [0] + list(
accumulate([row * dim for (row, dim) in embedding_specs])
)
self.weights = nn.Parameter(
torch.randn(
weights_offsets[-1],
device=self.current_device,
dtype=table_embedding_dtype,
)
)
for feature in range(T):
t = feature_table_map[feature]
row, dim = embedding_specs[t]
if (
self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()
!= row * dim
):
logging.info(
f"row {row} dim {dim} feature {feature} t {t} {self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()}"
)
assert (
self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()
== row * dim
)
assert self.hash_size_cumsum[feature] == sum(
row for (row, _) in embedding_specs[:t]
)
self.weights_physical_offsets: List[int] = weights_offsets
weights_offsets = [weights_offsets[t] for t in feature_table_map]
self.register_buffer(
"weights_offsets",
torch.tensor(
weights_offsets, device=self.current_device, dtype=torch.int64
),
)
def forward(
self,
indices: Tensor,
offsets: Tensor,
per_sample_weights: Optional[Tensor] = None,
feature_requires_grad: Optional[Tensor] = None,
) -> Tensor:
(indices, offsets) = indices.long(), offsets.long()
return torch.ops.fbgemm.dense_embedding_codegen_lookup_function(
dev_weights=self.weights,
weights_offsets=self.weights_offsets,
D_offsets=self.D_offsets,
total_D=self.total_D,
max_D=self.max_D,
hash_size_cumsum=self.hash_size_cumsum,
total_hash_size_bits=self.total_hash_size_bits,
indices=indices,
offsets=offsets,
pooling_mode=self.pooling_mode,
indice_weights=per_sample_weights,
feature_requires_grad=feature_requires_grad,
output_dtype=self.output_dtype,
)
@torch.jit.export
def split_embedding_weights(self) -> List[Tensor]:
"""
Returns a list of weights, split by table
"""
splits = []
for t, (rows, dim) in enumerate(self.embedding_specs):
offset = self.weights_physical_offsets[t]
splits.append(
self.weights.detach()[offset : offset + rows * dim].view(rows, dim)
)
return splits
def init_embedding_weights_uniform(self, min_val: float, max_val: float) -> None:
splits = self.split_embedding_weights()
for param in splits:
param.uniform_(min_val, max_val)
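# A minimal, hedged usage sketch of the dense table-batched module above
# (hypothetical shapes; kept as a comment so importing this module has no
# side effects):
#
#   emb = DenseTableBatchedEmbeddingBagsCodegen(
#       embedding_specs=[(1000, 16), (2000, 32)],  # (rows, dim) per table
#       pooling_mode=PoolingMode.SUM,
#       use_cpu=True,
#   )
#   # Two features, batch size 2: feature 0 gets bags [1, 2] and [3];
#   # feature 1 gets bags [10] and [11, 12].
#   indices = torch.tensor([1, 2, 3, 10, 11, 12])
#   offsets = torch.tensor([0, 2, 3, 4, 6])   # length B * T + 1
#   pooled = emb(indices, offsets)            # shape (2, 16 + 32)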
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import logging
from itertools import accumulate
from typing import List, Optional, Tuple, Union
import torch # usort:skip
from torch import nn, Tensor # usort:skip
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
CacheState,
construct_cache_state,
DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
EmbeddingLocation,
MAX_PREFETCH_DEPTH,
PoolingMode,
RecordCacheMetrics,
round_up,
SplitState,
)
try:
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cuda_inference"
)
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu_inference"
)
except Exception:
pass
def rounded_row_size_in_bytes(
dim: int,
weight_ty: SparseType,
row_alignment: int,
scale_bias_size_in_bytes: int = DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
) -> int:
r = unpadded_row_size_in_bytes(dim, weight_ty, scale_bias_size_in_bytes)
# Align each row to the requested row_alignment boundary (16 bytes by default on GPU).
return round_up(r, row_alignment)
def unpadded_row_size_in_bytes(
dim: int,
weight_ty: SparseType,
scale_bias_size_in_bytes: int = DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
) -> int:
r = {
SparseType.FP32.value: dim * 4,
SparseType.FP16.value: dim * 2,
SparseType.FP8.value: dim,
SparseType.INT8.value: dim + scale_bias_size_in_bytes,
SparseType.INT4.value: dim // 2 + scale_bias_size_in_bytes,
SparseType.INT2.value: dim // 4 + scale_bias_size_in_bytes,
}[weight_ty.value]
return r
def align_to_cacheline(a: int) -> int:
# Align each table to a 128-byte cache line boundary.
return round_up(a, 128)
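# Hedged examples of the row-size arithmetic above (assuming the default
# scale/bias size of 4 bytes and a row_alignment of 16):
#   FP16, dim=128 : unpadded = 128 * 2      = 256 bytes -> rounded = 256
#   INT8, dim=128 : unpadded = 128 + 4      = 132 bytes -> rounded = 144
#   INT4, dim=128 : unpadded = 128 // 2 + 4 =  68 bytes -> rounded =  80
#   INT2, dim=128 : unpadded = 128 // 4 + 4 =  36 bytes -> rounded =  48
# align_to_cacheline() then pads whole tables up to 128-byte boundaries.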
def nbit_construct_split_state(
embedding_specs: List[Tuple[str, int, int, SparseType, EmbeddingLocation]],
cacheable: bool,
row_alignment: int,
scale_bias_size_in_bytes: int = DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
cacheline_alignment: bool = True,
) -> SplitState:
placements = torch.jit.annotate(List[EmbeddingLocation], [])
offsets = torch.jit.annotate(List[int], [])
dev_size = 0
host_size = 0
uvm_size = 0
for _, num_embeddings, embedding_dim, weight_ty, location in embedding_specs:
embedding_dim = rounded_row_size_in_bytes(
embedding_dim, weight_ty, row_alignment, scale_bias_size_in_bytes
)
state_size = num_embeddings * embedding_dim
if cacheline_alignment:
state_size = align_to_cacheline(state_size)
if location == EmbeddingLocation.HOST:
placements.append(EmbeddingLocation.HOST)
offsets.append(host_size)
host_size += state_size
elif location == EmbeddingLocation.DEVICE:
placements.append(EmbeddingLocation.DEVICE)
offsets.append(dev_size)
dev_size += state_size
else:
if cacheable and location == EmbeddingLocation.MANAGED_CACHING:
placements.append(EmbeddingLocation.MANAGED_CACHING)
else:
placements.append(EmbeddingLocation.MANAGED)
offsets.append(uvm_size)
uvm_size += state_size
assert len(placements) == len(offsets)
return SplitState(
dev_size=dev_size,
host_size=host_size,
uvm_size=uvm_size,
placements=placements,
offsets=offsets,
)
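# Hedged illustration of the placement bookkeeping above (hypothetical specs):
# two INT4 tables with dim=128 (68 unpadded -> 80 rounded bytes per row with
# row_alignment=16), cache-line aligned per table:
#   ("t0", 1000, 128, INT4, EmbeddingLocation.DEVICE)
#   ("t1", 2000, 128, INT4, EmbeddingLocation.MANAGED_CACHING)
#   t0: state_size = 1000 * 80 = 80_000 bytes (already a multiple of 128),
#       placement DEVICE, offset 0, dev_size = 80_000
#   t1: state_size = 2000 * 80 = 160_000 bytes, placement MANAGED_CACHING when
#       cacheable (otherwise MANAGED), offset 0, uvm_size = 160_000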
def random_quant_scaled_tensor(shape: torch.Size, device: torch.device) -> torch.Tensor:
return torch.randint(
0,
255,
size=shape,
dtype=torch.uint8,
device=device,
)
# pyre-fixme[13]: Attribute `cache_miss_counter` is never initialized.
class IntNBitTableBatchedEmbeddingBagsCodegen(nn.Module):
"""
Table-batched version of nn.EmbeddingBag(sparse=False)
Inference version, with FP32/FP16/FP8/INT8/INT4/INT2 supports
"""
embedding_specs: List[Tuple[str, int, int, SparseType, EmbeddingLocation]]
record_cache_metrics: RecordCacheMetrics
cache_miss_counter: torch.Tensor
uvm_cache_stats: torch.Tensor
local_uvm_cache_stats: torch.Tensor
weights_offsets: torch.Tensor
weights_placements: torch.Tensor
def __init__( # noqa C901
self,
embedding_specs: List[
Tuple[str, int, int, SparseType, EmbeddingLocation]
], # tuple of (feature_names, rows, dims, SparseType, EmbeddingLocation/placement)
feature_table_map: Optional[List[int]] = None, # [T]
index_remapping: Optional[List[Tensor]] = None,
pooling_mode: PoolingMode = PoolingMode.SUM,
device: Optional[Union[str, int, torch.device]] = None,
bounds_check_mode: BoundsCheckMode = BoundsCheckMode.WARNING,
weight_lists: Optional[List[Tuple[Tensor, Optional[Tensor]]]] = None,
pruning_hash_load_factor: float = 0.5,
use_array_for_index_remapping: bool = True,
output_dtype: SparseType = SparseType.FP16,
cache_algorithm: CacheAlgorithm = CacheAlgorithm.LRU,
cache_load_factor: float = 0.2,
cache_sets: int = 0,
cache_reserved_memory: float = 0.0,
enforce_hbm: bool = False, # place all weights/momentums in HBM when using cache
record_cache_metrics: Optional[RecordCacheMetrics] = None,
gather_uvm_cache_stats: Optional[bool] = False,
row_alignment: Optional[int] = None,
fp8_exponent_bits: Optional[int] = None,
fp8_exponent_bias: Optional[int] = None,
cache_assoc: int = 32,
scale_bias_size_in_bytes: int = DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
cacheline_alignment: bool = True,
uvm_host_mapped: bool = False, # True to use cudaHostAlloc; False to use cudaMallocManaged.
) -> None: # noqa C901 # tuple of (rows, dims,)
super(IntNBitTableBatchedEmbeddingBagsCodegen, self).__init__()
# Use a cache associativity of 64 instead of 32 on AMD (HIP) devices.
if cache_assoc == 32 and torch.version.hip is not None:
cache_assoc = 64
if device is None:
self.current_device: torch.device = torch.device(
torch.cuda.current_device()
)
elif isinstance(device, torch.device):
self.current_device = device
else:
self.current_device = torch.device(device)
self.use_cpu: bool = self.current_device.type == "cpu"
self.scale_bias_size_in_bytes = scale_bias_size_in_bytes
self.pooling_mode = pooling_mode
self.bounds_check_mode_int: int = bounds_check_mode.value
self.embedding_specs = embedding_specs
self.output_dtype: int = output_dtype.as_int()
self.uvm_host_mapped = uvm_host_mapped
# (feature_names, rows, dims, weights_tys, locations) = zip(*embedding_specs)
# Pyre workaround
self.feature_names: List[str] = [e[0] for e in embedding_specs]
rows: List[int] = [e[1] for e in embedding_specs]
dims: List[int] = [e[2] for e in embedding_specs]
weights_tys: List[SparseType] = [e[3] for e in embedding_specs]
locations: List[EmbeddingLocation] = [e[4] for e in embedding_specs]
# if target device is meta then we set use_cpu based on the embedding location
# information in embedding_specs.
if self.current_device.type == "meta":
self.use_cpu = all(loc == EmbeddingLocation.HOST for loc in locations)
if row_alignment is None:
self.row_alignment: int = 1 if self.use_cpu else 16
else:
self.row_alignment = row_alignment
if record_cache_metrics is not None:
self.record_cache_metrics = record_cache_metrics
else:
self.record_cache_metrics = RecordCacheMetrics(False, False)
self.gather_uvm_cache_stats = gather_uvm_cache_stats
# Define the size of uvm cache stats as class variable
# to make it work with torch jit script.
self.uvm_cache_stats_size = 6
# 0: N_calls, 1: N_requested_indices, 2: N_unique_indices, 3: N_unique_misses,
# 4: N_conflict_unique_misses, 5: N_conflict_misses
# Mixed embedding dimensions (mixed D) are not supported by the no-bag kernels
mixed_D = not all(d == dims[0] for d in dims)
if mixed_D:
assert (
self.pooling_mode != PoolingMode.NONE
), "Mixed dimension tables are only supported for pooling tables."
assert not self.use_cpu or all(
loc == EmbeddingLocation.HOST for loc in locations
), "CPU device requires EmbeddingLocation.HOST for location!"
assert self.use_cpu or all(
loc != EmbeddingLocation.HOST for loc in locations
), "EmbeddingLocation.HOST doesn't work for CUDA device!"
T_ = len(self.embedding_specs)
assert T_ > 0
self.feature_table_map: List[int] = (
feature_table_map if feature_table_map is not None else list(range(T_))
)
T = len(self.feature_table_map)
assert T_ <= T
table_has_feature = [False] * T_
for t in self.feature_table_map:
table_has_feature[t] = True
assert all(table_has_feature), "Each table must have at least one feature!"
D_offsets = [dims[t] for t in self.feature_table_map]
D_offsets = [0] + list(accumulate(D_offsets))
self.total_D: int = D_offsets[-1]
for dim, weight_ty in zip(dims, weights_tys):
if not weight_ty.is_float():
assert (
dim % (8 / weight_ty.bit_rate()) == 0
), f"For quantized types we need to at least pack at byte granularity, dim: {dim}, weight_ty: {weight_ty}"
def max_ty_D(ty: SparseType) -> int:
return max(
[dim for dim, weight_ty in zip(dims, weights_tys) if weight_ty == ty],
default=0,
)
self.max_int2_D: int = max_ty_D(SparseType.INT2)
self.max_int4_D: int = max_ty_D(SparseType.INT4)
self.max_int8_D: int = max_ty_D(SparseType.INT8)
self.max_float8_D: int = max_ty_D(SparseType.FP8)
self.max_float16_D: int = max_ty_D(SparseType.FP16)
self.max_float32_D: int = max_ty_D(SparseType.FP32)
self.register_buffer(
"D_offsets",
torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
)
assert self.D_offsets.numel() == T + 1
self.register_buffer(
"rows_per_table",
torch.tensor(
[rows[t] for t in self.feature_table_map],
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"bounds_check_warning",
torch.tensor([0], device=self.current_device, dtype=torch.int64),
)
weights_tys_int = [weights_tys[t].as_int() for t in self.feature_table_map]
self.register_buffer(
"weights_tys",
torch.tensor(
weights_tys_int, device=self.current_device, dtype=torch.uint8
),
)
self.weight_initialized: bool = False
self.weights_dev: torch.Tensor = torch.zeros(
0,
device=self.current_device,
dtype=torch.uint8,
)
self.weights_host: torch.Tensor = torch.zeros(
0, device=self.current_device, dtype=torch.uint8
)
self.weights_uvm: torch.Tensor = torch.empty(0, dtype=torch.uint8).to(
self.current_device
)
cached_dims = [
rounded_row_size_in_bytes(
embedding_spec[2], embedding_spec[3], 16, self.scale_bias_size_in_bytes
)
for embedding_spec in self.embedding_specs
if embedding_spec[4] == EmbeddingLocation.MANAGED_CACHING
]
self.max_D_cache: int = max(cached_dims) if len(cached_dims) > 0 else 0
self.initialize_physical_weights_placements_and_offsets(cacheline_alignment)
self.enforce_hbm: bool = enforce_hbm
# Assign weights after weights and weights_offsets are initialized.
if weight_lists:
self._apply_split(
self.dev_size,
self.host_size,
self.uvm_size,
self.weights_physical_placements,
self.weights_physical_offsets,
self.enforce_hbm,
)
self.assign_embedding_weights(weight_lists)
# Handle index remapping for embedding pruning.
self.register_buffer(
"index_remappings_array_offsets",
torch.empty(0, device=self.current_device, dtype=torch.int64),
)
self.register_buffer(
"index_remappings_array",
torch.empty(0, device=self.current_device, dtype=torch.int32),
)
self.register_buffer(
"index_remapping_hash_table_offsets",
torch.empty(0, device=self.current_device, dtype=torch.int64),
)
self.register_buffer(
"index_remapping_hash_table",
torch.empty(0, device=self.current_device, dtype=torch.int32),
)
self.register_buffer(
"original_rows_per_table",
torch.empty(0, device=self.current_device, dtype=torch.int64),
)
# pyre-fixme[4]: Attribute must be annotated.
self.index_remapping_hash_table_cpu = None
if index_remapping:
self.set_index_remappings(
index_remapping, pruning_hash_load_factor, use_array_for_index_remapping
)
# Currently only cache_precision == embedding_precision is supported.
# Both are represented as uint8_t.
cache_state = construct_cache_state(rows, locations, self.feature_table_map)
if self.record_cache_metrics.record_tablewise_cache_miss:
num_tables = len(cache_state.cache_hash_size_cumsum) - 1
self.register_buffer(
"table_wise_cache_miss",
torch.zeros(
num_tables,
device=self.current_device,
dtype=torch.int64,
),
)
# NOTE: make TorchScript work!
else:
self.register_buffer(
"table_wise_cache_miss",
torch.zeros(
0,
device=self.current_device,
dtype=torch.int64,
),
)
self.cache_assoc = cache_assoc
self._apply_cache_state(
cache_state,
cache_algorithm,
cache_load_factor,
cache_sets,
cache_reserved_memory,
)
if self.max_float8_D > 0:
default_config = SparseType.FP8.default_config()
self.fp8_exponent_bits: int = (
default_config.get("exponent_bits")
if fp8_exponent_bits is None
else fp8_exponent_bits
)
self.fp8_exponent_bias: int = (
default_config.get("exponent_bias")
if fp8_exponent_bias is None
else fp8_exponent_bias
)
else:
self.fp8_exponent_bits = -1
self.fp8_exponent_bias = -1
def get_cache_miss_counter(self) -> Tensor:
# cache_miss_counter[0]: cache_miss_forward_count, which records the total number of forward calls that had at least one cache miss
# cache_miss_counter[1]: unique_cache_miss_count, which records the total number of unique (deduplicated) cache misses
# cache_miss_counter[2]: total number of unique (dedup) access count
# cache_miss_counter[3]: total number of non-dedup access count
# How to get cache miss ratio
# cache miss ratio (# of missed entries / # of unique requests): ( cache_miss_counter[1] / cache_miss_counter[2] )
# cache miss ratio (# of missed entries / # of total access): ( cache_miss_counter[1] / cache_miss_counter[3] )
assert (
self.record_cache_metrics.record_cache_miss_counter
), "record_cache_miss_counter should be true to access counter values"
return self.cache_miss_counter
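# Hedged example of the ratios described above (hypothetical counter values):
#   cache_miss_counter = [12, 3_000, 50_000, 400_000]
#   unique miss rate = 3_000 / 50_000  = 0.06   (misses per unique request)
#   total  miss rate = 3_000 / 400_000 = 0.0075 (misses per total request)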
@torch.jit.export
def get_table_wise_cache_miss(self) -> Tensor:
assert (
self.record_cache_metrics.record_tablewise_cache_miss
), "record_tablewise_cache_miss should be true to access counter values"
# table_wise_cache_miss contains the cache miss count for each table in this embedding object:
return self.table_wise_cache_miss
def reset_cache_miss_counter(self) -> None:
assert (
self.record_cache_metrics.record_cache_miss_counter
), "record_cache_miss_counter should be true to access counter values"
self.cache_miss_counter = torch.tensor(
[0, 0, 0, 0], device=self.current_device, dtype=torch.int64
)
def reset_uvm_cache_stats(self) -> None:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
self.uvm_cache_stats.zero_()
self.local_uvm_cache_stats.zero_()
def print_cache_miss_counter(self) -> None:
assert (
self.record_cache_metrics.record_cache_miss_counter
), "record_cache_miss_counter should be true to access counter values"
logging.info(
f"\n"
f"Miss counter value [0] - # of miss occured iters : {self.cache_miss_counter[0]}, \n"
f"Miss counter value [1] - # of unique misses : {self.cache_miss_counter[1]}, \n"
f"Miss counter value [2] - # of unique requested indices : {self.cache_miss_counter[2]}, \n"
f"Miss counter value [3] - # of total requested indices : {self.cache_miss_counter[3]}, "
)
logging.info(
f"unique_miss_rate using counter : {self.cache_miss_counter[1]/self.cache_miss_counter[2]}, \n"
)
logging.info(
f"total_miss_rate using counter : {self.cache_miss_counter[1]/self.cache_miss_counter[3]}, \n"
)
def get_uvm_cache_stats(self) -> Tensor:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
return self.uvm_cache_stats
def print_uvm_cache_stats(self) -> None:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
uvm_cache_stats = self.uvm_cache_stats.tolist()
logging.info(
f"N_called: {uvm_cache_stats[0]}\n"
f"N_requested_indices: {uvm_cache_stats[1]}\n"
f"N_unique_indices: {uvm_cache_stats[2]}\n"
f"N_unique_misses: {uvm_cache_stats[3]}\n"
f"N_conflict_unique_misses: {uvm_cache_stats[4]}\n"
f"N_conflict_misses: {uvm_cache_stats[5]}\n"
)
if uvm_cache_stats[1]:
logging.info(
f"unique indices / requested indices: {uvm_cache_stats[2]/uvm_cache_stats[1]}\n"
f"unique misses / requested indices: {uvm_cache_stats[3]/uvm_cache_stats[1]}\n"
)
@torch.jit.export
def prefetch(self, indices: Tensor, offsets: Tensor) -> None:
self.timestep_counter.increment()
self.timestep_prefetch_size.increment()
if not self.lxu_cache_weights.numel():
return
# FIXME: check the int32_t range failure in https://fburl.com/gdoc/kcdnrnvg .
# The real failure should be in cache handling in https://fburl.com/ox3f26r0 .
indices, offsets = indices.long(), offsets.long()
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.cache_hash_size_cumsum,
indices,
offsets,
)
if (
self.record_cache_metrics.record_cache_miss_counter
or self.record_cache_metrics.record_tablewise_cache_miss
):
lxu_cache_locations = (
torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
)
if self.cache_assoc in [32, 64]
else torch.ops.fbgemm.direct_mapped_lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
)
)
if self.record_cache_metrics.record_cache_miss_counter:
self._update_cache_miss_counter(
lxu_cache_locations, linear_cache_indices
)
if self.record_cache_metrics.record_tablewise_cache_miss:
self._update_tablewise_cache_miss(
lxu_cache_locations, linear_cache_indices, offsets
)
if self.cache_assoc in [32, 64]:
# 64 for AMD
self.prefetch_32way(linear_cache_indices)
elif self.cache_assoc == 1:
self.prefetch_1way(linear_cache_indices)
else:
raise ValueError(f"{self.cache_assoc} not in [1, 32, 64]")
def prefetch_32way(self, linear_cache_indices: Tensor) -> None:
if self.cache_algorithm == CacheAlgorithm.LRU:
torch.ops.fbgemm.lru_cache_populate_byte(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.total_cache_hash_size,
self.cache_index_table_map,
self.weights_offsets,
self.weights_tys,
self.D_offsets,
linear_cache_indices,
self.lxu_cache_state,
self.lxu_cache_weights,
self.timestep_counter.get(),
self.lxu_state,
16, # row_alignment; using default value.
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
elif self.cache_algorithm == CacheAlgorithm.LFU:
torch.ops.fbgemm.lfu_cache_populate_byte(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.total_cache_hash_size,
self.cache_index_table_map,
self.weights_offsets,
self.weights_tys,
self.D_offsets,
linear_cache_indices,
self.lxu_cache_state,
self.lxu_cache_weights,
self.lxu_state,
)
assert (
self.lxu_cache_locations_list.size() < self.max_prefetch_depth
), f"self.lxu_cache_locations_list has grown to size: {self.lxu_cache_locations_list.size()}, this exceeds the maximum: {self.max_prefetch_depth}. This probably indicates an error in logic where prefetch() is being called more frequently than forward()"
self.lxu_cache_locations_list.push(
torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
)
if self.gather_uvm_cache_stats:
self._accumulate_uvm_cache_stats()
def prefetch_1way(self, linear_cache_indices: Tensor) -> None:
if self.cache_algorithm == CacheAlgorithm.LRU:
torch.ops.fbgemm.direct_mapped_lru_cache_populate_byte(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.total_cache_hash_size,
self.cache_index_table_map,
self.weights_offsets,
self.weights_tys,
self.D_offsets,
linear_cache_indices,
self.lxu_cache_state,
self.lxu_cache_weights,
self.timestep_counter.get(),
self.lxu_state,
self.lxu_cache_miss_timestamp,
16, # row_alignment; using default value.
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
else:
raise ValueError("Direct Mapped for LRU only")
assert (
self.lxu_cache_locations_list.size() < self.max_prefetch_depth
), f"self.lxu_cache_locations_list has grown to size: {self.lxu_cache_locations_list.size()}, this exceeds the maximum: {self.max_prefetch_depth}. This probably indicates an error in logic where prefetch() is being called more frequently than forward()"
self.lxu_cache_locations_list.push(
torch.ops.fbgemm.direct_mapped_lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
)
if self.gather_uvm_cache_stats:
self._accumulate_uvm_cache_stats()
def _accumulate_uvm_cache_stats(self) -> None:
# Accumulate local_uvm_cache_stats (int32) into uvm_cache_stats (int64).
        # We may want to do this accumulation atomically, but since it is only for
        # monitoring, a slightly inaccurate result is acceptable.
self.uvm_cache_stats = torch.add(
self.uvm_cache_stats, self.local_uvm_cache_stats
)
self.local_uvm_cache_stats.zero_()
def _update_cache_miss_counter(
self,
lxu_cache_locations: Tensor,
linear_cache_indices: Tensor,
) -> None:
CACHE_MISS = torch.tensor([-1], device=self.current_device, dtype=torch.int32)
CACHE_HIT = torch.tensor([-2], device=self.current_device, dtype=torch.int32)
cache_missed_locations = torch.where(
lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT
)
unique_ids_list = torch.unique(cache_missed_locations)
unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)
miss_count = torch.sum(unique_ids_count_list)
self.cache_miss_counter[0] += (miss_count > 0).to(torch.int64)
self.cache_miss_counter[1] += miss_count
# Number of unique requests
assert (
len(linear_cache_indices.size()) == 1
), f"linear_cache_indices should be 1-D was {len(linear_cache_indices.size())}-D"
assert (
self.cache_miss_counter.size()[0] == 4
), f"self.cache_miss_counter should be 4-D was {self.cache_miss_counter.size()[0]}-D"
self.cache_miss_counter[2] += torch.unique(linear_cache_indices).size()[0]
# Number of total requests
self.cache_miss_counter[3] += linear_cache_indices.size()[0]
def _update_tablewise_cache_miss(
self,
lxu_cache_locations: Tensor,
linear_cache_indices: Tensor,
offsets: Tensor,
) -> None:
CACHE_MISS = torch.tensor([-1], device=self.current_device, dtype=torch.int32)
CACHE_HIT = torch.tensor([-2], device=self.current_device, dtype=torch.int32)
num_tables = len(self.cache_hash_size_cumsum) - 1
num_offsets_per_table = (len(offsets) - 1) // num_tables
cache_missed_locations = torch.where(
lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT
)
for i in range(num_tables):
start = offsets[i * num_offsets_per_table]
end = offsets[(i + 1) * num_offsets_per_table]
current_cache_missed_locations = cache_missed_locations[start:end]
unique_ids_list = torch.unique(current_cache_missed_locations)
unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)
miss_count = torch.sum(unique_ids_count_list)
self.table_wise_cache_miss[i] += miss_count
def forward(
self,
indices: Tensor,
offsets: Tensor,
per_sample_weights: Optional[Tensor] = None,
) -> Tensor:
assert (
self.weight_initialized
), "weight needs to be initialized before forward function"
# First bound check: check if the indices/offsets are within the boundary
# of the original embedding rows before pruning.
# Note that this is only applied when we enable pruning (if the perf becomes
# an issue, we can fuse it inside the remapping kernel).
if (
self.index_remapping_hash_table_cpu is not None
or self.index_remapping_hash_table.numel() > 0
or self.index_remappings_array.numel() > 0
):
if self.bounds_check_mode_int != BoundsCheckMode.NONE.value:
torch.ops.fbgemm.bounds_check_indices(
self.original_rows_per_table,
indices,
offsets,
self.bounds_check_mode_int,
self.bounds_check_warning,
per_sample_weights,
)
        # Index remapping changes input indices, and some of them become -1 (pruned rows).
# Hence, remapping should be done before prefetch and emb lookup
# so that these operations are with the remapped indices.
if self.index_remapping_hash_table_cpu is not None:
indices = self.index_remapping_hash_table_cpu.lookup(indices, offsets)
elif self.index_remapping_hash_table.numel() > 0:
# Convert from raw indices to pruned indices
indices = torch.ops.fbgemm.pruned_hashmap_lookup(
indices,
offsets,
self.index_remapping_hash_table,
self.index_remapping_hash_table_offsets,
)
elif self.index_remappings_array.numel() > 0:
indices = torch.ops.fbgemm.pruned_array_lookup(
indices,
offsets,
self.index_remappings_array,
self.index_remappings_array_offsets,
)
if self.timestep_prefetch_size.get() <= 0:
self.prefetch(indices, offsets)
self.timestep_prefetch_size.decrement()
lxu_cache_locations = self.lxu_cache_locations_list.pop()
# Second bound check: check if the indices/offsets are within the boundary
# of the pruned embedding rows after pruning.
# Note: we cast to int as a TorchScript workaround.
if self.bounds_check_mode_int != BoundsCheckMode.NONE.value:
torch.ops.fbgemm.bounds_check_indices(
self.rows_per_table,
indices,
offsets,
self.bounds_check_mode_int,
self.bounds_check_warning,
per_sample_weights,
)
# Note: CPU and CUDA ops use the same interface to facilitate JIT IR
# generation for CUDA/CPU. For CPU op, we don't need weights_uvm and
# weights_placements
return torch.ops.fbgemm.int_nbit_split_embedding_codegen_lookup_function(
dev_weights=self.weights_host if self.host_size > 0 else self.weights_dev,
uvm_weights=self.weights_uvm,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
weights_tys=self.weights_tys,
D_offsets=self.D_offsets,
total_D=self.total_D,
max_int2_D=self.max_int2_D,
max_int4_D=self.max_int4_D,
max_int8_D=self.max_int8_D,
max_float16_D=self.max_float16_D,
max_float32_D=self.max_float32_D,
indices=indices,
offsets=offsets,
pooling_mode=int(self.pooling_mode),
indice_weights=per_sample_weights,
output_dtype=self.output_dtype,
lxu_cache_weights=self.lxu_cache_weights,
lxu_cache_locations=lxu_cache_locations,
row_alignment=self.row_alignment,
max_float8_D=self.max_float8_D,
fp8_exponent_bits=self.fp8_exponent_bits,
fp8_exponent_bias=self.fp8_exponent_bias,
)
def initialize_logical_weights_placements_and_offsets(
self,
) -> None:
assert len(self.weights_physical_offsets) == len(self.embedding_specs)
assert len(self.weights_physical_offsets) == len(
self.weights_physical_placements
)
offsets = [self.weights_physical_offsets[t] for t in self.feature_table_map]
placements = [
self.weights_physical_placements[t] for t in self.feature_table_map
]
self.weights_offsets = torch.tensor(
offsets, device=self.current_device, dtype=torch.int64
)
self.weights_placements = torch.tensor(
placements, device=self.current_device, dtype=torch.int32
)
def initialize_physical_weights_placements_and_offsets(
self,
cacheline_alignment: bool = True,
) -> None:
# Initialize physical weights placements and offsets
# and host/dev/uvm sizes
weight_split: SplitState = nbit_construct_split_state(
self.embedding_specs,
cacheable=True,
row_alignment=self.row_alignment,
scale_bias_size_in_bytes=self.scale_bias_size_in_bytes,
cacheline_alignment=cacheline_alignment,
)
self.weights_physical_placements = [t.value for t in weight_split.placements]
self.weights_physical_offsets = weight_split.offsets
self.host_size = weight_split.host_size
self.dev_size = weight_split.dev_size
self.uvm_size = weight_split.uvm_size
@torch.jit.export
def reset_weights_placements_and_offsets(
self, device: torch.device, location: int
) -> None:
# Reset device/location denoted in embedding specs
self.reset_embedding_spec_location(device, location)
# Initialize all physical/logical weights placements and offsets without initializing large dev weights tensor
self.initialize_physical_weights_placements_and_offsets()
self.initialize_logical_weights_placements_and_offsets()
def reset_embedding_spec_location(
self, device: torch.device, location: int
) -> None:
# Overwrite location in embedding_specs with new location
        # Use map since we can't script an enum call (i.e. EmbeddingLocation(value))
INT_TO_EMBEDDING_LOCATION = {
0: EmbeddingLocation.DEVICE,
1: EmbeddingLocation.MANAGED,
2: EmbeddingLocation.MANAGED_CACHING,
3: EmbeddingLocation.HOST,
}
target_location = INT_TO_EMBEDDING_LOCATION[location]
self.current_device = device
self.row_alignment = 1 if target_location == EmbeddingLocation.HOST else 16
self.embedding_specs = [
(spec[0], spec[1], spec[2], spec[3], target_location)
for spec in self.embedding_specs
]
def _apply_split(
self,
dev_size: int,
host_size: int,
uvm_size: int,
placements: List[int],
offsets: List[int],
enforce_hbm: bool,
) -> None:
assert not self.weight_initialized, "Weights have already been initialized."
self.weight_initialized = True
self.weights_physical_placements = placements
self.weights_physical_offsets = offsets
self.host_size = host_size
self.dev_size = dev_size
self.uvm_size = uvm_size
self.initialize_logical_weights_placements_and_offsets()
if dev_size > 0:
self.weights_dev = torch.zeros(
dev_size,
device=self.current_device,
dtype=torch.uint8,
)
if host_size > 0:
self.weights_host = torch.zeros(
host_size, device=self.current_device, dtype=torch.uint8
)
if uvm_size > 0:
assert not self.use_cpu
if enforce_hbm:
if not torch.jit.is_scripting():
logging.info("Enforce hbm for the cache location")
self.weights_uvm = torch.zeros(
uvm_size,
device=self.current_device,
dtype=torch.uint8,
)
else:
self.weights_uvm = torch.zeros(
uvm_size,
out=torch.ops.fbgemm.new_unified_tensor(
torch.zeros(1, device=self.D_offsets.device, dtype=torch.uint8),
[uvm_size],
self.uvm_host_mapped,
),
)
def _apply_cache_state(
self,
cache_state: CacheState,
cache_algorithm: CacheAlgorithm,
cache_load_factor: float,
cache_sets: int,
cache_reserved_memory: float,
) -> None:
assert self.cache_assoc in [
1,
32,
64,
        ], "Only 1-way or 32-way (64-way for AMD) implemented for now"
self.cache_algorithm = cache_algorithm
self.timestep_counter = torch.classes.fbgemm.AtomicCounter()
self.timestep_prefetch_size = torch.classes.fbgemm.AtomicCounter()
self.max_prefetch_depth = MAX_PREFETCH_DEPTH
if self.current_device.type == "meta":
            # To resolve "Cannot copy out of meta tensor; no data!" error
lxu_cache_locations_empty = torch.empty(0, dtype=torch.int32).fill_(-1)
else:
lxu_cache_locations_empty = torch.empty(
0, device=self.current_device, dtype=torch.int32
).fill_(-1)
self.lxu_cache_locations_list = torch.classes.fbgemm.TensorQueue(
lxu_cache_locations_empty
)
# NOTE: no cache for CPU mode!
if cache_state.total_cache_hash_size == 0 or self.use_cpu:
self.register_buffer(
"lxu_cache_weights",
torch.zeros(0, 0, device=self.current_device, dtype=torch.uint8),
)
# NOTE: make TorchScript work!
self.register_buffer(
"cache_hash_size_cumsum",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"total_cache_hash_size",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"cache_index_table_map",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"lxu_state",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"lxu_cache_miss_timestamp",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"cache_miss_counter",
torch.tensor([0, 0, 0, 0], dtype=torch.int64),
persistent=False,
)
self.register_buffer(
"uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int64,
),
persistent=False,
)
self.register_buffer(
"local_uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int32,
),
persistent=False,
)
return
assert cache_load_factor > 0
if cache_sets <= 0:
total_memory = torch.cuda.get_device_properties(
self.current_device
).total_memory
free_memory = (
total_memory
- torch.cuda.memory_reserved(self.current_device)
- int(cache_reserved_memory)
)
assert free_memory > 0
cache_sets = (
int(cache_state.total_cache_hash_size * cache_load_factor)
+ self.cache_assoc
- 1
) // self.cache_assoc
# Note that element_size has been included in max_D_cache (in Bytes)
cache_size = cache_sets * self.cache_assoc * self.max_D_cache
if cache_size > free_memory:
cache_sets = (
int(1.0 * free_memory / self.max_D_cache) + self.cache_assoc - 1
) // self.cache_assoc
cache_sets = 1 if cache_sets == 0 else cache_sets
cache_load_factor = (
1.0 * cache_sets * self.cache_assoc / int(cache_state.total_cache_hash_size)
)
assert cache_sets > 0
if cache_algorithm == CacheAlgorithm.LFU:
assert cache_sets < 2**24 - 1
cache_size = cache_sets * self.cache_assoc * self.max_D_cache
logging.info(
f"Using on-device cache with admission algorithm "
f"{cache_algorithm}, {cache_sets} sets, "
f"cache_load_factor: {cache_load_factor : .3f}, "
f"{cache_size / 1024.0 / 1024.0 / 1024.0 : .2f}GB"
)
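        # Illustrative sizing sketch (assumed numbers, not taken from any config):
        # with total_cache_hash_size = 1_000_000, cache_load_factor = 0.2 and
        # cache_assoc = 32, cache_sets = (200_000 + 31) // 32 = 6250, so the cache
        # holds 6250 * 32 * max_D_cache bytes of quantized rows (max_D_cache
        # already includes the element size in bytes).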
self.total_cache_hash_size = cache_state.total_cache_hash_size
self.register_buffer(
"cache_hash_size_cumsum",
torch.tensor(
cache_state.cache_hash_size_cumsum,
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"cache_index_table_map",
torch.tensor(
cache_state.cache_index_table_map,
device=self.current_device,
dtype=torch.int32,
),
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(
cache_sets,
self.cache_assoc,
device=self.current_device,
dtype=torch.int64,
).fill_(-1),
)
self.register_buffer(
"lxu_cache_weights",
torch.zeros(
cache_sets * self.cache_assoc,
self.max_D_cache,
device=self.current_device,
dtype=torch.uint8,
),
)
self.register_buffer(
"lxu_state",
torch.zeros(
size=(self.total_cache_hash_size + 1,)
if cache_algorithm == CacheAlgorithm.LFU
else (cache_sets, self.cache_assoc),
device=self.current_device,
dtype=torch.int64,
),
)
if self.cache_assoc == 1:
self.register_buffer(
"lxu_cache_miss_timestamp",
torch.zeros(
cache_sets,
self.cache_assoc,
device=self.current_device,
dtype=torch.int64,
),
)
else:
# make TorchScript work
self.register_buffer(
"lxu_cache_miss_timestamp",
torch.zeros(1, device=self.current_device, dtype=torch.int64),
persistent=False,
)
self.register_buffer(
"cache_miss_counter",
torch.tensor([0, 0, 0, 0], device=self.current_device, dtype=torch.int64),
)
self.register_buffer(
"uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int64,
),
persistent=False,
)
self.register_buffer(
"local_uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int32,
),
persistent=False,
)
if cache_algorithm not in (CacheAlgorithm.LFU, CacheAlgorithm.LRU):
raise ValueError(
f"cache_algorithm must be {CacheAlgorithm.LRU} "
f"or {CacheAlgorithm.LFU}"
)
if self.gather_uvm_cache_stats:
self.reset_uvm_cache_stats()
def reset_cache_states(self) -> None:
if not self.lxu_cache_weights.numel():
return
self.lxu_cache_state.fill_(-1)
self.lxu_state.fill_(0)
self.timestep_counter.reset()
@torch.jit.export
def split_embedding_weights_with_scale_bias(
self, split_scale_bias_mode: int = 1
) -> List[Tuple[Tensor, Optional[Tensor], Optional[Tensor]]]:
"""
Returns a list of weights, split by table
split_scale_bias_mode:
0: return one row;
1: return weights + scale_bias;
2: return weights, scale, bias.
"""
assert self.weight_initialized
splits: List[Tuple[Tensor, Optional[Tensor], Optional[Tensor]]] = []
for t, (_, rows, dim, weight_ty, _) in enumerate(self.embedding_specs):
placement = self.weights_physical_placements[t]
if placement == EmbeddingLocation.DEVICE.value:
weights = self.weights_dev
elif placement == EmbeddingLocation.HOST.value:
weights = self.weights_host
else:
weights = self.weights_uvm
offset = self.weights_physical_offsets[t]
weights_shifts = weights.detach()[
offset : offset
+ rows
* rounded_row_size_in_bytes(
dim, weight_ty, self.row_alignment, self.scale_bias_size_in_bytes
)
].view(
rows,
rounded_row_size_in_bytes(
dim, weight_ty, self.row_alignment, self.scale_bias_size_in_bytes
),
)
if split_scale_bias_mode == 1 or split_scale_bias_mode == 2:
# remove the padding at the end of each row.
weights_shifts = weights_shifts[
:,
: unpadded_row_size_in_bytes(
dim, weight_ty, self.scale_bias_size_in_bytes
),
]
if (
weight_ty == SparseType.INT8
or weight_ty == SparseType.INT4
or weight_ty == SparseType.INT2
):
if split_scale_bias_mode == 1:
splits.append(
(
weights_shifts[:, self.scale_bias_size_in_bytes :],
weights_shifts[:, : self.scale_bias_size_in_bytes],
None,
)
)
else: # 2
# weights_shifts: [0:2] is scale; [2:4] is bias; [4:] is real weights
splits.append(
(
weights_shifts[:, self.scale_bias_size_in_bytes :],
weights_shifts[
:, : self.scale_bias_size_in_bytes // 2
].view(torch.float16),
weights_shifts[
:,
self.scale_bias_size_in_bytes
// 2 : self.scale_bias_size_in_bytes,
].view(torch.float16),
)
)
elif (
weight_ty == SparseType.FP8
or weight_ty == SparseType.FP16
or weight_ty == SparseType.FP32
):
splits.append(
(
weights_shifts,
None,
None,
)
)
else:
raise ValueError("weight_ty is not supported")
else: # split_scale_bias_mode == 0:
splits.append((weights_shifts, None, None))
return splits
@torch.jit.export
def split_embedding_weights(
self,
split_scale_shifts: bool = True
# When true, return list of two tensors, the first with weights and
# the second with scale_bias.
# This should've been named as split_scale_bias.
# Keep as is for backward compatibility.
) -> List[Tuple[Tensor, Optional[Tensor]]]:
"""
Returns a list of weights, split by table
"""
splits: List[
Tuple[Tensor, Optional[Tensor], Optional[Tensor]]
] = self.split_embedding_weights_with_scale_bias(
split_scale_bias_mode=(1 if split_scale_shifts else 0)
)
return [
(split_weight_scale_bias[0], split_weight_scale_bias[1])
for split_weight_scale_bias in splits
]
@torch.jit.export
def initialize_weights(self) -> None:
if not self.weight_initialized:
self._apply_split(
self.dev_size,
self.host_size,
self.uvm_size,
self.weights_physical_placements,
self.weights_physical_offsets,
self.enforce_hbm,
)
self.weight_initialized = True
def fill_random_weights(self) -> None:
"""
Fill the buffer with random weights, table by table
FIXME: make it in-place fill.
"""
self.initialize_weights()
weights = self.split_embedding_weights()
for dest_weight in weights:
dest_weight[0].copy_(
random_quant_scaled_tensor(
shape=dest_weight[0].shape, device=self.current_device
)
)
def assign_embedding_weights(
self, q_weight_list: List[Tuple[Tensor, Optional[Tensor]]]
) -> None:
"""
Assigns self.split_embedding_weights() with values from the input list of weights and scale_shifts.
"""
weights = self.split_embedding_weights()
assert len(q_weight_list) == len(weights)
for dest_weight, input_weight in zip(weights, q_weight_list):
dest_weight[0].copy_(input_weight[0])
if input_weight[1] is not None:
assert dest_weight[1] is not None
dest_weight[1].copy_(input_weight[1])
else:
assert dest_weight[1] is None
@torch.jit.export
def set_index_remappings_array(
self,
index_remapping: List[Tensor],
) -> None:
rows: List[int] = [e[1] for e in self.embedding_specs]
index_remappings_array_offsets = [0]
original_feature_rows = torch.jit.annotate(List[int], [])
last_offset = 0
for t, mapping in enumerate(index_remapping):
if mapping is not None:
current_original_row = mapping.numel()
last_offset += current_original_row
original_feature_rows.append(current_original_row)
else:
original_feature_rows.append(rows[t])
index_remappings_array_offsets.append(last_offset)
self.index_remappings_array_offsets = torch.tensor(
index_remappings_array_offsets,
device=self.current_device,
dtype=torch.int64,
)
if len(original_feature_rows) == 0:
original_feature_rows = rows
self.original_rows_per_table = torch.tensor(
[original_feature_rows[t] for t in self.feature_table_map],
device=self.current_device,
dtype=torch.int64,
)
if self.index_remappings_array_offsets[-1] == 0:
self.index_remappings_array = torch.empty(
0, dtype=torch.int32, device=self.current_device
)
else:
index_remappings_filter_nones = []
for mapping in index_remapping:
if mapping is not None:
index_remappings_filter_nones.append(mapping)
self.index_remappings_array = torch.cat(index_remappings_filter_nones).to(
self.current_device
)
def set_index_remappings(
self,
index_remapping: List[Tensor],
pruning_hash_load_factor: float = 0.5,
use_array_for_index_remapping: bool = True,
) -> None:
rows: List[int] = [e[1] for e in self.embedding_specs]
T = len(self.embedding_specs)
# Hash mapping pruning
if not use_array_for_index_remapping:
capacities = [
round_up(int(row * 1.0 / pruning_hash_load_factor), 32)
if index_remap is not None
else 0
for (index_remap, row) in zip(index_remapping, rows)
]
hash_table = torch.empty(
(sum(capacities), 2),
dtype=torch.int32,
)
hash_table[:, :] = -1
hash_table_offsets = torch.tensor([0] + list(accumulate(capacities))).long()
merged_index_remappings = [
mapping if mapping is not None else Tensor(list(range(row)))
for (mapping, row) in zip(index_remapping, rows)
]
original_feature_rows = [
mapping.numel() for mapping in merged_index_remappings
]
if len(original_feature_rows) == 0:
original_feature_rows = rows
self.original_rows_per_table = torch.tensor(
[original_feature_rows[t] for t in self.feature_table_map],
device=self.current_device,
dtype=torch.int64,
)
dense_indices = torch.cat(merged_index_remappings, dim=0).int()
indices = torch.cat(
[torch.arange(row) for row in original_feature_rows], dim=0
).int()
offsets = torch.tensor([0] + list(accumulate(original_feature_rows))).int()
if self.use_cpu:
self.index_remapping_hash_table_cpu = (
torch.classes.fbgemm.PrunedMapCPU()
)
self.index_remapping_hash_table_cpu.insert(
indices, dense_indices, offsets, T
)
else:
# pruned_hashmap_insert only has cpu implementation: Move dense_indices to CPU
torch.ops.fbgemm.pruned_hashmap_insert(
indices,
dense_indices.cpu(),
offsets,
hash_table,
hash_table_offsets,
)
self.index_remapping_hash_table = hash_table.to(self.current_device)
self.index_remapping_hash_table_offsets = hash_table_offsets.to(
self.current_device
)
self.index_remapping_hash_table_cpu = None
# Array mapping pruning
else:
self.set_index_remappings_array(index_remapping)
def _embedding_inplace_update_per_table(
self,
update_table_idx: int,
update_row_indices: List[int],
update_weights: Tensor,
) -> None:
row_size = len(update_row_indices)
if row_size == 0:
return
# pyre-fixme[9]: update_row_indices has type `List[int]`; used as `Tensor`.
update_row_indices = torch.tensor(
update_row_indices,
device=self.current_device,
dtype=torch.int64,
)
table_values = self.split_embedding_weights(split_scale_shifts=False)[
update_table_idx
]
table_values[0].scatter_(
dim=0,
# pyre-fixme[16]: `List` has no attribute `view`.
index=update_row_indices.view(row_size, 1).expand_as(update_weights),
src=update_weights,
)
@torch.jit.export
def embedding_inplace_update(
self,
update_table_indices: List[int],
update_row_indices: List[List[int]],
update_weights: List[Tensor],
) -> None:
for i in range(len(update_table_indices)):
self._embedding_inplace_update_per_table(
update_table_indices[i],
update_row_indices[i],
update_weights[i],
)
def embedding_inplace_update_internal(
self,
update_table_indices: List[int],
update_row_indices: List[int],
update_weights: Tensor,
) -> None:
assert len(update_table_indices) == len(update_row_indices)
update_offsets = []
update_offset = 0
for table_idx in update_table_indices:
D_bytes = rounded_row_size_in_bytes(
self.embedding_specs[table_idx][2],
self.embedding_specs[table_idx][3],
self.row_alignment,
self.scale_bias_size_in_bytes,
)
update_offsets.append(update_offset)
update_offset += D_bytes
update_offsets.append(update_offset)
# pyre-fixme[9]: update_table_indices has type `List[int]`; used as `Tensor`.
update_table_indices = torch.tensor(
update_table_indices,
device=self.current_device,
dtype=torch.int32,
)
# pyre-fixme[9]: update_row_indices has type `List[int]`; used as `Tensor`.
update_row_indices = torch.tensor(
update_row_indices,
device=self.current_device,
dtype=torch.int64,
)
update_offsets = torch.tensor(
update_offsets,
device=self.current_device,
dtype=torch.int64,
)
# Only support array based pruning for now.
assert self.index_remapping_hash_table_cpu is None
assert self.index_remapping_hash_table.numel() == 0
assert self.index_remappings_array.numel() >= 0
if self.index_remappings_array.numel() > 0:
update_row_indices = torch.ops.fbgemm.pruned_array_lookup_from_row_idx(
update_row_indices,
update_table_indices,
self.index_remappings_array,
self.index_remappings_array_offsets,
)
lxu_cache_locations = None
if self.lxu_cache_weights.numel() > 0:
linear_cache_indices = (
torch.ops.fbgemm.linearize_cache_indices_from_row_idx(
self.cache_hash_size_cumsum,
update_table_indices,
update_row_indices,
)
)
if self.cache_assoc in [32, 64]:
# 64 for AMD
self.prefetch_32way(linear_cache_indices)
elif self.cache_assoc == 1:
self.prefetch_1way(linear_cache_indices)
else:
raise ValueError(f"{self.cache_assoc} not in [1, 32, 64]")
lxu_cache_locations = self.lxu_cache_locations_list.pop()
torch.ops.fbgemm.emb_inplace_update(
dev_weights=self.weights_host if self.host_size > 0 else self.weights_dev,
uvm_weights=self.weights_uvm,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
weights_tys=self.weights_tys,
D_offsets=self.D_offsets,
update_weights=update_weights,
update_table_indices=update_table_indices,
update_row_indices=update_row_indices,
update_offsets=update_offsets,
row_alignment=self.row_alignment,
lxu_cache_weights=self.lxu_cache_weights,
lxu_cache_locations=lxu_cache_locations,
)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import fbgemm_gpu
import fbgemm_gpu.split_table_batched_embeddings_ops_training
import torch # usort:skip
Tensor = torch.Tensor
def add_docs(method, docstr):
method.__doc__ = docstr
add_docs(
torch.ops.fbgemm.jagged_2d_to_dense,
"""
jagged_2d_to_dense(values, x_offsets, max_sequence_length) -> Tensor
Converts a jagged tensor, with a 2D values array, into a dense tensor, padding with zeros.
Args:
values (Tensor): 2D tensor containing the values of the jagged tensor.
x_offsets (Tensor): 1D tensor containing the starting point of each jagged row in the values tensor.
max_sequence_length (int): Maximum length of any row in the jagged dimension.
Returns:
Tensor: The padded dense tensor
Example:
>>> values = torch.tensor([[1,1],[2,2],[3,3],[4,4]])
>>> x_offsets = torch.tensor([0, 1, 3])
>>> torch.ops.fbgemm.jagged_2d_to_dense(values, x_offsets, 3)
tensor([[[1, 1],
[0, 0],
[0, 0]],
[[2, 2],
[3, 3],
[0, 0]]])
""",
)
# Example:
#
# >>> t = torch.arange(4)
add_docs(
torch.ops.fbgemm.jagged_1d_to_dense,
"""
jagged_1d_to_dense(values, offsets, max_sequence_length, padding_value) -> Tensor
Converts a jagged tensor, with a 1D values array, into a dense tensor, padding with a specified padding value.
Args:
values (Tensor): 1D tensor containing the values of the jagged tensor.
offsets (Tensor): 1D tensor containing the starting point of each jagged row in the values tensor.
max_sequence_length (int): Maximum length of any row in the jagged dimension.
padding_value (int): Value to set in the empty areas of the dense output, outside of the jagged tensor coverage.
Returns:
Tensor: the padded dense tensor
Example:
>>> values = torch.tensor([1,2,3,4])
>>> offsets = torch.tensor([0, 1, 3])
    >>> torch.ops.fbgemm.jagged_1d_to_dense(values, offsets, 3, 0)
tensor([[1, 0, 0],
[2, 3, 0]])
""",
)
add_docs(
torch.ops.fbgemm.dense_to_jagged,
"""
dense_to_jagged(dense, x_offsets, total_L) -> (Tensor, Tensor[])
Converts a dense tensor into a jagged tensor, given the desired offsets of the resulting jagged tensor.
Args:
dense (Tensor): A dense input tensor to be converted
x_offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
total_L (int, Optional): Total number of values in the resulting jagged tensor.
Returns:
    (Tensor, Tensor[]): Values and offsets of the resulting jagged tensor. Offsets are identical to those that were input.
Example:
>>> dense = torch.tensor([[[1, 1], [0, 0], [0, 0]], [[2, 2], [3, 3], [0, 0]]])
>>> x_offsets = torch.tensor([0, 1, 3])
>>> torch.ops.fbgemm.dense_to_jagged(dense, [x_offsets])
(tensor([[1, 1],
[2, 2],
[3, 3]]), [tensor([0, 1, 3])])
""",
)
add_docs(
torch.ops.fbgemm.jagged_to_padded_dense,
"""
jagged_to_padded_dense(values, offsets, max_lengths, padding_value=0) -> Tensor
Converts a jagged tensor into a dense tensor, padding with a specified padding value.
Args:
values (Tensor): Jagged tensor values
offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
max_lengths (int[]): A list with max_length for each jagged dimension.
padding_value (float): Value to set in the empty areas of the dense output, outside of the jagged tensor coverage.
Returns:
Tensor: the padded dense tensor
Example:
>>> values = torch.tensor([[1,1],[2,2],[3,3],[4,4]])
>>> offsets = torch.tensor([0, 1, 3])
>>> torch.ops.fbgemm.jagged_to_padded_dense(values, [offsets], [3], 7)
tensor([[[1, 1],
[7, 7],
[7, 7]],
[[2, 2],
[3, 3],
[7, 7]]])
""",
)
add_docs(
torch.ops.fbgemm.jagged_dense_elementwise_add,
"""
jagged_dense_elementwise_add(x_values, x_offsets, y) -> Tensor
Adds a jagged tensor to a dense tensor, resulting in a dense tensor. The jagged
tensor input will be padded with zeros for the purposes of the addition.
Args:
x_values (Tensor): Jagged tensor values
offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
y (Tensor): A dense tensor
Returns:
Tensor: The sum of jagged input tensor + y
""",
)
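# Illustrative usage sketch for jagged_dense_elementwise_add (toy values assumed
# for illustration; the jagged input is zero-padded to y's shape before the add):
# >>> x_values = torch.tensor([[1, 1], [2, 2], [3, 3]], dtype=torch.float32)
# >>> x_offsets = torch.tensor([0, 1, 3])
# >>> y = torch.ones(2, 3, 2)
# >>> out = torch.ops.fbgemm.jagged_dense_elementwise_add(x_values, [x_offsets], y)
# >>> # out is dense with y's shape; positions outside each bag simply keep y's values.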
add_docs(
torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output,
"""
jagged_dense_elementwise_add_jagged_output(x_values, x_offsets, y) -> (Tensor, Tensor[])
Adds a jagged tensor to a dense tensor, resulting in a jagged tensor with the same structure as the input jagged tensor.
Args:
x_values (Tensor): Jagged tensor values
x_offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
y (Tensor): A dense tensor
Returns:
    (Tensor, Tensor[]): Values and offsets of the resulting jagged tensor. Offsets are identical to those that were input.
""",
)
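# Illustrative sketch for the jagged-output variant (reusing the toy x_values,
# x_offsets and y from the sketch above): the result keeps the jagged structure.
# >>> out_values, out_offsets = torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output(
# ...     x_values, [x_offsets], y)
# >>> # out_values has the same shape as x_values; out_offsets == [x_offsets].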
add_docs(
torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output,
"""
jagged_dense_dense_elementwise_add_jagged_output(x_values, x_offsets, y_0, y_1) -> (Tensor, Tensor[])
Adds a jagged tensor to the sum of two dense tensors, resulting in a jagged tensor with the same structure as the input jagged tensor.
Args:
x_values (Tensor): Jagged tensor values
x_offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
y_0 (Tensor): A dense tensor
y_1 (Tensor): A dense tensor
Returns:
    (Tensor, Tensor[]): Values and offsets of the resulting jagged tensor. Offsets are identical to those that were input.
""",
)
add_docs(
torch.ops.fbgemm.jagged_dense_elementwise_mul,
"""
jagged_dense_elementwise_mul(x_values, x_offsets, y) -> (Tensor, Tensor[])
Elementwise-multiplies a jagged tensor by a dense tensor, resulting in a jagged tensor with the same structure as the input jagged tensor.
Args:
x_values (Tensor): Jagged tensor values
x_offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
y (Tensor): A dense tensor
Returns:
    (Tensor, Tensor[]): Values and offsets of the resulting jagged tensor. Offsets are identical to those that were input.
""",
)
add_docs(
fbgemm_gpu.split_table_batched_embeddings_ops_training.SplitTableBatchedEmbeddingBagsCodegen,
"""
SplitTableBatchedEmbeddingBagsCodegen(embedding_specs, feature_table_map=None, cache_algorithm=CacheAlgorithm.LRU, cache_load_factor=0.2, cache_sets=0, cache_reserved_memory=0.0, cache_precision=SparseType.FP32, weights_precision=SparseType.FP32, output_dtype=SparseType.FP32, enforce_hbm=False, optimizer=OptimType.EXACT_SGD, record_cache_metrics=None, stochastic_rounding=True, gradient_clipping=False, max_gradient=1.0, learning_rate=0.01, eps=1.0e-8, momentum=0.9, weight_decay=0.0, weight_decay_mode=WeightDecayMode.NONE, eta=0.001, beta1=0.9, beta2=0.999, pooling_mode=PoolingMode.SUM, device=None, bounds_check_mode=BoundsCheckMode.WARNING) -> None
Table-batched embedding operator. Looks up one or more embedding tables. The module is intended for training. The backward operator is fused with the optimizer, so the embedding tables are updated during the backward pass.
Args:
    embedding_specs (List[Tuple[int, int, EmbeddingLocation, ComputeDevice]]): A list of embedding specifications. Each spec is a tuple of (number of embedding rows, embedding dimension (must be a multiple of 4), table placement, compute device).
feature_table_map (List[int], optional): An optional list that specifies feature-table mapping.
cache_algorithm (CacheAlgorithm, optional): LXU cache algorithm (`CacheAlgorithm.LRU`, `CacheAlgorithm.LFU`)
cache_load_factor (float, optional): The LXU cache capacity which is `cache_load_factor` * the total number of rows in all embedding tables
cache_sets (int, optional): The number of cache sets
cache_reserved_memory (float, optional): Amount of memory reserved in HBM for non-cache purpose.
cache_precision (SparseType, optional): Data type of LXU cache (`SparseType.FP32`, `SparseType.FP16`)
weights_precision (SparseType, optional): Data type of embedding tables (also known as weights) (`SparseType.FP32`, `SparseType.FP16`, `SparseType.INT8`)
output_dtype (SparseType, optional): Data type of an output tensor (`SparseType.FP32`, `SparseType.FP16`, `SparseType.INT8`)
enforce_hbm (bool, optional): If True, place all weights/momentums in HBM when using cache
optimizer (OptimType, optional): An optimizer to use for embedding table update in the backward pass. (`OptimType.ADAM`, `OptimType.EXACT_ADAGRAD`, `OptimType.EXACT_ROWWISE_ADAGRAD`, `OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD`, `OptimType.EXACT_SGD`, `OptimType.LAMB`, `OptimType.LARS_SGD`, `OptimType.PARTIAL_ROWWISE_ADAM`, `OptimType.PARTIAL_ROWWISE_LAMB`, `OptimType.SGD`)
    record_cache_metrics (RecordCacheMetrics, optional): Record the number of hits, number of requests, etc. if RecordCacheMetrics.record_cache_miss_counter is True, and record similar metrics table-wise if RecordCacheMetrics.record_tablewise_cache_miss is True (default is None).
stochastic_rounding (bool, optional): If True, apply stochastic rounding for weight type that is not `SparseType.FP32`
gradient_clipping (bool, optional): If True, apply gradient clipping
max_gradient (float, optional): The value for gradient clipping
learning_rate (float, optional): The learning rate
eps (float, optional): The epsilon value used by Adagrad, LAMB, and Adam
momentum (float, optional): Momentum used by LARS-SGD
weight_decay (float, optional): Weight decay used by LARS-SGD, LAMB, ADAM, and Rowwise Adagrad
weight_decay_mode (WeightDecayMode, optional): Weight decay mode (`WeightDecayMode.NONE`, `WeightDecayMode.L2`, `WeightDecayMode.DECOUPLE`)
eta (float, optional): The eta value used by LARS-SGD
beta1 (float, optional): The beta1 value used by LAMB and ADAM
beta2 (float, optional): The beta2 value used by LAMB and ADAM
pooling_mode (PoolingMode, optional): Pooling mode (`PoolingMode.SUM`, `PoolingMode.MEAN`, `PoolingMode.NONE`)
device (torch.device, optional): The current device to place tensors on
bounds_check_mode (BoundsCheckMode, optional): If not set to `BoundsCheckMode.NONE`, apply boundary check for indices (`BoundsCheckMode.NONE`, `BoundsCheckMode.FATAL`, `BoundsCheckMode.WARNING`, `BoundsCheckMode.IGNORE`)
Inputs:
    indices (torch.Tensor): A 1D-tensor that contains indices to be accessed in all embedding tables
    offsets (torch.Tensor): A 1D-tensor that contains offsets of indices. Shape `(B * T + 1)` where `B` = batch size and `T` = number of tables. `offsets[t * B + b + 1] - offsets[t * B + b]` is the length of bag `b` of table `t`
    per_sample_weights (torch.Tensor, optional): An optional 1D-tensor that contains positional weights. Shape `(max(bag length))`. Positional weight `i` is multiplied into all columns of row `i` in each bag after it is read from the embedding table and before pooling (if the pooling mode is not PoolingMode.NONE).
feature_requires_grad (torch.Tensor, optional): An optional tensor for checking if `per_sample_weights` requires gradient
Returns:
A 2D-tensor containing looked up data. Shape `(B, total_D)` where `B` = batch size and `total_D` = the sum of all embedding dimensions in the table
Example:
>>> import torch
>>>
>>> from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
>>> EmbeddingLocation,
>>> )
>>> from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
>>> SplitTableBatchedEmbeddingBagsCodegen,
>>> ComputeDevice,
>>> )
>>>
>>> # Two tables
>>> embedding_specs = [
>>> (3, 8, EmbeddingLocation.DEVICE, ComputeDevice.CUDA),
>>> (5, 4, EmbeddingLocation.MANAGED, ComputeDevice.CUDA)
>>> ]
>>>
>>> tbe = SplitTableBatchedEmbeddingBagsCodegen(embedding_specs)
>>> tbe.init_embedding_weights_uniform(-1, 1)
>>>
>>> print(tbe.split_embedding_weights())
[tensor([[-0.9426, 0.7046, 0.4214, -0.0419, 0.1331, -0.7856, -0.8124, -0.2021],
[-0.5771, 0.5911, -0.7792, -0.1068, -0.6203, 0.4813, -0.1677, 0.4790],
[-0.5587, -0.0941, 0.5754, 0.3475, -0.8952, -0.1964, 0.0810, -0.4174]],
device='cuda:0'), tensor([[-0.2513, -0.4039, -0.3775, 0.3273],
[-0.5399, -0.0229, -0.1455, -0.8770],
[-0.9520, 0.4593, -0.7169, 0.6307],
[-0.1765, 0.8757, 0.8614, 0.2051],
[-0.0603, -0.9980, -0.7958, -0.5826]], device='cuda:0')]
>>> # Batch size = 3
>>> indices = torch.tensor([0, 1, 2, 0, 1, 2, 0, 3, 1, 4, 2, 0, 0],
>>> device="cuda",
>>> dtype=torch.long)
>>> offsets = torch.tensor([0, 2, 5, 7, 9, 12, 13],
>>> device="cuda",
>>> dtype=torch.long)
>>>
>>> output = tbe(indices, offsets)
>>>
>>> # Batch size = 3, total embedding dimension = 12
>>> print(output.shape)
torch.Size([3, 12])
>>> print(output)
tensor([[-1.5197, 1.2957, -0.3578, -0.1487, -0.4873, -0.3044, -0.9801, 0.2769,
-0.7164, 0.8528, 0.7159, -0.6719],
[-2.0784, 1.2016, 0.2176, 0.1988, -1.3825, -0.5008, -0.8991, -0.1405,
-1.2637, -0.9427, -1.8902, 0.3754],
[-1.5013, 0.6105, 0.9968, 0.3057, -0.7621, -0.9821, -0.7314, -0.6195,
-0.2513, -0.4039, -0.3775, 0.3273]], device='cuda:0',
grad_fn=<CppNode<SplitLookupFunction_sgd_Op>>)
""",
)
add_docs(
torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul,
"""
batched_dense_vec_jagged_2d_mul(Tensor v, Tensor a_values, Tensor a_offsets) -> Tensor
Batched vector-matrix multiplication of a batched dense vector with a jagged tensor. The dense vector
has size (B * H, max_N) and the jagged tensor has size (B, max_N, H * D), where max_N is the maximum
size of the jagged dimension. B * H is the batch size and each multiplication contracts a length-max_N
vector with a [max_N, D] matrix.
Args:
v (Tensor): dense vector tensor
a_values (Tensor): Jagged tensor values
a_offsets (Tensor []): A list of jagged offset tensors, one for each jagged dimension.
Returns:
Tensor: output of batch matmul in size (B * H, D)
""",
)
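# Illustrative shape sketch for batched_dense_vec_jagged_2d_mul (assumed sizes):
# with B = 2, H = 3, max_N = 4 and D = 5, v has shape (B * H, max_N) = (6, 4),
# the jagged rows in a_values have width H * D = 15, and each of the B * H
# multiplies contracts a length-max_N vector against a [max_N, D] slice, giving
# an output of shape (B * H, D) = (6, 5).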
#
#
# add_docs(
# torch.ops.fbgemm.stacked_jagged_1d_to_dense,
# """Args:
# {input}
# Keyword args:
# {out}""",
# )
#
#
# add_docs(
# torch.ops.fbgemm.stacked_jagged_2d_to_dense,
# """Args:
# {input}
# Keyword args:
# {out}""",
# )
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import itertools
import logging
from math import log2
from typing import List, Optional, Tuple
import torch # usort:skip
import fbgemm_gpu.split_embedding_codegen_lookup_invokers as invokers
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
CacheAlgorithm,
DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
EmbeddingLocation,
PoolingMode,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
align_to_cacheline,
rounded_row_size_in_bytes,
unpadded_row_size_in_bytes,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
CounterBasedRegularizationDefinition,
WeightDecayMode,
)
from torch import nn, Tensor # usort:skip
from torch.autograd.profiler import record_function
try:
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:ssd_split_table_batched_embeddings"
)
except OSError:
# Keep for BC: will be deprecated soon.
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu/fb:ssd_split_table_batched_embeddings"
)
ASSOC = 32
class SSDTableBatchedEmbeddingBags(nn.Module):
D_offsets: Tensor
lxu_cache_weights: Tensor
lru_state: Tensor
lxu_cache_state: Tensor
momentum1_dev: Tensor
momentum1_uvm: Tensor
momentum1_host: Tensor
momentum1_placements: Tensor
momentum1_offsets: Tensor
weights_dev: Tensor
weights_uvm: Tensor
weights_host: Tensor
weights_placements: Tensor
weights_offsets: Tensor
def __init__(
self,
embedding_specs: List[Tuple[int, int]], # tuple of (rows, dims)
feature_table_map: Optional[List[int]], # [T]
cache_sets: int,
ssd_storage_directory: str,
ssd_shards: int = 1,
ssd_memtable_flush_period: int = -1,
ssd_memtable_flush_offset: int = -1,
ssd_l0_files_per_compact: int = 4,
ssd_rate_limit_mbps: int = 0,
ssd_size_ratio: int = 10,
ssd_compaction_trigger: int = 8,
ssd_write_buffer_size: int = 2 * 1024 * 1024 * 1024,
ssd_max_write_buffer_num: int = 16,
ssd_cache_location: EmbeddingLocation = EmbeddingLocation.MANAGED,
ssd_uniform_init_lower: float = -0.01,
ssd_uniform_init_upper: float = 0.01,
# General Optimizer args
stochastic_rounding: bool = True,
gradient_clipping: bool = False,
max_gradient: float = 1.0,
learning_rate: float = 0.01,
eps: float = 1.0e-8, # used by Adagrad, LAMB, and Adam
momentum: float = 0.9, # used by LARS-SGD
weight_decay: float = 0.0, # used by LARS-SGD, LAMB, ADAM, and Rowwise Adagrad
weight_decay_mode: WeightDecayMode = WeightDecayMode.NONE, # used by Rowwise Adagrad
eta: float = 0.001, # used by LARS-SGD,
beta1: float = 0.9, # used by LAMB and ADAM
beta2: float = 0.999, # used by LAMB and ADAM
counter_based_regularization: Optional[
CounterBasedRegularizationDefinition
] = None, # used by Rowwise Adagrad
pooling_mode: PoolingMode = PoolingMode.SUM,
) -> None:
super(SSDTableBatchedEmbeddingBags, self).__init__()
self.pooling_mode = pooling_mode
self.embedding_specs = embedding_specs
(rows, dims) = zip(*embedding_specs)
T_ = len(self.embedding_specs)
assert T_ > 0
# pyre-fixme[8]: Attribute has type `device`; used as `int`.
self.current_device: torch.device = torch.cuda.current_device()
self.feature_table_map: List[int] = (
feature_table_map if feature_table_map is not None else list(range(T_))
)
T = len(self.feature_table_map)
assert T_ <= T
table_has_feature = [False] * T_
for t in self.feature_table_map:
table_has_feature[t] = True
assert all(table_has_feature), "Each table must have at least one feature!"
D_offsets = [dims[t] for t in self.feature_table_map]
D_offsets = [0] + list(itertools.accumulate(D_offsets))
self.total_D: int = D_offsets[-1]
self.max_D: int = max(dims)
self.register_buffer(
"D_offsets",
torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
)
assert self.D_offsets.numel() == T + 1
hash_size_cumsum = [0] + list(itertools.accumulate(rows))
if hash_size_cumsum[-1] == 0:
self.total_hash_size_bits: int = 0
else:
self.total_hash_size_bits: int = int(log2(float(hash_size_cumsum[-1])) + 1)
self.total_hash_size: int = hash_size_cumsum[-1]
# The last element is to easily access # of rows of each table by
# hash_size_cumsum[t + 1] - hash_size_cumsum[t]
hash_size_cumsum = [hash_size_cumsum[t] for t in self.feature_table_map] + [
hash_size_cumsum[-1]
]
self.register_buffer(
"hash_size_cumsum",
torch.tensor(
hash_size_cumsum, device=self.current_device, dtype=torch.int64
),
)
element_size = 4
cache_size = cache_sets * ASSOC * element_size * self.max_D
logging.info(
f"Using cache for SSD with admission algorithm "
f"{CacheAlgorithm.LRU}, {cache_sets} sets, stored on {'DEVICE' if ssd_cache_location is EmbeddingLocation.DEVICE else 'MANAGED'} with {ssd_shards} shards, "
f"Memtable Flush Period: {ssd_memtable_flush_period}, "
f"Memtable Flush Offset: {ssd_memtable_flush_offset}, "
f"Desired L0 files per compaction: {ssd_l0_files_per_compact}, "
f"{cache_size / 1024.0 / 1024.0 / 1024.0 : .2f}GB"
)
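        # Illustrative sizing sketch (assumed numbers): with cache_sets = 1024,
        # ASSOC = 32, element_size = 4 (FP32) and max_D = 128, the LXU cache
        # computed above is 1024 * 32 * 4 * 128 bytes = 16 MiB.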
self.register_buffer(
"lxu_cache_state",
torch.zeros(cache_sets, ASSOC, dtype=torch.int64).fill_(-1),
)
self.register_buffer(
"lru_state", torch.zeros(cache_sets, ASSOC, dtype=torch.int64)
)
assert ssd_cache_location in (
EmbeddingLocation.MANAGED,
EmbeddingLocation.DEVICE,
)
if ssd_cache_location == EmbeddingLocation.MANAGED:
self.register_buffer(
"lxu_cache_weights",
torch.ops.fbgemm.new_managed_tensor(
torch.zeros(1, device=self.current_device, dtype=torch.float32),
[cache_sets * ASSOC, self.max_D],
),
)
else:
self.register_buffer(
"lxu_cache_weights",
torch.zeros(
cache_sets * ASSOC,
self.max_D,
device=self.current_device,
dtype=torch.float32,
),
)
self.timestep = 0
import os
os.makedirs(ssd_storage_directory, exist_ok=True)
import tempfile
ssd_directory = tempfile.mkdtemp(
prefix="ssd_table_batched_embeddings", dir=ssd_storage_directory
)
# pyre-fixme[4]: Attribute must be annotated.
self.ssd_db = torch.classes.fbgemm.EmbeddingRocksDBWrapper(
ssd_directory,
ssd_shards,
ssd_shards,
ssd_memtable_flush_period,
ssd_memtable_flush_offset,
ssd_l0_files_per_compact,
self.max_D,
ssd_rate_limit_mbps,
ssd_size_ratio,
ssd_compaction_trigger,
ssd_write_buffer_size,
ssd_max_write_buffer_num,
ssd_uniform_init_lower,
ssd_uniform_init_upper,
32, # row_storage_bitwidth
)
# pyre-fixme[20]: Argument `self` expected.
(low_priority, high_priority) = torch.cuda.Stream.priority_range()
self.ssd_stream = torch.cuda.Stream(priority=low_priority)
self.ssd_set_start = torch.cuda.Event()
self.ssd_set_end = torch.cuda.Event()
self.timesteps_prefetched: List[int] = []
if weight_decay_mode == WeightDecayMode.COUNTER or counter_based_regularization:
raise AssertionError(
"weight_decay_mode = WeightDecayMode.COUNTER is not supported for SSD TBE."
)
counter_based_regularization = CounterBasedRegularizationDefinition()
self.optimizer_args = invokers.lookup_args.OptimizerArgs(
stochastic_rounding=stochastic_rounding,
gradient_clipping=gradient_clipping,
max_gradient=max_gradient,
learning_rate=learning_rate,
eps=eps,
beta1=beta1,
beta2=beta2,
weight_decay=weight_decay,
weight_decay_mode=weight_decay_mode.value,
eta=eta,
momentum=momentum,
counter_halflife=counter_based_regularization.counter_halflife,
adjustment_iter=counter_based_regularization.adjustment_iter,
adjustment_ub=counter_based_regularization.adjustment_ub,
learning_rate_mode=counter_based_regularization.learning_rate_mode.value,
grad_sum_decay=counter_based_regularization.grad_sum_decay.value,
tail_id_threshold=counter_based_regularization.tail_id_threshold.val,
is_tail_id_thresh_ratio=int(
counter_based_regularization.tail_id_threshold.is_ratio
),
total_hash_size=-1, # Unused
)
self.weights_dev = nn.Parameter(
torch.empty((0,), device=self.current_device, dtype=torch.float32)
)
self.register_buffer(
"weights_uvm",
torch.tensor((0,), device=self.current_device, dtype=torch.float32),
)
self.register_buffer(
"weights_host",
torch.empty(0),
)
self.register_buffer(
"weights_placements",
torch.tensor(
[EmbeddingLocation.MANAGED_CACHING for _ in range(T_)],
dtype=torch.int32,
),
)
weights_offsets = [0] + list(
itertools.accumulate([row * dim for (row, dim) in zip(rows, dims)])
)
self.register_buffer(
"weights_offsets",
torch.tensor(
weights_offsets[:-1],
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"momentum1_dev",
torch.zeros(
self.total_hash_size,
device=self.current_device,
dtype=torch.float32,
),
)
self.register_buffer(
"momentum1_uvm",
torch.empty((0,), device=self.current_device, dtype=torch.float32),
)
self.register_buffer(
"momentum1_host",
torch.empty(0),
)
self.register_buffer(
"momentum1_placements",
torch.tensor(
[EmbeddingLocation.DEVICE for _ in range(T_)], dtype=torch.int32
),
)
momentum1_offsets = [0] + list(itertools.accumulate(rows))
self.register_buffer(
"momentum1_offsets",
torch.tensor(
momentum1_offsets[:-1],
device=self.current_device,
dtype=torch.int64,
),
)
# add placeholder require_grad param to enable autograd without nn.parameter
# this is needed to enable int8 embedding weights for SplitTableBatchedEmbedding
self.placeholder_autograd_tensor = nn.Parameter(
torch.zeros(0, device=self.current_device, dtype=torch.float)
)
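    # Sketch of the prefetch pipeline implemented below (descriptive summary only):
    # indices are linearized, ssd_cache_populate_actions picks rows to insert and
    # evict, inserted rows are read from the RocksDB backend into pinned CPU memory
    # and scattered into lxu_cache_weights via masked_index_put, and evicted rows
    # are written back to SSD on the low-priority ssd_stream, synchronized with the
    # ssd_set_start / ssd_set_end CUDA events.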
def prefetch(self, indices: Tensor, offsets: Tensor) -> Optional[Tensor]:
(indices, offsets) = indices.long(), offsets.long()
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.hash_size_cumsum,
indices,
offsets,
)
self.timestep += 1
self.timesteps_prefetched.append(self.timestep)
(
inserted_indices,
evicted_indices,
assigned_cache_slots,
actions_count_gpu,
) = torch.ops.fbgemm.ssd_cache_populate_actions(
linear_cache_indices,
self.total_hash_size,
self.lxu_cache_state,
self.timestep,
1, # for now assume prefetch_dist == 1
self.lru_state,
)
def to_pinned_cpu(t: torch.Tensor) -> torch.Tensor:
t_cpu = torch.empty(t.shape, pin_memory=True, dtype=t.dtype)
t_cpu.copy_(t, non_blocking=True)
return t_cpu
actions_count_cpu = to_pinned_cpu(actions_count_gpu)
assigned_cache_slots = assigned_cache_slots.long()
evicted_rows = self.lxu_cache_weights[
assigned_cache_slots.clamp_(min=0).long(), :
]
inserted_rows = torch.empty(
evicted_rows.shape,
dtype=self.lxu_cache_weights.dtype,
pin_memory=True,
)
current_stream = torch.cuda.current_stream()
# Ensure the previous iterations l3_db.set(..) has completed.
current_stream.wait_event(self.ssd_set_end)
self.ssd_db.get_cuda(
to_pinned_cpu(inserted_indices), inserted_rows, actions_count_cpu
)
current_stream.record_event(self.ssd_set_start)
# TODO: T123943415 T123943414 this is a big copy that is (mostly) unnecessary with a decent cache hit rate.
# Should we allocate on HBM?
inserted_rows_gpu = inserted_rows.cuda(non_blocking=True)
# self.lxu_cache_weights[assigned_cache_slots, :] = inserted_rows.cuda(non_blocking=True)
torch.ops.fbgemm.masked_index_put(
self.lxu_cache_weights,
assigned_cache_slots,
inserted_rows_gpu,
actions_count_gpu,
)
with torch.cuda.stream(self.ssd_stream):
self.ssd_stream.wait_event(self.ssd_set_start)
evicted_rows_cpu = to_pinned_cpu(evicted_rows)
evicted_indices_cpu = to_pinned_cpu(evicted_indices)
# pyre-fixme[6]: For 1st param expected `Stream` but got `Stream`.
evicted_rows.record_stream(self.ssd_stream)
evicted_indices.record_stream(self.ssd_stream)
self.ssd_db.set_cuda(
evicted_indices_cpu, evicted_rows_cpu, actions_count_cpu, self.timestep
)
# TODO: is this needed?
# Need a way to synchronize
# actions_count_cpu.record_stream(self.ssd_stream)
self.ssd_stream.record_event(self.ssd_set_end)
return linear_cache_indices
def forward(
self,
indices: Tensor,
offsets: Tensor,
per_sample_weights: Optional[Tensor] = None,
feature_requires_grad: Optional[Tensor] = None,
) -> Tensor:
(indices, offsets) = indices.long(), offsets.long()
if len(self.timesteps_prefetched) == 0:
with record_function("## prefetch ##"):
linear_cache_indices = self.prefetch(indices, offsets)
else:
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.hash_size_cumsum,
indices,
offsets,
)
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.hash_size_cumsum[-1].item(),
)
common_args = invokers.lookup_args.CommonArgs(
placeholder_autograd_tensor=self.placeholder_autograd_tensor,
output_dtype=SparseType.FP32.as_int(),
dev_weights=self.weights_dev,
host_weights=self.weights_host,
uvm_weights=self.weights_uvm,
lxu_cache_weights=self.lxu_cache_weights,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
D_offsets=self.D_offsets,
total_D=self.total_D,
max_D=self.max_D,
hash_size_cumsum=self.hash_size_cumsum,
total_hash_size_bits=self.total_hash_size_bits,
indices=indices,
offsets=offsets,
pooling_mode=self.pooling_mode,
indice_weights=per_sample_weights,
feature_requires_grad=feature_requires_grad,
lxu_cache_locations=lxu_cache_locations,
vbe_metadata=invokers.lookup_args.VBEMetadata(
B_offsets=None,
output_offsets_feature_rank=None,
B_offsets_rank_per_feature=None,
max_B=-1,
max_B_feature_rank=-1,
output_size=-1,
),
is_experimental=False,
)
momentum1 = invokers.lookup_args.Momentum(
dev=self.momentum1_dev,
host=self.momentum1_host,
uvm=self.momentum1_uvm,
offsets=self.momentum1_offsets,
placements=self.momentum1_placements,
)
self.timesteps_prefetched.pop(0)
return invokers.lookup_rowwise_adagrad.invoke(
common_args, self.optimizer_args, momentum1
)
@torch.jit.ignore
def debug_split_optimizer_states(self) -> List[Tuple[torch.Tensor]]:
"""
Returns a list of states, split by table
Testing only
"""
(rows, _) = zip(*self.embedding_specs)
rows_cumsum = [0] + list(itertools.accumulate(rows))
return [
(
self.momentum1_dev.detach()[rows_cumsum[t] : rows_cumsum[t + 1]].view(
row
),
)
for t, row in enumerate(rows)
]
@torch.jit.export
def debug_split_embedding_weights(self) -> List[torch.Tensor]:
"""
Returns a list of weights, split by table.
Testing only, very slow.
"""
(rows, _) = zip(*self.embedding_specs)
rows_cumsum = [0] + list(itertools.accumulate(rows))
splits = []
for t, (row, dim) in enumerate(self.embedding_specs):
weights = torch.empty((row, dim), dtype=torch.float32)
self.ssd_db.get_cuda(
torch.arange(rows_cumsum[t], rows_cumsum[t + 1]).to(torch.int64),
weights,
torch.as_tensor([row]),
)
splits.append(weights)
torch.cuda.synchronize(self.current_device)
return splits
@torch.jit.export
def set_learning_rate(self, lr: float) -> None:
"""
Sets the learning rate.
"""
self._set_learning_rate(lr)
@torch.jit.ignore
def _set_learning_rate(self, lr: float) -> float:
"""
Helper function to script `set_learning_rate`.
Note that returning None does not work.
"""
self.optimizer_args = self.optimizer_args._replace(learning_rate=lr)
return 0.0
def flush(self) -> None:
active_slots_mask = self.lxu_cache_state != -1
active_weights = self.lxu_cache_weights.masked_select(
active_slots_mask.view(-1, 1)
).view(-1, self.max_D)
active_ids = self.lxu_cache_state.view(-1).masked_select(
active_slots_mask.view(-1)
)
torch.cuda.current_stream().wait_stream(self.ssd_stream)
self.ssd_db.set_cuda(
active_ids.cpu(),
active_weights.cpu(),
torch.tensor([active_ids.numel()]),
self.timestep,
)
class SSDIntNBitTableBatchedEmbeddingBags(nn.Module):
"""
SSD Table-batched version of nn.EmbeddingBag(sparse=False)
    Inference version, with FP32/FP16/FP8/INT8/INT4/INT2 support
"""
embedding_specs: List[Tuple[str, int, int, SparseType]]
def __init__(
self,
embedding_specs: List[
Tuple[str, int, int, SparseType]
], # tuple of (feature_names, rows, dims, SparseType)
feature_table_map: Optional[List[int]] = None, # [T]
pooling_mode: PoolingMode = PoolingMode.SUM,
output_dtype: SparseType = SparseType.FP16,
row_alignment: Optional[int] = None,
fp8_exponent_bits: Optional[int] = None,
fp8_exponent_bias: Optional[int] = None,
cache_assoc: int = 32,
scale_bias_size_in_bytes: int = DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
cache_sets: int = 0,
ssd_storage_directory: str = "/tmp",
ssd_shards: int = 1,
ssd_memtable_flush_period: int = -1,
ssd_memtable_flush_offset: int = -1,
ssd_l0_files_per_compact: int = 4,
ssd_rate_limit_mbps: int = 0,
ssd_size_ratio: int = 10,
ssd_compaction_trigger: int = 8,
ssd_write_buffer_size: int = 2 * 1024 * 1024 * 1024,
ssd_max_write_buffer_num: int = 16,
ssd_cache_location: EmbeddingLocation = EmbeddingLocation.MANAGED,
ssd_uniform_init_lower: float = -0.01,
ssd_uniform_init_upper: float = 0.01,
) -> None: # noqa C901 # tuple of (rows, dims,)
super(SSDIntNBitTableBatchedEmbeddingBags, self).__init__()
assert cache_assoc == 32, "Only 32-way cache is supported now"
self.scale_bias_size_in_bytes = scale_bias_size_in_bytes
self.pooling_mode = pooling_mode
self.embedding_specs = embedding_specs
T_ = len(self.embedding_specs)
assert T_ > 0
device = torch.cuda.current_device()
if device is None:
self.current_device: torch.device = torch.device(
torch.cuda.current_device()
)
elif isinstance(device, torch.device):
self.current_device = device
else:
self.current_device = torch.device(device)
self.use_cpu: bool = self.current_device.type == "cpu"
self.feature_table_map: List[int] = (
feature_table_map if feature_table_map is not None else list(range(T_))
)
T = len(self.feature_table_map)
assert T_ <= T
table_has_feature = [False] * T_
for t in self.feature_table_map:
table_has_feature[t] = True
assert all(table_has_feature), "Each table must have at least one feature!"
self.output_dtype: int = output_dtype.as_int()
# (feature_names, rows, dims, weights_tys) = zip(*embedding_specs)
# Pyre workaround
rows: List[int] = [e[1] for e in embedding_specs]
dims: List[int] = [e[2] for e in embedding_specs]
weights_tys: List[SparseType] = [e[3] for e in embedding_specs]
D_offsets = [dims[t] for t in self.feature_table_map]
D_offsets = [0] + list(itertools.accumulate(D_offsets))
self.total_D: int = D_offsets[-1]
self.register_buffer(
"D_offsets",
torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
)
if row_alignment is None:
self.row_alignment: int = 1 if self.use_cpu else 16
else:
self.row_alignment = row_alignment
for dim, weight_ty in zip(dims, weights_tys):
if not weight_ty.is_float():
assert (
dim % (8 / weight_ty.bit_rate()) == 0
), f"For quantized types we need to at least pack at byte granularity, dim: {dim}, weight_ty: {weight_ty}"
def max_ty_D(ty: SparseType) -> int:
return max(
[dim for dim, weight_ty in zip(dims, weights_tys) if weight_ty == ty],
default=0,
)
self.max_int2_D: int = max_ty_D(SparseType.INT2)
self.max_int4_D: int = max_ty_D(SparseType.INT4)
self.max_int8_D: int = max_ty_D(SparseType.INT8)
self.max_float8_D: int = max_ty_D(SparseType.FP8)
self.max_float16_D: int = max_ty_D(SparseType.FP16)
self.max_float32_D: int = max_ty_D(SparseType.FP32)
cached_dims = [
rounded_row_size_in_bytes(
embedding_spec[2], embedding_spec[3], 16, self.scale_bias_size_in_bytes
)
for embedding_spec in self.embedding_specs
]
self.max_D_cache: int = max(cached_dims) if len(cached_dims) > 0 else 0
placements = []
offsets = []
uvm_size = 0
for _, num_embeddings, embedding_dim, weight_ty in embedding_specs:
embedding_dim = rounded_row_size_in_bytes(
embedding_dim, weight_ty, self.row_alignment, scale_bias_size_in_bytes
)
state_size = num_embeddings * embedding_dim
state_size = align_to_cacheline(state_size)
placements.append(EmbeddingLocation.MANAGED_CACHING)
offsets.append(uvm_size)
uvm_size += state_size
self.weights_physical_offsets: List[int] = offsets
weights_tys_int = [weights_tys[t].as_int() for t in self.feature_table_map]
self.register_buffer(
"weights_tys",
torch.tensor(
weights_tys_int, device=self.current_device, dtype=torch.uint8
),
)
self.weight_initialized: bool = True
assert self.D_offsets.numel() == T + 1
hash_size_cumsum = [0] + list(itertools.accumulate(rows))
if hash_size_cumsum[-1] == 0:
self.total_hash_size_bits: int = 0
else:
self.total_hash_size_bits: int = int(log2(float(hash_size_cumsum[-1])) + 1)
self.total_hash_size: int = hash_size_cumsum[-1]
# The last element is to easily access # of rows of each table by
# hash_size_cumsum[t + 1] - hash_size_cumsum[t]
hash_size_cumsum = [hash_size_cumsum[t] for t in self.feature_table_map] + [
hash_size_cumsum[-1]
]
self.register_buffer(
"hash_size_cumsum",
torch.tensor(
hash_size_cumsum, device=self.current_device, dtype=torch.int64
),
)
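# Illustrative example (added, not from the original source): with rows = [10, 20]
# and feature_table_map = [0, 1], hash_size_cumsum becomes [0, 10, 30], so table t
# owns the row range [hash_size_cumsum[t], hash_size_cumsum[t + 1]).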
element_size = 1
cache_size = cache_sets * ASSOC * element_size * self.max_D_cache
logging.info(
f"Using cache for SSD with admission algorithm "
f"{CacheAlgorithm.LRU}, {cache_sets} sets, stored on {'DEVICE' if ssd_cache_location is EmbeddingLocation.DEVICE else 'MANAGED'} with {ssd_shards} shards, "
f"Memtable Flush Period: {ssd_memtable_flush_period}, "
f"Memtable Flush Offset: {ssd_memtable_flush_offset}, "
f"Desired L0 files per compaction: {ssd_l0_files_per_compact}, "
f"{cache_size / 1024.0 / 1024.0 / 1024.0 : .2f}GB"
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(cache_sets, ASSOC, dtype=torch.int64).fill_(-1),
)
self.register_buffer(
"lru_state", torch.zeros(cache_sets, ASSOC, dtype=torch.int64)
)
assert ssd_cache_location in (
EmbeddingLocation.MANAGED,
EmbeddingLocation.DEVICE,
)
if ssd_cache_location == EmbeddingLocation.MANAGED:
self.register_buffer(
"lxu_cache_weights",
torch.ops.fbgemm.new_managed_tensor(
torch.zeros(1, device=self.current_device, dtype=torch.uint8),
[cache_sets * ASSOC, self.max_D_cache],
),
)
else:
self.register_buffer(
"lxu_cache_weights",
torch.zeros(
cache_sets * ASSOC,
self.max_D_cache,
device=self.current_device,
dtype=torch.uint8,
),
)
import os
os.makedirs(ssd_storage_directory, exist_ok=True)
import tempfile
ssd_directory = tempfile.mkdtemp(
prefix="ssd_table_batched_embeddings", dir=ssd_storage_directory
)
# pyre-fixme[4]: Attribute must be annotated.
self.ssd_db = torch.classes.fbgemm.EmbeddingRocksDBWrapper(
ssd_directory,
ssd_shards,
ssd_shards,
ssd_memtable_flush_period,
ssd_memtable_flush_offset,
ssd_l0_files_per_compact,
self.max_D_cache,
ssd_rate_limit_mbps,
ssd_size_ratio,
ssd_compaction_trigger,
ssd_write_buffer_size,
ssd_max_write_buffer_num,
ssd_uniform_init_lower,
ssd_uniform_init_upper,
8, # row_storage_bitwidth
)
# pyre-fixme[20]: Argument `self` expected.
(low_priority, high_priority) = torch.cuda.Stream.priority_range()
self.ssd_stream = torch.cuda.Stream(priority=low_priority)
self.ssd_set_start = torch.cuda.Event()
self.ssd_set_end = torch.cuda.Event()
# pyre-fixme[4]: Attribute must be annotated.
self.timestep_counter = torch.classes.fbgemm.AtomicCounter()
# pyre-fixme[4]: Attribute must be annotated.
self.timestep_prefetch_size = torch.classes.fbgemm.AtomicCounter()
self.weights_dev: torch.Tensor = torch.empty(
0,
device=self.current_device,
dtype=torch.uint8,
)
self.register_buffer(
"weights_uvm",
torch.tensor((0,), device=self.current_device, dtype=torch.uint8),
)
self.register_buffer(
"weights_host",
torch.empty(0),
)
self.register_buffer(
"weights_placements",
torch.tensor(
[EmbeddingLocation.MANAGED_CACHING for _ in range(T_)],
dtype=torch.int32,
),
)
weights_offsets = [0] + list(
itertools.accumulate([row * dim for (row, dim) in zip(rows, dims)])
)
self.register_buffer(
"weights_offsets",
torch.tensor(
weights_offsets[:-1],
device=self.current_device,
dtype=torch.int64,
),
)
if self.max_float8_D > 0:
default_config = SparseType.FP8.default_config()
self.fp8_exponent_bits: int = (
default_config.get("exponent_bits")
if fp8_exponent_bits is None
else fp8_exponent_bits
)
self.fp8_exponent_bias: int = (
default_config.get("exponent_bias")
if fp8_exponent_bias is None
else fp8_exponent_bias
)
else:
self.fp8_exponent_bits = -1
self.fp8_exponent_bias = -1
@torch.jit.export
def prefetch(self, indices: Tensor, offsets: Tensor) -> Tensor:
(indices, offsets) = indices.long(), offsets.long()
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.hash_size_cumsum,
indices,
offsets,
)
self.timestep_counter.increment()
self.timestep_prefetch_size.increment()
(
inserted_indices,
evicted_indices,
assigned_cache_slots,
actions_count_gpu,
) = torch.ops.fbgemm.ssd_cache_populate_actions(
linear_cache_indices,
self.total_hash_size,
self.lxu_cache_state,
self.timestep_counter.get(),
1, # for now assume prefetch_dist == 1
self.lru_state,
)
actions_count_cpu = torch.empty(
actions_count_gpu.shape, pin_memory=True, dtype=actions_count_gpu.dtype
)
actions_count_cpu.copy_(actions_count_gpu, non_blocking=True)
assigned_cache_slots = assigned_cache_slots.long()
evicted_rows = self.lxu_cache_weights[
assigned_cache_slots.clamp_(min=0).long(), :
]
inserted_rows = torch.empty(
evicted_rows.shape,
dtype=self.lxu_cache_weights.dtype,
pin_memory=True,
)
current_stream = torch.cuda.current_stream()
# Ensure that the previous iteration's ssd_db.set_cuda(..) has completed.
current_stream.wait_event(self.ssd_set_end)
inserted_indices_cpu = torch.empty(
inserted_indices.shape, pin_memory=True, dtype=inserted_indices.dtype
)
inserted_indices_cpu.copy_(inserted_indices, non_blocking=True)
self.ssd_db.get_cuda(
inserted_indices_cpu,
inserted_rows,
actions_count_cpu,
)
current_stream.record_event(self.ssd_set_start)
# TODO: T123943415 T123943414 this is a big copy that is (mostly) unnecessary with a decent cache hit rate.
# Should we allocate on HBM?
inserted_rows_gpu = inserted_rows.to(self.current_device, non_blocking=True)
# self.lxu_cache_weights[assigned_cache_slots, :] = inserted_rows.cuda(non_blocking=True)
torch.ops.fbgemm.masked_index_put(
self.lxu_cache_weights,
assigned_cache_slots,
inserted_rows_gpu,
actions_count_gpu,
)
with torch.cuda.stream(self.ssd_stream):
self.ssd_stream.wait_event(self.ssd_set_start)
evicted_rows_cpu = torch.empty(
evicted_rows.shape, pin_memory=True, dtype=evicted_rows.dtype
)
evicted_rows_cpu.copy_(evicted_rows, non_blocking=True)
evicted_indices_cpu = torch.empty(
evicted_indices.shape, pin_memory=True, dtype=evicted_indices.dtype
)
evicted_indices_cpu.copy_(evicted_indices, non_blocking=True)
# pyre-fixme[6]: For 1st param expected `Stream` but got `Stream`.
evicted_rows.record_stream(self.ssd_stream)
evicted_indices.record_stream(self.ssd_stream)
self.ssd_db.set_cuda(
evicted_indices_cpu,
evicted_rows_cpu,
actions_count_cpu,
self.timestep_counter.get(),
)
# TODO: is this needed?
# Need a way to synchronize
# actions_count_cpu.record_stream(self.ssd_stream)
self.ssd_stream.record_event(self.ssd_set_end)
return linear_cache_indices
def forward(
self,
indices: Tensor,
offsets: Tensor,
per_sample_weights: Optional[Tensor] = None,
) -> Tensor:
if self.timestep_prefetch_size.get() <= 0:
with record_function("## prefetch ##"):
linear_cache_indices = self.prefetch(indices, offsets)
else:
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.hash_size_cumsum,
indices,
offsets,
)
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.hash_size_cumsum[-1].item(),
)
self.timestep_prefetch_size.decrement()
assert (
self.weight_initialized
), "weight needs to be initialized before forward function"
# Note: CPU and CUDA ops use the same interface to facilitate JIT IR
# generation for CUDA/CPU. For CPU op, we don't need weights_uvm and
# weights_placements
return torch.ops.fbgemm.int_nbit_split_embedding_codegen_lookup_function(
dev_weights=self.weights_dev,
uvm_weights=self.weights_uvm,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
weights_tys=self.weights_tys,
D_offsets=self.D_offsets,
total_D=self.total_D,
max_int2_D=self.max_int2_D,
max_int4_D=self.max_int4_D,
max_int8_D=self.max_int8_D,
max_float16_D=self.max_float16_D,
max_float32_D=self.max_float32_D,
indices=indices,
offsets=offsets,
pooling_mode=int(self.pooling_mode),
indice_weights=per_sample_weights,
output_dtype=self.output_dtype,
lxu_cache_weights=self.lxu_cache_weights,
lxu_cache_locations=lxu_cache_locations,
row_alignment=self.row_alignment,
max_float8_D=self.max_float8_D,
fp8_exponent_bits=self.fp8_exponent_bits,
fp8_exponent_bias=self.fp8_exponent_bias,
)
@torch.jit.export
def split_embedding_weights(
self, split_scale_shifts: bool = True
) -> List[Tuple[Tensor, Optional[Tensor]]]:
"""
Returns a list of weights, split by table.
Testing only, very slow.
"""
splits: List[Tuple[Tensor, Optional[Tensor]]] = []
rows_cumsum = 0
for _, row, dim, weight_ty in self.embedding_specs:
weights = torch.empty(
(
row,
rounded_row_size_in_bytes(
dim,
weight_ty,
self.row_alignment,
self.scale_bias_size_in_bytes,
),
),
dtype=torch.uint8,
)
self.ssd_db.get_cuda(
torch.arange(rows_cumsum, rows_cumsum + row).to(torch.int64),
weights,
torch.as_tensor([row]),
)
rows_cumsum += row
torch.cuda.synchronize(self.current_device)
weights_shifts = weights.detach()
if split_scale_shifts:
# remove the padding at the end of each row.
weights_shifts = weights_shifts[
:,
: unpadded_row_size_in_bytes(
dim, weight_ty, self.scale_bias_size_in_bytes
),
]
if (
weight_ty == SparseType.INT8
or weight_ty == SparseType.INT4
or weight_ty == SparseType.INT2
):
splits.append(
(
weights_shifts[:, self.scale_bias_size_in_bytes :],
weights_shifts[:, : self.scale_bias_size_in_bytes],
)
)
else:
assert (
weight_ty == SparseType.FP8
or weight_ty == SparseType.FP16
or weight_ty == SparseType.FP32
)
splits.append(
(
weights_shifts,
None,
)
)
else:
splits.append((weights_shifts, None))
torch.cuda.synchronize(self.current_device)
return splits
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
# flake8: noqa F401
import torch # usort:skip
import warnings
# This module is a compatibility wrapper that re-exports the symbols from:
# fbgemm_gpu.split_table_batched_embeddings_ops_common
# fbgemm_gpu.split_table_batched_embeddings_ops_inference
# fbgemm_gpu.split_table_batched_embeddings_ops_training
from fbgemm_gpu.split_embedding_configs import EmbOptimType as OptimType, SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
CacheState,
DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
EmbeddingLocation,
PoolingMode,
RecordCacheMetrics,
round_up,
SplitState,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
align_to_cacheline,
IntNBitTableBatchedEmbeddingBagsCodegen,
rounded_row_size_in_bytes,
unpadded_row_size_in_bytes,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
CounterBasedRegularizationDefinition,
CounterWeightDecayMode,
DEFAULT_ASSOC,
DenseTableBatchedEmbeddingBagsCodegen,
GradSumDecay,
INT8_EMB_ROW_DIM_OFFSET,
LearningRateMode,
SplitTableBatchedEmbeddingBagsCodegen,
TailIdThreshold,
WeightDecayMode,
)
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu")
except Exception:
pass
warnings.warn(
f"""\033[93m
The Python module {__name__} is now DEPRECATED and will be removed in the
future. Users should instead declare dependencies on
//deeplearning/fbgemm/fbgemm_gpu/split_table_batched_embeddings_ops_{{training, inference}}
in their TARGETS file and import the
fbgemm_gpu.split_table_batched_embeddings_ops_{{training, inference}}
modules as needed in their scripts.
\033[0m""",
DeprecationWarning,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import struct
import subprocess
import unittest
from functools import wraps
from typing import Any, Callable, List, Tuple
import hypothesis.strategies as st
import numpy as np
import torch
TEST_WITH_ROCM: bool = os.getenv("FBGEMM_TEST_WITH_ROCM", "0") == "1"
# Eigen and Python 2 round 0.5 away from zero; NumPy and Python 3's round() round half to even
round_to_nearest: Callable[[np.ndarray], np.ndarray] = np.vectorize(round)
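# Illustrative check (added example, not from the original file): under Python 3,
# round() rounds half to even, so
#     round_to_nearest(np.array([0.5, 1.5, 2.5]))
# is expected to yield array([0., 2., 2.]).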
def bytes_to_floats(byte_matrix: np.ndarray) -> np.ndarray:
floats = np.empty([np.shape(byte_matrix)[0], 1], dtype=np.float32)
for i, byte_values in enumerate(byte_matrix):
(floats[i],) = struct.unpack("f", bytearray(byte_values))
return floats
def floats_to_bytes(floats: np.ndarray) -> np.ndarray:
byte_matrix = np.empty([np.shape(floats)[0], 4], dtype=np.uint8)
for i, value in enumerate(floats):
assert isinstance(value, np.float32), (value, floats)
as_bytes = struct.pack("f", value)
# In Python 3, bytes behaves like a sequence of ints; in Python 2, like a sequence of single-character strings
if isinstance(as_bytes[0], int):
byte_matrix[i] = list(as_bytes)
else:
byte_matrix[i] = list(map(ord, as_bytes))
return byte_matrix
def bytes_to_half_floats(byte_matrix: np.ndarray) -> np.ndarray:
floats = np.empty([np.shape(byte_matrix)[0], 1], dtype=np.float16)
for i, byte_values in enumerate(byte_matrix):
(floats[i],) = np.frombuffer(
memoryview(byte_values).tobytes(), dtype=np.float16
)
return floats
def half_floats_to_bytes(floats: np.ndarray) -> np.ndarray:
byte_matrix = np.empty([np.shape(floats)[0], 2], dtype=np.uint8)
for i, value in enumerate(floats):
assert isinstance(value, np.float16), (value, floats)
byte_matrix[i] = np.frombuffer(
memoryview(value.tobytes()).tobytes(), dtype=np.uint8
)
return byte_matrix
def fused_rowwise_8bit_quantize_reference(data: np.ndarray) -> np.ndarray:
minimum = np.min(data, axis=-1, keepdims=True)
maximum = np.max(data, axis=-1, keepdims=True)
span = maximum - minimum
bias = minimum
scale = span / 255.0
inverse_scale = 255.0 / (span + 1e-8)
quantized_data = round_to_nearest((data - bias) * inverse_scale)
scale_bytes = floats_to_bytes(scale.reshape(-1))
scale_bytes = scale_bytes.reshape(data.shape[:-1] + (scale_bytes.shape[-1],))
bias_bytes = floats_to_bytes(bias.reshape(-1))
bias_bytes = bias_bytes.reshape(data.shape[:-1] + (bias_bytes.shape[-1],))
return np.concatenate([quantized_data, scale_bytes, bias_bytes], axis=-1)
def fused_rowwise_8bit_dequantize_reference(fused_quantized: np.ndarray) -> np.ndarray:
scale = bytes_to_floats(fused_quantized[..., -8:-4].astype(np.uint8).reshape(-1, 4))
scale = scale.reshape(fused_quantized.shape[:-1] + (scale.shape[-1],))
bias = bytes_to_floats(fused_quantized[..., -4:].astype(np.uint8).reshape(-1, 4))
bias = bias.reshape(fused_quantized.shape[:-1] + (bias.shape[-1],))
quantized_data = fused_quantized[..., :-8]
return quantized_data * scale + bias
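# Illustrative sketch (added example, not part of the original utilities): each
# fused 8-bit row is laid out as [ncols quantized bytes | 4 scale bytes | 4 bias
# bytes], so an (nrows, ncols) float32 input becomes an (nrows, ncols + 8) array.
# The helper name below is hypothetical and only demonstrates the round trip.
def _fused_rowwise_8bit_roundtrip_example() -> None:
    # Deterministic float32 input; the reference helpers assume float32 rows.
    data = np.array(
        [[0.0, 0.25, 0.5, 1.0], [0.1, 0.2, 0.3, 0.4]], dtype=np.float32
    )
    fused = fused_rowwise_8bit_quantize_reference(data)
    assert fused.shape == (2, 4 + 8)
    recovered = fused_rowwise_8bit_dequantize_reference(fused)
    # Row-wise 8-bit quantization is lossy; values should only match coarsely.
    assert np.allclose(recovered, data, atol=1e-2)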
def fused_rowwise_8bit_dequantize_reference_half(
fused_quantized: np.ndarray,
) -> np.ndarray:
scale = bytes_to_half_floats(
fused_quantized[..., -8:-4].astype(np.uint8).reshape(-1, 4)
)
scale = scale.reshape(fused_quantized.shape[:-1] + (scale.shape[-1],))
bias = bytes_to_half_floats(
fused_quantized[..., -4:].astype(np.uint8).reshape(-1, 4)
)
bias = bias.reshape(fused_quantized.shape[:-1] + (bias.shape[-1],))
quantized_data = fused_quantized[..., :-8]
return quantized_data * scale + bias
def fused_rowwise_nbit_quantize_reference(data: np.ndarray, bit: int) -> np.ndarray:
minimum = np.min(data, axis=1).astype(np.float16).astype(np.float32)
maximum = np.max(data, axis=1)
span = maximum - minimum
qmax = (1 << bit) - 1
scale = (span / qmax).astype(np.float16).astype(np.float32)
bias = np.zeros(data.shape[0])
quantized_data = np.zeros(data.shape).astype(np.uint8)
for i in range(data.shape[0]):
bias[i] = minimum[i]
inverse_scale = 1.0 if scale[i] == 0.0 else 1 / scale[i]
if scale[i] == 0.0 or math.isinf(inverse_scale):
scale[i] = 1.0
inverse_scale = 1.0
quantized_data[i] = np.clip(
np.round((data[i, :] - minimum[i]) * inverse_scale), 0, qmax
)
# pack
assert 8 % bit == 0
num_elem_per_byte = 8 // bit
packed_dim = (data.shape[1] + num_elem_per_byte - 1) // num_elem_per_byte
packed_data = np.zeros([data.shape[0], packed_dim]).astype(np.uint8)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
if j % num_elem_per_byte == 0:
packed_data[i, j // num_elem_per_byte] = quantized_data[i, j]
else:
packed_data[i, j // num_elem_per_byte] += quantized_data[i, j] << (
(j % num_elem_per_byte) * bit
)
scale_bytes = half_floats_to_bytes(scale.astype(np.float16))
bias_bytes = half_floats_to_bytes(bias.astype(np.float16))
return np.concatenate([packed_data, scale_bytes, bias_bytes], axis=1)
def fused_rowwise_nbit_quantize_dequantize_reference(
data: np.ndarray, bit: int
) -> np.ndarray:
fused_quantized = fused_rowwise_nbit_quantize_reference(data, bit)
scale = bytes_to_half_floats(fused_quantized[:, -4:-2].astype(np.uint8)).astype(
np.float32
)
bias = bytes_to_half_floats(fused_quantized[:, -2:].astype(np.uint8)).astype(
np.float32
)
quantized_data = fused_quantized[:, :-4]
# unpack
packed_dim = fused_quantized.shape[1] - 4
assert 8 % bit == 0
num_elem_per_byte = 8 // bit
assert packed_dim == ((data.shape[1] + num_elem_per_byte - 1) // num_elem_per_byte)
unpacked_data = np.zeros(data.shape).astype(np.uint8)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
unpacked_data[i, j] = (
quantized_data[i, j // num_elem_per_byte]
>> ((j % num_elem_per_byte) * bit)
) & ((1 << bit) - 1)
return scale * unpacked_data + bias
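# Illustrative sketch (added example, not part of the original utilities): with
# bit=4, two quantized values are packed into each byte, so every row of an
# (nrows, ncols) input becomes ceil(ncols / 2) packed bytes followed by a
# 2-byte half-float scale and a 2-byte half-float bias. The helper name below
# is hypothetical.
def _fused_rowwise_nbit_packing_example() -> None:
    data = np.arange(24, dtype=np.float32).reshape(3, 8)
    fused = fused_rowwise_nbit_quantize_reference(data, bit=4)
    assert fused.shape == (3, 8 // 2 + 2 + 2)
    # Round-tripping through the dequantize reference should stay close to the
    # original values, up to 4-bit quantization error.
    recovered = fused_rowwise_nbit_quantize_dequantize_reference(data, bit=4)
    assert np.allclose(recovered, data, atol=0.5)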
# Used for `@unittest.skipIf`
gpu_unavailable: Tuple[bool, str] = (
not torch.cuda.is_available() or torch.cuda.device_count() == 0,
"CUDA is not available or no GPUs detected",
)
# Used for `if` statements inside tests
gpu_available: bool = not gpu_unavailable[0]
# Used for `@unittest.skipIf` for tests that pass in internal CI, but fail on the GitHub runners
running_on_github: Tuple[bool, str] = (
os.getenv("GITHUB_ENV") is not None,
"Test is currently known to fail or hang when run in the GitHub runners",
)
# Used for `@unittest.skipIf` for tests that currently fail on ARM platform
on_arm_platform: Tuple[bool, str] = (
subprocess.run(["uname", "-m"], stdout=subprocess.PIPE)
.stdout.decode("utf-8")
.strip()
== "aarch64",
"Test is currently known to fail when running on ARM platform",
)
def cpu_and_maybe_gpu() -> st.SearchStrategy[List[torch.device]]:
gpu_available = torch.cuda.is_available() and torch.cuda.device_count() > 0
# st.sampled_from is not guaranteed to test every value passed to it.
# By default, Hypothesis generates 100 test cases from the given strategies,
# so if st.sampled_from contains more than 100 items, or is combined with
# other strategies, it may not cover all values; for smaller tests it usually
# works fine. This is a stopgap until we find a way to parameterize UnitTestCase.
return st.sampled_from(
[torch.device("cpu")] + ([torch.device("cuda")] if gpu_available else [])
)
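# Illustrative usage sketch (added example, not from the original file; the
# test name is hypothetical):
#
#     @given(device=cpu_and_maybe_gpu())
#     @settings(deadline=None)
#     def test_something(self, device: torch.device) -> None:
#         x = torch.zeros(8, device=device)
#
# Hypothesis draws `device` from the CPU device and, when a GPU is available,
# also from the CUDA device.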
def cpu_only() -> st.SearchStrategy[List[torch.device]]:
return st.sampled_from([torch.device("cpu")])
# pyre-fixme[3]: Return annotation cannot be `Any`.
def skipIfRocm(reason: str = "Test currently doesn't work on the ROCm stack") -> Any:
# pyre-fixme[3]: Return annotation cannot be `Any`.
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
def skipIfRocmDecorator(fn: Callable) -> Any:
@wraps(fn)
# pyre-fixme[3]: Return annotation cannot be `Any`.
def wrapper(*args: Any, **kwargs: Any) -> Any:
if TEST_WITH_ROCM:
raise unittest.SkipTest(reason)
else:
fn(*args, **kwargs)
return wrapper
return skipIfRocmDecorator
def symint_vector_unsupported() -> Tuple[bool, str]:
major, minor = torch.__version__.split(".")[0:2]
return (
int(major) < 2 or (int(major) == 2 and int(minor) < 1),
"""
dynamic shape support for this op needs to be on PyTorch 2.1 or
newer with https://github.com/pytorch/pytorch/pull/101056
""",
)
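# Illustrative usage sketch (added example, not from the original file): the
# (condition, reason) tuple is meant to be unpacked into unittest.skipIf, e.g.
#
#     @unittest.skipIf(*symint_vector_unsupported())
#     def test_dynamic_shape_op(self) -> None:  # hypothetical test name
#         ...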
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
import unittest
from itertools import accumulate
from typing import List, Tuple
import hypothesis.strategies as st
import torch
from hypothesis import given, HealthCheck, settings
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
from test_utils import gpu_unavailable # pyre-ignore[21]
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
from fbgemm_gpu.test.test_utils import gpu_unavailable
def gen_inputs(
hash_sizes: List[int],
batch_size: int,
max_len: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
The length of each bag is drawn uniformly from [0, max_len].
"""
T = len(hash_sizes)
offsets = [0]
indices_per_table = []
for t in range(T):
len_sum = 0
for _ in range(batch_size):
length = random.randint(0, max_len)
len_sum += length
offsets.append(offsets[-1] + length)
n_rows = hash_sizes[t]
indices_per_table.append(torch.randint(n_rows, [len_sum], dtype=torch.int64))
indices = torch.cat(indices_per_table, dim=0)
offsets = torch.tensor(offsets, dtype=torch.int64)
return indices, offsets
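# Illustrative sketch (added example, not from the original file): with
# hash_sizes=[10, 20], batch_size=4 and max_len=3, gen_inputs returns an
# `offsets` tensor with len(hash_sizes) * batch_size + 1 = 9 entries and an
# `indices` tensor with offsets[-1] entries, laid out table-major (all bags of
# table 0 first, then all bags of table 1).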
def transpose_embedding_input_ref(
hash_size_cumsum: torch.Tensor,
indices: torch.Tensor,
offsets: torch.Tensor,
info_B_num_bits: int,
) -> Tuple[
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
]:
"""
reference implementation of torch.ops.fbgemm.transpose_embedding_input
"""
T = hash_size_cumsum.numel() - 1
B = (offsets.numel() - 1) // T
linear_indices = torch.zeros_like(indices)
infos = torch.zeros_like(indices)
for b_t in range(B * T):
t = b_t // B
b = b_t % B
start = int(offsets[b_t].item())
end = int(offsets[b_t + 1].item())
for i in range(start, end):
linear_indices[i] = indices[i] + hash_size_cumsum[t]
infos[i] = (t << info_B_num_bits) | b
linear_indices_sorted, sorted_idx = torch.sort(linear_indices, stable=True)
infos_sorted = infos[sorted_idx]
(
sorted_linear_indices_run,
sorted_linear_indices_run_lengths,
) = torch.unique_consecutive(linear_indices_sorted, return_counts=True)
sorted_linear_indices_num_runs = torch.tensor(
sorted_linear_indices_run.numel(), dtype=torch.int64
)
sorted_linear_indices_cumulative_run_lengths = torch.tensor(
[0] + list(accumulate(sorted_linear_indices_run_lengths.tolist())),
dtype=torch.int64,
)
return (
linear_indices,
linear_indices_sorted,
infos_sorted,
sorted_linear_indices_run,
sorted_linear_indices_run_lengths,
sorted_linear_indices_num_runs,
sorted_linear_indices_cumulative_run_lengths,
)
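# Illustrative worked example (added, not from the original file): with
# hash_size_cumsum=[0, 10, 30] (T=2 tables), B=1 bag per table and
# info_B_num_bits=1, an index 3 that belongs to table 1 linearizes to
# 10 + 3 = 13 and its packed info word is (t << info_B_num_bits) | b =
# (1 << 1) | 0 = 2. Sorting the linearized indices then groups duplicate rows
# into consecutive runs, which is what the run/length outputs describe.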
class SplitEmbeddingsUtilsTest(unittest.TestCase):
@unittest.skipIf(*gpu_unavailable)
# pyre-ignore [56]: Invalid decoration, was not able to infer the type of argument
@given(
B=st.integers(min_value=10, max_value=25),
T=st.integers(min_value=5, max_value=20),
E=st.integers(min_value=10, max_value=50),
)
@settings(deadline=30000, suppress_health_check=[HealthCheck.filter_too_much])
def test_transpose(self, B: int, T: int, E: int) -> None:
hash_sizes = [random.randint(E, 2 * E) for _ in range(T)]
batch_size = B
max_len = 3 * E
total_hash_size_bits: int = int(math.log2(sum(hash_sizes)) + 1)
hash_size_cumsum = torch.tensor(
[0] + list(accumulate(hash_sizes)), dtype=torch.int64
)
indices, offsets = gen_inputs(hash_sizes, batch_size, max_len)
hash_size_cumsum_cuda = hash_size_cumsum.cuda()
info_B_num_bits, _ = torch.ops.fbgemm.get_infos_metadata(
hash_size_cumsum_cuda, B, T
)
(
linear_indices,
linear_indices_sorted,
infos_sorted,
sorted_linear_indices_run,
sorted_linear_indices_run_lengths,
sorted_linear_indices_num_runs,
sorted_linear_indices_cumulative_run_lengths,
) = torch.ops.fbgemm.transpose_embedding_input(
hash_size_cumsum_cuda,
total_hash_size_bits,
indices.cuda(),
offsets.cuda(),
info_B_num_bits=info_B_num_bits,
)
(
linear_indices_ref,
linear_indices_sorted_ref,
infos_sorted_ref,
sorted_linear_indices_run_ref,
sorted_linear_indices_run_lengths_ref,
sorted_linear_indices_num_runs_ref,
sorted_linear_indices_cumulative_run_lengths_ref,
) = transpose_embedding_input_ref(
hash_size_cumsum, indices, offsets, info_B_num_bits
)
self.assertTrue(torch.equal(linear_indices.cpu(), linear_indices_ref))
self.assertTrue(
torch.equal(linear_indices_sorted.cpu(), linear_indices_sorted_ref)
)
self.assertTrue(torch.equal(infos_sorted.cpu(), infos_sorted_ref))
# The fbgemm implementation pads its outputs, so slice before comparing
num = sorted_linear_indices_run_ref.numel()
self.assertTrue(
torch.equal(
sorted_linear_indices_run.cpu()[:num], sorted_linear_indices_run_ref
)
)
self.assertTrue(
torch.equal(
sorted_linear_indices_run_lengths.cpu()[:num],
sorted_linear_indices_run_lengths_ref,
)
)
self.assertEqual(
sorted_linear_indices_num_runs.item(),
sorted_linear_indices_num_runs_ref.item(),
)
self.assertTrue(
torch.equal(
sorted_linear_indices_cumulative_run_lengths.cpu()[: num + 1],
sorted_linear_indices_cumulative_run_lengths_ref,
)
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import random
import unittest
from typing import List
import fbgemm_gpu
import hypothesis.strategies as st
import torch
from hypothesis import given, settings, Verbosity
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from test_utils import gpu_available, gpu_unavailable, skipIfRocm
else:
from fbgemm_gpu.test.test_utils import gpu_available, gpu_unavailable, skipIfRocm
if gpu_available:
# pyre-ignore[21]
from fbgemm_gpu.uvm import cudaMemAdvise, cudaMemoryAdvise, cudaMemPrefetchAsync
MAX_EXAMPLES = 40
class UvmTest(unittest.TestCase):
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(st.integers(min_value=1, max_value=8), min_size=1, max_size=4),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_host_mapped_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_is_uvm_tensor(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = random.choice([True, False])
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(st.integers(min_value=1, max_value=8), min_size=1, max_size=4),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_uvm_to_cpu(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
cpu_t = torch.ops.fbgemm.uvm_to_cpu(uvm_t)
assert not torch.ops.fbgemm.is_uvm_tensor(cpu_t)
assert torch.ops.fbgemm.uvm_storage(cpu_t)
uvm_t.copy_(cpu_t)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
# Test use of cpu tensor after freeing the uvm tensor
del uvm_t
cpu_t.mul_(42)
@unittest.skipIf(*gpu_unavailable)
def test_enum(self) -> None:
# pyre-ignore[16]
assert cudaMemoryAdvise.cudaMemAdviseSetAccessedBy.value == 5
@skipIfRocm()
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(1024)), min_size=1, max_size=4
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_cudaMemAdvise(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
# pyre-ignore[16]
cudaMemAdvise(uvm_t, cudaMemoryAdvise.cudaMemAdviseSetAccessedBy)
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(1024)), min_size=1, max_size=3
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_cudaMemPrefetchAsync(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
cudaMemPrefetchAsync(uvm_t)
torch.cuda.synchronize(torch.device("cuda:0"))
@skipIfRocm()
@unittest.skipIf(
not torch.cuda.is_available() or torch.cuda.device_count() < 2,
"Skip unless two CUDA devices are detected",
)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(1024)), min_size=1, max_size=4
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_uvm_to_device(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
# Reference uvm tensor from second cuda device
try:
device_prototype = torch.empty(0, device="cuda:1")
except RuntimeError:
# Skip the test if there is no "cuda:1" device
return
second_t = torch.ops.fbgemm.uvm_to_device(uvm_t, device_prototype)
assert torch.ops.fbgemm.is_uvm_tensor(second_t)
assert torch.ops.fbgemm.uvm_storage(second_t)
assert second_t.device == device_prototype.device
@skipIfRocm()
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(1024)), min_size=1, max_size=4
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_uvm_slice(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
for i in range(sizes[0]):
uvm_slice = uvm_t[i]
cpu_slice = torch.ops.fbgemm.uvm_to_cpu(uvm_slice)
assert uvm_slice.storage_offset() == cpu_slice.storage_offset()
assert uvm_slice.storage().data_ptr() == uvm_t.storage().data_ptr()
assert cpu_slice.storage().data_ptr() == uvm_t.storage().data_ptr()
assert torch.ops.fbgemm.is_uvm_tensor(uvm_slice)
assert torch.ops.fbgemm.uvm_storage(cpu_slice)
@skipIfRocm()
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(1024)), min_size=1, max_size=4
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_uvm_memadviceDontFork(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
cpu_t = torch.ops.fbgemm.uvm_to_cpu(uvm_t)
torch.ops.fbgemm.uvm_mem_advice_dont_fork(cpu_t)
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(512)), min_size=1, max_size=3
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_uvm_to_cpu_clone(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
cpu_clone = torch.ops.fbgemm.uvm_to_cpu_clone(uvm_t)
assert not torch.ops.fbgemm.is_uvm_tensor(cpu_clone)
assert not torch.ops.fbgemm.uvm_storage(cpu_clone)
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(512)), min_size=1, max_size=3
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_new_managed_tensor_meta(self, sizes: List[int]) -> None:
cpu_tensor = torch.empty(sizes).to("meta")
cpu_tensor_meta = torch.ops.fbgemm.new_managed_tensor(cpu_tensor, sizes)
assert cpu_tensor.shape == cpu_tensor_meta.shape
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import logging
import math
import random
import unittest
from typing import Optional, Tuple
import fbgemm_gpu
import hypothesis.strategies as st
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import (
EmbOptimType as OptimType,
FP8QuantizationConfig,
QuantizationConfig,
SparseType,
)
from fbgemm_gpu.split_embedding_inference_converter import SplitEmbInferenceConverter
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
EmbeddingLocation,
PoolingMode,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
SplitTableBatchedEmbeddingBagsCodegen,
)
from hypothesis import given, settings, Verbosity
from torch import nn
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from test_utils import gpu_available, on_arm_platform
else:
from fbgemm_gpu.test.test_utils import gpu_available, on_arm_platform
EMB_WEIGHT_UNIFORM_INIT_BOUND = 0.000316
MAX_EXAMPLES = 40
def div_round_up(a: int, b: int) -> int:
return int((a + b - 1) // b) * b
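# For example, div_round_up(10, 8) == 16: `a` is rounded up to the nearest multiple of `b`.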
def to_device(t: torch.Tensor, use_cpu: bool) -> torch.Tensor:
return t.cpu() if use_cpu else t.cuda()
def get_table_batched_offsets_from_dense(
merged_indices: torch.Tensor, use_cpu: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
(T, B, L) = merged_indices.size()
lengths = np.ones((T, B)) * L
flat_lengths = lengths.flatten()
return (
to_device(merged_indices.contiguous().view(-1), use_cpu),
to_device(
torch.tensor(([0] + np.cumsum(flat_lengths).tolist())).long(),
use_cpu,
),
)
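# Illustrative sketch (added example, not from the original file): for
# merged_indices of shape (T=2, B=3, L=4), every bag has length L, so the
# returned indices tensor has T * B * L = 24 elements and offsets is
# [0, 4, 8, 12, 16, 20, 24] with T * B + 1 = 7 entries.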
class SparseArch(nn.Module):
"""
The testing module with split table batched embedding op
"""
def __init__(
self,
emb_dim,
num_tables,
num_rows,
use_cpu,
) -> None:
super().__init__()
pooling_mode = PoolingMode.SUM
Ds = [emb_dim] * num_tables
Es = [num_rows] * num_tables
device = ComputeDevice.CPU if use_cpu else ComputeDevice.CUDA
loc = EmbeddingLocation.HOST if use_cpu else EmbeddingLocation.DEVICE
self.emb_module = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E,
D,
loc,
device,
)
for (E, D) in zip(Es, Ds)
],
weights_precision=SparseType.FP32,
optimizer=OptimType.EXACT_SGD,
learning_rate=0.05,
pooling_mode=pooling_mode,
)
self.emb_module.init_embedding_weights_uniform(
-EMB_WEIGHT_UNIFORM_INIT_BOUND, +EMB_WEIGHT_UNIFORM_INIT_BOUND
)
def forward(self, indices, offsets):
return self.emb_module(indices, offsets)
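# Illustrative usage sketch (added example, not part of the original tests;
# variable names are hypothetical):
#
#     arch = SparseArch(emb_dim=8, num_tables=2, num_rows=100, use_cpu=True)
#     indices, offsets = get_table_batched_offsets_from_dense(
#         torch.randint(0, 100, (2, 4, 3)), use_cpu=True
#     )
#     out = arch(indices, offsets)  # (B, num_tables * emb_dim) == (4, 16)
#
# With the sum pooling used here, the output concatenates one pooled embedding
# per table along the last dimension.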
class QuantizedSplitEmbeddingsTest(unittest.TestCase):
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
),
quantize_type=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.FP8,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
use_cpu=st.booleans() if gpu_available else st.just(True),
pruning_ratio=st.sampled_from([None, 0.0]),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_quantize_workflow(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
pooling_mode: PoolingMode,
quantize_type: SparseType,
pruning_ratio: Optional[float],
use_cpu: bool,
) -> None:
E = int(10**log_E)
Es = [E] * T
D_alignment = 16 if quantize_type == SparseType.INT2 else 8
D = div_round_up(D, D_alignment)
xs = [torch.randint(low=0, high=e, size=(B, L)) for e in Es]
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
# indices: T, B, L; offsets: T * B + 1
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=use_cpu)
sparse_arch = SparseArch(emb_dim=D, num_tables=T, num_rows=E, use_cpu=use_cpu)
quantization_config = QuantizationConfig()
# Fake-quantize so that the original FP32 weights are exactly representable
# by INT8 row-wise quantized values
if quantize_type == SparseType.INT8:
for t in range(T):
sparse_arch.emb_module.split_embedding_weights()[t].data.copy_(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
sparse_arch.emb_module.split_embedding_weights()[t].data
)
)
)
elif quantize_type == SparseType.INT4 or quantize_type == SparseType.INT2:
for t in range(T):
sparse_arch.emb_module.split_embedding_weights()[t].data.copy_(
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
sparse_arch.emb_module.split_embedding_weights()[t].data,
bit_rate=quantize_type.bit_rate(),
),
bit_rate=quantize_type.bit_rate(),
)
)
elif quantize_type == SparseType.FP8:
quantization_config = FP8QuantizationConfig(random.choice([4, 5]), 7)
for t in range(T):
sparse_arch.emb_module.split_embedding_weights()[t].data.copy_(
torch.ops.fbgemm.HFP8QuantizedToFloat(
torch.ops.fbgemm.FloatToHFP8Quantized(
sparse_arch.emb_module.split_embedding_weights()[t].data,
quantization_config.get("exponent_bits"),
quantization_config.get("exponent_bias"),
quantization_config.get("max_position"),
),
quantization_config.get("exponent_bits"),
quantization_config.get("exponent_bias"),
)
)
emb_out = sparse_arch(indices, offsets) # B, T, D
# Apply the quantization transformations on the model!
split_emb_infer_converter = SplitEmbInferenceConverter(
quantize_type=quantize_type,
pruning_ratio=pruning_ratio,
quantization_config=quantization_config,
)
split_emb_infer_converter.convert_model(sparse_arch)
assert type(sparse_arch.emb_module) is IntNBitTableBatchedEmbeddingBagsCodegen
assert sparse_arch.emb_module.use_cpu == use_cpu
quantized_emb_out = sparse_arch(indices.int(), offsets.int()) # B, T, D
# Compare the FP32 emb module vs. the quantize_type (FP16, FP8, INT8, INT4, INT2) emb module
torch.testing.assert_close(
emb_out.float().cpu(),
quantized_emb_out.float().cpu(),
atol=1.0e-1,
rtol=1.0e-1,
)
@unittest.skipIf(*on_arm_platform)
@given(
use_cpu=st.booleans() if gpu_available else st.just(True),
use_array_for_index_remapping=st.booleans(),
quantize_type=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_l2_norm_pruning_workflow(
self,
use_cpu: bool,
use_array_for_index_remapping: bool,
quantize_type: SparseType,
) -> None:
D = 128
T = 2
E = 5
indices = torch.Tensor([3, 0, 2, 2, 3, 4, 2]).int()
offsets = torch.Tensor([0, 1, 4, 6, 7]).int()
weights = [
(torch.Tensor([0.4, 0.1, -0.2, 0.2, 0.3]).float().view(E, 1))
* (torch.Tensor([1.0] * E * D).view(E, D)),
(torch.Tensor([-0.8, 0.2, 0.5, -0.1, 0.9]).float().view(E, 1))
* (torch.Tensor([1.0] * E * D).view(E, D)),
]
# Inputs for 3 test cases. Each row is used in one test case.
pruning_ratios = [0.9, 0.5, 0.1]
remapped_indices = [
torch.Tensor([0, 4]).int(),
torch.Tensor([3, 0, 2, 2, 4, 2]).int(),
indices,
]
remapped_offsets = [
torch.Tensor([0, 0, 1, 2, 2]).int(),
torch.Tensor([0, 1, 4, 5, 6]).int(),
offsets,
]
# Start to test.
logging.info("use cpu = {}".format(use_cpu))
for pruning_ratio, remapped_index, remapped_offset in zip(
pruning_ratios, remapped_indices, remapped_offsets
):
logging.info("pruning ratio = {}.".format(pruning_ratio))
sparse_arch = SparseArch(
emb_dim=D, num_tables=T, num_rows=E, use_cpu=use_cpu
)
for idx in range(T):
sparse_arch.emb_module.split_embedding_weights()[idx].copy_(
weights[idx]
)
emb_out = sparse_arch(
to_device(remapped_index, use_cpu), to_device(remapped_offset, use_cpu)
) # B, T, D
# Apply pruning / quantization transformations on the model!
split_emb_infer_converter = SplitEmbInferenceConverter(
quantize_type=quantize_type,
pruning_ratio=pruning_ratio,
use_array_for_index_remapping=use_array_for_index_remapping,
)
split_emb_infer_converter.convert_model(sparse_arch)
assert (
type(sparse_arch.emb_module) is IntNBitTableBatchedEmbeddingBagsCodegen
)
assert sparse_arch.emb_module.use_cpu == use_cpu
pruned_emb_out = sparse_arch(
to_device(indices, use_cpu), to_device(offsets, use_cpu)
) # B, T, D
# Compare the FP32 emb module fed remapped indices vs. the converted emb module with pruning
torch.testing.assert_close(
emb_out.float().cpu(),
pruned_emb_out.float().cpu(),
atol=1.0e-1,
rtol=1.0e-1,
)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
pruning_ratio=st.floats(min_value=0.0, max_value=1.0, exclude_max=True),
use_cpu=st.booleans() if gpu_available else st.just(True),
use_array_for_index_remapping=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_pruning_workflow_large_scale(
self,
T: int,
D: int,
log_E: int,
pruning_ratio: Optional[float],
use_cpu: bool,
use_array_for_index_remapping: bool,
) -> None:
E = int(10**log_E)
D_alignment = 8
D = div_round_up(D, D_alignment)
sparse_arch = SparseArch(emb_dim=D, num_tables=T, num_rows=E, use_cpu=use_cpu)
# Make sure that each row has a unique L2 norm.
embedding_weights_before = sparse_arch.emb_module.split_embedding_weights()
for weights in embedding_weights_before:
for i in range(weights.size()[0]):
weights[i].uniform_(i * 0.01, (i + 1) * 0.01)
# Collect #rows before pruning.
num_rows_before = [weight.size()[0] for weight in embedding_weights_before]
# Apply pruning / quantization transformations on the model!
split_emb_infer_converter = SplitEmbInferenceConverter(
quantize_type=SparseType.FP16,
pruning_ratio=pruning_ratio,
use_array_for_index_remapping=use_array_for_index_remapping,
)
split_emb_infer_converter.convert_model(sparse_arch)
embedding_weights_after = sparse_arch.emb_module.split_embedding_weights()
assert type(sparse_arch.emb_module) is IntNBitTableBatchedEmbeddingBagsCodegen
assert sparse_arch.emb_module.use_cpu == use_cpu
# Collect #rows after pruning.
embedding_weights_after = sparse_arch.emb_module.split_embedding_weights()
num_rows_after = [weight[0].size()[0] for weight in embedding_weights_after]
# Check #rows after pruning aligns with the specified pruning ratio.
self.assertEqual(len(num_rows_before), len(num_rows_after))
for before, after in zip(num_rows_before, num_rows_after):
self.assertEqual(
math.ceil(before * (1.0 - pruning_ratio)), # type: ignore
after,
msg="original_num_rows = {}, pruning ratio = {}".format(
before, pruning_ratio
),
)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
import unittest
from ctypes import c_float, c_int32, cast, POINTER, pointer
from typing import Dict, Tuple
import hypothesis.strategies as st
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from hypothesis import assume, given, HealthCheck, settings, Verbosity
from torch import Tensor
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import ( # noqa: F401
bytes_to_half_floats,
fused_rowwise_8bit_dequantize_reference,
fused_rowwise_8bit_quantize_reference,
fused_rowwise_nbit_quantize_dequantize_reference,
fused_rowwise_nbit_quantize_reference,
gpu_available,
gpu_unavailable,
symint_vector_unsupported,
)
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
from fbgemm_gpu.test.test_utils import (
bytes_to_half_floats,
fused_rowwise_8bit_dequantize_reference,
fused_rowwise_8bit_quantize_reference,
fused_rowwise_nbit_quantize_dequantize_reference,
fused_rowwise_nbit_quantize_reference,
gpu_available,
gpu_unavailable,
symint_vector_unsupported,
)
no_long_tests: bool = False
class TestFused8BitRowwiseQuantizationConversion(unittest.TestCase):
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
is_half=st.booleans(),
test_float_or_half_op=st.booleans(),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_op(
self,
nrows: int,
ncols: int,
is_half: bool,
test_float_or_half_op: bool,
) -> None:
input_data = torch.rand(nrows, ncols).float()
if is_half:
input_data = torch.rand(nrows, ncols).half()
if test_float_or_half_op:
quantized_data = torch.ops.fbgemm.FloatOrHalfToFused8BitRowwiseQuantized(
input_data
)
else:
if not is_half:
quantized_data = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
input_data
)
else:
quantized_data = torch.ops.fbgemm.HalfToFused8BitRowwiseQuantized(
input_data
)
if nrows == 0 or ncols == 0:
assert quantized_data.numel() == nrows * ((ncols + 3) // 4 * 4 + 8)
return
reference = fused_rowwise_8bit_quantize_reference(input_data.float().numpy())
np.testing.assert_array_almost_equal(quantized_data.numpy(), reference)
if gpu_available:
input_data_gpu = input_data.cuda()
if test_float_or_half_op:
quantized_data_gpu = (
torch.ops.fbgemm.FloatOrHalfToFused8BitRowwiseQuantized(
input_data_gpu
)
)
else:
if not is_half:
quantized_data_gpu = (
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
input_data_gpu
)
)
else:
quantized_data_gpu = (
torch.ops.fbgemm.HalfToFused8BitRowwiseQuantized(input_data_gpu)
)
quantized_data_numpy = quantized_data_gpu.cpu().numpy()
ncols_aligned = (ncols + 4 - 1) // 4 * 4
# compare quantized data
np.testing.assert_allclose(
quantized_data_numpy[:, :ncols],
reference[:, :ncols],
# Allow 1 mantissa bit difference (LSB)
atol=1,
)
# compare scales
np.testing.assert_array_almost_equal(
quantized_data_numpy[:, ncols_aligned : ncols_aligned + 4],
reference[:, ncols : ncols + 4],
)
# compare zero points
np.testing.assert_array_equal(
quantized_data_numpy[:, ncols_aligned + 4 : ncols_aligned + 8],
reference[:, ncols + 4 : ncols + 8],
)
# pyre-ignore [56]: Invalid decoration, was not able to infer the type of argument
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
is_output_half=st.booleans(),
test_float_or_half_op=st.booleans(),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_and_dequantize_op(
self,
nrows: int,
ncols: int,
is_output_half: bool,
test_float_or_half_op: bool,
) -> None:
num_elem_per_byte = 1
input_data = torch.rand(nrows, ncols).float()
if is_output_half:
input_data = input_data.half()
assume(ncols % (2 * num_elem_per_byte) == 0)
if test_float_or_half_op:
quantized_data = torch.ops.fbgemm.FloatOrHalfToFused8BitRowwiseQuantized(
input_data
)
dequantized_data = torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatOrHalf(
quantized_data,
output_dtype=1 if is_output_half else 0,
)
else:
if not is_output_half:
quantized_data = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
input_data
)
dequantized_data = torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
quantized_data
)
else:
quantized_data = torch.ops.fbgemm.HalfToFused8BitRowwiseQuantized(
input_data
)
dequantized_data = torch.ops.fbgemm.Fused8BitRowwiseQuantizedToHalf(
quantized_data
)
if nrows == 0 or ncols == 0:
assert dequantized_data.numel() == 0
return
reference = torch.from_numpy(
fused_rowwise_8bit_dequantize_reference(quantized_data.numpy())
)
if not is_output_half:
torch.testing.assert_close(dequantized_data.float(), reference.float())
else:
torch.testing.assert_close(dequantized_data.half(), reference.half())
if gpu_available:
input_data_gpu = input_data.cuda()
if test_float_or_half_op:
quantized_data_gpu = (
torch.ops.fbgemm.FloatOrHalfToFused8BitRowwiseQuantized(
input_data_gpu
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatOrHalf(
quantized_data_gpu,
output_dtype=1 if is_output_half else 0,
)
)
else:
if not is_output_half:
quantized_data_gpu = (
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
input_data_gpu
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
quantized_data_gpu
)
)
else:
quantized_data_gpu = (
torch.ops.fbgemm.HalfToFused8BitRowwiseQuantized(input_data_gpu)
)
dequantized_data_gpu = (
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToHalf(
quantized_data_gpu
)
)
dequantized_data_numpy = dequantized_data_gpu.cpu().numpy()
dequantized_data_trimmed = torch.from_numpy(
dequantized_data_numpy[:, :ncols]
)
if not is_output_half:
torch.testing.assert_close(
dequantized_data_trimmed.float(), reference.float()
)
else:
torch.testing.assert_close(
dequantized_data_trimmed.half(), reference.half()
)
@unittest.skipIf(no_long_tests, "Slow test, requires buck build to run.") # noqa
def test_quantize_and_dequantize_op_cuda_large_nrows(self) -> None:
ncols = 256
nrows = 65540
input_data = torch.rand(nrows, ncols).float()
quantized_data = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(input_data)
reference = torch.from_numpy(
fused_rowwise_8bit_dequantize_reference(quantized_data.numpy())
)
if gpu_available:
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
input_data_gpu
)
dequantized_data_gpu = torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
quantized_data_gpu
)
reference = torch.from_numpy(
fused_rowwise_8bit_dequantize_reference(
quantized_data_gpu.cpu().numpy()
)
)
# compare quantized data
torch.testing.assert_close(dequantized_data_gpu.cpu(), reference)
class TestMixedDimInt8DequantizationConversion(unittest.TestCase):
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# Pyre was not able to infer the type of argument `not torch.cuda.is_available()`
# to decorator factory `unittest.skipIf`.
@unittest.skipIf(*gpu_unavailable)
def test_mixed_dim_8bit_dequantize_op_empty(self) -> None:
# Assert that the kernel returns an empty tensor instead of failing with a CUDA error
input_refs = torch.empty((0, 0), dtype=torch.uint8).cuda()
D_offsets = torch.tensor([0]).cuda()
mixed_dim_dequant_output = (
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatMixedDim(
input_refs, D_offsets, SparseType.FP32.as_int()
)
)
assert mixed_dim_dequant_output.numel() == 0
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
B=st.integers(min_value=1, max_value=100),
T=st.integers(min_value=1, max_value=100),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
min_dim=st.just(1),
max_dim=st.just(100),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_mixed_dim_8bit_dequantize_op(
self,
B: int,
T: int,
output_dtype: SparseType,
min_dim: int,
max_dim: int,
) -> None:
self.run_mixed_dim_8bit_dequantize_op_test(B, T, output_dtype, min_dim, max_dim)
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
B=st.integers(min_value=1, max_value=100),
T=st.integers(min_value=1, max_value=100),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
min_dim=st.just(100),
max_dim=st.just(1000),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_mixed_dim_8bit_dequantize_op_large_dims(
self,
B: int,
T: int,
output_dtype: SparseType,
min_dim: int,
max_dim: int,
) -> None:
self.run_mixed_dim_8bit_dequantize_op_test(B, T, output_dtype, min_dim, max_dim)
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
B=st.just(65540),
T=st.just(5),
output_dtype=st.just(SparseType.FP32),
min_dim=st.just(1),
max_dim=st.just(100),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_mixed_dim_8bit_dequantize_op_large_rows(
self,
B: int,
T: int,
output_dtype: SparseType,
min_dim: int,
max_dim: int,
) -> None:
self.run_mixed_dim_8bit_dequantize_op_test(B, T, output_dtype, min_dim, max_dim)
def run_mixed_dim_8bit_dequantize_op_test(
self,
B: int,
T: int,
output_dtype: SparseType,
min_dim: int,
max_dim: int,
) -> None:
table_dims = [
random.randint(min_dim, max_dim) * 8 for _ in range(T)
] # assume table dimensions are multiples of 8
table_dims_with_qparams = [d + 8 for d in table_dims]
D_offsets = (
torch.cumsum(torch.tensor([0] + table_dims_with_qparams), dim=0)
.to(torch.int)
.cuda()
)
input_refs = [torch.randn((B, d)).cuda() for d in table_dims]
input_refs_int8 = [
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(t) for t in input_refs
]
input_data = torch.concat(input_refs_int8, dim=1).contiguous()
mixed_dim_dequant_output = (
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatMixedDim(
input_data, D_offsets, output_dtype.as_int()
)
)
table_output_split = [t + 8 for t in table_dims]
output_ref = []
for output_i8 in torch.split(input_data, table_output_split, dim=1):
output_ref.append(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
output_i8.contiguous()
)
)
output_ref_concat = torch.cat(output_ref, dim=1)
if output_dtype == SparseType.FP16:
output_ref_concat = output_ref_concat.half()
torch.testing.assert_close(output_ref_concat, mixed_dim_dequant_output)
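# The fused n-bit row-wise tests below pack `num_elem_per_byte = 8 // bit_rate`
# values into each byte and append per-row quantization parameters (a scale and
# a zero point, stored in half precision per the "SBHalf" op names), so the
# quantized row width in bytes is ncols // num_elem_per_byte plus a small fixed
# overhead.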
class TestFusedNBitRowwiseQuantizationConversion(unittest.TestCase):
# pyre-ignore [56]: Invalid decoration, was not able to infer the type of argument
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
bit_rate=st.sampled_from([2, 4]),
is_half=st.booleans(),
test_float_or_half_op=st.booleans(),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_op(
self,
nrows: int,
ncols: int,
bit_rate: int,
is_half: bool,
test_float_or_half_op: bool,
) -> None:
assert 8 % bit_rate == 0
num_elem_per_byte = 8 // bit_rate
assume(ncols % (2 * num_elem_per_byte) == 0)
input_data = torch.rand(nrows, ncols).float()
if is_half:
input_data = input_data.half()
if test_float_or_half_op:
quantized_data = (
torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
)
else:
if not is_half:
quantized_data = (
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
)
else:
quantized_data = torch.ops.fbgemm.HalfToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
if nrows == 0 or ncols == 0:
assert quantized_data.numel() == nrows * (
(ncols + bit_rate - 1) // bit_rate + 4
)
return
quantized_data = quantized_data.numpy()
reference = fused_rowwise_nbit_quantize_reference(
input_data.float().numpy(), bit_rate
)
interleaved_dim = ncols // num_elem_per_byte
# compare quantized data
np.testing.assert_array_equal(
quantized_data[:, :interleaved_dim], reference[:, :interleaved_dim]
)
# compare scales
np.testing.assert_array_almost_equal(
bytes_to_half_floats(
quantized_data[:, interleaved_dim : interleaved_dim + 2]
),
bytes_to_half_floats(reference[:, interleaved_dim : interleaved_dim + 2]),
)
# compare zero points
np.testing.assert_array_equal(
quantized_data[:, interleaved_dim + 2], reference[:, interleaved_dim + 2]
)
if gpu_available:
input_data_gpu = input_data.cuda()
if test_float_or_half_op:
quantized_data_gpu = (
torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
else:
if not is_half:
quantized_data_gpu = (
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
else:
quantized_data_gpu = (
torch.ops.fbgemm.HalfToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
quantized_data_numpy = quantized_data_gpu.cpu().numpy()
# compare quantized data
np.testing.assert_array_equal(
quantized_data_numpy[:, :ncols], reference[:, :ncols]
)
# pyre-ignore [56]: Invalid decoration, was not able to infer the type of argument
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
bit_rate=st.sampled_from([2, 4]),
is_output_half=st.booleans(),
test_float_or_half_op=st.booleans(),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_and_dequantize_op(
self,
nrows: int,
ncols: int,
bit_rate: int,
is_output_half: bool,
test_float_or_half_op: bool,
) -> None:
assert 8 % bit_rate == 0
num_elem_per_byte = 8 // bit_rate
input_data = torch.rand(nrows, ncols).float()
if is_output_half:
input_data = input_data.half()
assume(ncols % (2 * num_elem_per_byte) == 0)
if test_float_or_half_op:
quantized_data = (
torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
)
dequantized_data = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloatOrHalf(
quantized_data,
bit_rate,
output_dtype=1 if is_output_half else 0,
)
)
else:
if not is_output_half:
quantized_data = (
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
)
dequantized_data = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(
quantized_data, bit_rate
)
)
else:
quantized_data = torch.ops.fbgemm.HalfToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
dequantized_data = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToHalf(
quantized_data, bit_rate
)
)
if nrows == 0 or ncols == 0:
assert dequantized_data.numel() == 0
return
if not is_output_half:
reference = torch.from_numpy(
fused_rowwise_nbit_quantize_dequantize_reference(
input_data.float().numpy(), bit_rate
)
)
else:
reference = torch.from_numpy(
fused_rowwise_nbit_quantize_dequantize_reference(
input_data.float().numpy(), bit_rate
)
).half()
torch.testing.assert_close(dequantized_data, reference)
if gpu_available:
input_data_gpu = input_data.cuda()
if test_float_or_half_op:
quantized_data_gpu = (
torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloatOrHalf(
quantized_data_gpu,
bit_rate,
output_dtype=1 if is_output_half else 0,
)
)
else:
if not is_output_half:
quantized_data_gpu = (
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(
quantized_data_gpu, bit_rate
)
)
else:
quantized_data_gpu = (
torch.ops.fbgemm.HalfToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToHalf(
quantized_data_gpu, bit_rate
)
)
            # compare GPU dequantized data with CPU dequantized data
torch.testing.assert_close(
dequantized_data_gpu.cpu().float(), dequantized_data.float()
)
@unittest.skipIf(no_long_tests, "Slow test, requires buck build to run.") # noqa
def test_quantize_and_dequantize_op_cuda_large_nrows(self) -> None:
ncols = 256
bit_rate = 4
nrows = 65540
num_elem_per_byte = 8 // bit_rate
input_data = torch.rand(nrows, ncols).float()
assume(ncols % (2 * num_elem_per_byte) == 0)
reference = torch.from_numpy(
fused_rowwise_nbit_quantize_dequantize_reference(
input_data.numpy(), bit_rate
)
)
if gpu_available:
input_data_gpu = input_data.cuda()
quantized_data_gpu = (
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(
quantized_data_gpu, bit_rate
)
)
            # compare dequantized data with the reference
torch.testing.assert_close(dequantized_data_gpu.cpu(), reference)
class TestHFP8QuantizationConversion(unittest.TestCase):
    # min_pos is the smallest positive denormal number
    # min_normal_pos is the smallest positive normal number
def _get_hfp8_dynamic_range(
self, ebits: int, mbits: int, bias: int
) -> Tuple[int, int, int]:
max_pos = (1 << ((1 << ebits) - 2 - bias)) * (2 - 2 ** (-mbits))
min_pos = 2 ** (1 - bias - mbits)
min_normal_pos = 2 ** (1 - bias)
return min_pos, max_pos, min_normal_pos
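    # Worked example for the formulas above with ebits=4, mbits=3, bias=7:
    #   max_pos        = (1 << (2**4 - 2 - 7)) * (2 - 2**-3) = 128 * 1.875 = 240
    #   min_pos        = 2**(1 - 7 - 3) = 2**-9  (smallest denormal)
    #   min_normal_pos = 2**(1 - 7)     = 2**-6  (smallest normal)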
def _get_hfp8_config(
self,
) -> Tuple[int, int, Dict[int, int], Dict[int, int], Dict[int, int]]:
# TODO: set up test for 1-5-2 format
# TODO: parameterize ebits and mbits in unit test
ebits = 4
mbits = 3
max_pos_dict = {}
min_pos_dict = {}
min_normal_pos_dict = {}
for bias in [4, 5, 6, 7]:
min_pos, max_pos, min_normal_pos = self._get_hfp8_dynamic_range(
ebits, mbits, bias
)
min_pos_dict[bias] = min_pos
max_pos_dict[bias] = max_pos
min_normal_pos_dict[bias] = min_normal_pos
return ebits, mbits, min_pos_dict, max_pos_dict, min_normal_pos_dict
def _test_conversion(
self,
input_data: Tensor,
reference_data: Tensor,
ebits: int,
exponent_bias: int,
max_pos: float,
atol: float = 0.0,
rtol: float = 1e-7,
) -> None:
if torch.cuda.is_available():
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToHFP8Quantized(
input_data_gpu, ebits, exponent_bias, max_pos
)
dequantized_data_gpu = torch.ops.fbgemm.HFP8QuantizedToFloat(
quantized_data_gpu, ebits, exponent_bias
)
torch.testing.assert_close(
dequantized_data_gpu.cpu(), reference_data, rtol=rtol, atol=atol
)
# pyre-ignore [56]
@given(
nrows=st.integers(min_value=1, max_value=100),
ncols=st.integers(min_value=1, max_value=100),
exponent_bias=st.integers(min_value=4, max_value=7),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_and_dequantize_op(
self, nrows: int, ncols: int, exponent_bias: int
) -> None:
ebits, mbits, min_pos, max_pos, min_normal_pos = self._get_hfp8_config()
# test positive normal range
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
min_normal_pos[exponent_bias], max_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data,
ebits,
exponent_bias,
max_pos[exponent_bias],
rtol=(2 ** (-mbits - 1)),
atol=0,
)
# test positive denormal range
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
min_pos[exponent_bias], min_normal_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data,
ebits,
exponent_bias,
max_pos[exponent_bias],
rtol=0.0,
atol=(2 ** (1 - exponent_bias - mbits)),
)
# test negative normal range
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
-max_pos[exponent_bias], -min_normal_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data,
ebits,
exponent_bias,
max_pos[exponent_bias],
rtol=(2 ** (-mbits - 1)),
atol=0,
)
# test negative denormal range
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
-min_normal_pos[exponent_bias], -min_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data,
ebits,
exponent_bias,
max_pos[exponent_bias],
rtol=0.0,
atol=(2 ** (1 - exponent_bias - mbits)),
)
# test positive underflow
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
0, 0.5 * min_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data.new_full(input_data.shape, 0),
ebits,
exponent_bias,
max_pos[exponent_bias],
)
# test negative underflow
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
-0.5 * min_pos[exponent_bias], 0
)
self._test_conversion(
input_data,
input_data.new_full(input_data.shape, 0),
ebits,
exponent_bias,
max_pos[exponent_bias],
)
# test positive overflow
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
max_pos[exponent_bias], max_pos[exponent_bias] * 2
)
self._test_conversion(
input_data,
input_data.new_full(input_data.shape, max_pos[exponent_bias]),
ebits,
exponent_bias,
max_pos[exponent_bias],
)
# test negative overflow
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
-max_pos[exponent_bias] * 2, -max_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data.new_full(input_data.shape, -max_pos[exponent_bias]),
ebits,
exponent_bias,
max_pos[exponent_bias],
)
class TestDenseMLPQuantizationConversion(unittest.TestCase):
@unittest.skipIf(*gpu_unavailable)
# pyre-ignore [56]: Invalid decoration, was not able to infer the type of argument
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_op(self, nrows: int, ncols: int) -> None:
ebits = 8
mbits = 7
bias = 127
max_pos = (1 << ((1 << ebits) - 2 - bias)) * (2 - 2 ** (-mbits))
min_pos = 2 ** (1 - bias - mbits)
bounding_box_size = 16
print("MSFP parameters", bounding_box_size, ebits, mbits, bias)
input_data = torch.rand(nrows, ncols).float()
quantized_data = torch.ops.fbgemm.FloatToMSFPQuantized(
input_data.cuda(),
bounding_box_size,
ebits,
mbits,
bias,
min_pos,
max_pos,
)
dequantized_data = torch.ops.fbgemm.MSFPQuantizedToFloat(
quantized_data.cuda(), ebits, mbits, bias
)
torch.testing.assert_close(dequantized_data.cpu(), input_data, rtol=1, atol=0)
class SparseNNOperatorsGPUTest(unittest.TestCase):
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.sampled_from(["BF16"])` to decorator factory
# `hypothesis.given`.
@given(
precision=st.just("BF16"),
batch_size=st.integers(min_value=1, max_value=256),
k=st.integers(min_value=2, max_value=2),
n=st.integers(min_value=2, max_value=2),
)
def test_dense_mlp_quantize_ops(
self, precision: str, batch_size: int, k: int, n: int
) -> None:
if precision == "BF16":
input_data = torch.rand((n, k), dtype=torch.float32)
quantized_data = torch.ops.fbgemm.FloatToBfloat16Quantized(input_data)
dequantized_data = torch.ops.fbgemm.Bfloat16QuantizedToFloat(quantized_data)
torch.testing.assert_close(
dequantized_data, input_data, rtol=1e-2, atol=1e-2
)
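# Reference bfloat16 helpers: bfloat_quantize reinterprets the float32 bit
# pattern as an int32, adds 0x8000 so that truncation rounds to the nearest
# bfloat16 (ties round up), and keeps the upper 16 bits; bfloat_dequantize
# places those 16 bits back into the high half of a float32.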
def bfloat_quantize(x_float: float) -> np.uint16:
bits = cast(pointer(c_float(x_float)), POINTER(c_int32)).contents.value
bits += 1 << 15
bits = bits >> 16
bits = np.uint16(bits)
return bits
def bfloat_dequantize(x_bfloat: np.uint16) -> float:
bits = np.int32(x_bfloat) << 16
return cast(pointer(c_int32(bits)), POINTER(c_float)).contents.value
class TestBfloat16QuantizationConversion(unittest.TestCase):
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_op(self, nrows: int, ncols: int) -> None:
input_data = torch.rand(nrows, ncols).float()
quantized_data = torch.ops.fbgemm.FloatToBfloat16Quantized(input_data)
if nrows == 0 or ncols == 0:
assert quantized_data.numel() == 0
return
f = np.vectorize(lambda x: bfloat_quantize(x))
reference = f(input_data.numpy())
quantized_data_uint16 = quantized_data.numpy()
quantized_data_uint16.dtype = np.uint16
np.testing.assert_array_almost_equal(quantized_data_uint16, reference)
if torch.cuda.is_available():
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToBfloat16Quantized(
input_data_gpu
)
quantized_data_numpy = quantized_data_gpu.cpu().numpy()
quantized_data_numpy.dtype = np.uint16
np.testing.assert_allclose(quantized_data_numpy, reference)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_and_dequantize_op(self, nrows: int, ncols: int) -> None:
input_data = torch.rand(nrows, ncols).float()
quantized_data = torch.ops.fbgemm.FloatToBfloat16Quantized(input_data)
dequantized_data = torch.ops.fbgemm.Bfloat16QuantizedToFloat(quantized_data)
if nrows == 0 or ncols == 0:
assert dequantized_data.numel() == 0
return
f = np.vectorize(lambda x: bfloat_quantize(x))
ref_bfloat16 = f(input_data.numpy())
f = np.vectorize(lambda x: bfloat_dequantize(x))
ref_fp32 = torch.from_numpy(f(ref_bfloat16)).float()
torch.testing.assert_close(dequantized_data, ref_fp32)
if torch.cuda.is_available():
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToBfloat16Quantized(
input_data_gpu
)
dequantized_data_gpu = torch.ops.fbgemm.Bfloat16QuantizedToFloat(
quantized_data_gpu
)
            # compare GPU dequantized data with the reference
torch.testing.assert_close(dequantized_data_gpu.cpu(), ref_fp32)
@unittest.skipIf(not torch.cuda.is_available(), "Skip when CUDA is not available")
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.sampled_from([(65540, 256), (256, 65540)])` to decorator
# factory `hypothesis.given`.
@given(
ncols_nrows=st.sampled_from([(65540, 256), (256, 65540)]),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_and_dequantize_op_cuda_large_nrows_bf16(
self, ncols_nrows: Tuple[int, int]
) -> None:
ncols, nrows = ncols_nrows
input_data = torch.rand(nrows, ncols).float()
quantized_data = torch.ops.fbgemm.FloatToBfloat16Quantized(input_data)
dequantized_data = torch.ops.fbgemm.Bfloat16QuantizedToFloat(quantized_data)
if torch.cuda.is_available():
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToBfloat16Quantized(
input_data_gpu
)
dequantized_data_gpu = torch.ops.fbgemm.Bfloat16QuantizedToFloat(
quantized_data_gpu
)
            # compare GPU dequantized data with CPU dequantized data
torch.testing.assert_close(dequantized_data_gpu.cpu(), dequantized_data)
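# The FP8 row-wise tests below quantize each row to 8-bit floating point with
# per-row scaling; the `forward` flag is threaded through both the quantize and
# dequantize calls so they agree on the FP8 encoding used (the exact format per
# flag is an implementation detail of the op and is not asserted here).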
class TestFP8RowwiseQuantizationConversion(unittest.TestCase):
enable_logging: bool = False
def setUp(self) -> None:
self.enable_logging = bool(os.getenv("FBGEMM_GPU_ENABLE_LOGGING", 0))
if self.enable_logging:
logging.info("Enabled logging for TestFP8RowwiseQuantizationConversion")
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]:
@given(
batched=st.booleans(),
bs=st.integers(min_value=1, max_value=100),
m=st.integers(min_value=0, max_value=100),
n=st.integers(min_value=0, max_value=100),
forward=st.booleans(),
given_last_dim=st.booleans(),
dtype=st.sampled_from(
[
torch.float,
torch.half,
torch.bfloat16,
],
),
        # symint_vector is not supported before PyTorch 2.1, so disable compile testing there
test_compile=st.booleans() if symint_vector_unsupported() else st.just(False),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_quantize_and_dequantize_op_fp8_rowwise(
self,
batched: bool,
bs: int,
m: int,
n: int,
forward: bool,
given_last_dim: bool,
dtype: torch.dtype,
test_compile: bool,
) -> None:
n = n * 4 # need (n % 4 == 0)
input_data = (
torch.rand(bs, m, n, dtype=dtype)
if batched
else torch.rand(bs * m, n, dtype=dtype)
)
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToFP8RowwiseQuantized(
input_data_gpu, forward=forward
)
        dequantize_func = (
            torch.compile(
                torch.ops.fbgemm.FP8RowwiseQuantizedToFloat,
                dynamic=True,
                fullgraph=True,
            )
            if test_compile
            else torch.ops.fbgemm.FP8RowwiseQuantizedToFloat
        )
        if test_compile:
            torch._dynamo.mark_dynamic(quantized_data_gpu, 0)
            torch._dynamo.mark_dynamic(quantized_data_gpu, 1)
        dequantized_data_gpu = dequantize_func(
quantized_data_gpu,
forward=forward,
output_dtype=SparseType.FP32.as_int()
if dtype == torch.float
else (
SparseType.FP16.as_int()
if dtype == torch.half
else SparseType.BF16.as_int()
),
)
if m == 0 or n == 0:
assert dequantized_data_gpu.numel() == 0
return
assert (
dequantized_data_gpu.dtype == dtype
), "result is {dequantized_data_gpu.dtype} type, but expected {dtype}"
qref = input_data_gpu.float()
dq = dequantized_data_gpu.float()
if self.enable_logging:
# Logging quantization errors
errors = (qref - dq) / (qref + 1e-5)
logging.info(f"max relative error {errors.abs().max()}")
val, idx = torch.topk(errors.flatten().abs(), k=min(10, errors.shape[-1]))
logging.info(f"top-10 errors {val}")
logging.info(f"ref data {input_data_gpu.flatten()}")
logging.info(f"dequantized data {dequantized_data_gpu.flatten()}")
logging.info(f"max relative error {errors.flatten()[idx]}")
torch.testing.assert_close(qref.cpu(), dq.cpu(), rtol=0.1, atol=0.05)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import copy
import random
import unittest
import fbgemm_gpu.ssd_split_table_batched_embeddings_ops as ssd_split_table_batched_embeddings_ops
import hypothesis.strategies as st
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_embedding_utils import (
b_indices,
fake_quantize_embs,
get_table_batched_offsets_from_dense,
round_up,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_common import PoolingMode
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
rounded_row_size_in_bytes,
unpadded_row_size_in_bytes,
)
from hypothesis import given, settings, Verbosity
MAX_EXAMPLES = 40
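# The tests below exercise SSDTableBatchedEmbeddingBags, which backs embedding
# tables with SSD storage behind a GPU LXU cache: rows are written with
# ssd_db.set_cuda and read back with ssd_db.get_cuda, and lookups are compared
# against plain torch.nn.EmbeddingBag references.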
@unittest.skipIf(not torch.cuda.is_available(), "Skip when CUDA is not available")
class SSDSplitTableBatchedEmbeddingsTest(unittest.TestCase):
def test_ssd(self) -> None:
import tempfile
E = int(1e4)
D = 128
N = 100
indices = torch.as_tensor(np.random.choice(E, replace=False, size=(N,)))
weights = torch.randn(N, D)
output_weights = torch.empty_like(weights)
count = torch.tensor([N])
feature_table_map = list(range(1))
emb = ssd_split_table_batched_embeddings_ops.SSDTableBatchedEmbeddingBags(
embedding_specs=[(E, D)],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=1,
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
)
emb.ssd_db.get_cuda(indices, output_weights, count)
torch.cuda.synchronize()
assert (output_weights <= 0.1).all().item()
assert (output_weights >= -0.1).all().item()
emb.ssd_db.set_cuda(indices, weights, count, 1)
emb.ssd_db.get_cuda(indices, output_weights, count)
torch.cuda.synchronize()
torch.testing.assert_close(weights, output_weights)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_ssd_forward(
self, T: int, D: int, B: int, log_E: int, L: int, weighted: bool
) -> None:
import tempfile
E = int(10**log_E)
D = D * 4
Ds = [D] * T
Es = [E] * T
feature_table_map = list(range(T))
emb = ssd_split_table_batched_embeddings_ops.SSDTableBatchedEmbeddingBags(
embedding_specs=[(E, D) for (E, D) in zip(Es, Ds)],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=max(T * B * L, 1),
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
).cuda()
bs = [
torch.nn.EmbeddingBag(E, D, mode="sum", sparse=True).cuda()
for (E, D) in zip(Es, Ds)
]
torch.manual_seed(42)
xs = [torch.randint(low=0, high=e, size=(B, L)).cuda() for e in Es]
xws = [torch.randn(size=(B, L)).cuda() for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
fs = (
[b_indices(b, x) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1))
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
for t in range(T):
emb.ssd_db.set_cuda(
torch.arange(t * E, (t + 1) * E).to(torch.int64),
bs[t].weight.cpu(),
torch.as_tensor([E]),
t,
)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x)
fc2 = (
emb(indices.cuda(), offsets.cuda())
if not weighted
else emb(indices.cuda(), offsets.cuda(), xw.contiguous().view(-1).cuda())
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=1.0e-5,
rtol=1.0e-5,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_ssd_backward_adagrad(
self, T: int, D: int, B: int, log_E: int, L: int, weighted: bool
) -> None:
import tempfile
E = int(10**log_E)
D = D * 4
Ds = [D] * T
Es = [E] * T
lr = 0.5
eps = 0.2
feature_table_map = list(range(T))
emb = ssd_split_table_batched_embeddings_ops.SSDTableBatchedEmbeddingBags(
embedding_specs=[(E, D) for (E, D) in zip(Es, Ds)],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=max(T * B * L, 1),
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
learning_rate=lr,
eps=eps,
ssd_shards=2,
).cuda()
bs = [
torch.nn.EmbeddingBag(E, D, mode="sum", sparse=True).cuda()
for (E, D) in zip(Es, Ds)
]
torch.manual_seed(42)
xs = [torch.randint(low=0, high=e, size=(B, L)).cuda() for e in Es]
xws = [torch.randn(size=(B, L)).cuda() for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
fs = (
[b_indices(b, x) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1))
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
for t in range(T):
emb.ssd_db.set_cuda(
torch.arange(t * E, (t + 1) * E).to(torch.int64),
bs[t].weight.cpu(),
torch.as_tensor([E]),
t,
)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x)
fc2 = (
emb(indices.cuda(), offsets.cuda())
if not weighted
else emb(indices.cuda(), offsets.cuda(), xw.contiguous().view(-1).cuda())
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=1.0e-5,
rtol=1.0e-5,
)
fc2.backward(torch.cat([go.view(B, -1) for go in gos], dim=1))
split_optimizer_states = [s for (s,) in emb.debug_split_optimizer_states()]
for t in range(T):
# pyre-fixme[16]: Optional type has no attribute `float`.
ref_optimizer_state = bs[t].weight.grad.float().to_dense().pow(2)
torch.testing.assert_close(
split_optimizer_states[t].float(),
ref_optimizer_state.mean(dim=1),
atol=1.0e-4,
rtol=1.0e-4,
)
emb.flush()
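        # The expected row-wise Adagrad update checked below is
        #   w_new = w - lr * grad / (sqrt(mean(grad**2, dim=1)) + eps)
        # with the per-row second-moment state verified a few lines above;
        # torch.addcdiv(value=-lr, tensor1=grad, tensor2=sqrt(state) + eps)
        # expresses exactly that update.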
for t in range(T):
torch.testing.assert_close(
emb.debug_split_embedding_weights()[t].float().cuda(),
torch.addcdiv(
bs[t].weight.float(),
value=-lr,
tensor1=bs[t].weight.grad.float().to_dense(),
tensor2=split_optimizer_states[t]
.float()
.sqrt_()
.add_(eps)
.view(Es[t], 1),
),
atol=1.0e-4,
rtol=1.0e-4,
)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_ssd_cache(
self, T: int, D: int, B: int, log_E: int, L: int, weighted: bool
) -> None:
# T=2
# D=2
# B=9
# log_E=3
# L=14
# weighted=False
import tempfile
E = int(10**log_E)
D = D * 4
Ds = [D] * T
Es = [E] * T
lr = 0.5
eps = 0.2
C = max(T * B * L, 1)
feature_table_map = list(range(T))
emb = ssd_split_table_batched_embeddings_ops.SSDTableBatchedEmbeddingBags(
embedding_specs=[(E, D) for (E, D) in zip(Es, Ds)],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=C,
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
learning_rate=lr,
eps=eps,
ssd_shards=2,
).cuda()
bs = [
torch.nn.EmbeddingBag(E, D, mode="sum", sparse=True).cuda()
for (E, D) in zip(Es, Ds)
]
torch.manual_seed(42)
for t in range(T):
emb.ssd_db.set_cuda(
torch.arange(t * E, (t + 1) * E).to(torch.int64),
bs[t].weight.cpu(),
torch.as_tensor([E]),
t,
)
for i in range(10):
xs = [torch.randint(low=0, high=e, size=(B, L)).cuda() for e in Es]
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xws = [torch.randn(size=(B, L)).cuda() for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x)
(indices, offsets) = indices.cuda(), offsets.cuda()
assert emb.timestep == i
emb.prefetch(indices, offsets)
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
emb.hash_size_cumsum,
indices,
offsets,
)
            # Verify that prefetching the same batch again triggers no cache actions.
(
_,
_,
_,
actions_count_gpu,
) = torch.ops.fbgemm.ssd_cache_populate_actions( # noqa
linear_cache_indices,
emb.total_hash_size,
emb.lxu_cache_state,
emb.timestep,
0, # prefetch_dist
emb.lru_state,
)
assert actions_count_gpu.item() == 0
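            # The LXU cache is 32-way set associative (ASSOC below): a lookup
            # returns a flat slot index, so set = loc // ASSOC and
            # way = loc % ASSOC. lru_state holds the last-access timestep per
            # slot and lxu_cache_state holds the cached linear index, which the
            # loop below cross-checks after the prefetch.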
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
emb.lxu_cache_state,
emb.hash_size_cumsum[-1],
)
lru_state_cpu = emb.lru_state.cpu()
lxu_cache_state_cpu = emb.lxu_cache_state.cpu()
NOT_FOUND = np.iinfo(np.int32).max
ASSOC = 32
for loc, linear_idx in zip(
lxu_cache_locations.cpu().numpy().tolist(),
linear_cache_indices.cpu().numpy().tolist(),
):
assert loc != NOT_FOUND
# if we have a hit, check the cache is consistent
loc_set = loc // ASSOC
loc_slot = loc % ASSOC
assert lru_state_cpu[loc_set, loc_slot] == emb.timestep
assert lxu_cache_state_cpu[loc_set, loc_slot] == linear_idx
fs = (
[b_indices(b, x) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1))
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
fc2 = (
emb(indices, offsets)
if not weighted
else emb(indices, offsets, xw.contiguous().view(-1).cuda())
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=1.0e-5,
rtol=1.0e-5,
)
@unittest.skipIf(not torch.cuda.is_available(), "Skip when CUDA is not available")
class SSDIntNBitTableBatchedEmbeddingsTest(unittest.TestCase):
def test_nbit_ssd(self) -> None:
import tempfile
E = int(1e4)
D = 128
N = 100
indices = torch.as_tensor(np.random.choice(E, replace=False, size=(N,)))
weights = torch.empty(N, D, dtype=torch.uint8)
output_weights = torch.empty_like(weights)
count = torch.tensor([N])
feature_table_map = list(range(1))
emb = (
ssd_split_table_batched_embeddings_ops.SSDIntNBitTableBatchedEmbeddingBags(
embedding_specs=[("", E, D, SparseType.FP32)],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=1,
)
)
emb.ssd_db.get_cuda(indices, output_weights, count)
torch.cuda.synchronize()
emb.ssd_db.set_cuda(indices, weights, count, 1)
emb.ssd_db.get_cuda(indices, output_weights, count)
torch.cuda.synchronize()
torch.testing.assert_close(weights, output_weights)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
# FIXME: Disable positional weight due to numerical issues.
weighted=st.just(False),
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
mixed_weights_ty=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_ssd_forward(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
weights_ty: SparseType,
mixed_weights_ty: bool,
) -> None:
import tempfile
if not mixed_weights_ty:
weights_ty_list = [weights_ty] * T
else:
weights_ty_list = [
random.choice(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
)
for _ in range(T)
]
D_alignment = max(
1 if ty.bit_rate() % 8 == 0 else int(8 / ty.bit_rate())
for ty in weights_ty_list
)
D = round_up(D, D_alignment)
E = int(10**log_E)
Ds = [D] * T
Es = [E] * T
row_alignment = 16
feature_table_map = list(range(T))
emb = (
ssd_split_table_batched_embeddings_ops.SSDIntNBitTableBatchedEmbeddingBags(
embedding_specs=[
("", E, D, W_TY) for (E, D, W_TY) in zip(Es, Ds, weights_ty_list)
],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=max(T * B * L, 1),
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
pooling_mode=PoolingMode.SUM,
).cuda()
)
# # NOTE: test TorchScript-compatible!
# emb = torch.jit.script(emb)
bs = [
torch.nn.EmbeddingBag(E, D, mode="sum", sparse=True).cuda()
for (E, D) in zip(Es, Ds)
]
torch.manual_seed(42)
xs = [torch.randint(low=0, high=e, size=(B, L)).cuda() for e in Es]
xws = [torch.randn(size=(B, L)).cuda() for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
for t in range(T):
(weights, scale_shift) = emb.split_embedding_weights()[t]
if scale_shift is not None:
(E, R) = scale_shift.shape
self.assertEqual(R, 4)
scales = np.random.uniform(0.1, 1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
scale_shift[:, :] = torch.tensor(
np.stack([scales, shifts], axis=1).astype(np.float16).view(np.uint8)
)
D_bytes = rounded_row_size_in_bytes(
Ds[t], weights_ty_list[t], row_alignment
)
copy_byte_tensor = torch.empty([E, D_bytes], dtype=torch.uint8)
fake_quantize_embs(
weights,
scale_shift,
bs[t].weight.detach(),
weights_ty_list[t],
use_cpu=False,
)
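            # Build the raw byte image of this table's rows as stored on SSD:
            # float/half rows are copied as-is, while int rows place the fp16
            # scale/shift pair in the first scale_bias_size_in_bytes bytes
            # followed by the packed quantized values, in a buffer rounded up
            # to the 16-byte row alignment.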
if weights_ty_list[t] in [SparseType.FP32, SparseType.FP16, SparseType.FP8]:
copy_byte_tensor[
:,
: unpadded_row_size_in_bytes(Ds[t], weights_ty_list[t]),
] = weights # q_weights
else:
copy_byte_tensor[
:,
emb.scale_bias_size_in_bytes : unpadded_row_size_in_bytes(
Ds[t], weights_ty_list[t]
),
] = weights # q_weights
copy_byte_tensor[
:, : emb.scale_bias_size_in_bytes
] = scale_shift # q_scale_shift
emb.ssd_db.set_cuda(
torch.arange(t * E, (t + 1) * E).to(torch.int64),
copy_byte_tensor,
torch.as_tensor([E]),
t,
)
torch.cuda.synchronize()
fs = (
[b_indices(b, x) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1))
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x)
fc2 = (
emb(indices.cuda().int(), offsets.cuda().int())
if not weighted
else emb(
indices.cuda().int(),
offsets.cuda().int(),
xw.contiguous().view(-1).cuda(),
)
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=1.0e-2,
rtol=1.0e-2,
equal_nan=True,
)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_ssd_cache(
self, T: int, D: int, B: int, log_E: int, L: int, weighted: bool
) -> None:
import tempfile
weights_ty = random.choice(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
)
D_alignment = (
1 if weights_ty.bit_rate() % 8 == 0 else int(8 / weights_ty.bit_rate())
)
D = round_up(D, D_alignment)
E = int(10**log_E)
Ds = [D] * T
Es = [E] * T
weights_ty_list = [weights_ty] * T
C = max(T * B * L, 1)
row_alignment = 16
feature_table_map = list(range(T))
emb = (
ssd_split_table_batched_embeddings_ops.SSDIntNBitTableBatchedEmbeddingBags(
embedding_specs=[
("", E, D, W_TY) for (E, D, W_TY) in zip(Es, Ds, weights_ty_list)
],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=C,
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
ssd_shards=2,
pooling_mode=PoolingMode.SUM,
).cuda()
)
# # NOTE: test TorchScript-compatible!
# emb = torch.jit.script(emb)
bs = [
torch.nn.EmbeddingBag(E, D, mode="sum", sparse=True).cuda()
for (E, D) in zip(Es, Ds)
]
torch.manual_seed(42)
for t in range(T):
(weights, scale_shift) = emb.split_embedding_weights()[t]
if scale_shift is not None:
(E, R) = scale_shift.shape
self.assertEqual(R, 4)
if weights_ty_list[t] == SparseType.INT2:
scales = np.random.uniform(0.1, 1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
if weights_ty_list[t] == SparseType.INT4:
scales = np.random.uniform(0.01, 0.1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
if weights_ty_list[t] == SparseType.INT8:
scales = np.random.uniform(0.001, 0.01, size=(E,)).astype(
np.float16
)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
scale_shift[:, :] = torch.tensor(
np.stack([scales, shifts], axis=1).astype(np.float16).view(np.uint8)
)
D_bytes = rounded_row_size_in_bytes(
Ds[t], weights_ty_list[t], row_alignment
)
copy_byte_tensor = torch.empty([E, D_bytes], dtype=torch.uint8)
fake_quantize_embs(
weights,
scale_shift,
bs[t].weight.detach(),
weights_ty_list[t],
use_cpu=False,
)
if weights_ty_list[t] in [SparseType.FP32, SparseType.FP16, SparseType.FP8]:
copy_byte_tensor[
:,
: unpadded_row_size_in_bytes(Ds[t], weights_ty_list[t]),
] = weights # q_weights
else:
copy_byte_tensor[
:,
emb.scale_bias_size_in_bytes : unpadded_row_size_in_bytes(
Ds[t], weights_ty_list[t]
),
] = weights # q_weights
copy_byte_tensor[
:, : emb.scale_bias_size_in_bytes
] = scale_shift # q_scale_shift
emb.ssd_db.set_cuda(
torch.arange(t * E, (t + 1) * E).to(torch.int64),
copy_byte_tensor,
torch.as_tensor([E]),
t,
)
torch.cuda.synchronize()
for i in range(10):
xs = [torch.randint(low=0, high=e, size=(B, L)).cuda() for e in Es]
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xws = [torch.randn(size=(B, L)).cuda() for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x)
(indices, offsets) = indices.cuda(), offsets.cuda()
assert emb.timestep_counter.get() == i
emb.prefetch(indices, offsets)
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
emb.hash_size_cumsum,
indices,
offsets,
)
            # Verify that prefetching the same batch again triggers no cache actions.
(
_,
_,
_,
actions_count_gpu,
) = torch.ops.fbgemm.ssd_cache_populate_actions( # noqa
linear_cache_indices,
emb.total_hash_size,
emb.lxu_cache_state,
emb.timestep_counter.get(),
0, # prefetch_dist
emb.lru_state,
)
assert actions_count_gpu.item() == 0
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
emb.lxu_cache_state,
emb.hash_size_cumsum[-1],
)
lru_state_cpu = emb.lru_state.cpu()
lxu_cache_state_cpu = emb.lxu_cache_state.cpu()
NOT_FOUND = np.iinfo(np.int32).max
ASSOC = 32
for loc, linear_idx in zip(
lxu_cache_locations.cpu().numpy().tolist(),
linear_cache_indices.cpu().numpy().tolist(),
):
assert loc != NOT_FOUND
# if we have a hit, check the cache is consistent
loc_set = loc // ASSOC
loc_slot = loc % ASSOC
assert lru_state_cpu[loc_set, loc_slot] == emb.timestep_counter.get()
assert lxu_cache_state_cpu[loc_set, loc_slot] == linear_idx
fs = (
[b_indices(b, x) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1))
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
fc2 = (
emb(indices.cuda().int(), offsets.cuda().int())
if not weighted
else emb(
indices.cuda().int(),
offsets.cuda().int(),
xw.contiguous().view(-1).cuda(),
)
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=1.0e-2,
rtol=1.0e-2,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import unittest
from typing import List, Optional, Tuple
import torch
from hypothesis import given, settings
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import cpu_and_maybe_gpu
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:input_combine")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:input_combine_cpu")
from fbgemm_gpu.test.test_utils import cpu_and_maybe_gpu
DEFAULT_DEVICE = torch.device("cpu")
class TBEInputPrepareReference(torch.nn.Module):
def __init__(self, include_last_offsets: List[bool]) -> None:
super().__init__()
self.include_last_offsets = include_last_offsets
def forward( # noqa C901
self,
indices_list: List[torch.Tensor],
offsets_list: List[torch.Tensor],
per_sample_weights_list: List[torch.Tensor],
batch_size: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
size = 0
assert len(indices_list) > 0
assert len(indices_list) == len(offsets_list)
assert len(indices_list) == len(per_sample_weights_list)
assert len(indices_list) == len(self.include_last_offsets)
for i in range(len(self.include_last_offsets)):
size += indices_list[i].size(0)
assert indices_list[i].dim() == 1
assert offsets_list[i].dim() == 1
if per_sample_weights_list[i].numel() > 0:
assert per_sample_weights_list[i].dim() == 1
assert indices_list[i].numel() == per_sample_weights_list[i].numel()
combined_indices = torch.empty(
size,
dtype=torch.int32,
device=indices_list[0].device,
)
torch.cat(indices_list, out=combined_indices)
offsets_starts = torch.zeros(
[len(offsets_list) + 1],
dtype=offsets_list[0].dtype,
device=offsets_list[0].device,
)
offsets_accs = torch.zeros(
[len(offsets_list) + 1],
dtype=offsets_list[0].dtype,
device=offsets_list[0].device,
)
for i, include_last_offset in enumerate(self.include_last_offsets):
if include_last_offset:
offsets_starts[i + 1] = offsets_starts[i] + offsets_list[i].size(0) - 1
else:
offsets_starts[i + 1] = offsets_starts[i] + offsets_list[i].size(0)
offsets_accs[i + 1] = offsets_accs[i] + indices_list[i].size(0)
assert offsets_accs[-1] == combined_indices.size(0)
combined_offsets_size: List[int] = (
[int(offsets_starts[-1].item()) + 1]
if batch_size is None
else [batch_size * len(offsets_list) + 1]
)
combined_offsets = torch.zeros(
combined_offsets_size,
dtype=torch.int32,
device=offsets_list[0].device,
)
if batch_size is None:
for i in range(len(self.include_last_offsets)):
combined_offsets[offsets_starts[i] : offsets_starts[i + 1]] = (
offsets_list[i][: offsets_starts[i + 1] - offsets_starts[i]]
+ offsets_accs[i]
)
else:
for i in range(len(self.include_last_offsets)):
cur_start = batch_size * i
combined_offsets[
cur_start : cur_start + offsets_starts[i + 1] - offsets_starts[i]
] = (
offsets_list[i][: offsets_starts[i + 1] - offsets_starts[i]]
+ offsets_accs[i]
)
cur_start = cur_start + offsets_starts[i + 1] - offsets_starts[i]
for j in range(batch_size - offsets_starts[i + 1] + offsets_starts[i]):
combined_offsets[cur_start + j] = (
indices_list[i].numel() + offsets_accs[i]
)
combined_offsets[-1] = offsets_accs[-1]
per_sample_weights: Optional[torch.Tensor] = None
for i in range(len(self.include_last_offsets)):
if per_sample_weights_list[i].size(0) > 0:
per_sample_weights = torch.ones(
combined_indices.size(0),
dtype=per_sample_weights_list[i].dtype,
device=per_sample_weights_list[i].device,
)
break
if per_sample_weights is not None:
for i in range(len(self.include_last_offsets)):
if per_sample_weights_list[i].size(0) > 0:
per_sample_weights[
offsets_accs[i] : offsets_accs[i + 1]
] = per_sample_weights_list[i][:]
# indices and offsets are required to be int32 for TBE
return combined_indices, combined_offsets, per_sample_weights
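# TBEInputPrepareReference above is the eager reference for the input-combine
# ops tested below: it concatenates per-table indices, rebases each table's
# offsets by the running number of indices, optionally pads offsets out to a
# fixed batch_size, and expands per-sample weights to cover every combined
# index, returning int32 indices and offsets as TBE expects.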
class InputCombineTest(unittest.TestCase):
def _get_inputs(self, dtypes, device=DEFAULT_DEVICE):
indices_list = [
torch.tensor([1, 2, 3], dtype=dtypes[0], device=device),
torch.tensor([1, 2, 3, 4], dtype=dtypes[1], device=device),
]
offsets_list = [
torch.tensor([0, 2], dtype=dtypes[0], device=device),
torch.tensor([0, 1, 4], dtype=dtypes[1], device=device),
]
include_last_offsets = [False, True]
per_sample_weights = [
torch.tensor([1, 2, 1], dtype=torch.float, device=device),
torch.tensor([1, 2, 1, 3], dtype=torch.float, device=device),
]
empty_per_sample_weights = [
torch.tensor([], dtype=torch.float, device=device),
torch.tensor([], dtype=torch.float, device=device),
]
return (
indices_list,
offsets_list,
per_sample_weights,
empty_per_sample_weights,
include_last_offsets,
)
def _run_test(self, dtypes) -> None:
(
indices_list,
offsets_list,
per_sample_weights,
empty_per_sample_weights,
include_last_offsets,
) = self._get_inputs(dtypes)
ref_mod = TBEInputPrepareReference(include_last_offsets)
outputs = torch.ops.fbgemm.tbe_input_combine(
indices_list,
offsets_list,
per_sample_weights,
torch.BoolTensor(include_last_offsets),
)
ref_outputs = ref_mod(indices_list, offsets_list, per_sample_weights)
for i, j in zip(outputs, ref_outputs):
torch.testing.assert_close(i, j)
self.assertTrue(outputs[0].dtype == torch.int32)
self.assertTrue(outputs[1].dtype == torch.int32)
outputs = torch.ops.fbgemm.tbe_input_combine(
indices_list,
offsets_list,
empty_per_sample_weights,
torch.BoolTensor(include_last_offsets),
)
ref_outputs = ref_mod(indices_list, offsets_list, empty_per_sample_weights)
for i, j in zip(outputs[:-1], ref_outputs[:-1]):
torch.testing.assert_close(i, j)
self.assertTrue(j.dtype == torch.int32)
self.assertTrue(outputs[0].dtype == torch.int32)
self.assertTrue(outputs[1].dtype == torch.int32)
self.assertTrue(outputs[-1].size(0) == 0)
def _run_padding_fused_test(self, dtypes, batch_size) -> None:
(
indices_list,
offsets_list,
per_sample_weights,
empty_per_sample_weights,
include_last_offsets,
) = self._get_inputs(dtypes)
ref_mod = TBEInputPrepareReference(include_last_offsets)
outputs = torch.ops.fbgemm.padding_fused_tbe_input_combine(
indices_list,
offsets_list,
per_sample_weights,
torch.BoolTensor(include_last_offsets),
batch_size,
)
ref_outputs = ref_mod(
indices_list, offsets_list, per_sample_weights, batch_size
)
for i, j in zip(outputs, ref_outputs):
torch.testing.assert_close(i, j)
self.assertTrue(outputs[0].dtype == torch.int32)
self.assertTrue(outputs[1].dtype == torch.int32)
outputs = torch.ops.fbgemm.padding_fused_tbe_input_combine(
indices_list,
offsets_list,
empty_per_sample_weights,
torch.BoolTensor(include_last_offsets),
batch_size,
)
ref_outputs = ref_mod(
indices_list, offsets_list, empty_per_sample_weights, batch_size
)
for i, j in zip(outputs[:-1], ref_outputs[:-1]):
torch.testing.assert_close(i, j)
self.assertTrue(j.dtype == torch.int32)
self.assertTrue(outputs[0].dtype == torch.int32)
self.assertTrue(outputs[1].dtype == torch.int32)
self.assertTrue(outputs[-1].size(0) == 0)
def _offsets_to_lengths(
self, offsets, indices, include_last_offsets, device=DEFAULT_DEVICE
):
if include_last_offsets:
offsets_complete = offsets
else:
offsets_complete = torch.cat(
[
offsets,
torch.tensor([indices.numel()], dtype=offsets.dtype, device=device),
]
)
return offsets_complete[1:] - offsets_complete[:-1]
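    # Example for _offsets_to_lengths: with offsets=[0, 2], 3 indices, and
    # include_last_offsets=False, the completed offsets are [0, 2, 3] and the
    # returned lengths are [2, 1] -- matching the first table in _get_inputs.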
def _run_test_with_length(self, dtypes, device=DEFAULT_DEVICE) -> None:
(
indices_list,
offsets_list,
per_sample_weights,
empty_per_sample_weights,
include_last_offsets,
) = self._get_inputs(dtypes, device=device)
ref_mod = TBEInputPrepareReference(include_last_offsets)
lengths_list = [
self._offsets_to_lengths(
offsets, indices, include_last_offsets, device=device
)
for offsets, indices, include_last_offsets in zip(
offsets_list, indices_list, include_last_offsets
)
]
outputs = torch.ops.fbgemm.tbe_input_combine_with_length(
indices_list, lengths_list, per_sample_weights
)
ref_outputs = ref_mod(indices_list, offsets_list, per_sample_weights)
# indices
self.assertTrue(ref_outputs[0].allclose(outputs[0]))
# per sample weights
self.assertTrue(ref_outputs[2].allclose(outputs[2]))
ref_lengths = self._offsets_to_lengths(ref_outputs[1], ref_outputs[0], True)
self.assertTrue(ref_lengths.allclose(outputs[1]))
def _run_padding_fused_test_with_length(self, dtypes, batch_size) -> None:
(
indices_list,
offsets_list,
per_sample_weights,
empty_per_sample_weights,
include_last_offsets,
) = self._get_inputs(dtypes)
ref_mod = TBEInputPrepareReference(include_last_offsets)
lengths_list = [
self._offsets_to_lengths(offsets, indices, include_last_offsets)
for offsets, indices, include_last_offsets in zip(
offsets_list, indices_list, include_last_offsets
)
]
outputs = torch.ops.fbgemm.padding_fused_tbe_input_combine_with_length(
indices_list,
lengths_list,
per_sample_weights,
batch_size,
)
ref_outputs = ref_mod(
indices_list, offsets_list, per_sample_weights, batch_size
)
# indices
self.assertTrue(ref_outputs[0].allclose(outputs[0]))
# per sample weights
self.assertTrue(ref_outputs[2].allclose(outputs[2]))
ref_lengths = self._offsets_to_lengths(ref_outputs[1], ref_outputs[0], True)
self.assertTrue(ref_lengths.allclose(outputs[1]))
def test_input_combine_int64(self) -> None:
self._run_test((torch.int64, torch.int64))
    def test_input_combine_int32(self) -> None:
        self._run_test((torch.int32, torch.int32))
def test_input_combined_mix(self) -> None:
self._run_test((torch.int64, torch.int32))
@given(device=cpu_and_maybe_gpu())
@settings(deadline=None)
def test_input_combine_int64_with_length(self, device: torch.device) -> None:
self._run_test_with_length((torch.int64, torch.int64), device=device)
@given(device=cpu_and_maybe_gpu())
@settings(deadline=None)
def test_input_combine_int32_with_length(self, device: torch.device) -> None:
self._run_test_with_length((torch.int32, torch.int32), device=device)
@given(device=cpu_and_maybe_gpu())
@settings(deadline=None)
def test_input_combine_mix_with_length(self, device: torch.device) -> None:
self._run_test_with_length((torch.int64, torch.int32), device=device)
def test_padding_fused_input_combine_int64(self) -> None:
self._run_padding_fused_test((torch.int64, torch.int64), 64)
def test_padding_fused_input_combine_int32(self) -> None:
self._run_padding_fused_test((torch.int32, torch.int32), 64)
def test_padding_fused_input_combined_mix(self) -> None:
self._run_padding_fused_test((torch.int64, torch.int32), 64)
def test_padding_fused_input_combine_int64_with_length(self) -> None:
self._run_padding_fused_test_with_length((torch.int64, torch.int64), 64)
def test_padding_fused_input_combine_int32_with_length(self) -> None:
self._run_padding_fused_test_with_length((torch.int32, torch.int32), 64)
def test_padding_fused_input_combined_mix_with_length(self) -> None:
self._run_padding_fused_test_with_length((torch.int64, torch.int32), 64)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import contextlib
import functools
import itertools
import logging
import random
import unittest
from itertools import accumulate
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import hypothesis.strategies as st
import numpy as np
import torch
from hypothesis import given, settings, Verbosity
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import gpu_available, gpu_unavailable, skipIfRocm
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:index_select_ops")
from fbgemm_gpu.test.test_utils import gpu_available, gpu_unavailable, skipIfRocm
def unbucketize_indices_value(
bucketized_indices: torch.Tensor,
bucketized_lengths: torch.Tensor,
block_sizes: torch.Tensor,
W: int,
B: int,
) -> torch.Tensor:
block_size_expand = torch.empty_like(bucketized_indices)
bucket_expand = torch.empty_like(bucketized_indices)
T = block_sizes.size()[0]
offset = 0
for w in range(W):
for t in range(T):
for b in range(B):
seg_length = bucketized_lengths[w * T * B + t * B + b]
for i in range(offset, offset + seg_length):
block_size_expand[i] = block_sizes[t]
bucket_expand[i] = w
offset += seg_length
return bucket_expand * block_size_expand + bucketized_indices
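# unbucketize_indices_value above reverses block bucketization: for every
# bucketized index it looks up the block size of its table and the bucket
# (rank) it was routed to, then reconstructs the original index as
# bucket * block_size + bucketized_index.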
def get_n_rand_num_summing_to_k(n: int, k: int) -> np.ndarray:
"""Get a list of `n` integers which collectively sum to `k`, drawn
uniformly from the set of all such lists.
Args:
n - The number of integers in the result list
k - The value they should sum to
"""
# There are a lot of ways to do this wrong, probably including
# the ones you've just thought of. I think the following does
# it correctly, though.
if n == 0:
return np.array([])
return np.random.multinomial(k, np.ones(n) / n, size=1)[0]
@torch.jit.script
def permute_scripted(
permute: torch.Tensor, lengths: torch.Tensor, indices: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
(
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
) = torch.ops.fbgemm.permute_2D_sparse_data(permute, lengths, indices, None, None)
return (
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
)
class SparseOpsTest(unittest.TestCase):
@staticmethod
def permute_indices_ref_(
lengths: torch.Tensor,
indices: torch.Tensor,
weights: Optional[torch.Tensor],
permute: torch.LongTensor,
is_1D: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
T = lengths.size(0)
B = lengths.size(1)
if T == 0 or B == 0:
if is_1D:
lengths = lengths.view(-1)
return lengths, indices, weights
if is_1D:
permuted_lengths = torch.index_select(lengths.view(-1), 0, permute).view(-1)
original_segment_lengths = lengths.view(-1)
original_segment_start = [0] + list(accumulate(lengths.view(-1)))
permuted_indices = []
permuted_weights = []
for i in range(permute.numel()):
start = original_segment_start[permute[i]]
end = start + original_segment_lengths[permute[i]]
permuted_indices.append(indices[start:end])
if weights is not None:
permuted_weights.append(weights[start:end])
permuted_indices = torch.cat(permuted_indices, dim=0).flatten()
if weights is None:
permuted_weights = None
else:
permuted_weights = torch.cat(permuted_weights, dim=0).flatten()
else:
permuted_lengths = torch.index_select(lengths.view(T, -1), 0, permute)
original_segment_lengths = lengths.view(T, -1).sum(dim=1, dtype=torch.int32)
original_segment_start = [0] + list(
accumulate(original_segment_lengths.view(-1))
)
permuted_indices = []
permuted_weights = []
for i in range(permute.size(0)):
start = original_segment_start[permute[i]]
end = start + original_segment_lengths[permute[i]]
permuted_indices.append(indices[start:end])
if weights is not None:
permuted_weights.append(weights[start:end])
permuted_indices = torch.cat(permuted_indices, dim=0).flatten()
if weights is None:
permuted_weights = None
else:
permuted_weights = torch.cat(permuted_weights, dim=0).flatten()
return permuted_lengths, permuted_indices, permuted_weights
@given(
B=st.integers(min_value=0, max_value=20),
T=st.integers(min_value=0, max_value=20),
L=st.integers(min_value=2, max_value=20),
long_index=st.booleans(),
has_weight=st.booleans(),
is_1D=st.booleans(),
W=st.integers(min_value=4, max_value=8),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_permute_indices(
self,
B: int,
T: int,
L: int,
long_index: bool,
has_weight: bool,
is_1D: bool,
W: int,
) -> None:
index_dtype = torch.int64 if long_index else torch.int32
length_splits: Optional[List[torch.Tensor]] = None
if is_1D:
if B == 0:
batch_sizes = [0] * W
else:
batch_sizes = [random.randint(a=1, b=B) for i in range(W)]
length_splits = [
torch.randint(low=1, high=L, size=(T, batch_sizes[i])).type(index_dtype)
for i in range(W)
]
lengths = torch.cat(length_splits, dim=1)
else:
lengths = torch.randint(low=1, high=L, size=(T, B)).type(index_dtype)
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Union[bool, float, int]`.
weights = torch.rand(lengths.sum().item()).float() if has_weight else None
indices = torch.randint(
low=1,
high=int(1e5),
# pyre-fixme[6]: Expected `Union[int, typing.Tuple[int, ...]]` for 3rd
# param but got `Tuple[typing.Union[float, int]]`.
size=(lengths.sum().item(),),
).type(index_dtype)
if is_1D:
permute_list = []
offset_w = [0] + list(
# pyre-fixme[16]
accumulate([length_split.numel() for length_split in length_splits])
)
for t in range(T):
for w in range(W):
for b in range(batch_sizes[w]):
permute_list.append(offset_w[w] + t * batch_sizes[w] + b)
else:
permute_list = list(range(T))
random.shuffle(permute_list)
permute = torch.IntTensor(permute_list)
if is_1D:
(
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
) = torch.ops.fbgemm.permute_1D_sparse_data(
permute, lengths, indices, weights, None
)
else:
(
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
) = torch.ops.fbgemm.permute_2D_sparse_data(
permute, lengths, indices, weights, None
)
(
permuted_lengths_ref,
permuted_indices_ref,
permuted_weights_ref,
# pyre-fixme[6]: For 4th param expected `LongTensor` but got `Tensor`.
) = self.permute_indices_ref_(lengths, indices, weights, permute.long(), is_1D)
torch.testing.assert_close(permuted_indices_cpu, permuted_indices_ref)
torch.testing.assert_close(permuted_lengths_cpu, permuted_lengths_ref)
if has_weight:
torch.testing.assert_close(permuted_weights_cpu, permuted_weights_ref)
else:
assert permuted_weights_cpu is None and permuted_weights_ref is None
if gpu_available:
if is_1D:
(
permuted_lengths_gpu,
permuted_indices_gpu,
permuted_weights_gpu,
) = torch.ops.fbgemm.permute_1D_sparse_data(
permute.cuda(),
lengths.cuda(),
indices.cuda(),
# pyre-fixme[16]: `Optional` has no attribute `cuda`.
weights.cuda() if has_weight else None,
None,
)
else:
(
permuted_lengths_gpu,
permuted_indices_gpu,
permuted_weights_gpu,
) = torch.ops.fbgemm.permute_2D_sparse_data(
permute.cuda(),
lengths.cuda(),
indices.cuda(),
weights.cuda() if has_weight else None,
None,
)
torch.testing.assert_close(permuted_indices_gpu.cpu(), permuted_indices_cpu)
torch.testing.assert_close(permuted_lengths_gpu.cpu(), permuted_lengths_cpu)
if has_weight:
torch.testing.assert_close(
permuted_weights_gpu.cpu(), permuted_weights_cpu
)
else:
assert permuted_weights_gpu is None
# TorchScript behaves differently from eager mode: it can return an
# undefined tensor where eager returns None. This unit test ensures the op
# returns a real None, not an undefined tensor.
def test_permute_indices_scripted_with_none_weights(
self,
) -> None:
index_dtype = torch.int32
lengths = torch.randint(low=1, high=2, size=(1, 1)).type(index_dtype)
weights = None
indices = torch.randint(
low=1,
high=int(1e5),
# pyre-fixme[6]: Expected `Union[int, typing.Tuple[int, ...]]` for 3rd
# param but got `Tuple[typing.Union[float, int]]`.
size=(lengths.sum().item(),),
).type(index_dtype)
permute_list = list(range(1))
random.shuffle(permute_list)
permute = torch.IntTensor(permute_list)
(
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
) = permute_scripted(permute, lengths, indices)
(
permuted_lengths_ref,
permuted_indices_ref,
permuted_weights_ref,
# pyre-fixme[6]: For 4th param expected `LongTensor` but got `Tensor`.
) = self.permute_indices_ref_(lengths, indices, weights, permute.long(), False)
self.assertTrue(torch.equal(permuted_indices_cpu, permuted_indices_ref))
self.assertTrue(torch.equal(permuted_lengths_cpu, permuted_lengths_ref))
self.assertEqual(permuted_weights_cpu, None)
self.assertEqual(permuted_weights_ref, None)
@given(
permute_size=st.integers(min_value=30, max_value=1000),
long_index=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_invert_permute(
self,
permute_size: int,
long_index: bool,
) -> None:
index_dtype = torch.int64 if long_index else torch.int32
permute_list = list(range(permute_size))
random.shuffle(permute_list)
inversed_permute_list = [0] * len(permute_list)
for i in range(permute_size):
inversed_permute_list[permute_list[i]] = i
permute = torch.IntTensor(permute_list).type(index_dtype)
inverse_permute_ref = torch.IntTensor(inversed_permute_list).type(index_dtype)
inverse_permute_cpu = torch.ops.fbgemm.invert_permute(permute)
torch.testing.assert_close(inverse_permute_cpu, inverse_permute_ref)
if gpu_available:
inverse_permute_gpu = torch.ops.fbgemm.invert_permute(permute.cuda())
torch.testing.assert_close(inverse_permute_gpu.cpu(), inverse_permute_cpu)
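# invert_permute returns the inverse permutation, i.e. a tensor `inv` with
# inv[permute[i]] == i. For example, permute=[2, 0, 1] inverts to [1, 2, 0].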
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
long_index=st.booleans(),
has_weight=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_permute_indices_with_repeats(
self, B: int, T: int, L: int, long_index: bool, has_weight: bool
) -> None:
index_dtype = torch.int64 if long_index else torch.int32
lengths = torch.randint(low=1, high=L, size=(T, B)).type(index_dtype)
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Union[bool, float, int]`.
weights = torch.rand(lengths.sum().item()).float() if has_weight else None
indices = torch.randint(
low=1,
high=int(1e5),
# pyre-fixme[6]: Expected `Union[int, typing.Tuple[int, ...]]` for 3rd
# param but got `Tuple[typing.Union[float, int]]`.
size=(lengths.sum().item(),),
).type(index_dtype)
permute_list = list(range(T))
num_repeats = random.randint(0, T)
for _ in range(num_repeats):
permute_list.append(random.randint(0, T - 1))
random.shuffle(permute_list)
permute = torch.IntTensor(permute_list)
(
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
) = torch.ops.fbgemm.permute_2D_sparse_data(permute, lengths, indices, weights)
(
permuted_lengths_ref,
permuted_indices_ref,
permuted_weights_ref,
# pyre-fixme[6]: For 4th param expected `LongTensor` but got `Tensor`.
) = self.permute_indices_ref_(lengths, indices, weights, permute.long())
torch.testing.assert_close(permuted_indices_cpu, permuted_indices_ref)
torch.testing.assert_close(permuted_lengths_cpu, permuted_lengths_ref)
if has_weight:
torch.testing.assert_close(permuted_weights_cpu, permuted_weights_ref)
else:
assert permuted_weights_cpu is None and permuted_weights_ref is None
if gpu_available:
(
permuted_lengths_gpu,
permuted_indices_gpu,
permuted_weights_gpu,
) = torch.ops.fbgemm.permute_2D_sparse_data(
permute.cuda(),
lengths.cuda(),
indices.cuda(),
# pyre-fixme[16]: `Optional` has no attribute `cuda`.
weights.cuda() if has_weight else None,
)
torch.testing.assert_close(permuted_indices_gpu.cpu(), permuted_indices_cpu)
torch.testing.assert_close(permuted_lengths_gpu.cpu(), permuted_lengths_cpu)
if has_weight:
torch.testing.assert_close(
permuted_weights_gpu.cpu(), permuted_weights_cpu
)
else:
assert permuted_weights_gpu is None
@staticmethod
def permute_embeddings_(
permute_fn: Callable[..., Tuple[torch.Tensor, ...]],
*args: Any,
) -> Tuple[torch.Tensor, torch.Tensor]:
if permute_fn == torch.ops.fbgemm.permute_2D_sparse_data:
permuted_lengths, permuted_embeddings, _ = permute_fn(*args, None)
return permuted_lengths, permuted_embeddings
else:
return permute_fn(*args)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
long_index=st.booleans(),
permute_fn=st.sampled_from(
[
torch.ops.fbgemm.permute_2D_sparse_data,
torch.ops.fbgemm.permute_sequence_embeddings,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_permute_embeddings(
self,
B: int,
T: int,
L: int,
long_index: bool,
permute_fn: Callable[..., Tuple[torch.Tensor, ...]],
) -> None:
index_dtype = torch.int64 if long_index else torch.int32
lengths = torch.randint(low=1, high=L, size=(T, B)).type(index_dtype)
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Union[bool, float, int]`.
embeddings = torch.rand(lengths.sum().item()).float()
permute_list = list(range(T))
random.shuffle(permute_list)
permute = torch.IntTensor(permute_list)
(permuted_lengths_cpu, permuted_embeddings_cpu) = self.permute_embeddings_(
permute_fn, permute, lengths, embeddings
)
(
permuted_lengths_ref,
permuted_embeddings_ref,
_,
# pyre-fixme[6]: For 4th param expected `LongTensor` but got `Tensor`.
) = self.permute_indices_ref_(lengths, embeddings, None, permute.long())
torch.testing.assert_close(permuted_embeddings_cpu, permuted_embeddings_ref)
torch.testing.assert_close(permuted_lengths_cpu, permuted_lengths_ref)
if gpu_available:
(permuted_lengths_gpu, permuted_embeddings_gpu) = self.permute_embeddings_(
permute_fn,
permute.cuda(),
lengths.cuda(),
embeddings.cuda(),
)
torch.testing.assert_close(
permuted_embeddings_gpu.cpu(), permuted_embeddings_cpu
)
torch.testing.assert_close(permuted_lengths_gpu.cpu(), permuted_lengths_cpu)
@given(
long_indices=st.booleans(),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(verbosity=Verbosity.verbose, max_examples=16, deadline=None)
def test_block_bucketize_sparse_features_long_indices(
self, long_indices: bool, use_cpu: bool
) -> None:
bucketize_pos = False
sequence = False
index_type = torch.long if long_indices else torch.int
# 3 GPUs
my_size = 3
block_sizes = torch.tensor([3, 4, 5], dtype=index_type)
if not long_indices:
lengths = torch.tensor([0, 3, 2, 0, 1, 4], dtype=index_type)
indices = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=index_type)
new_lengths_ref = torch.tensor(
[0, 2, 0, 0, 0, 0, 0, 1, 2, 0, 1, 3, 0, 0, 0, 0, 0, 1], dtype=index_type
)
new_indices_ref = torch.tensor(
[1, 2, 0, 0, 1, 1, 2, 3, 4, 0], dtype=index_type
)
else:
lengths = torch.tensor([0, 3, 2, 0, 1, 4], dtype=index_type)
# Test long and negative indices: as an unsigned 64-bit value, -8 wraps to
# 18446744073709551608 (2**64 - 8)
indices = torch.tensor(
[1, 2, 3, 100061827127359, 5, 6, 7, -8, 100058153792324, 10],
dtype=index_type,
)
new_lengths_ref = torch.tensor(
[0, 2, 0, 0, 0, 0, 0, 1, 2, 0, 1, 1, 0, 0, 0, 0, 0, 3], dtype=index_type
)
new_indices_ref = torch.tensor(
[
1,
2,
0,
33353942375786, # 100061827127359/3 = 33353942375786
1,
1,
2,
6148914691236517202, # -8 wraps to 18446744073709551608; 18446744073709551608 // 3 = 6148914691236517202
33352717930774, # 100058153792324/3 = 33352717930774
0,
],
dtype=index_type,
)
(
new_lengths_cpu,
new_indices_cpu,
new_weights_cpu,
new_pos_cpu,
unbucketize_permute_cpu,
) = torch.ops.fbgemm.block_bucketize_sparse_features(
lengths,
indices,
bucketize_pos,
sequence,
block_sizes,
my_size,
None,
)
torch.testing.assert_close(new_lengths_cpu, new_lengths_ref)
torch.testing.assert_close(new_indices_cpu, new_indices_ref)
if not use_cpu:
(
new_lengths_gpu,
new_indices_gpu,
new_weights_gpu,
new_pos_gpu,
unbucketize_permute_gpu,
) = torch.ops.fbgemm.block_bucketize_sparse_features(
lengths.cuda(),
indices.cuda(),
bucketize_pos,
sequence,
block_sizes.cuda(),
my_size,
None,
)
torch.testing.assert_close(new_lengths_gpu.cpu(), new_lengths_ref)
torch.testing.assert_close(new_indices_gpu.cpu(), new_indices_ref)
torch.testing.assert_close(new_lengths_gpu.cpu(), new_lengths_cpu)
torch.testing.assert_close(new_indices_gpu.cpu(), new_indices_cpu)
@given(
n=st.integers(min_value=1, max_value=100),
long_index=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_cumsum(self, n: int, long_index: bool) -> None:
index_dtype = torch.int64 if long_index else torch.int32
np_index_dtype = np.int64 if long_index else np.int32
# cpu tests
x = torch.randint(low=0, high=100, size=(n,)).type(index_dtype)
ze = torch.ops.fbgemm.asynchronous_exclusive_cumsum(x)
zi = torch.ops.fbgemm.asynchronous_inclusive_cumsum(x)
zc = torch.ops.fbgemm.asynchronous_complete_cumsum(x)
torch.testing.assert_close(
torch.from_numpy(np.cumsum(x.cpu().numpy()).astype(np_index_dtype)),
zi.cpu(),
)
torch.testing.assert_close(
torch.from_numpy(
(np.cumsum([0] + x.cpu().numpy().tolist())[:-1]).astype(np_index_dtype)
),
ze.cpu(),
)
torch.testing.assert_close(
torch.from_numpy(
(np.cumsum([0] + x.cpu().numpy().tolist())).astype(np_index_dtype)
),
zc.cpu(),
)
# meta tests
mx = torch.randint(low=0, high=100, size=(n,)).type(index_dtype).to("meta")
# mze = torch.ops.fbgemm.asynchronous_exclusive_cumsum(mx)
# mzi = torch.ops.fbgemm.asynchronous_inclusive_cumsum(mx)
mzc = torch.ops.fbgemm.asynchronous_complete_cumsum(mx)
# self.assertEqual(ze.size(), mze.size())
# self.assertEqual(zi.size(), mzi.size())
self.assertEqual(zc.size(), mzc.size())
if gpu_available:
x = x.cuda()
ze = torch.ops.fbgemm.asynchronous_exclusive_cumsum(x)
zi = torch.ops.fbgemm.asynchronous_inclusive_cumsum(x)
zc = torch.ops.fbgemm.asynchronous_complete_cumsum(x)
torch.testing.assert_close(
torch.from_numpy(np.cumsum(x.cpu().numpy()).astype(np_index_dtype)),
zi.cpu(),
)
torch.testing.assert_close(
torch.from_numpy(
(np.cumsum([0] + x.cpu().numpy().tolist())[:-1]).astype(
np_index_dtype
)
),
ze.cpu(),
)
torch.testing.assert_close(
torch.from_numpy(
(np.cumsum([0] + x.cpu().numpy().tolist())).astype(np_index_dtype)
),
zc.cpu(),
)
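# For reference, the three cumsum flavors checked above differ only in how
# the boundaries are handled. For x = [1, 2, 3]:
#   inclusive cumsum -> [1, 3, 6]        (length n)
#   exclusive cumsum -> [0, 1, 3]        (length n)
#   complete cumsum  -> [0, 1, 3, 6]     (length n + 1)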
@given(
n=st.integers(min_value=1, max_value=600),
b=st.integers(min_value=1, max_value=10),
long_index=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_asynchronous_complete_cumsum_2d(
self, n: int, b: int, long_index: bool
) -> None:
index_dtype = torch.int64 if long_index else torch.int32
def test_asynchronous_complete_cumsum_2d_helper(x: torch.Tensor) -> None:
np_index_dtype = np.int64 if long_index else np.int32
zc = torch.ops.fbgemm.asynchronous_complete_cumsum(x)
zeros = torch.zeros(b, 1)
torch.testing.assert_close(
torch.from_numpy(
np.cumsum(
torch.concat([zeros, x.cpu()], dim=1).numpy(), axis=1
).astype(np_index_dtype)
),
zc.cpu(),
)
x = torch.randint(low=0, high=100, size=(b, n)).type(index_dtype)
# cpu test
test_asynchronous_complete_cumsum_2d_helper(x)
if gpu_available:
# gpu test
test_asynchronous_complete_cumsum_2d_helper(x.cuda())
@given(
N=st.integers(min_value=1, max_value=20),
offsets_type=st.sampled_from([torch.int32, torch.int64]),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_offsets_range(
self,
N: int,
# pyre-fixme[11]: Annotation `int32` is not defined as a type.
# pyre-fixme[11]: Annotation `int64` is not defined as a type.
offsets_type: "Union[Type[torch.int32], Type[torch.int64]]",
) -> None:
lengths = np.array([np.random.randint(low=0, high=20) for _ in range(N)])
offsets = np.cumsum(np.concatenate(([0], lengths)))[:-1]
range_ref = torch.from_numpy(
np.concatenate([np.arange(size) for size in lengths])
)
output_size = np.sum(lengths)
offsets_cpu = torch.tensor(offsets, dtype=offsets_type)
range_cpu = torch.ops.fbgemm.offsets_range(offsets_cpu, output_size)
range_ref = range_ref.to(range_cpu.dtype)
torch.testing.assert_close(range_cpu, range_ref, rtol=0, atol=0)
if gpu_available:
range_gpu = torch.ops.fbgemm.offsets_range(offsets_cpu.cuda(), output_size)
range_ref = range_ref.to(range_gpu.dtype)
torch.testing.assert_close(range_gpu.cpu(), range_ref, rtol=0, atol=0)
@given(
index_type=st.sampled_from([torch.int, torch.long]),
has_weight=st.booleans(),
bucketize_pos=st.booleans(),
sequence=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=16, deadline=None)
def test_block_bucketize_sparse_features(
self,
index_type: Type[torch.dtype],
has_weight: bool,
bucketize_pos: bool,
sequence: bool,
) -> None:
B = 2
# pyre-ignore [6]
lengths = torch.tensor([0, 2, 1, 3, 2, 3, 3, 1], dtype=index_type)
indices = torch.tensor(
[3, 4, 15, 11, 28, 29, 1, 10, 11, 12, 13, 11, 22, 20, 20],
# pyre-ignore [6]
dtype=index_type,
)
weights = (
torch.tensor(
[
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
],
dtype=torch.float,
)
if has_weight
else None
)
# pyre-ignore [6]
block_sizes = torch.tensor([5, 15, 10, 20], dtype=index_type)
my_size = 2
new_lengths_ref = torch.tensor(
[0, 2, 0, 1, 1, 0, 1, 0, 0, 0, 1, 2, 1, 3, 2, 1],
# pyre-ignore [6]
dtype=index_type,
)
new_indices_ref = torch.tensor(
[3, 4, 11, 1, 11, 0, 13, 14, 0, 1, 2, 3, 2, 0, 0],
# pyre-ignore [6]
dtype=index_type,
)
new_weights_ref = torch.tensor(
[
1.0,
2.0,
4.0,
7.0,
12.0,
3.0,
5.0,
6.0,
8.0,
9.0,
10.0,
11.0,
13.0,
14.0,
15.0,
],
dtype=torch.float,
)
new_pos_ref = torch.tensor(
[0, 1, 0, 0, 0, 0, 1, 2, 1, 0, 1, 2, 1, 2, 0],
# pyre-ignore [6]
dtype=index_type,
)
(
new_lengths_cpu,
new_indices_cpu,
new_weights_cpu,
new_pos_cpu,
unbucketize_permute,
) = torch.ops.fbgemm.block_bucketize_sparse_features(
lengths, indices, bucketize_pos, sequence, block_sizes, my_size, weights
)
torch.testing.assert_close(new_lengths_cpu, new_lengths_ref, rtol=0, atol=0)
torch.testing.assert_close(new_indices_cpu, new_indices_ref, rtol=0, atol=0)
if has_weight:
torch.testing.assert_close(new_weights_cpu, new_weights_ref)
if bucketize_pos:
torch.testing.assert_close(new_pos_cpu, new_pos_ref)
if sequence:
value_unbucketized_indices = unbucketize_indices_value(
new_indices_cpu, new_lengths_cpu, block_sizes, my_size, B
)
unbucketized_indices = torch.index_select(
value_unbucketized_indices, 0, unbucketize_permute
)
torch.testing.assert_close(unbucketized_indices, indices, rtol=0, atol=0)
if gpu_available:
(
new_lengths_gpu,
new_indices_gpu,
new_weights_gpu,
new_pos_gpu,
unbucketize_permute_gpu,
) = torch.ops.fbgemm.block_bucketize_sparse_features(
lengths.cuda(),
indices.cuda(),
bucketize_pos,
sequence,
block_sizes.cuda(),
my_size,
# pyre-fixme[16]: `Optional` has no attribute `cuda`.
weights.cuda() if has_weight else None,
)
torch.testing.assert_close(
new_lengths_gpu.cpu(), new_lengths_ref, rtol=0, atol=0
)
torch.testing.assert_close(
new_indices_gpu.cpu(), new_indices_ref, rtol=0, atol=0
)
if has_weight:
torch.testing.assert_close(new_weights_gpu.cpu(), new_weights_cpu)
if bucketize_pos:
torch.testing.assert_close(new_pos_gpu.cpu(), new_pos_cpu)
if sequence:
value_unbucketized_indices = unbucketize_indices_value(
new_indices_gpu.cpu(),
new_lengths_gpu.cpu(),
block_sizes,
my_size,
B,
)
unbucketized_indices = torch.index_select(
value_unbucketized_indices, 0, unbucketize_permute_gpu.cpu()
)
torch.testing.assert_close(
unbucketized_indices, indices, rtol=0, atol=0
)
@given(
index_type=st.sampled_from([torch.int, torch.long]),
has_weight=st.booleans(),
bucketize_pos=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=2, deadline=None)
def test_bucketize_sparse_features(
self,
index_type: Type[torch.dtype],
has_weight: bool,
bucketize_pos: bool,
) -> None:
# pyre-ignore [6]
lengths = torch.tensor([0, 2, 1, 3], dtype=index_type)
# pyre-ignore [6]
indices = torch.tensor([10, 10, 15, 20, 25, 30], dtype=index_type)
weights = (
torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=torch.float)
if has_weight
else None
)
# pyre-ignore [6]
new_lengths_ref = torch.tensor([0, 2, 0, 2, 0, 0, 1, 1], dtype=index_type)
# pyre-ignore [6]
new_indices_ref = torch.tensor([5, 5, 10, 15, 7, 12], dtype=index_type)
new_weights_ref = torch.tensor(
[1.0, 2.0, 4.0, 6.0, 3.0, 5.0], dtype=torch.float
)
# pyre-ignore [6]
new_pos_ref = torch.tensor([0, 1, 0, 2, 0, 1], dtype=index_type)
(
new_lengths_cpu,
new_indices_cpu,
new_weights_cpu,
new_pos_cpu,
) = torch.ops.fbgemm.bucketize_sparse_features(
lengths, indices, bucketize_pos, 2, weights
)
torch.testing.assert_close(new_lengths_cpu, new_lengths_ref, rtol=0, atol=0)
torch.testing.assert_close(new_indices_cpu, new_indices_ref, rtol=0, atol=0)
if has_weight:
torch.testing.assert_close(new_weights_cpu, new_weights_ref)
if bucketize_pos:
torch.testing.assert_close(new_pos_cpu, new_pos_ref)
if gpu_available:
(
new_lengths_gpu,
new_indices_gpu,
new_weights_gpu,
new_pos_gpu,
) = torch.ops.fbgemm.bucketize_sparse_features(
lengths.cuda(),
indices.cuda(),
bucketize_pos,
2,
# pyre-fixme[16]: `Optional` has no attribute `cuda`.
weights.cuda() if has_weight else None,
)
torch.testing.assert_close(
new_lengths_gpu.cpu(), new_lengths_ref, rtol=0, atol=0
)
torch.testing.assert_close(
new_indices_gpu.cpu(), new_indices_ref, rtol=0, atol=0
)
if has_weight:
torch.testing.assert_close(new_weights_gpu.cpu(), new_weights_cpu)
if bucketize_pos:
torch.testing.assert_close(new_pos_gpu.cpu(), new_pos_cpu)
@unittest.skipIf(*gpu_unavailable)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
A=st.integers(min_value=1, max_value=20),
Dtype=st.sampled_from([torch.int32, torch.float, torch.int64]),
broadcast_lengths=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_reorder_batched_ad_lengths(
self,
B: int,
T: int,
L: int,
A: int,
Dtype: torch.dtype,
broadcast_lengths: bool,
) -> None:
if broadcast_lengths:
cat_ad_lengths = (
torch.cat([torch.tensor([L for _ in range(T)]) for _ in range(B)], 0)
.cuda()
.to(Dtype)
)
cat_ad_lengths_broadcasted = cat_ad_lengths.tile([A])
else:
cat_ad_lengths = (
torch.cat(
[torch.tensor([L for _ in range(T * A)]) for _ in range(B)], 0
)
.cuda()
.to(Dtype)
)
cat_ad_lengths_broadcasted = cat_ad_lengths
batch_offsets = torch.tensor([A * b for b in range(B + 1)]).int().cuda()
num_ads_in_batch = B * A
reordered_batched_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_lengths
)
torch.testing.assert_close(
cat_ad_lengths_broadcasted, reordered_batched_ad_lengths
)
cat_ad_lengths_cpu = cat_ad_lengths.cpu()
batch_offsets_cpu = batch_offsets.cpu()
reordered_batched_ad_lengths_cpu = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths_cpu, batch_offsets_cpu, num_ads_in_batch, broadcast_lengths
)
torch.testing.assert_close(
reordered_batched_ad_lengths_cpu, reordered_batched_ad_lengths.cpu()
)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
A=st.integers(min_value=1, max_value=20),
Dtype=st.sampled_from([torch.int32, torch.float, torch.int64]),
broadcast_lengths=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=40, deadline=None)
def test_reorder_batched_ad_lengths_cpu(
self,
B: int,
T: int,
L: int,
A: int,
Dtype: torch.dtype,
broadcast_lengths: bool,
) -> None:
if broadcast_lengths:
cat_ad_lengths = (
torch.cat([torch.tensor([L for _ in range(T)]) for _ in range(B)], 0)
.int()
.to(Dtype)
)
cat_ad_lengths_broadcasted = cat_ad_lengths.tile([A])
else:
cat_ad_lengths = (
torch.cat(
[torch.tensor([L for _ in range(T * A)]) for _ in range(B)], 0
)
.int()
.to(Dtype)
)
cat_ad_lengths_broadcasted = cat_ad_lengths
batch_offsets = torch.tensor([A * b for b in range(B + 1)]).int()
num_ads_in_batch = B * A
reordered_batched_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_lengths
)
torch.testing.assert_close(
cat_ad_lengths_broadcasted, reordered_batched_ad_lengths
)
@unittest.skipIf(*gpu_unavailable)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
A=st.integers(min_value=1, max_value=20),
Dtype=st.sampled_from([torch.int32, torch.float, torch.int64]),
Itype=st.sampled_from([torch.int32, torch.int64]),
broadcast_indices=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_reorder_batched_ad_indices(
self,
B: int,
T: int,
L: int,
A: int,
Dtype: torch.dtype,
Itype: torch.dtype,
broadcast_indices: bool,
) -> None:
if broadcast_indices:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(B * T * L,),
)
.int()
.cuda()
.to(Dtype)
)
cat_ad_lengths = (
torch.cat(
[torch.tensor([L for _ in range(T)]) for _ in range(B)],
0,
)
.int()
.cuda()
)
cat_ad_lengths_broadcasted = cat_ad_lengths.tile([A])
else:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(B * T * A * L,),
)
.int()
.cuda()
.to(Dtype)
)
cat_ad_lengths = (
torch.cat(
[torch.tensor([L for _ in range(T * A)]) for _ in range(B)],
0,
)
.int()
.cuda()
)
cat_ad_lengths_broadcasted = cat_ad_lengths
batch_offsets = torch.tensor([A * b for b in range(B + 1)]).int().cuda()
num_ads_in_batch = B * A
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
torch.testing.assert_close(cat_ad_lengths_broadcasted, reordered_cat_ad_lengths)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
cat_ad_lengths
).to(Itype)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
).to(Itype)
reordered_cat_ad_indices = torch.ops.fbgemm.reorder_batched_ad_indices(
cat_ad_offsets,
cat_ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
B * T * A * L,
)
torch.testing.assert_close(
reordered_cat_ad_indices.view(T, B, A, L).permute(1, 0, 2, 3),
cat_ad_indices.view(B, T, 1, L).tile([1, 1, A, 1])
if broadcast_indices
else cat_ad_indices.view(B, T, A, L),
)
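# As the assertion above implies, reorder_batched_ad_indices regroups the
# concatenated indices from the batch-major layout [B][T][A][L] into the
# table-major layout [T][B][A][L], replicating each length-L block across
# the A ads when broadcast_indices is True.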
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
A=st.integers(min_value=1, max_value=20),
Dtype=st.sampled_from([torch.int32, torch.float, torch.int64]),
Itype=st.sampled_from([torch.int32, torch.int64]),
broadcast_indices=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_cat_reorder_batched_ad_indices_cpu(
self,
B: int,
T: int,
L: int,
A: int,
Dtype: torch.dtype,
Itype: torch.dtype,
broadcast_indices: bool,
) -> None:
if broadcast_indices:
ad_indices = [
(
torch.randint(
low=0,
high=100,
size=(T * L,),
)
.int()
.to(Dtype)
)
for _ in range(B)
]
cat_ad_lengths = torch.cat(
[torch.tensor([L for _ in range(T)]) for _ in range(B)],
0,
).int()
cat_ad_lengths_broadcasted = cat_ad_lengths.tile([A])
cat_ad_indices = torch.cat(ad_indices, 0)
else:
ad_indices = [
(
torch.randint(
low=0,
high=100,
size=(T * A * L,),
)
.int()
.to(Dtype)
)
for _ in range(B)
]
cat_ad_lengths = torch.cat(
[torch.tensor([L for _ in range(T * A)]) for _ in range(B)],
0,
).int()
cat_ad_lengths_broadcasted = cat_ad_lengths
cat_ad_indices = torch.cat(ad_indices, 0)
batch_offsets = torch.tensor([A * b for b in range(B + 1)]).int()
num_ads_in_batch = B * A
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
torch.testing.assert_close(cat_ad_lengths_broadcasted, reordered_cat_ad_lengths)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
cat_ad_lengths
).to(Itype)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
).to(Itype)
reordered_cat_ad_indices = torch.ops.fbgemm.cat_reorder_batched_ad_indices(
cat_ad_offsets,
ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
B * T * A * L,
)
torch.testing.assert_close(
reordered_cat_ad_indices.view(T, B, A, L).permute(1, 0, 2, 3),
cat_ad_indices.view(B, T, 1, L).tile([1, 1, A, 1])
if broadcast_indices
else cat_ad_indices.view(B, T, A, L),
)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
A=st.integers(min_value=1, max_value=20),
Dtype=st.sampled_from([torch.int32, torch.float, torch.int64]),
Itype=st.sampled_from([torch.int32, torch.int64]),
broadcast_indices=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=40, deadline=None)
def test_reorder_batched_ad_indices_cpu(
self,
B: int,
T: int,
L: int,
A: int,
Dtype: torch.dtype,
Itype: torch.dtype,
broadcast_indices: bool,
) -> None:
if broadcast_indices:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(B * T * L,),
)
.int()
.to(Dtype)
)
cat_ad_lengths = torch.cat(
[torch.tensor([L for _ in range(T)]) for _ in range(B)],
0,
).int()
cat_ad_lengths_broadcasted = cat_ad_lengths.tile([A])
else:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(B * T * A * L,),
)
.int()
.to(Dtype)
)
cat_ad_lengths = torch.cat(
[torch.tensor([L for _ in range(T * A)]) for _ in range(B)],
0,
).int()
cat_ad_lengths_broadcasted = cat_ad_lengths
batch_offsets = torch.tensor([A * b for b in range(B + 1)]).int()
num_ads_in_batch = B * A
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
torch.testing.assert_close(cat_ad_lengths_broadcasted, reordered_cat_ad_lengths)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
cat_ad_lengths
).to(Itype)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
).to(Itype)
reordered_cat_ad_indices = torch.ops.fbgemm.reorder_batched_ad_indices(
cat_ad_offsets,
cat_ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
B * T * A * L,
)
torch.testing.assert_close(
reordered_cat_ad_indices.view(T, B, A, L).permute(1, 0, 2, 3),
cat_ad_indices.view(B, T, 1, L).tile([1, 1, A, 1])
if broadcast_indices
else cat_ad_indices.view(B, T, A, L),
)
@given(data_type=st.sampled_from([torch.bfloat16, torch.half, torch.float32]))
@settings(verbosity=Verbosity.verbose, deadline=None)
def test_histogram_binning_calibration(self, data_type: torch.dtype) -> None:
num_bins = 5000
logit = torch.tensor([[-0.0018], [0.0085], [0.0090], [0.0003], [0.0029]]).type(
data_type
)
bin_num_examples = torch.empty([num_bins], dtype=torch.float64).fill_(0.0)
bin_num_positives = torch.empty([num_bins], dtype=torch.float64).fill_(0.0)
calibrated_prediction, bin_ids = torch.ops.fbgemm.histogram_binning_calibration(
logit=logit,
bin_num_examples=bin_num_examples,
bin_num_positives=bin_num_positives,
positive_weight=0.4,
lower_bound=0.0,
upper_bound=1.0,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
expected_calibrated_prediction = torch.tensor(
[[0.2853], [0.2875], [0.2876], [0.2858], [0.2863]]
).type(data_type)
expected_bin_ids = torch.tensor(
[1426, 1437, 1437, 1428, 1431], dtype=torch.long
)
error_tolerance = 1e-03
if data_type == torch.bfloat16:
# bfloat16 has fewer significand bits, so loosen the tolerance and adjust
# the expected bin ids.
error_tolerance = 1e-02
expected_bin_ids = torch.tensor(
[1426, 1438, 1438, 1430, 1430], dtype=torch.long
)
torch.testing.assert_close(
calibrated_prediction,
expected_calibrated_prediction,
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids.long(),
expected_bin_ids,
)
)
if torch.cuda.is_available():
(
calibrated_prediction_gpu,
bin_ids_gpu,
) = torch.ops.fbgemm.histogram_binning_calibration(
logit=logit.cuda(),
bin_num_examples=bin_num_examples.cuda(),
bin_num_positives=bin_num_positives.cuda(),
positive_weight=0.4,
lower_bound=0.0,
upper_bound=1.0,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
torch.testing.assert_close(
calibrated_prediction_gpu,
expected_calibrated_prediction.cuda(),
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids_gpu.long(),
expected_bin_ids.cuda(),
)
)
@given(
data_type=st.sampled_from([torch.bfloat16, torch.half, torch.float32]),
segment_value_type=st.sampled_from([torch.int, torch.long]),
segment_length_type=st.sampled_from([torch.int, torch.long]),
)
@settings(verbosity=Verbosity.verbose, deadline=None)
def test_histogram_binning_calibration_by_feature(
self,
data_type: torch.dtype,
segment_value_type: torch.dtype,
segment_length_type: torch.dtype,
) -> None:
num_bins = 5000
num_segments = 42
logit = torch.tensor([-0.0018, 0.0085, 0.0090, 0.0003, 0.0029]).type(data_type)
segment_value = torch.tensor([40, 31, 32, 13, 31]).type(segment_value_type)
lengths = torch.tensor([[1], [1], [1], [1], [1]]).type(segment_length_type)
num_interval = num_bins * (num_segments + 1)
bin_num_examples = torch.empty([num_interval], dtype=torch.float64).fill_(0.0)
bin_num_positives = torch.empty([num_interval], dtype=torch.float64).fill_(0.0)
(
calibrated_prediction,
bin_ids,
) = torch.ops.fbgemm.histogram_binning_calibration_by_feature(
logit=logit,
segment_value=segment_value,
segment_lengths=lengths,
num_segments=num_segments,
bin_num_examples=bin_num_examples,
bin_num_positives=bin_num_positives,
num_bins=num_bins,
positive_weight=0.4,
lower_bound=0.0,
upper_bound=1.0,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
expected_calibrated_prediction = torch.tensor(
[0.2853, 0.2875, 0.2876, 0.2858, 0.2863]
).type(data_type)
expected_bin_ids = torch.tensor(
[206426, 161437, 166437, 71428, 161431], dtype=torch.long
)
error_tolerance = 1e-03
if data_type == torch.bfloat16:
# bfloat16 has fewer significand bits, so loosen the tolerance and adjust
# the expected bin ids.
error_tolerance = 1e-02
expected_bin_ids = torch.tensor(
[206426, 161438, 166438, 71430, 161430], dtype=torch.long
)
torch.testing.assert_close(
calibrated_prediction,
expected_calibrated_prediction,
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids.long(),
expected_bin_ids,
)
)
if torch.cuda.is_available():
(
calibrated_prediction_gpu,
bin_ids_gpu,
) = torch.ops.fbgemm.histogram_binning_calibration_by_feature(
logit=logit.cuda(),
segment_value=segment_value.cuda(),
segment_lengths=lengths.cuda(),
num_segments=num_segments,
bin_num_examples=bin_num_examples.cuda(),
bin_num_positives=bin_num_positives.cuda(),
num_bins=num_bins,
positive_weight=0.4,
lower_bound=0.0,
upper_bound=1.0,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
torch.testing.assert_close(
calibrated_prediction_gpu,
expected_calibrated_prediction.cuda(),
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids_gpu.long(),
expected_bin_ids.cuda(),
)
)
@given(
data_type=st.sampled_from([torch.bfloat16, torch.half, torch.float32]),
segment_value_type=st.sampled_from([torch.int, torch.long]),
segment_length_type=st.sampled_from([torch.int, torch.long]),
)
@settings(verbosity=Verbosity.verbose, deadline=None)
def test_generic_histogram_binning_calibration_by_feature(
self,
data_type: torch.dtype,
segment_value_type: torch.dtype,
segment_length_type: torch.dtype,
) -> None:
num_bins = 5000
num_segments = 42
logit = torch.tensor([-0.0018, 0.0085, 0.0090, 0.0003, 0.0029]).type(data_type)
segment_value = torch.tensor([40, 31, 32, 13, 31]).type(segment_value_type)
lengths = torch.tensor([[1], [1], [1], [1], [1]]).type(segment_length_type)
num_interval = num_bins * (num_segments + 1)
bin_num_examples = torch.empty([num_interval], dtype=torch.float64).fill_(0.0)
bin_num_positives = torch.empty([num_interval], dtype=torch.float64).fill_(0.0)
lower_bound = 0.0
upper_bound = 1.0
w = (upper_bound - lower_bound) / num_bins
bin_boundaries = torch.arange(
lower_bound + w, upper_bound - w / 2, w, dtype=torch.float64
)
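# bin_boundaries holds the num_bins - 1 interior boundaries of equal-width
# bins over [lower_bound, upper_bound]. For example, num_bins=4 over [0, 1]
# gives w=0.25 and bin_boundaries=[0.25, 0.5, 0.75].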
(
calibrated_prediction,
bin_ids,
) = torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
logit=logit,
segment_value=segment_value,
segment_lengths=lengths,
num_segments=num_segments,
bin_num_examples=bin_num_examples,
bin_num_positives=bin_num_positives,
bin_boundaries=bin_boundaries,
positive_weight=0.4,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
expected_calibrated_prediction = torch.tensor(
[0.2853, 0.2875, 0.2876, 0.2858, 0.2863]
).type(data_type)
expected_bin_ids = torch.tensor(
[206426, 161437, 166437, 71428, 161431], dtype=torch.long
)
error_tolerance = 1e-03
if data_type == torch.bfloat16:
# bfloat16 has fewer significand bits, so loosen the tolerance and adjust
# the expected bin ids.
error_tolerance = 1e-02
expected_bin_ids = torch.tensor(
[206426, 161438, 166438, 71430, 161430], dtype=torch.long
)
torch.testing.assert_close(
calibrated_prediction,
expected_calibrated_prediction,
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids.long(),
expected_bin_ids,
)
)
if torch.cuda.is_available():
(
calibrated_prediction_gpu,
bin_ids_gpu,
) = torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
logit=logit.cuda(),
segment_value=segment_value.cuda(),
segment_lengths=lengths.cuda(),
num_segments=num_segments,
bin_num_examples=bin_num_examples.cuda(),
bin_num_positives=bin_num_positives.cuda(),
bin_boundaries=bin_boundaries.cuda(),
positive_weight=0.4,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
torch.testing.assert_close(
calibrated_prediction_gpu,
expected_calibrated_prediction.cuda(),
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids_gpu.long(),
expected_bin_ids.cuda(),
)
)
@unittest.skipIf(*gpu_unavailable)
@given(
data_type=st.sampled_from([torch.bfloat16, torch.half, torch.float32]),
)
@settings(verbosity=Verbosity.verbose, deadline=None)
def test_generic_histogram_binning_calibration_by_feature_cpu_gpu(
self,
data_type: torch.dtype,
) -> None:
num_logits = random.randint(8, 16)
num_bins = random.randint(3, 8)
num_segments = random.randint(3, 8)
positive_weight = random.uniform(0.1, 1.0)
bin_ctr_in_use_after = random.randint(0, 10)
bin_ctr_weight_value = random.random()
logit = torch.randn(num_logits).type(data_type)
lengths = torch.randint(0, 2, (num_logits,))
segment_value = torch.randint(-3, num_segments + 3, (sum(lengths),))
num_interval = num_bins * (num_segments + 1)
bin_num_positives = torch.randint(0, 10, (num_interval,)).double()
bin_num_examples = (
bin_num_positives + torch.randint(0, 10, (num_interval,)).double()
)
lower_bound = 0.0
upper_bound = 1.0
w = (upper_bound - lower_bound) / num_bins
bin_boundaries = torch.arange(
lower_bound + w, upper_bound - w / 2, w, dtype=torch.float64
)
(
calibrated_prediction_cpu,
bin_ids_cpu,
) = torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
logit=logit,
segment_value=segment_value,
segment_lengths=lengths,
num_segments=num_segments,
bin_num_examples=bin_num_examples,
bin_num_positives=bin_num_positives,
bin_boundaries=bin_boundaries,
positive_weight=positive_weight,
bin_ctr_in_use_after=bin_ctr_in_use_after,
bin_ctr_weight_value=bin_ctr_weight_value,
)
(
calibrated_prediction_gpu,
bin_ids_gpu,
) = torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
logit=logit.cuda(),
segment_value=segment_value.cuda(),
segment_lengths=lengths.cuda(),
num_segments=num_segments,
bin_num_examples=bin_num_examples.cuda(),
bin_num_positives=bin_num_positives.cuda(),
bin_boundaries=bin_boundaries.cuda(),
positive_weight=positive_weight,
bin_ctr_in_use_after=bin_ctr_in_use_after,
bin_ctr_weight_value=bin_ctr_weight_value,
)
torch.testing.assert_close(
calibrated_prediction_cpu,
calibrated_prediction_gpu.cpu(),
rtol=1e-03,
atol=1e-03,
)
self.assertTrue(
torch.equal(
bin_ids_cpu,
bin_ids_gpu.cpu(),
)
)
def test_segment_sum_csr(self) -> None:
segment_sum_cpu = torch.ops.fbgemm.segment_sum_csr(
2,
torch.IntTensor([0, 2, 3, 5]),
torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]),
)
torch.testing.assert_close(
segment_sum_cpu, torch.Tensor([10.0, 11.0, 34.0]), rtol=0, atol=0
)
if torch.cuda.is_available():
segment_sum_cuda = torch.ops.fbgemm.segment_sum_csr(
2,
torch.IntTensor([0, 2, 3, 5]).cuda(),
torch.Tensor(
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
).cuda(),
)
torch.testing.assert_close(
segment_sum_cuda.cpu(), torch.Tensor([10.0, 11.0, 34.0]), rtol=0, atol=0
)
@given(
batch_size=st.just(2),
m=st.just(3),
k=st.just(4),
n=st.just(5),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_permute102_baddbmm_permute102(
self,
batch_size: int,
m: int,
k: int,
n: int,
use_cpu: bool,
) -> None:
# baddbmm doesn't support half on CPU, so use float there and half on GPU
dtype = torch.float if use_cpu else torch.half
device = torch.device("cpu" if use_cpu else "cuda")
A = torch.rand((m, batch_size, k), dtype=dtype, device=device)
B = torch.rand((batch_size, k, n), dtype=dtype, device=device)
# bias_permute102 = torch.rand(batch_size, 1, n).half().cuda()
# bias = bias_permute102.permute(1, 0, 2)
bias = torch.rand((batch_size, n), dtype=dtype, device=device)
bias_permute102 = bias.unsqueeze(1)
# bias = bias_short.unsqueeze(0)
A_permute102 = A.permute(1, 0, 2)
C_permute102 = torch.baddbmm(bias_permute102, A_permute102, B)
C_ref = C_permute102.permute(1, 0, 2) # (m, batch_size, n)
C = torch.ops.fbgemm.permute102_baddbmm_permute102(bias, A, B)
torch.testing.assert_close(C.cpu(), C_ref.cpu())
def _pack_segments_ref(
self,
lengths: torch.Tensor,
tensor: torch.Tensor,
max_length: Optional[int] = None,
) -> np.ndarray:
lengths = lengths.numpy()
sections = np.split(tensor, np.cumsum(lengths))
max_length = np.max(lengths, initial=0) if max_length is None else max_length
padded_arrs = []
for arr in sections[:-1]: # Last section is always a blank
arr = arr[: min(max_length, len(arr)), ...]
padded_arr = np.pad(
arr,
[(0, max(max_length - arr.shape[0], 0))]
+ ([(0, 0)] * (len(arr.shape) - 1)),
constant_values=0,
)
padded_arrs.append(padded_arr)
if len(padded_arrs) == 0:
padded_arrs = torch.empty((0, 0) + tuple(tensor.shape[1:]))
else:
padded_arrs = torch.Tensor(np.stack(padded_arrs))
# pyre-fixme[7]: Expected `ndarray` but got `Tensor`.
return padded_arrs
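# Example of the reference behavior above: lengths=[2, 1] splits a (3, k)
# tensor into a 2-row segment and a 1-row segment; with max_length=2 the
# second segment is zero-padded to 2 rows, so the result has shape
# (len(lengths), max_length, k) = (2, 2, k).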
@given(
n=st.integers(2, 10),
k=st.integers(2, 10),
batch_size=st.integers(1, 30),
divisions=st.integers(1, 10),
dtype=st.sampled_from(
[
torch.float,
torch.half,
]
),
)
@settings(deadline=None)
def test_pack_segments(
self,
n: int,
k: int,
batch_size: int,
divisions: int,
dtype: torch.dtype,
) -> None:
input_raw = np.random.rand(batch_size, n, k)
input_data = torch.tensor(input_raw, dtype=dtype, requires_grad=True)
lengths = torch.tensor(
get_n_rand_num_summing_to_k(divisions, batch_size), dtype=torch.int
)
max_length = lengths.max().item()
packed_tensor = torch.ops.fbgemm.pack_segments(
t_in=input_data, lengths=lengths, max_length=max_length
)
packed_ref = self._pack_segments_ref(lengths, input_raw)
packed_ref = torch.Tensor(packed_ref).to(dtype)
self.assertTrue(torch.equal(packed_tensor, packed_ref))
grad_cpu = torch.tensor(
np.random.uniform(low=0.01, high=0.5, size=packed_ref.shape).astype(
np.float32
)
).to(dtype)
# CPU backward
packed_tensor.backward(grad_cpu)
if gpu_available:
packed_cuda = torch.ops.fbgemm.pack_segments(
t_in=input_data.cuda(),
lengths=lengths.cuda(),
max_length=max_length,
)
self.assertTrue(torch.equal(packed_tensor, packed_cuda.cpu()))
# GPU backward
packed_cuda.backward(grad_cpu.cuda())
@given(
n=st.integers(2, 10),
k=st.integers(2, 10),
batch_size=st.integers(1, 30),
divisions=st.integers(1, 10),
max_length=st.integers(1, 20),
dtype=st.sampled_from(
[
torch.float,
torch.half,
]
),
)
@settings(deadline=None)
def test_pack_segments_smaller_max_len(
self,
n: int,
k: int,
batch_size: int,
divisions: int,
max_length: int,
dtype: torch.dtype,
) -> None:
input_data = torch.tensor(np.random.rand(batch_size, n, k), dtype=dtype)
lengths = torch.tensor(
get_n_rand_num_summing_to_k(divisions, batch_size), dtype=torch.int
)
packed_tensor = torch.ops.fbgemm.pack_segments(
t_in=input_data,
lengths=lengths,
max_length=max_length,
)
self.assertEqual(packed_tensor.shape, (divisions, max_length, n, k))
packed_ref = self._pack_segments_ref(
lengths,
input_data,
max_length=max_length,
)
# pyre-fixme[6]: For 2nd param expected `Tensor` but got `ndarray`.
self.assertTrue(torch.equal(packed_tensor, packed_ref))
if gpu_available:
packed_cuda = torch.ops.fbgemm.pack_segments(
t_in=input_data.cuda(),
lengths=lengths.cuda(),
max_length=max_length,
)
self.assertTrue(torch.equal(packed_tensor, packed_cuda.cpu()))
@skipIfRocm()
@given(
n=st.integers(2, 10),
k=st.integers(2, 10),
batch_size=st.integers(1, 30),
divisions=st.integers(1, 10),
dtype=st.sampled_from(
[
torch.float,
torch.half,
]
),
)
@settings(deadline=None)
def test_pack_segments_meta_backend(
self,
n: int,
k: int,
batch_size: int,
divisions: int,
dtype: torch.dtype,
) -> None:
input_raw = np.random.rand(batch_size, n, k)
input_data = torch.tensor(
input_raw, dtype=torch.float32, requires_grad=True
).to("meta")
lengths = torch.tensor(
get_n_rand_num_summing_to_k(divisions, batch_size), dtype=torch.int
)
max_length = lengths.max().item()
packed_tensor = torch.ops.fbgemm.pack_segments(
t_in=input_data, lengths=lengths, max_length=max_length
)
packed_ref = self._pack_segments_ref(lengths, input_raw)
# verify forward
assert packed_tensor.size() == torch.Tensor(packed_ref).size()
@given(
N=st.integers(1, 32),
shape=st.one_of(
st.lists(st.integers(1, 128), max_size=1),
st.lists(st.integers(1, 16), min_size=2, max_size=2),
),
dtype=st.sampled_from([torch.float, torch.half, torch.double]),
use_cpu=st.booleans() if gpu_available else st.just(True),
consecutive_indices=st.booleans(),
skip_indices_sorting_fwd=st.booleans(),
use_inference_mode=st.booleans(),
)
@settings(max_examples=20, deadline=None)
def test_index_select_dim0(
self,
N: int,
shape: List[int],
dtype: torch.dtype,
use_cpu: bool,
consecutive_indices: bool,
skip_indices_sorting_fwd: bool,
use_inference_mode: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
U = random.randint(1, N + 1)  # at least one input row so the index draws below are valid
kwargs = {}
if consecutive_indices:
start = np.random.randint(0, U)
length = np.random.randint(1, U - start + 1)
indices = list(range(start, start + length))
np_arr = np.array(indices)
for _ in range(N - U):
indices.append(np.random.randint(start, start + length))
np_arr = np.array(indices)
np.random.shuffle(np_arr)
indices = torch.from_numpy(np_arr).to(torch.int).to(device)
kwargs["consecutive_range_start"] = start
kwargs["consecutive_range_length"] = length
else:
indices = torch.randint(U, (N,), device=device)
kwargs["skip_indices_sorting_fwd"] = skip_indices_sorting_fwd
input = torch.rand((U,) + tuple(shape), dtype=dtype, device=device)
with torch.inference_mode() if use_inference_mode else contextlib.nullcontext():
output_ref = torch.ops.fbgemm.index_select_dim0(input, indices, **kwargs)
output = torch.index_select(input, 0, indices)
torch.testing.assert_close(output, output_ref)
if not use_inference_mode:
gradcheck_args = [
input.clone().detach().double().requires_grad_(True),
indices,
]
for k in kwargs:
gradcheck_args.append(kwargs[k])
torch.autograd.gradcheck(torch.ops.fbgemm.index_select_dim0, gradcheck_args)
@given(
num_indices=st.integers(1, 32),
max_num_input_rows=st.integers(1, 32),
shape=st.lists(st.integers(1, 32), min_size=1, max_size=2),
dtype=st.sampled_from([torch.float, torch.half, torch.double]),
use_cpu=st.booleans() if gpu_available else st.just(True),
num_groups=st.integers(1, 32),
use_var_cols=st.booleans(),
use_var_num_input_rows=st.booleans(),
check_non_contiguous=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
def test_group_index_select_dim0(
self,
num_indices: int,
max_num_input_rows: int,
shape: List[int],
dtype: torch.dtype,
use_cpu: bool,
num_groups: int,
use_var_cols: bool,
use_var_num_input_rows: bool,
check_non_contiguous: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
input_group: List[torch.Tensor] = []
input_ref_group: List[torch.Tensor] = []
indices_group: List[torch.Tensor] = []
grad_group: List[torch.Tensor] = []
for _ in range(num_groups):
if use_var_num_input_rows:
num_input_rows = (
random.randint(1, max_num_input_rows)
if max_num_input_rows > 1
else 1
)
else:
num_input_rows = max_num_input_rows
indices = torch.randint(num_input_rows, (num_indices,), device=device)
assert indices.max() < num_input_rows
if use_var_cols:
var_dim = random.randint(0, len(shape) - 1)
new_shape = random.randint(1, 32)
shape[var_dim] = new_shape
indices_group.append(indices)
input = torch.rand(
(num_input_rows,) + tuple(shape), dtype=dtype, device=device
)
input_ref = input.clone().detach()
input.requires_grad = True
input_ref.requires_grad = True
input_group.append(input)
input_ref_group.append(input_ref)
grad = torch.rand((num_indices,) + tuple(shape), dtype=dtype, device=device)
grad_group.append(grad)
# Test forward
output_ref_group = []
for input, indices in zip(input_ref_group, indices_group):
output_ref_group.append(torch.index_select(input, 0, indices))
output_group = torch.ops.fbgemm.group_index_select_dim0(
input_group, indices_group
)
# Test backward
for out, grad in zip(output_ref_group, grad_group):
out.backward(grad)
cat_output = torch.concat(
[
(
# Transpose is likely going to make the tensor
# noncontiguous
output.transpose(1, 0).flatten()
if check_non_contiguous
else output.flatten()
)
for output in output_group
]
)
cat_grad = torch.concat(
[
(
# Transpose is likely going to make the tensor
# noncontiguous
grad.transpose(1, 0).flatten()
if check_non_contiguous
else grad.flatten()
)
for grad in grad_group
]
)
cat_output.backward(cat_grad)
def compare_tensor_groups(
test_group: List[torch.Tensor],
ref_group: List[torch.Tensor],
tensor_type: str,
tols: Dict["str", float],
) -> None:
passed = True
failure_count = 0
for i, (test, ref) in enumerate(zip(test_group, ref_group)):
# pyre-ignore [6]
if not torch.allclose(test, ref, **tols):
passed = False
failure_count += 1
print(
f"FAILED: group {i} {tensor_type} ({dtype}), "
f"input shape {input_group[i].shape}, indices "
f"{indices_group[i]}, test {test}, ref {ref}"
)
assert (
passed
), f"{failure_count}/{num_groups} groups of {tensor_type} failed"
compare_tensor_groups(
output_group, output_ref_group, "activation", {"rtol": 0, "atol": 0}
)
compare_tensor_groups(
# pyre-ignore [6]
[i.grad for i in input_group],
# pyre-ignore [6]
[i.grad for i in input_ref_group],
"gradient",
{"rtol": 1e-02, "atol": 1e-02} if dtype == torch.half else {},
)
@given(
T=st.integers(1, 5),
B=st.integers(1, 5),
L=st.integers(1, 5),
)
@settings(max_examples=20, deadline=None)
def test_bottom_unique_k_per_row(
self,
T: int,
B: int,
L: int,
) -> None:
E = 1000000
all_indices = (np.random.zipf(a=1.15, size=(T, B, 3 * L)) - 1) % E
all_indices_deduped = torch.ops.fbgemm.bottom_k_per_row(
torch.as_tensor(all_indices), torch.tensor([0, L], dtype=torch.long), True
)
for index_tuple in itertools.product(range(T), range(B)):
# sample without replacement from
# https://stats.stackexchange.com/questions/20590/how-do-i-sample-without-replacement-using-a-sampling-with-replacement-function
r = set()
for x in all_indices[index_tuple]:
if x not in r:
r.add(x)
if len(r) == L:
break
assert (len(r)) == L, "too skewed distribution (alpha too big)"
all_indices[index_tuple][:L] = sorted(r)
all_indices_deduped_ref = torch.as_tensor(all_indices[:, :, :L])
torch.testing.assert_close(all_indices_deduped, all_indices_deduped_ref)
@given(
num_inputs=st.integers(0, 100),
max_input_rows=st.integers(2, 32),
max_cols_factor=st.integers(2, 256),
max_output_rows=st.integers(2, 32),
permute_output_dim_0_1=st.booleans(),
dtype=st.sampled_from([torch.float, torch.half]),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_batch_index_select_dim0(
self,
num_inputs: int,
max_input_rows: int,
max_cols_factor: int,
max_output_rows: int,
permute_output_dim_0_1: bool,
dtype: torch.dtype,
use_cpu: bool,
) -> None:
device = "cpu" if use_cpu else "cuda"
input_rows = torch.randint(
low=1, high=max_input_rows, size=(num_inputs,)
).tolist()
input_columns = (
torch.randint(low=1, high=max_cols_factor, size=(num_inputs,)) * 4
).tolist()
if permute_output_dim_0_1:
# All num_indices must be the same if permute_output_dim_0_1 is
# True
num_indices = torch.randint(low=1, high=max_output_rows, size=(1,)).item()
input_num_indices = [num_indices] * num_inputs
else:
input_num_indices = torch.randint(
low=1, high=max_output_rows, size=(num_inputs,)
).tolist()
def validate(
test_list: List[torch.Tensor],
ref_list: List[torch.Tensor],
rows: List[int],
val_fn: Callable[[torch.Tensor, torch.Tensor], bool],
name: str,
) -> None:
test_passed_all = True
error_msg = ""
for i, (test, ref) in enumerate(zip(test_list, ref_list)):
test = test.float()
ref = ref.float()
test_passed = val_fn(test, ref)
test_passed_all = test_passed & test_passed_all
if not test_passed:
test = test.reshape(rows[i], -1)
ref = ref.reshape(rows[i], -1)
for r in range(rows[i]):
test_row = test[r]
ref_row = ref[r]
if not val_fn(test_row, ref_row):
error_msg += f"ERROR: {name} {i} row {r} are different, test {test_row}, ref {ref_row}\n"
assert test_passed_all, error_msg
logging.info(f"{name} test passed")
if num_inputs == 0:
inputs = [torch.empty(0, dtype=dtype, device=device)]
indices = [torch.empty(0, dtype=torch.long, device=device)]
else:
inputs = [
torch.rand(rows, cols, dtype=dtype, device=device)
for rows, cols in zip(input_rows, input_columns)
]
indices = [
torch.randint(
low=0, high=rows, size=(num,), dtype=torch.long, device=device
)
for num, rows in zip(input_num_indices, input_rows)
]
for i in range(len(inputs)):
inputs[i].requires_grad = True
output_ref = [
input.index_select(dim=0, index=index).flatten()
for input, index in zip(inputs, indices)
]
concat_inputs = torch.concat(
[input.flatten().clone().detach() for input in inputs]
)
concat_indices = torch.concat(indices)
concat_inputs.requires_grad = True
output_test = torch.ops.fbgemm.batch_index_select_dim0(
concat_inputs,
concat_indices,
input_num_indices,
input_rows,
input_columns,
permute_output_dim_0_1,
)
if permute_output_dim_0_1 and num_inputs > 0:
output_list = output_test.view(input_num_indices[0], -1).split(
input_columns,
dim=1,
)
output_list = [out.flatten() for out in output_list]
else:
output_list = output_test.split(
[rows * cols for rows, cols in zip(input_num_indices, input_columns)]
)
validate(output_list, output_ref, input_num_indices, torch.equal, "output")
if num_inputs == 0:
grads = [torch.empty(0, dtype=dtype, device=device)]
else:
grads = [torch.rand_like(output) for output in output_ref]
for out_ref, grad in zip(output_ref, grads):
out_ref.backward(grad)
if permute_output_dim_0_1 and num_inputs > 0:
concat_grads = torch.concat(
[grad.view(input_num_indices[0], -1) for grad in grads], dim=1
).flatten()
else:
concat_grads = torch.concat(grads)
assert concat_grads.shape == output_test.shape
output_test.backward(concat_grads)
assert concat_inputs.grad is not None
grad_list = concat_inputs.grad.split(
[rows * cols for rows, cols in zip(input_rows, input_columns)]
)
grad_ref = []
for input in inputs:
assert input.grad is not None
grad_ref.append(input.grad.flatten())
tol = 1.0e-4 if dtype == torch.float else 1.0e-2
validate(
grad_list,
grad_ref,
input_rows,
functools.partial(torch.allclose, atol=tol, rtol=tol),
"grad",
)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import sys
import unittest
from itertools import accumulate
from typing import List, Tuple
import fbgemm_gpu
import torch
import torch._dynamo
from fbgemm_gpu.permute_pooled_embedding_modules import PermutePooledEmbeddings
from hypothesis import given, HealthCheck, settings
from torch import nn, Tensor
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
if getattr(fbgemm_gpu, "open_source", False):
# pyre-ignore[21]
from test_utils import cpu_and_maybe_gpu, gpu_unavailable
else:
from fbgemm_gpu.test.test_utils import cpu_and_maybe_gpu, gpu_unavailable
typed_gpu_unavailable: Tuple[bool, str] = gpu_unavailable
if getattr(HealthCheck, "not_a_test_method", False):
suppressed_list: List[HealthCheck] = [HealthCheck.not_a_test_method]
else:
suppressed_list = []
INTERN_MODULE = "fbgemm_gpu.permute_pooled_embedding_modules"
FIXED_EXTERN_API = {
"PermutePooledEmbeddings": {
"__init__": ["self", "embs_dims", "permute", "device"],
"forward": ["self", "pooled_embs"],
},
}
FWD_COMPAT_MSG = (
"WARNING: If this test is failing, you are probably trying "
"to make changes to a module that has been marked external to PyPer torch packages. "
"This can break forward compatibility of torch packages on training_platform "
"(see https://fb.workplace.com/groups/pyper/permalink/808155810065803/). "
"You need to split up your changes as follows:\n"
"\t1. Edit your diff so it only contains the changes as optional, and not any usage of the"
" new optional changes.\n"
"\t2. Edit FIXED_EXTERN_API in this test so your diff passes the test.\n"
"\t3. Land your diff and wait for the diff to be picked up by the production version of"
" fbpkg training_platform.\n"
"\t4. Once step 3. is complete, you can push the rest of your changes that use the new"
" changes."
)
class Net(torch.nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(1, 10, bias=False)
self.permute_pooled_embeddings = PermutePooledEmbeddings(
[2, 3, 1, 4], [3, 0, 2, 1]
)
self.fc2 = torch.nn.Linear(10, 1, bias=False)
def forward(self, x: Tensor) -> Tensor:
x = self.fc1(x)
x = self.permute_pooled_embeddings(x)
x = self.fc2(x)
return x
# @parameterized_class([{"device_type": "cpu"}, {"device_type": "cuda"}])
class PooledEmbeddingModulesTest(unittest.TestCase):
@settings(deadline=10000, suppress_health_check=suppressed_list)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
@given(device_type=cpu_and_maybe_gpu())
def setUp(self, device_type: torch.device) -> None:
self.device = device_type
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_permutation(self) -> None:
net = Net().to(self.device)
input = torch.Tensor([range(10)]).to(self.device)
self.assertEqual(
net.permute_pooled_embeddings(input).view(10).tolist(),
[6, 7, 8, 9, 0, 1, 5, 2, 3, 4],
)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_permutation_autograd(self) -> None:
net = Net().to(self.device)
input = torch.randn(2, 1).to(self.device)
input_sum = input.sum().item()
output = net(input)
output.sum().backward()
# check that fc1's grad, when permuted, equals fc2's weight times input_sum
# pyre-fixme[16]: Optional type has no attribute `view`.
permute_res = net.permute_pooled_embeddings(net.fc1.weight.grad.view(1, 10))
permute_ref = input_sum * net.fc2.weight
torch.testing.assert_close(permute_res, permute_ref, rtol=1e-03, atol=1e-03)
def test_compatibility(self) -> None:
members = inspect.getmembers(sys.modules[INTERN_MODULE])
for name, clazz in members:
if getattr(clazz, "__module__", None) != INTERN_MODULE:
continue
self.assertIn(name, FIXED_EXTERN_API.keys(), FWD_COMPAT_MSG)
for fn, fixed_params in FIXED_EXTERN_API[name].items():
current_params = inspect.getfullargspec(getattr(clazz, fn)).args
self.assertEqual(
fixed_params,
current_params,
msg=f"\nForward incompatible change in {name} : {fn}\n\n"
f"{FWD_COMPAT_MSG}",
)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_pooled_table_batched_embedding(self) -> None:
num_emb_bags = 5
num_embeddings = 10
embedding_dims = [1, 2, 3, 4, 5]
emb_weight_range = 1
embedding_bags = [
nn.EmbeddingBag(
num_embeddings=num_embeddings,
embedding_dim=embedding_dims[i],
mode="sum",
sparse=True,
)
for i in range(num_emb_bags)
]
for emb_bag in embedding_bags:
torch.nn.init.uniform_(
emb_bag.weight,
-emb_weight_range,
emb_weight_range,
)
indices = [[0], [1, 2], [0, 1, 2], [3, 6], [8]]
indices = [torch.tensor(i).view(-1, len(i)) for i in indices]
pooled_embs = [emb_bag(indices[i]) for i, emb_bag in enumerate(embedding_bags)]
cat_pooled_embs = torch.cat(pooled_embs, dim=1)
permute_order = [2, 1, 3, 0, 4]
permute_pooled_embeddings = PermutePooledEmbeddings(
embedding_dims,
permute_order,
device=self.device,
)
permuted_pooled_emb = permute_pooled_embeddings(cat_pooled_embs.to(self.device))
ref_permuted_pooled_emb = [pooled_embs[i] for i in permute_order]
ref_permuted_pooled_emb = torch.cat(ref_permuted_pooled_emb, dim=1)
assert torch.allclose(
ref_permuted_pooled_emb.to(self.device), permuted_pooled_emb
)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_permutation_autograd_meta(self) -> None:
"""
Test that permute_pooled_embeddings_autograd works with meta tensor and
dynamo export mode
"""
input = torch.randn(2, 1)
net = Net()
output_cpu = net(input)
output_meta = net.to("meta")(input.to("meta"))
assert output_meta.shape == output_cpu.shape
assert input.shape == output_meta.shape
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_duplicate_permutations(self) -> None:
embs_dims = [2, 3, 1, 4]
permute = [3, 0, 2, 0, 1, 3]
expected_result = [6, 7, 8, 9, 0, 1, 5, 0, 1, 2, 3, 4, 6, 7, 8, 9]
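# With embs_dims [2, 3, 1, 4] the input [0..9] splits into segments
# [0, 1], [2, 3, 4], [5], [6, 7, 8, 9]; permute [3, 0, 2, 0, 1, 3] selects
# segments (with duplicates) in that order, yielding the 16-element
# expected_result above.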
input = torch.Tensor([range(10)]).to(device="cuda")
_permute = torch.tensor(permute, device=self.device, dtype=torch.int64)
_offset_dim_list = torch.tensor(
[0] + list(accumulate(embs_dims)), device=self.device, dtype=torch.int64
)
inv_permute: List[int] = [0] * len(permute)
for i, p in enumerate(permute):
inv_permute[p] = i
_inv_permute = torch.tensor(inv_permute, device=self.device, dtype=torch.int64)
inv_embs_dims = [embs_dims[i] for i in permute]
_inv_offset_dim_list = torch.tensor(
[0] + list(accumulate(inv_embs_dims)),
device=self.device,
dtype=torch.int64,
)
result = torch.ops.fbgemm.permute_duplicate_pooled_embs_auto_grad(
input,
_offset_dim_list.to(device=input.device),
_permute.to(device=input.device),
_inv_offset_dim_list.to(device=input.device),
_inv_permute.to(device=input.device),
)
self.assertEqual(
result.view(16).tolist(),
expected_result,
)
input = input.to(device="cpu")
result = torch.ops.fbgemm.permute_duplicate_pooled_embs_auto_grad(
input,
_offset_dim_list.to(device=input.device),
_permute.to(device=input.device),
_inv_offset_dim_list.to(device=input.device),
_inv_permute.to(device=input.device),
)
self.assertEqual(
result.view(16).tolist(),
expected_result,
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import random
import unittest
from math import sqrt
from typing import List, Tuple
import fbgemm_gpu.batched_unary_embeddings_ops as batched_unary_embeddings_ops
import numpy as np
import torch
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import gpu_unavailable
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
from fbgemm_gpu.test.test_utils import gpu_unavailable
# Relative tolerances
TOLERANCE_REL = {
torch.float32: 1e-4,
torch.float16: 1e-2,
torch.bfloat16: 0.1,
}
# Absolute tolerances
TOLERANCE_ABS = {
torch.float32: 1e-4,
torch.float16: 1e-2,
torch.bfloat16: 1e-2,
}
class TableBatchedEmbeddingsTest(unittest.TestCase):
class RefEmb(torch.nn.Module):
def __init__(self, num_tasks: int, hash_sizes: List[int]) -> None:
super().__init__()
self.num_tasks = num_tasks
self.hash_sizes = hash_sizes
self.emb_modules = torch.nn.ModuleList()
for _ in range(num_tasks):
for h in self.hash_sizes:
emb = torch.nn.EmbeddingBag(
num_embeddings=h,
embedding_dim=1,
mode="sum",
sparse=False,
include_last_offset=True,
)
emb.weight = torch.nn.Parameter(
torch.empty([h, 1]).uniform_(-sqrt(1 / h), sqrt(1 / h))
)
self.emb_modules.append(emb)
def forward(
self, offsets: List[torch.Tensor], indices: List[torch.Tensor]
) -> torch.Tensor:
tt_list = []
for n in range(self.num_tasks):
t_list = []
for i in range(len(self.hash_sizes)):
t = self.emb_modules[n * len(self.hash_sizes) + i](
offsets=offsets[i].long(), input=indices[i].long()
)
t_list.append(t)
tt = torch.cat(t_list, dim=1)
tt_list.append(tt)
return torch.cat(tt_list).view(self.num_tasks, -1, len(self.hash_sizes))
def _generate_unary_features(
self, batch_size: int, num_embeddings: int
) -> Tuple[List, List, List]:
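# Each batch element gets exactly one random index, so for a given batch_size B
# this returns lengths = [1] * B, offsets = [0, 1, ..., B], and B random
# indices in [0, num_embeddings - 1].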
lengths = []
offsets = []
indices = []
offset = 0
for _ in range(batch_size):
n_indices = 1
indices += np.round(
np.random.random(n_indices) * (num_embeddings - 1)
).tolist()
offsets.append(offset)
offset += 1
lengths.append(n_indices)
offsets.append(offset)
return (lengths, offsets, indices)
def _test_main(
self,
gpu_infer: bool,
torch_compile: bool = False,
) -> None:
if gpu_infer:
device = torch.device("cuda:0")
torch.cuda.set_device(device)
else:
device = torch.device("cpu")
batch_size = 128
hash_sizes = [100, 200]
num_tasks = 3
emb_dtype = random.choice([torch.float, torch.half, torch.bfloat16])
# generate unary features
lengths = []
offsets = []
indices = []
for h in hash_sizes:
l, o, i = self._generate_unary_features(batch_size, h)
lengths.append(torch.IntTensor(l).to(device))
offsets.append(torch.IntTensor(o).to(device))
indices.append(torch.IntTensor(i).to(device))
lengths_tensor = torch.cat(lengths)
indices_tensor = torch.cat(indices)
offsets_tensor = torch.zeros(
lengths_tensor.numel() + 1,
dtype=lengths_tensor.dtype,
device=lengths_tensor.device,
)
offsets_tensor[1:] = torch.ops.fbgemm.asynchronous_inclusive_cumsum(
lengths_tensor.view(-1)
)
# forward with int_32
ref_emb = self.RefEmb(num_tasks, hash_sizes).to(device).to(emb_dtype)
unary_emb = (
batched_unary_embeddings_ops.BatchedUnaryEmbeddingBag(num_tasks, hash_sizes)
.to(device)
.to(emb_dtype)
)
for i, param in enumerate(unary_emb.split_embedding_weights()):
param.detach().copy_(ref_emb.emb_modules[i].weight)
output_ref = ref_emb(offsets, indices)
if torch_compile:
unary_emb = torch.compile(unary_emb, dynamic=True, fullgraph=True)
output = unary_emb(offsets_tensor, indices_tensor)
torch.testing.assert_close(
output_ref,
output,
atol=TOLERANCE_ABS[emb_dtype],
rtol=TOLERANCE_REL[emb_dtype],
)
# forward with int_64
ref_emb = self.RefEmb(num_tasks, hash_sizes).to(device).to(emb_dtype)
unary_emb = (
batched_unary_embeddings_ops.BatchedUnaryEmbeddingBag(
num_tasks=num_tasks, hash_sizes=hash_sizes, long_index=True
)
.to(device)
.to(emb_dtype)
)
for i, param in enumerate(unary_emb.split_embedding_weights()):
param.detach().copy_(ref_emb.emb_modules[i].weight)
output_ref = ref_emb(offsets, indices)
if torch_compile:
unary_emb = torch.compile(unary_emb, dynamic=True, fullgraph=True)
output = unary_emb(offsets_tensor.long(), indices_tensor.long())
torch.testing.assert_close(
output_ref,
output,
atol=TOLERANCE_ABS[emb_dtype],
rtol=TOLERANCE_REL[emb_dtype],
)
# No implementation for CPU backprop yet
if not gpu_infer:
return
# FIXME: the following doesn't work
# with torch.compile-d unary_emb
if torch_compile:
return
d_output = (
torch.randn([num_tasks, batch_size, len(hash_sizes)]).to(device) * 0.1
)
output_ref.backward(d_output)
output.backward(d_output)
d_weight_ref = []
for emb in ref_emb.emb_modules:
d_weight_ref.append(emb.weight.grad)
d_weight_ref = torch.cat(d_weight_ref).view(num_tasks, sum(hash_sizes), -1)
d_weight = unary_emb.weight.grad # pyre-ignore[16]
torch.testing.assert_close(
d_weight_ref,
d_weight,
atol=TOLERANCE_ABS[emb_dtype],
rtol=TOLERANCE_REL[emb_dtype],
)
# Test the case where we add a permute operation, which produces a
# non-contiguous grad tensor; this should also work
unary_embedding_module = batched_unary_embeddings_ops.BatchedUnaryEmbeddingBag(
num_tasks=3,
hash_sizes=[71, 107],
long_index=True,
).to(device)
offsets = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7], dtype=torch.long).to(device)
values = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=torch.long).to(device)
for _ in range(10):
output = unary_embedding_module(offsets, values).transpose(1, 0)
output = output[1:]
output.sum().backward()
@unittest.skipIf(*gpu_unavailable)
def test_gpu(self) -> None:
self._test_main(gpu_infer=True)
# the test below fails with a CUDA error in the OSS CI,
# likely due to the CUDA IMA issues in test_gpu above;
# commenting out for now
# @unittest.skipIf(*gpu_unavailable)
# def test_gpu_torch_compile(self) -> None:
# self._test_main(gpu_infer=True, torch_compile=True)
def test_cpu(self) -> None:
self._test_main(gpu_infer=False)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Optional, Tuple
import hypothesis.strategies as st
import torch
from fbgemm_gpu.quantize_comm import none_throws, QuantizedCommCodec
from fbgemm_gpu.split_embedding_configs import SparseType
from hypothesis import assume, given, settings
class QuantizedCommCodecTest(unittest.TestCase):
@settings(deadline=4000)
# pyre-ignore
@given(
comm_precisions_loss_scale=st.sampled_from(
[
(SparseType.FP32, None),
(SparseType.FP16, None),
(SparseType.FP16, 4.0),
(SparseType.BF16, None),
(SparseType.BF16, 2.0),
(SparseType.FP8, None),
(SparseType.FP8, 3.0),
(SparseType.INT8, None),
]
),
row_size=st.integers(4, 256),
col_size=st.integers(4, 256),
rand_seed=st.integers(0, 65534),
row_dim=st.sampled_from([-1, 4, 8, 16, 32]),
)
def test_quantized_comm_codec(
self,
comm_precisions_loss_scale: Tuple[SparseType, Optional[float]],
row_size: int,
col_size: int,
rand_seed: int,
row_dim: int,
) -> None:
(comm_precision, loss_scale) = comm_precisions_loss_scale
if comm_precision == SparseType.FP8:
if row_dim > 0:
assume((col_size * row_size) % row_dim == 0)
assume(col_size % 4 == 0)
torch.manual_seed(rand_seed)
shape = (row_size, col_size)
input_tensor = torch.rand(shape, requires_grad=True)
cur_row_dim = None
if (
comm_precision == SparseType.FP8
and torch.cuda.device_count() != 0
and row_dim > 0
):
cur_row_dim = row_dim
input_tensor = input_tensor.view(-1).cuda()
quant_codec = QuantizedCommCodec(
comm_precision, loss_scale, row_dim=cur_row_dim
)
ctx = quant_codec.create_context()
if comm_precision == SparseType.INT8:
ctx = none_throws(ctx)
assume(row_size * col_size % ctx.row_dim == 0)
input_tensor = input_tensor.view(-1)
quant_tensor = quant_codec.encode(input_tensor, ctx)
self.assertEqual(
quant_tensor.numel(),
quant_codec.calc_quantized_size(input_tensor.numel(), ctx),
)
output_tensor = quant_codec.decode(quant_tensor, ctx)
self.assertEqual(output_tensor.shape, input_tensor.shape)
rtol = 0.005
atol = 0.005
if comm_precision == SparseType.FP8:
rtol = 0.05
atol = 0.05
torch.testing.assert_close(
input_tensor.detach().cpu(),
output_tensor.detach().cpu(),
rtol=rtol,
atol=atol,
)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import fbgemm_gpu.metrics
import hypothesis.strategies as st
import torch
from hypothesis import given, settings
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:metric_ops")
class MetricOpsTest(unittest.TestCase):
@unittest.skipIf(
True,
"Test is sometimes failed due to issues with Flaky. Skipping until the issues are resolved. ",
)
# pyre-ignore [56]
@given(
n_tasks=st.integers(1, 5),
batch_size=st.integers(1, 1024),
dtype=st.sampled_from([torch.half, torch.float, torch.double]),
)
@settings(max_examples=20, deadline=None)
def test_auc(self, n_tasks: int, batch_size: int, dtype: torch.dtype) -> None:
predictions = torch.randint(0, 1000, (n_tasks, batch_size)).to(dtype).cuda()
labels = torch.randint(0, 1000, (n_tasks, batch_size)).to(dtype).cuda() / 1000.0
weights = torch.rand(n_tasks, batch_size).to(dtype).cuda()
compute_auc = fbgemm_gpu.metrics.Auc()
output_ref = compute_auc(n_tasks, predictions, labels, weights)
output = fbgemm_gpu.metrics.auc(n_tasks, predictions, labels, weights)
# Explicitly convert type based on output_ref's dtype
output = output.to(output_ref.dtype)
# Test correctness only if output_ref does not produce nan or inf
if not (torch.isnan(output_ref).any() or torch.isinf(output_ref).any()):
torch.testing.assert_close(
output_ref,
output,
rtol=1e-2 if dtype == torch.half else None,
atol=1e-2 if dtype == torch.half else None,
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import itertools
import random
import unittest
from typing import List, Tuple
import hypothesis.strategies as st
import numpy as np
import torch
import torch._dynamo
from hypothesis import assume, given, settings, Verbosity
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import (
gpu_available,
gpu_unavailable,
on_arm_platform,
running_on_github,
symint_vector_unsupported,
TEST_WITH_ROCM,
)
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
from fbgemm_gpu.test.test_utils import (
gpu_available,
gpu_unavailable,
on_arm_platform,
running_on_github,
symint_vector_unsupported,
TEST_WITH_ROCM,
)
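# Maps per-row lengths to per-element row (segment) ids,
# e.g. lengths = [2, 0, 1] -> [0, 0, 2].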
def lengths_to_segment_ids(lengths: torch.Tensor) -> torch.Tensor:
return torch.repeat_interleave(
torch._dim_arange(lengths, 0).long(),
lengths.long(),
)
# Converts lengths + values format to COO format
# [B], [N] -> [B, N'].
# pyre-ignore Missing return annotation [3]
def var_list_to_coo_1d(
lengths: torch.Tensor,
values: torch.Tensor,
N: int,
):
rows = lengths_to_segment_ids(lengths)
num_rows = lengths.size()[0]
# This does D&H sync
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
output_size = lengths.sum()
# This does D&H sync
cols = torch.ops.fbgemm.offsets_range(offsets, output_size)
indices = torch.stack([rows, cols])
dims = [num_rows, N]
# torch.sparse_coo_tensor is not supported by torch.fx, wrap it.
return torch.sparse_coo_tensor(
indices=indices,
values=values,
size=dims,
)
# Converts lengths + values format to COO format
# [B], [N, D] -> [B, N', D].
# pyre-ignore Missing return annotation [3]
def var_list_to_coo(lengths: torch.Tensor, values: torch.Tensor, N: int, D: int):
rows = lengths_to_segment_ids(lengths)
num_rows = lengths.size()[0]
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
output_size = lengths.sum()
# This does D&H sync
cols = torch.ops.fbgemm.offsets_range(offsets, output_size)
indices = torch.stack([rows, cols])
dims = [num_rows, N, D]
# torch.sparse_coo_tensor is not supported by torch.fx, wrap it.
return torch.sparse_coo_tensor(
indices=indices,
values=values,
size=dims,
)
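# Maps a cumulative-sum list of hash sizes to offsets, repeating the previous
# offset whenever the cumulative sum does not increase,
# e.g. (illustrative) [0, 5, 5, 10] -> [0, 1, 1, 3].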
def hash_size_cumsum_to_offsets(hash_size_cum_sum_list: List[int]) -> List[int]:
hash_size_offsets_list = [0]
count = 0
for f in range(1, len(hash_size_cum_sum_list)):
count = count + 1
if hash_size_cum_sum_list[f] == hash_size_cum_sum_list[f - 1]:
curr_offsets = hash_size_offsets_list[-1]
hash_size_offsets_list.append(curr_offsets)
else:
hash_size_offsets_list.append(count)
hash_size_offsets_list[-1] = count
return hash_size_offsets_list
class JaggedTensorOpsTest(unittest.TestCase):
def setUp(self) -> None:
if symint_vector_unsupported()[0]:
return
assert hasattr(
torch._dynamo.config, "assume_static_by_default"
), "Need to update the config as the dynamic/auto-dynamic setting has changed"
# Turn off static assumption for auto-dynamic
torch._dynamo.config.assume_static_by_default = False
@staticmethod
def expand_into_jagged_permute_ref_(
permute: List[int],
length: List[int],
) -> List[int]:
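# Reference implementation: expands a segment-level permutation into an
# element-level one, e.g. permute = [1, 0], length = [2, 3] -> [2, 3, 4, 0, 1].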
offsets = [0] + list(itertools.accumulate(length))
output_permute = []
for r in permute:
output_permute.extend(
range(
offsets[r],
offsets[r + 1],
)
)
return output_permute
@given(
T=st.integers(min_value=10, max_value=20),
W=st.integers(min_value=8, max_value=64),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_expand_into_jagged_permute(
self,
T: int,
W: int,
) -> None:
length_per_w = [random.randint(5000, 10000) for i in range(W)]
length_1d = list(
itertools.chain.from_iterable(itertools.repeat(x, T) for x in length_per_w)
)
permute_list = list(range(T * W))
random.shuffle(permute_list)
permuted_length_1d = [length_1d[r] for r in permute_list]
permute_tensor = torch.tensor(permute_list)
# compute offsets
offsets_1d = [0] + list(itertools.accumulate(length_1d))
permuted_offsets_1d = [0] + list(itertools.accumulate(permuted_length_1d))
offsets_1d_tensor = torch.tensor(offsets_1d)
permuted_offsets_1d_tensor = torch.tensor(permuted_offsets_1d)
# cpu op
output_permute_cpu = torch.ops.fbgemm.expand_into_jagged_permute(
permute_tensor,
offsets_1d_tensor,
permuted_offsets_1d_tensor,
offsets_1d[-1],
)
# reference solution
output_permute_ref = self.expand_into_jagged_permute_ref_(
permute_list,
length_1d,
)
output_permute_ref_tensor = torch.tensor(output_permute_ref)
# assert cpu and gpu ops
torch.testing.assert_close(output_permute_cpu, output_permute_ref_tensor)
if gpu_available:
# gpu op
output_permute_gpu = torch.ops.fbgemm.expand_into_jagged_permute(
permute_tensor.cuda(),
offsets_1d_tensor.cuda(),
permuted_offsets_1d_tensor.cuda(),
offsets_1d[-1],
)
torch.testing.assert_close(
output_permute_gpu.cpu(), output_permute_ref_tensor
)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(min_value=1, max_value=128),
D=st.integers(min_value=1, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=200),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16]),
)
def test_jagged_2d_to_dense(
self,
B: int,
D: int,
max_sequence_length: int,
dtype: torch.dtype,
) -> None:
D = D * 4
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.rand(total_lengths, D)
ref_output_values = var_list_to_coo(
lengths,
ref_values,
max_sequence_length,
D,
).to_dense()
ref_output_values = ref_output_values.to(dtype)
# test cpu forward
values = ref_values.clone().to(dtype).detach().requires_grad_(True)
output_values = torch.ops.fbgemm.jagged_2d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(ref_output_values, output_values)
if torch.cuda.is_available():
# test gpu forward
ref_values = ref_values.cuda()
values = ref_values.clone().to(dtype).detach().requires_grad_(True)
offsets = offsets.cuda()
ref_output_values = ref_output_values.cuda()
output_values = torch.ops.fbgemm.jagged_2d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(ref_output_values, output_values)
# test gpu backward
output_values.backward(ref_output_values)
ref_values = ref_values.to(dtype)
torch.testing.assert_close(ref_values, values.grad)
def test_jagged_2d_to_dense_truncation(self) -> None:
# Test the case where max_sequence_length < max(lengths[i])
lengths_ = np.array([2, 3, 0, 1])
lengths = torch.from_numpy(lengths_)
total_lengths = lengths_.sum()
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
embedding_dim = 16
max_sequence_length = 2
ref_values = torch.rand(total_lengths, embedding_dim)
ref_output_values = var_list_to_coo(
lengths,
ref_values,
3,
embedding_dim,
).to_dense()[:, :max_sequence_length, :]
# test cpu forward
values = ref_values.clone().detach().requires_grad_(True)
output_values = torch.ops.fbgemm.jagged_2d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(ref_output_values, output_values)
if torch.cuda.is_available():
# test gpu forward
ref_values = ref_values.cuda()
values = ref_values.clone().detach().requires_grad_(True)
offsets = offsets.cuda()
ref_output_values = ref_output_values.cuda()
output_values = torch.ops.fbgemm.jagged_2d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(ref_output_values, output_values)
# test gpu backward
expected_grad = ref_values
expected_grad[4, :] = 0 # due to truncation
expected_grad = expected_grad.cuda()
output_values.backward(ref_output_values)
torch.testing.assert_close(expected_grad, values.grad)
@unittest.skipIf(*symint_vector_unsupported())
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(min_value=2, max_value=128),
D=st.integers(min_value=2, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=200),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
def test_jagged_2d_to_dense_dynamic_shape(
self,
B: int,
D: int,
max_sequence_length: int,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
D = D * 4
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.rand(total_lengths, D)
ref_output_values = var_list_to_coo(
lengths,
ref_values,
max_sequence_length,
D,
).to_dense()
ref_output_values = ref_output_values.to(dtype)
ref_values = ref_values.to(device_type)
values = ref_values.clone().to(dtype).detach().requires_grad_(True)
offsets = offsets.to(device_type)
ref_output_values = ref_output_values.to(device_type)
output_values = torch.compile(
torch.ops.fbgemm.jagged_2d_to_dense, dynamic=True, fullgraph=True
)(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(ref_output_values, output_values)
output_values.backward(ref_output_values)
ref_values = ref_values.to(dtype)
torch.testing.assert_close(ref_values, values.grad)
@unittest.skipIf(*gpu_unavailable)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
T=st.integers(min_value=1, max_value=5),
B=st.integers(min_value=1, max_value=64),
D=st.integers(min_value=1, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=300),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
def test_stacked_jagged_2d_to_dense(
self,
T: int,
B: int,
D: int,
max_sequence_length: int,
use_cpu: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
D = D * 4
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B * T)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_).to(device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.rand(total_lengths, D, device=device)
ref_output_values = var_list_to_coo(
lengths,
ref_values,
max_sequence_length,
D,
).to_dense()
lengths = lengths.view(T, B)
values = ref_values.clone().detach().requires_grad_(True)
output_values_per_table = torch.ops.fbgemm.stacked_jagged_2d_to_dense(
values=values,
lengths=lengths,
offset_per_key=[0]
+ np.cumsum([lengths[t].sum().item() for t in range(T)]).tolist(),
max_lengths_per_key=[max_sequence_length] * T,
)
ref_output_values = torch.ops.fbgemm.jagged_2d_to_dense(
values=ref_values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(
ref_output_values, torch.cat(output_values_per_table)
)
# test backward
output_values = torch.cat(output_values_per_table)
output_values.backward(ref_output_values)
torch.testing.assert_close(ref_values, values.grad)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(min_value=1, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=500),
padding_value=st.integers(min_value=-100000, max_value=100000),
)
def test_jagged_1d_to_dense(
self,
B: int,
max_sequence_length: int,
padding_value: int,
) -> None:
def lengths_to_segment_ids(lengths: torch.Tensor) -> torch.Tensor:
return torch.repeat_interleave(
torch._dim_arange(lengths, 0).long(),
lengths.long(),
)
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.randint(low=0, high=1000000000, size=(total_lengths,))
ref_values_mask = var_list_to_coo_1d(
lengths, torch.ones_like(ref_values), max_sequence_length
).to_dense()
ref_output_values = (
var_list_to_coo_1d(
lengths,
ref_values,
max_sequence_length,
).to_dense()
+ (1 - ref_values_mask) * torch.ones_like(ref_values_mask) * padding_value
)
# test cpu forward
values = ref_values.clone().detach().requires_grad_(False)
output_values = torch.ops.fbgemm.jagged_1d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
padding_value=padding_value,
)
torch.testing.assert_close(ref_output_values, output_values)
if torch.cuda.is_available():
# test gpu forward
ref_values = ref_values.cuda()
values = ref_values.clone().detach().requires_grad_(False)
offsets = offsets.cuda()
ref_output_values = ref_output_values.cuda()
output_values = torch.ops.fbgemm.jagged_1d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
padding_value=padding_value,
)
torch.testing.assert_close(ref_output_values, output_values)
def test_jagged_1d_to_dense_truncation(self) -> None:
lengths_ = np.array([1, 3, 0, 1])
lengths = torch.from_numpy(lengths_)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.from_numpy(np.array([100, 3, 4, 5, 6]))
ref_output = torch.from_numpy(np.array([100, 3, -1, 6])).reshape(-1, 1)
# test cpu forward
values = ref_values.clone().detach().requires_grad_(False)
output = torch.ops.fbgemm.jagged_1d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=1,
padding_value=-1,
)
torch.testing.assert_close(ref_output, output)
if torch.cuda.is_available():
# test gpu forward
ref_values = ref_values.cuda()
values = ref_values.clone().detach().requires_grad_(False)
offsets = offsets.cuda()
ref_output = ref_output.cuda()
output = torch.ops.fbgemm.jagged_1d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=1,
padding_value=-1,
)
torch.testing.assert_close(ref_output, output)
@unittest.skipIf(*symint_vector_unsupported())
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(min_value=1, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=500),
padding_value=st.integers(min_value=-100000, max_value=100000),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
def test_jagged_1d_to_dense_dynamic_shape(
self, B: int, max_sequence_length: int, padding_value: int, device_type: str
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
def lengths_to_segment_ids(lengths: torch.Tensor) -> torch.Tensor:
return torch.repeat_interleave(
torch._dim_arange(lengths, 0).long(),
lengths.long(),
)
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.randint(low=0, high=1000000000, size=(total_lengths,))
ref_values_mask = var_list_to_coo_1d(
lengths, torch.ones_like(ref_values), max_sequence_length
).to_dense()
ref_output_values = (
var_list_to_coo_1d(
lengths,
ref_values,
max_sequence_length,
).to_dense()
+ (1 - ref_values_mask) * torch.ones_like(ref_values_mask) * padding_value
)
ref_values = ref_values.to(device_type)
values = ref_values.clone().detach().requires_grad_(False)
offsets = offsets.to(device_type)
ref_output_values = ref_output_values.to(device_type)
output_values = torch.compile(
torch.ops.fbgemm.jagged_1d_to_dense, dynamic=True, fullgraph=True
)(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
padding_value=padding_value,
)
torch.testing.assert_close(ref_output_values, output_values)
@unittest.skipIf(*gpu_unavailable)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
T=st.integers(min_value=1, max_value=20),
B=st.integers(min_value=1, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=500),
padding_value=st.integers(min_value=-100000, max_value=100000),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
def test_stacked_jagged_1d_to_dense(
self,
T: int,
B: int,
max_sequence_length: int,
padding_value: int,
use_cpu: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
def lengths_to_segment_ids(lengths: torch.Tensor) -> torch.Tensor:
return torch.repeat_interleave(
torch._dim_arange(lengths, 0).long(),
lengths.long(),
)
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B * T)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_).to(device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
lengths = lengths.view(T, B)
ref_values = torch.randint(
low=0, high=1000000000, size=(total_lengths,), device=device
)
values = ref_values.clone().detach().requires_grad_(False)
output_values_per_table = torch.ops.fbgemm.stacked_jagged_1d_to_dense(
values=values,
lengths=lengths,
offset_per_key=[0]
+ np.cumsum([lengths[t].sum().item() for t in range(T)]).tolist(),
max_lengths_per_key=[max_sequence_length] * T,
padding_value=padding_value,
)
ref_output_values = torch.ops.fbgemm.jagged_1d_to_dense(
values=ref_values,
offsets=offsets,
max_sequence_length=max_sequence_length,
padding_value=padding_value,
)
torch.testing.assert_close(
ref_output_values, torch.cat(output_values_per_table)
)
def _to_padded_dense(
self,
values: torch.Tensor,
offsets: List[torch.LongTensor],
max_lengths: np.ndarray,
padding_value: float = 0,
) -> torch.Tensor:
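# Reference densification: for every (outer, jagged...) coordinate, walk the
# chain of offsets; coordinates that fall outside the jagged lengths are
# filled with padding_value, all others with the corresponding value.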
outer_dense_size = len(offsets[0]) - 1
# canonicalize by unsqueezing the last dim if the inner dense dimension
# is 1 and has been folded.
inner_dense_size = 1 if values.ndim == 1 else values.size(-1)
dense = torch.empty(
(outer_dense_size,) + tuple(max_lengths) + (inner_dense_size,),
dtype=values.dtype,
device=values.device,
)
for i in range(outer_dense_size):
for jagged_coord in itertools.product(
*(list(range(max_l)) for max_l in max_lengths)
):
cur_offset = i
is_zero = False
for d in range(len(max_lengths)):
begin = offsets[d][cur_offset].item()
end = offsets[d][cur_offset + 1].item()
# pyre-fixme[6]: For 1st param expected `int` but got
# `Union[bool, float, int]`.
if jagged_coord[d] >= end - begin:
is_zero = True
break
cur_offset = begin + jagged_coord[d]
dense[(i,) + jagged_coord] = (
padding_value if is_zero else values[cur_offset]
)
return dense.squeeze(-1) if values.ndim == 1 else dense
# TODO: reuse this code in test_(stacked)_jagged_1/2d
def _generate_jagged_tensor(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device: torch.device,
fold_inner_dense: bool = False,
# use torch._dynamo to mark the input as having a dynamic shape so that a
# symbolic shape is generated
mark_dynamic: bool = False,
) -> Tuple[torch.Tensor, List[torch.LongTensor], np.ndarray]:
max_lengths = np.random.randint(low=1, high=10, size=(num_jagged_dim,))
x_offsets: List[torch.LongTensor] = []
num_lengths = outer_dense_size
for d in range(num_jagged_dim):
# Sometimes lengths[i] exceeds max_L, meaning jagged->dense will truncate
# rather than pad
lengths = torch.randint(
# PT2 specializes 0/1 dims as non-symbolic shapes. So we need
# to make them non-0/1 for testing. In real cases they'll likely
# not be 0/1 anyway (if they are, they'll be recompiled)
low=0 if not mark_dynamic else 1,
high=max_lengths[d] * 2,
# pyre-fixme[6]: For 3rd param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Tuple[Union[bool, float, int]]`.
size=(num_lengths,),
device=device,
)
offset = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
if mark_dynamic:
torch._dynamo.mark_dynamic(offset, 0)
x_offsets.append(offset)
num_lengths = x_offsets[-1][-1].item()
x_values = torch.rand(
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Tensor`.
x_offsets[-1][-1] * inner_dense_size,
dtype=dtype,
device=device,
)
if inner_dense_size != 1 or not fold_inner_dense:
# pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, float, int]`.
x_values = x_values.reshape(x_offsets[-1][-1].item(), inner_dense_size)
if mark_dynamic:
for i in range(inner_dense_size):
torch._dynamo.mark_dynamic(x_values, i)
return x_values, x_offsets, max_lengths
def _test_dense_to_jagged(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
precompute_total_L: bool,
) -> None:
# Generate multi-dim jagged tensor
device = torch.device(device_type)
values_2d, offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device
)
values_2d = values_2d.clone().detach().requires_grad_(True)
# jagged -> dense
dense = torch.ops.fbgemm.jagged_to_padded_dense(values_2d, offsets, max_lengths)
# dense -> jagged (op which is being tested)
if precompute_total_L:
total_L = values_2d.size(0)
jagged_values, jagged_offsets = torch.ops.fbgemm.dense_to_jagged(
dense, offsets, total_L
)
else:
jagged_values, jagged_offsets = torch.ops.fbgemm.dense_to_jagged(
dense, offsets
)
# jagged -> dense
dense2 = torch.ops.fbgemm.jagged_to_padded_dense(
jagged_values, jagged_offsets, max_lengths
)
# verify forward
torch.testing.assert_close(dense, dense2)
# verify backward
dense.retain_grad()
ref_output_values = jagged_values.clone().detach().requires_grad_(True)
ref_values = dense.clone().detach().requires_grad_(True)
jagged_values.backward(ref_output_values)
torch.testing.assert_close(dense.grad, ref_values)
@given(
num_jagged_dim=st.integers(1, 5),
outer_dense_size=st.integers(0, 5),
inner_dense_size=st.integers(0, 5),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
precompute_total_L=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_dense_to_jagged(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
precompute_total_L: bool,
) -> None:
self._test_dense_to_jagged(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
device_type,
precompute_total_L,
)
@unittest.skipIf(*gpu_unavailable)
@given(
num_jagged_dim=st.just(1),
outer_dense_size=st.integers(0, 6000),
inner_dense_size=st.sampled_from([8, 16, 23, 24, 48, 50, 64, 72, 96, 192]),
dtype=st.just(torch.half),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
precompute_total_L=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_dense_to_jagged_opt(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
precompute_total_L: bool,
) -> None:
self._test_dense_to_jagged(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
device_type,
precompute_total_L,
)
# (8000 + 1) * 8 bytes (size of each LongTensor/int64_t offsets element)
# = ~62.5KB > 48KB default shared memory on V100/A100.
@unittest.skipIf(*gpu_unavailable)
@given(
num_jagged_dim=st.just(1),
outer_dense_size=st.just(8000),
inner_dense_size=st.just(16),
dtype=st.just(torch.half),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
precompute_total_L=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=1, deadline=None)
def test_dense_to_jagged_opt_large_batch(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
precompute_total_L: bool,
) -> None:
self._test_dense_to_jagged(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
device_type,
precompute_total_L,
)
@given(
num_jagged_dim=st.integers(1, 5),
outer_dense_size=st.integers(0, 5),
inner_dense_size=st.integers(0, 5),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16]),
device_type=st.sampled_from(["meta"]),
precompute_total_L=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_dense_to_jagged_meta_backend(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
precompute_total_L: bool,
) -> None:
device = torch.device("cpu")
values_2d, offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device
)
values_2d = values_2d.clone().detach().requires_grad_(True)
# jagged -> dense
dense = torch.ops.fbgemm.jagged_to_padded_dense(values_2d, offsets, max_lengths)
# dense -> jagged (op which is being tested)
if precompute_total_L:
total_L = values_2d.size(0)
dense.to(device_type)
jagged_values, jagged_offsets = torch.ops.fbgemm.dense_to_jagged(
dense, offsets, total_L
)
else:
dense.to(device_type)
jagged_values, jagged_offsets = torch.ops.fbgemm.dense_to_jagged(
dense, offsets
)
jagged_values.to(device_type)
# jagged -> dense
dense2 = torch.ops.fbgemm.jagged_to_padded_dense(
jagged_values, jagged_offsets, max_lengths
)
# verify forward
assert dense.size() == dense2.size()
@unittest.skipIf(*symint_vector_unsupported())
@given(
num_jagged_dim=st.integers(1, 5),
# TODO: size = 0/1 will be incorrectly specialized
outer_dense_size=st.integers(2, 5),
inner_dense_size=st.integers(2, 5),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_dense_to_jagged_dynamic_shape(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
values_2d, offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
torch.device(device_type),
mark_dynamic=True,
)
values_2d = values_2d.clone().detach().requires_grad_(True)
@torch.compile(fullgraph=True, dynamic=True)
def jagged_to_dense(
values: torch.Tensor, offsets: torch.Tensor, max_lengths: List[int]
) -> torch.Tensor:
return torch.ops.fbgemm.jagged_to_padded_dense(values, offsets, max_lengths)
# jagged -> dense
dense = jagged_to_dense(values_2d, offsets, max_lengths.tolist())
# dense -> jagged; total_L must be pre-computed here
total_L = values_2d.size(0)
dense = dense.clone().detach().to(device_type)
torch._dynamo.mark_dynamic(dense, 0)
torch._dynamo.mark_dynamic(dense, -1)
@torch.compile(fullgraph=True, dynamic=True)
def dense_to_jagged_withL(
dense: torch.Tensor, offsets: torch.Tensor, total_L: List[int]
) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.fbgemm.dense_to_jagged(dense, offsets, total_L)
@torch.compile(fullgraph=False, dynamic=True)
def dense_to_jagged_noL(
dense: torch.Tensor, offsets: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.fbgemm.dense_to_jagged(dense, offsets)
jagged_values, jagged_offsets = dense_to_jagged_noL(dense, offsets)
jagged_values, jagged_offsets = dense_to_jagged_withL(dense, offsets, total_L)
jagged_values.to(device_type)
# jagged -> dense
dense2 = torch.ops.fbgemm.jagged_to_padded_dense(
jagged_values, jagged_offsets, max_lengths
)
# verify forward
assert dense.size() == dense2.size()
@given(
num_jagged_dim=st.integers(1, 5),
outer_dense_size=st.integers(0, 5),
inner_dense_size=st.integers(0, 5),
fold_inner_dense=st.booleans(),
padding_value=st.sampled_from([0, -1e-8]),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16, torch.double]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_to_padded_dense(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
fold_inner_dense: bool,
padding_value: float,
dtype: torch.dtype,
device_type: str,
) -> None:
# CPU doesn't support bfloat16
assume(device_type != "cpu" or dtype != torch.bfloat16)
assume(not fold_inner_dense or inner_dense_size == 1)
# Illustrative crafted example of the jagged <-> dense layout (the test
# itself uses randomly generated jagged tensors):
# dense representation is
# [[[[0, 1], [ 0, 0], [0, 0]],
# [[2, 3], [ 4, 5], [6, 7]],
# [[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]]],
# [[[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]]],
# [[[8, 9], [10, 11], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]]]],
# inner_dense_size = 2
# x_offsets = [
# torch.LongTensor([0, 2, 2, 3]), # lengths torch.Tensor([2, 0, 1]),
# torch.LongTensor([0, 1, 4, 6]), # lengths torch.Tensor([1, 3, 2]),
# ]
# outer_dense_size = len(x_offsets[0]) - 1
# max_lengths = [4, 3]
device = torch.device(device_type)
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
torch.float,
device,
fold_inner_dense,
)
output_ref = self._to_padded_dense(
x_values, x_offsets, max_lengths, padding_value=padding_value
)
output = torch.ops.fbgemm.jagged_to_padded_dense(
x_values,
x_offsets,
max_lengths,
padding_value=padding_value,
)
torch.testing.assert_close(output, output_ref)
torch.autograd.gradcheck(
torch.ops.fbgemm.jagged_to_padded_dense,
(
x_values.double().requires_grad_(True),
x_offsets,
max_lengths,
padding_value,
),
)
@given(
num_jagged_dim=st.integers(1, 5),
outer_dense_size=st.integers(0, 5),
inner_dense_size=st.integers(0, 5),
padding_value=st.just(0),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16, torch.double]),
device_type=st.just("meta"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_to_padded_dense_meta_backend(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
padding_value: float,
dtype: torch.dtype,
device_type: str,
) -> None:
assume(device_type != "cpu" or dtype != torch.bfloat16)
device = torch.device("cpu")
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, torch.float, device
)
output_ref = self._to_padded_dense(
x_values, x_offsets, max_lengths, padding_value=padding_value
)
x_values.to(device_type)
output = torch.ops.fbgemm.jagged_to_padded_dense(
x_values,
x_offsets,
max_lengths,
padding_value=padding_value,
)
assert output.size() == output_ref.size()
def _test_jagged_elementwise_binary(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
operation: str,
dtype: torch.dtype,
device_type: str,
) -> None:
device = torch.device(device_type)
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device
)
y = torch.rand(
outer_dense_size * np.prod(max_lengths) * inner_dense_size,
dtype=dtype,
device=device,
).reshape((outer_dense_size,) + tuple(max_lengths) + (inner_dense_size,))
x_padded = self._to_padded_dense(x_values, x_offsets, max_lengths)
if operation == "add":
output_ref = x_padded + y
output = torch.ops.fbgemm.jagged_dense_elementwise_add(
x_values, x_offsets, y
)
elif operation == "add_jagged_output":
# create a jagged tensor and then densify
y = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
output_ref = x_padded + y
(
output,
output_offsets,
) = torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output(
x_values, x_offsets, y
)
output = self._to_padded_dense(output, output_offsets, max_lengths)
elif operation == "mul":
output_ref = x_padded * y
output, output_offsets = torch.ops.fbgemm.jagged_dense_elementwise_mul(
x_values, x_offsets, y
)
output = self._to_padded_dense(output, output_offsets, max_lengths)
else:
raise AssertionError(f"Unknown operation {operation}")
torch.testing.assert_close(output, output_ref)
if operation == "add":
f = torch.ops.fbgemm.jagged_dense_elementwise_add
elif operation == "add_jagged_output":
# pyre-fixme[2]: Parameter must be annotated.
def add_jagged_output_func(*args) -> torch.Tensor:
return torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output(
*args
)[0]
f = add_jagged_output_func
else:
assert operation == "mul"
# pyre-fixme[2]: Parameter must be annotated.
def mul_func(*args) -> torch.Tensor:
return torch.ops.fbgemm.jagged_dense_elementwise_mul(*args)[0]
f = mul_func
torch.autograd.gradcheck(
f,
(
x_values.double().requires_grad_(True),
x_offsets,
y.double().requires_grad_(True),
),
)
@given(
num_jagged_dim=st.integers(1, 4),
outer_dense_size=st.integers(0, 4),
inner_dense_size=st.integers(0, 4),
operation=st.sampled_from(["add", "add_jagged_output", "mul"]),
dtype=st.sampled_from([torch.float, torch.half, torch.double, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_elementwise_binary(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
operation: str,
dtype: torch.dtype,
device_type: str,
) -> None:
self._test_jagged_elementwise_binary(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
operation,
dtype,
device_type,
)
@unittest.skipIf(*gpu_unavailable)
@given(
num_jagged_dim=st.just(1),
outer_dense_size=st.integers(0, 8),
inner_dense_size=st.sampled_from([16, 64, 96, 192]),
operation=st.sampled_from(["add_jagged_output", "mul"]),
dtype=st.just(torch.half),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=4, deadline=None)
def test_jagged_elementwise_binary_opt(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
operation: str,
dtype: torch.dtype,
device_type: str,
) -> None:
self._test_jagged_elementwise_binary(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
operation,
dtype,
device_type,
)
@unittest.skipIf(*symint_vector_unsupported())
@given(
num_jagged_dim=st.integers(1, 5),
outer_dense_size=st.integers(2, 5),
inner_dense_size=st.integers(2, 5),
operation=st.sampled_from(["add", "add_jagged_output", "mul"]),
dtype=st.sampled_from([torch.float, torch.half, torch.double, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_elementwise_binary_dynamic_shape(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
operation: str,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
device = torch.device(device_type)
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
device,
mark_dynamic=True,
)
y = torch.rand(
outer_dense_size * np.prod(max_lengths) * inner_dense_size,
dtype=dtype,
device=device,
).reshape((outer_dense_size,) + tuple(max_lengths) + (inner_dense_size,))
x_padded = self._to_padded_dense(x_values, x_offsets, max_lengths)
@torch.compile(fullgraph=True, dynamic=True)
def jagged_dense_elementwise_add(
x_values: torch.Tensor, x_offsets: torch.Tensor, y: torch.Tensor
) -> torch.Tensor:
return torch.ops.fbgemm.jagged_dense_elementwise_add(x_values, x_offsets, y)
@torch.compile(fullgraph=True, dynamic=True)
def jagged_dense_elementwise_add_jagged_output(
x_values: torch.Tensor, x_offsets: torch.Tensor, y: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output(
x_values, x_offsets, y
)
@torch.compile(fullgraph=True, dynamic=True)
def jagged_dense_elementwise_mul(
x_values: torch.Tensor, x_offsets: torch.Tensor, y: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.fbgemm.jagged_dense_elementwise_mul(x_values, x_offsets, y)
if operation == "add":
output_ref = x_padded + y
output = jagged_dense_elementwise_add(x_values, x_offsets, y)
elif operation == "add_jagged_output":
# create a jagged tensor and then densify
y = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
output_ref = x_padded + y
(
output,
output_offsets,
) = jagged_dense_elementwise_add_jagged_output(x_values, x_offsets, y)
output = self._to_padded_dense(output, output_offsets, max_lengths)
elif operation == "mul":
output_ref = x_padded * y
output, output_offsets = jagged_dense_elementwise_mul(
x_values, x_offsets, y
)
output = self._to_padded_dense(output, output_offsets, max_lengths)
else:
raise AssertionError(f"Unknown operation {operation}")
assert output.size() == output_ref.size()
def _test_jagged_dense_dense_elementwise_add_jagged_output(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
device = torch.device(device_type)
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device
)
x_padded = self._to_padded_dense(x_values, x_offsets, max_lengths)
# create a jagged tensor and then densify
y_0 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
y_1 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
output_ref = x_padded + y_0 + y_1
(
output,
output_offsets,
) = torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output(
x_values, x_offsets, y_0, y_1
)
output = self._to_padded_dense(output, output_offsets, max_lengths)
torch.testing.assert_close(output, output_ref)
# pyre-fixme[2]: Parameter must be annotated.
def add_jagged_output_func(*args) -> torch.Tensor:
return torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output(
*args
)[0]
f = add_jagged_output_func
torch.autograd.gradcheck(
f,
(
x_values.double().requires_grad_(True),
x_offsets,
y_0.double().requires_grad_(True),
y_1.double().requires_grad_(True),
),
)
@given(
num_jagged_dim=st.integers(1, 4),
outer_dense_size=st.integers(0, 4),
inner_dense_size=st.integers(0, 4),
dtype=st.sampled_from([torch.float, torch.half, torch.double, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_dense_dense_elementwise_add_jagged_output(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
self._test_jagged_dense_dense_elementwise_add_jagged_output(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device_type
)
@unittest.skipIf(*gpu_unavailable)
@given(
num_jagged_dim=st.just(1),
outer_dense_size=st.integers(0, 8),
inner_dense_size=st.sampled_from([16, 64, 96, 192]),
dtype=st.just(torch.half),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=4, deadline=None)
def test_jagged_dense_dense_elementwise_add_jagged_output_opt(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
self._test_jagged_dense_dense_elementwise_add_jagged_output(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device_type
)
@given(
num_jagged_dim=st.integers(1, 4),
outer_dense_size=st.integers(0, 4),
inner_dense_size=st.integers(0, 4),
dtype=st.sampled_from([torch.float, torch.half, torch.double, torch.bfloat16]),
device_type=st.just("meta"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_dense_dense_elementwise_add_jagged_output_meta_backend(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
device = torch.device("cpu")
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device
)
x_padded = self._to_padded_dense(x_values, x_offsets, max_lengths)
# create a jagged tensor and then densify
y_0 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
y_1 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
output_ref = x_padded + y_0 + y_1
x_values.to(device_type)
(
output,
output_offsets,
) = torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output(
x_values, x_offsets, y_0, y_1
)
output.to("cpu")
output = self._to_padded_dense(output, output_offsets, max_lengths)
assert output.size() == output_ref.size()
@unittest.skipIf(*symint_vector_unsupported())
@given(
num_jagged_dim=st.integers(1, 4),
outer_dense_size=st.integers(2, 4),
inner_dense_size=st.integers(2, 4),
dtype=st.sampled_from([torch.float, torch.half, torch.double, torch.bfloat16]),
device_type=st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_dense_dense_elementwise_add_jagged_output_dynamic_shape(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
torch.device(device_type),
mark_dynamic=True,
)
x_padded = self._to_padded_dense(x_values, x_offsets, max_lengths)
# create a jagged tensor and then densify
y_0 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=torch.device(device_type),
),
x_offsets,
max_lengths,
)
y_1 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=torch.device(device_type),
),
x_offsets,
max_lengths,
)
output_ref = x_padded + y_0 + y_1
x_values.to(device_type)
(output, output_offsets) = torch.compile(
torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output,
fullgraph=True,
dynamic=True,
)(x_values, x_offsets, y_0, y_1)
output.to("cpu")
output = self._to_padded_dense(output, output_offsets, max_lengths)
assert output.size() == output_ref.size()
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(0, 32),
H=st.integers(1, 3),
max_L=st.integers(1, 32),
D=st.integers(0, 32),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16, torch.double]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
def test_batched_dense_vec_jagged_2d_mul(
self,
B: int,
H: int,
max_L: int,
D: int,
dtype: torch.dtype,
device_type: str,
) -> None:
assume(H == 1 or B != 0)
# CPU doesn't support bfloat16
assume(device_type != "cpu" or dtype != torch.bfloat16)
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
        # Sometimes length[i] exceeds max_L, meaning the jagged -> dense
        # conversion truncates rather than pads
lengths = torch.randint(max_L * 2, size=(B,), device=device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
values = torch.rand((offsets[-1], H * D), dtype=dtype, device=device)
dense = torch.rand((B * H, max_L), dtype=dtype, device=device)
padded_values = torch.ops.fbgemm.jagged_to_padded_dense(
values,
[offsets],
[max_L],
) # [B, N, H * D]
bmm_arg1 = dense.unsqueeze(1)
bmm_arg2 = (
padded_values.reshape(B, max_L, H, D)
.transpose(1, 2)
.reshape(B * H, max_L, D)
)
# torch.bmm not implemented for Half on CPU
if dtype in [torch.half, torch.bfloat16] and device_type == "cpu":
bmm_arg1 = bmm_arg1.float()
bmm_arg2 = bmm_arg2.float()
output_ref = torch.bmm(bmm_arg1, bmm_arg2).squeeze(
1
) # [B H, 1, N] x [B H, N, D] = [B H, 1, D]
if dtype in [torch.half, torch.bfloat16] and device_type == "cpu":
output_ref = output_ref.to(dtype)
output = torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul(
dense, values, offsets
)
torch.testing.assert_close(
output,
output_ref,
rtol=1e-2 if dtype in [torch.half, torch.bfloat16] else None,
atol=1e-2 if dtype in [torch.half, torch.bfloat16] else None,
)
torch.autograd.gradcheck(
torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul,
(
dense.clone().detach().double().requires_grad_(True),
values.clone().detach().double().requires_grad_(True),
offsets,
),
)
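    # NOTE (illustrative): shape contract assumed by this test, spelled out for
    # clarity. `dense` is [B * H, max_L] (one length-max_L vector per batch/head),
    # `values` is the jagged tensor of shape [total_rows, H * D], and the op
    # contracts over the jagged dimension, producing [B * H, D]. The dense
    # reference above reproduces this as a batched vec-mat product:
    #   [B*H, 1, max_L] x [B*H, max_L, D] -> [B*H, 1, D] -> squeeze -> [B*H, D]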
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(0, 32),
H=st.integers(1, 3),
max_L=st.integers(1, 32),
D=st.integers(0, 32),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16, torch.double]),
device_type=st.sampled_from(["meta"]),
)
def test_batched_dense_vec_jagged_2d_mul_meta_backend(
self,
B: int,
H: int,
max_L: int,
D: int,
dtype: torch.dtype,
device_type: str,
) -> None:
assume(H == 1 or B != 0)
device = torch.device("cpu")
torch.backends.cuda.matmul.allow_tf32 = False
        # Sometimes length[i] exceeds max_L, meaning the jagged -> dense
        # conversion truncates rather than pads
lengths = torch.randint(max_L * 2, size=(B,), device=device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
values = torch.rand((offsets[-1], H * D), dtype=dtype, device=device)
dense = torch.rand((B * H, max_L), dtype=dtype, device=device)
padded_values = torch.ops.fbgemm.jagged_to_padded_dense(
values,
[offsets],
[max_L],
) # [B, N, H * D]
bmm_arg1 = dense.unsqueeze(1)
bmm_arg2 = (
padded_values.reshape(B, max_L, H, D)
.transpose(1, 2)
.reshape(B * H, max_L, D)
)
# torch.bmm not implemented for Half on CPU
if dtype in [torch.half, torch.bfloat16]:
bmm_arg1 = bmm_arg1.float()
bmm_arg2 = bmm_arg2.float()
output_ref = torch.bmm(bmm_arg1, bmm_arg2).squeeze(
1
) # [B H, 1, N] x [B H, N, D] = [B H, 1, D]
dense.to(device_type)
values.to(device_type)
output = torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul(
dense, values, offsets
)
assert output.size() == output_ref.size()
@unittest.skipIf(*symint_vector_unsupported())
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(2, 32),
H=st.integers(1, 3),
max_L=st.integers(1, 32),
D=st.integers(2, 32),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16, torch.double]),
device_type=st.just("cpu"),
)
def test_batched_dense_vec_jagged_2d_mul_dynamic_shape(
self,
B: int,
H: int,
max_L: int,
D: int,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
assume(H == 1 or B != 0)
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
        # Sometimes length[i] exceeds max_L, meaning the jagged -> dense
        # conversion truncates rather than pads
lengths = torch.randint(low=1, high=max_L * 2, size=(B,), device=device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
values = torch.rand((offsets[-1], H * D), dtype=dtype, device=device)
dense = torch.rand((B * H, max_L), dtype=dtype, device=device)
padded_values = torch.ops.fbgemm.jagged_to_padded_dense(
values,
[offsets],
[max_L],
) # [B, N, H * D]
bmm_arg1 = dense.unsqueeze(1)
bmm_arg2 = (
padded_values.reshape(B, max_L, H, D)
.transpose(1, 2)
.reshape(B * H, max_L, D)
)
# torch.bmm not implemented for Half on CPU
if dtype in [torch.half, torch.bfloat16]:
bmm_arg1 = bmm_arg1.float()
bmm_arg2 = bmm_arg2.float()
output_ref = torch.bmm(bmm_arg1, bmm_arg2).squeeze(
1
) # [B H, 1, N] x [B H, N, D] = [B H, 1, D]
dense.to(device_type)
values.to(device_type)
torch._dynamo.mark_dynamic(dense, 0)
torch._dynamo.mark_dynamic(values, 0)
torch._dynamo.mark_dynamic(values, 1)
torch._dynamo.mark_dynamic(offsets, 0)
output = torch.compile(
torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul,
fullgraph=True,
dynamic=True,
)(dense, values, offsets)
assert output.size() == output_ref.size()
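    # NOTE (illustrative): the dynamic-shape pattern used by the *_dynamic_shape
    # tests above, shown in isolation. This is a generic sketch of the
    # torch.compile + mark_dynamic recipe, not something specific to fbgemm:
    #   torch._dynamo.reset()                   # start from a clean compile state
    #   torch._dynamo.mark_dynamic(tensor, 0)   # treat dim 0 as a symbolic size
    #   compiled = torch.compile(op, fullgraph=True, dynamic=True)
    #   out = compiled(tensor, ...)             # intended to be reused across sizes
    #                                           # without recompiling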
@staticmethod
def jagged_index_select_2d_ref(
values: torch.Tensor,
lengths: torch.Tensor,
inverse_lookup: torch.Tensor,
device: torch.device,
) -> torch.Tensor:
offsets = torch.ops.fbgemm.asynchronous_exclusive_cumsum(lengths)
end_offsets = offsets + lengths
full_start_offset = torch.index_select(offsets, 0, inverse_lookup)
full_end_offset = torch.index_select(end_offsets, 0, inverse_lookup)
index_ranges = torch.stack(
(full_start_offset, full_end_offset), dim=0
).transpose(0, 1)
to_be_merged_tensors = []
for row in index_ranges:
to_be_merged_tensors.append(torch.arange(row[0], row[1], device=device))
all_indices = torch.cat(to_be_merged_tensors, dim=0)
new_embeddings = torch.index_select(values, 0, all_indices)
return new_embeddings
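    # NOTE (illustrative): a tiny worked example of the semantics implemented by
    # jagged_index_select_2d_ref above (values are made up):
    #   lengths = [2, 1, 3] -> offsets = [0, 2, 3], end_offsets = [2, 3, 6]
    #   inverse_lookup = [2, 0]
    #   selected row ranges = [(3, 6), (0, 2)] -> all_indices = [3, 4, 5, 0, 1]
    #   result = values[[3, 4, 5, 0, 1]]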
@unittest.skipIf(*running_on_github)
@given(
max_seq_length=st.integers(5, 10),
batch_size=st.integers(1, 128),
num_cols=st.integers(1, 128),
num_jagged_tensor_rows=st.integers(1, 128),
index_dtype=st.sampled_from([torch.int, torch.long]),
jagged_tensor_dtype=st.sampled_from(
[
torch.float,
torch.half,
torch.int,
torch.long,
] # Disable torch.bfloat16 due to large error bound
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
)
@settings(max_examples=20, deadline=None)
def test_jagged_index_select_2d(
self,
max_seq_length: int,
batch_size: int,
num_cols: int,
num_jagged_tensor_rows: int,
index_dtype: torch.dtype,
jagged_tensor_dtype: torch.dtype,
use_cpu: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
is_float = jagged_tensor_dtype in [torch.float, torch.half, torch.bfloat16]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(num_jagged_tensor_rows,),
dtype=index_dtype,
device=device,
)
indices, _ = torch.sort(
torch.randint(
low=0,
high=num_jagged_tensor_rows,
size=(batch_size,),
dtype=index_dtype,
device=device,
)
)
if is_float:
values = torch.rand(
int(lengths.sum().item()),
num_cols,
dtype=jagged_tensor_dtype,
device=device,
)
else:
values = torch.randint(
2**16,
(int(lengths.sum().item()), num_cols),
dtype=jagged_tensor_dtype,
device=device,
)
values_ref = values.detach().clone()
# Only float tensors can require grad
if is_float:
values.requires_grad = True
values_ref.requires_grad = True
output, _ = torch.ops.fbgemm.jagged_index_select(values, lengths, indices)
output_ref = self.jagged_index_select_2d_ref(
values_ref, lengths, indices, device
)
assert torch.equal(output, output_ref)
if not is_float:
return
grad = torch.rand_like(output)
grad_ref = grad.detach().clone()
output.backward(grad)
output_ref.backward(grad_ref)
torch.testing.assert_close(
values.grad,
values_ref.grad,
rtol=1e-2 if jagged_tensor_dtype in [torch.half, torch.bfloat16] else None,
atol=1e-2 if jagged_tensor_dtype in [torch.half, torch.bfloat16] else None,
)
@unittest.skipIf(*running_on_github)
@given(
max_seq_length=st.integers(5, 10),
batch_size=st.integers(1, 128),
num_cols=st.integers(1, 128),
num_jagged_tensor_rows=st.integers(1, 128),
index_dtype=st.sampled_from([torch.int, torch.long]),
jagged_tensor_dtype=st.sampled_from(
[
torch.float,
torch.half,
torch.int,
torch.long,
] # Disable torch.bfloat16 due to large error bound
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
)
@settings(max_examples=20, deadline=None)
def test_jagged_index_select_2d_in_inference(
self,
max_seq_length: int,
batch_size: int,
num_cols: int,
num_jagged_tensor_rows: int,
index_dtype: torch.dtype,
jagged_tensor_dtype: torch.dtype,
use_cpu: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
is_float = jagged_tensor_dtype in [torch.float, torch.half, torch.bfloat16]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(num_jagged_tensor_rows,),
dtype=index_dtype,
device=device,
)
indices, _ = torch.sort(
torch.randint(
low=0,
high=num_jagged_tensor_rows,
size=(batch_size,),
dtype=index_dtype,
device=device,
)
)
if is_float:
values = torch.rand(
int(lengths.sum().item()),
num_cols,
dtype=jagged_tensor_dtype,
device=device,
)
else:
values = torch.randint(
2**16,
(int(lengths.sum().item()), num_cols),
dtype=jagged_tensor_dtype,
device=device,
)
values_ref = values.detach().clone()
with torch.inference_mode():
output, _ = torch.ops.fbgemm.jagged_index_select(values, lengths, indices)
output_ref = self.jagged_index_select_2d_ref(
values_ref, lengths, indices, device
)
assert torch.equal(output, output_ref)
@given(
batch_size=st.integers(1, 128),
max_length=st.integers(0, 128),
max_truncated_length=st.integers(1, 32),
index_dtype=st.sampled_from([torch.int, torch.long]),
jagged_tensor_dtype=st.sampled_from(
[torch.float, torch.half, torch.bfloat16, torch.int, torch.long]
),
use_cpu=st.just(True),
)
@settings(max_examples=20, deadline=None)
def test_jagged_1d_to_truncated_values(
self,
max_length: int,
batch_size: int,
max_truncated_length: int,
index_dtype: torch.dtype,
jagged_tensor_dtype: torch.dtype,
use_cpu: bool,
) -> None:
device = "cpu" if use_cpu else "cuda"
is_float = jagged_tensor_dtype in [torch.float, torch.half, torch.bfloat16]
lengths = torch.randint(
low=0,
high=max_length + 1,
size=(batch_size,),
dtype=index_dtype,
device=device,
)
n = int(lengths.sum().item())
if is_float:
values = torch.rand(
(n,),
dtype=jagged_tensor_dtype,
device=device,
)
else:
values = torch.randint(
2**16,
(n,),
dtype=jagged_tensor_dtype,
device=device,
)
truncated_values = torch.ops.fbgemm.jagged_1d_to_truncated_values(
values,
lengths,
max_truncated_length,
)
dense_values = torch.ops.fbgemm.jagged_1d_to_dense(
values=values,
offsets=torch.ops.fbgemm.asynchronous_complete_cumsum(lengths),
max_sequence_length=max_truncated_length,
padding_value=0,
) # [B, N]
truncated_lengths_ref = torch.clamp(lengths, max=max_truncated_length)
mask2d = torch.arange(max_truncated_length, device=device).expand(
batch_size, -1
) < truncated_lengths_ref.unsqueeze(-1)
truncated_values_ref = dense_values[mask2d].view(-1)
torch.testing.assert_close(truncated_values, truncated_values_ref)
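    # NOTE (illustrative): a minimal worked example of the truncation semantics
    # checked above (numbers are made up). With max_truncated_length = 2:
    #   lengths = [3, 1, 0] -> truncated lengths = [2, 1, 0]
    #   values  = [a0, a1, a2, b0]   (rows a and b concatenated)
    #   truncated values = [a0, a1, b0]   (a2 is dropped, shorter rows unchanged)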
@given(
batch_size=st.integers(1, 128),
max_length=st.integers(0, 128),
index_dtype=st.sampled_from([torch.int, torch.long]),
jagged_tensor_dtype=st.sampled_from([torch.int, torch.long]),
empty_lengths=st.booleans(),
use_cpu=st.just(True),
)
@settings(max_examples=20, deadline=None)
def test_masked_select_jagged_1d(
self,
max_length: int,
batch_size: int,
index_dtype: torch.dtype,
jagged_tensor_dtype: torch.dtype,
empty_lengths: bool,
use_cpu: bool,
) -> None:
device = "cpu" if use_cpu else "cuda"
if empty_lengths:
lengths = torch.zeros(batch_size, dtype=index_dtype, device=device)
else:
lengths = torch.randint(
low=0,
high=max_length + 1,
size=(batch_size,),
dtype=index_dtype,
device=device,
)
lengths[batch_size // 2] = 0 # test a corner case
n = int(lengths.sum().item())
values = torch.randint(
2**16,
(n,),
dtype=jagged_tensor_dtype,
device=device,
)
mask = torch.randint(2, (n,)) > 0
masked_values, masked_lengths = torch.ops.fbgemm.masked_select_jagged_1d(
values,
lengths,
mask,
)
masked_values_ref = values[mask]
cum_count = torch.cumsum(mask, 0)
cum_count = torch.cat((cum_count, torch.tensor([0])))
cum_length = cum_count[torch.cumsum(lengths, 0) - 1]
cum_length_shift_right = torch.roll(cum_length, 1)
cum_length_shift_right[0] = 0
masked_lengths_ref = (cum_length - cum_length_shift_right).to(lengths.dtype)
torch.testing.assert_close(masked_values, masked_values_ref)
torch.testing.assert_close(masked_lengths, masked_lengths_ref)
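    # NOTE (illustrative): a minimal worked example of masked_select_jagged_1d as
    # exercised above (numbers are made up):
    #   lengths = [2, 3], values = [v0, v1, v2, v3, v4], mask = [1, 0, 0, 1, 1]
    #   masked_values  = [v0, v3, v4]
    #   masked_lengths = [1, 2]   (how many kept elements fall in each row)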
@unittest.skipIf(*gpu_unavailable)
@given(
max_seq_length=st.integers(5, 10),
input_batch_size=st.integers(1, 128),
output_batch_size=st.integers(1, 128),
num_batches=st.integers(1, 3),
index_dtype=st.sampled_from([torch.int, torch.long]),
jagged_tensor_dtype=st.sampled_from(
[
torch.float,
torch.half,
torch.int,
torch.long,
] # Disable torch.bfloat16 due to large error bound
),
has_weights=st.booleans(),
)
@settings(max_examples=20, deadline=None)
def test_keyed_jagged_index_select_dim1(
self,
max_seq_length: int,
input_batch_size: int,
output_batch_size: int,
num_batches: int,
index_dtype: torch.dtype,
jagged_tensor_dtype: torch.dtype,
has_weights: bool,
) -> None:
is_float = jagged_tensor_dtype in [torch.float, torch.half, torch.bfloat16]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(input_batch_size * num_batches,),
dtype=index_dtype,
device="cuda",
)
offsets = torch.concat(
[torch.zeros(1, dtype=torch.long, device="cuda"), lengths.cumsum(0)]
)
indices = torch.randint(
low=0,
            high=input_batch_size,
size=(output_batch_size,),
dtype=index_dtype,
device="cuda",
)
if is_float:
values = torch.rand(
int(offsets[-1].item()),
dtype=jagged_tensor_dtype,
device="cuda",
)
else:
values = torch.randint(
2**16,
(int(offsets[-1].item()),),
dtype=jagged_tensor_dtype,
device="cuda",
)
values_ref = values.detach().clone()
if has_weights:
weights = torch.rand(
int(offsets[-1].item()),
dtype=random.choice([torch.float, torch.half]),
device="cuda",
)
else:
weights = None
# Only float tensors can require grad
if is_float:
values.requires_grad = True
values_ref.requires_grad = True
index_select_output = torch.ops.fbgemm.keyed_jagged_index_select_dim1(
values, lengths, offsets, indices, input_batch_size, weights
)
output = index_select_output[0]
if has_weights:
output_weights = index_select_output[2]
output_ref = []
output_weight_ref = []
for k in range(num_batches):
key_lengths = lengths[k * input_batch_size : (k + 1) * input_batch_size]
start_offset = offsets[k * input_batch_size]
end_offset = offsets[(k + 1) * input_batch_size]
key_values = values_ref[start_offset:end_offset].view(-1, 1)
output_ref.append(
torch.ops.fbgemm.jagged_index_select(key_values, key_lengths, indices)[
0
].view(-1)
)
if has_weights:
# pyre-ignore[16]
key_weights = weights[start_offset:end_offset].view(-1, 1)
output_weight_ref.append(
torch.ops.fbgemm.jagged_index_select(
key_weights, key_lengths, indices
)[0].view(-1)
)
output_ref = torch.concat(output_ref)
assert torch.equal(output, output_ref)
if has_weights:
output_weight_ref = torch.concat(output_weight_ref)
# pyre-ignore[61]
assert torch.equal(output_weights, output_weight_ref)
if not is_float:
return
grad = torch.rand_like(output)
grad_ref = grad.detach().clone()
output.backward(grad)
output_ref.backward(grad_ref)
torch.testing.assert_close(
values.grad,
values_ref.grad,
rtol=1e-2 if jagged_tensor_dtype in [torch.half, torch.bfloat16] else None,
atol=1e-2 if jagged_tensor_dtype in [torch.half, torch.bfloat16] else None,
)
@given(
B=st.integers(1, 512),
max_L=st.integers(1, 1000),
D=st.integers(1, 32),
dtype=st.sampled_from([torch.float, torch.double]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_softmax(
self,
B: int,
max_L: int,
D: int,
dtype: torch.dtype,
device_type: str,
) -> None:
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
lengths = torch.randint(max_L + 1, size=(B,), device=device)
total_length = int(lengths.sum().item())
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
values = torch.rand(
(total_length, D), requires_grad=True, dtype=dtype, device=device
)
output, _ = torch.ops.fbgemm.jagged_softmax(
values,
offsets,
max_L,
)
values_ref = values.detach().clone().requires_grad_(True)
output_ref, _ = torch.ops.fbgemm.dense_to_jagged(
torch.nn.functional.softmax(
torch.ops.fbgemm.jagged_to_padded_dense(
values_ref,
[offsets],
max_lengths=[max_L],
padding_value=-5e7,
).transpose(1, 2),
dim=-1,
).permute(0, 2, 1),
[offsets],
total_length,
)
# verify forward
torch.testing.assert_close(output, output_ref)
# verify backward
grad_output = output.detach().clone().requires_grad_(True)
output.backward(grad_output)
output_ref.backward(grad_output)
torch.testing.assert_close(values.grad, values_ref.grad)
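    # NOTE (illustrative): the reference above pads with -5e7 before the dense
    # softmax so that padded positions contribute exp(-5e7 - max) ~= 0 to each
    # row's normalization; the padded entries are then dropped again by
    # dense_to_jagged, leaving a per-row softmax over only the valid elements.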
@given(
B=st.integers(10, 512),
M=st.integers(1, 32),
N=st.integers(1, 32),
max_L=st.integers(1, 32),
dtype=st.sampled_from([torch.float, torch.double]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@unittest.skipIf(*on_arm_platform)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_jagged_bmm(
self,
B: int,
M: int,
N: int,
max_L: int,
dtype: torch.dtype,
device_type: str,
) -> None:
assume(B != 0)
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
lengths = torch.randint(max_L + 1, size=(B,), device=device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
total_length = int(lengths.sum().item())
x_values = torch.rand(
(total_length, M), requires_grad=True, dtype=dtype, device=device
)
y_values = torch.rand(
(total_length, N), requires_grad=True, dtype=dtype, device=device
)
output = torch.ops.fbgemm.jagged_jagged_bmm(
x_values,
y_values,
offsets,
max_L,
)
x_values_ref = x_values.detach().clone().requires_grad_(True)
y_values_ref = y_values.detach().clone().requires_grad_(True)
x_dense_ref = torch.ops.fbgemm.jagged_to_padded_dense(
x_values_ref,
[offsets],
max_lengths=[max_L],
)
y_dense_ref = torch.ops.fbgemm.jagged_to_padded_dense(
y_values_ref,
[offsets],
max_lengths=[max_L],
)
output_ref = torch.bmm(x_dense_ref.transpose(2, 1), y_dense_ref)
# verify forward
torch.testing.assert_close(output, output_ref)
# verify backward
grad_output = output.detach().clone().requires_grad_(True)
output.backward(grad_output)
output_ref.backward(grad_output)
torch.testing.assert_close(x_values.grad, x_values_ref.grad)
torch.testing.assert_close(y_values.grad, y_values_ref.grad)
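    # NOTE (illustrative): shape contract exercised above. For each bag b,
    # jagged_jagged_bmm multiplies the transposed x-segment by the y-segment:
    #   x_b: [L_b, M], y_b: [L_b, N]  ->  x_b^T @ y_b: [M, N]
    # so the output is [B, M, N]. The dense reference pads L_b up to max_L,
    # which does not change the product because the padded rows are zero.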
@given(
B=st.integers(10, 512),
M=st.integers(1, 32),
N=st.integers(1, 32),
max_L=st.integers(1, 32),
dtype=st.sampled_from([torch.float, torch.double]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@unittest.skipIf(*on_arm_platform)
@settings(verbosity=Verbosity.verbose, max_examples=2, deadline=None)
def test_jagged_dense_bmm(
self,
B: int,
M: int,
N: int,
max_L: int,
dtype: torch.dtype,
device_type: str,
) -> None:
assume(B != 0)
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
lengths = torch.randint(max_L + 1, size=(B,), device=device)
total_length = int(lengths.sum().item())
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
x_values = torch.rand(
(total_length, M), requires_grad=True, dtype=dtype, device=device
)
y = torch.rand((B, M, N), requires_grad=True, dtype=dtype, device=device)
output, _ = torch.ops.fbgemm.jagged_dense_bmm(
x_values,
offsets,
y,
max_L,
)
x_values_ref = x_values.detach().clone().requires_grad_(True)
x_dense_ref = torch.ops.fbgemm.jagged_to_padded_dense(
x_values_ref,
[offsets],
max_lengths=[max_L],
)
y_ref = y.detach().clone().requires_grad_(True)
output_dense = torch.bmm(x_dense_ref, y_ref)
output_ref, _ = torch.ops.fbgemm.dense_to_jagged(
output_dense, [offsets], total_length
)
# verify forward
torch.testing.assert_close(output, output_ref)
# verify backward
grad_output = output.detach().clone().requires_grad_(True)
output.backward(grad_output)
output_ref.backward(grad_output)
torch.testing.assert_close(x_values.grad, x_values_ref.grad)
torch.testing.assert_close(y.grad, y_ref.grad)
@unittest.skipIf(*symint_vector_unsupported())
@given(
B=st.integers(10, 512),
M=st.integers(2, 32),
N=st.integers(2, 32),
max_L=st.integers(2, 32),
dtype=st.sampled_from([torch.float, torch.double]),
device_type=st.just("cpu"),
)
@unittest.skipIf(*on_arm_platform)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_dense_bmm_dynamic_shape(
self,
B: int,
M: int,
N: int,
max_L: int,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
assume(B != 0)
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
lengths = torch.randint(low=1, high=max_L + 1, size=(B,), device=device)
total_length = int(lengths.sum().item())
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
x_values = torch.rand(
(total_length, M), requires_grad=True, dtype=dtype, device=device
)
y = torch.rand((B, M, N), requires_grad=True, dtype=dtype, device=device)
torch._dynamo.mark_dynamic(x_values, 0)
torch._dynamo.mark_dynamic(x_values, 1)
        torch._dynamo.mark_dynamic(lengths, 0)  # offsets is derived from lengths (one extra element)
output, _ = torch.compile(
torch.ops.fbgemm.jagged_dense_bmm, fullgraph=True, dynamic=True
)(
x_values,
offsets,
y,
max_L,
)
x_values_ref = x_values.detach().clone().requires_grad_(True)
x_dense_ref = torch.ops.fbgemm.jagged_to_padded_dense(
x_values_ref,
[offsets],
max_lengths=[max_L],
)
y_ref = y.detach().clone().requires_grad_(True)
output_dense = torch.bmm(x_dense_ref, y_ref)
output_ref, _ = torch.ops.fbgemm.dense_to_jagged(
output_dense, [offsets], total_length
)
# verify forward
torch.testing.assert_close(output, output_ref)
# verify backward
grad_output = output.detach().clone().requires_grad_(True)
output.backward(grad_output)
output_ref.backward(grad_output)
torch.testing.assert_close(x_values.grad, x_values_ref.grad)
torch.testing.assert_close(y.grad, y_ref.grad)
@given(
B=st.integers(10, 512),
N=st.integers(10, 64),
slice_length=st.integers(0, 64),
dtype=st.sampled_from([torch.float, torch.double]),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_slice(
self,
B: int,
N: int,
slice_length: int,
dtype: torch.dtype,
) -> None:
assume(B != 0)
device = torch.device("cpu")
torch.backends.cuda.matmul.allow_tf32 = False
lengths = torch.randint(N + 1, size=(B,), device=device)
start_list = [random.randint(0, max(len_ - 1, 0)) for len_ in lengths.tolist()]
start = torch.tensor(start_list, device=device)
total_length = int(lengths.sum().item())
x_values = torch.rand(
(total_length), requires_grad=True, dtype=dtype, device=device
)
output, output_lengths = torch.ops.fbgemm.jagged_slice(
x_values,
lengths,
start,
slice_length,
)
output_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(output_lengths)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
x_values_ref = x_values.detach().clone().requires_grad_(True)
def jagged_slice_ref(
x_values: torch.Tensor,
offsets: torch.Tensor,
start: torch.Tensor,
slice_length: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
end_offsets_ = slice_length + start + offsets[:-1]
end_offsets = torch.where(
end_offsets_ > offsets[1:], offsets[1:], end_offsets_
)
start_offsets = start + offsets[:-1]
indices_to_select: List[torch.Tensor] = []
for i in range(end_offsets.size(0)):
indices_to_select.append(
torch.arange(start_offsets[i].item(), end_offsets[i].item())
)
output_ref = torch.index_select(x_values, 0, torch.cat(indices_to_select))
new_lengths = end_offsets - start_offsets
new_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(new_lengths)
return output_ref, new_offsets
output_ref, output_offsets_ref = jagged_slice_ref(
x_values_ref, offsets, start, slice_length
)
# verify forward
torch.testing.assert_close(
output, output_ref, msg=f"output={output} output_ref={output_ref}"
)
torch.testing.assert_close(
output_offsets,
output_offsets_ref,
msg=f"output_off={output_offsets} output_off_ref={output_offsets_ref}",
)
# verify backward
grad_output = output.detach().clone().requires_grad_(True)
output.backward(grad_output)
output_ref.backward(grad_output)
torch.testing.assert_close(
x_values.grad,
x_values_ref.grad,
msg=f"grad={x_values.grad} x_values_ref.grad={x_values_ref.grad}",
)
def test_jagged_slice_errors(
self,
) -> None:
lengths = torch.tensor([1, 2, 3, 4, 5, 6])
values = torch.tensor([x + y for x in range(6) for y in range(x)])
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.jagged_slice(
values, lengths, torch.tensor([2, 1, 2, 3, 4, 2]), 7
)
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.jagged_slice(
values, lengths, torch.tensor([-2, 1, 1, 0, 1, 2]), 7
)
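    # NOTE (illustrative): a minimal worked example of jagged_slice as checked by
    # the reference above (numbers are made up). With slice_length = 2:
    #   lengths = [3, 1], start = [1, 0]
    #   row 0 = [v0, v1, v2] -> slice [v1, v2]   (start 1, length min(2, 3 - 1) = 2)
    #   row 1 = [v3]         -> slice [v3]       (start 0, length min(2, 1 - 0) = 1)
    #   output values = [v1, v2, v3], output lengths = [2, 1]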
@unittest.skipIf(*gpu_unavailable)
@given(
B=st.integers(min_value=100, max_value=200),
F=st.integers(min_value=50, max_value=100),
max_length=st.integers(min_value=5, max_value=10),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_jagged_unique_indices(
self,
B: int, # Batch size
F: int, # The number of features
max_length: int, # The maximum value of pooling factor
) -> None:
hash_size_list = []
lengths_list = []
indices_list = []
linearized_indices_list = []
hash_size_offsets_list = [0]
for _ in range(F):
# We generate a small hash size to increase index duplication
hash_size = random.randint(3, 5)
hash_size_list.append(hash_size)
hash_size_offset = hash_size_offsets_list[-1] + 1
hash_size_offsets_list.append(hash_size_offset)
for _ in range(B):
length = random.randint(0, max_length)
lengths_list.append(length)
if length > 0:
indices = np.random.randint(0, hash_size, size=length)
linearized_indices = indices + sum(hash_size_list[:-1])
indices_list.extend(indices)
linearized_indices_list.extend(linearized_indices)
device = torch.device("cuda")
dtype = torch.int64
hash_size = torch.as_tensor(hash_size_list, dtype=dtype, device=device)
hash_size_offsets = torch.as_tensor(
hash_size_offsets_list, dtype=dtype, device=device
)
lengths = torch.as_tensor(lengths_list, dtype=dtype, device=device)
indices = torch.as_tensor(indices_list, dtype=dtype, device=device)
linearized_indices = torch.as_tensor(
linearized_indices_list, dtype=dtype, device=device
)
hash_size_cum_sum = torch.zeros(F + 1, dtype=dtype, device=device)
hash_size_cum_sum[1:] = torch.cumsum(hash_size, dim=0)
offsets = torch.zeros(F * B + 1, dtype=dtype, device=device)
offsets[1:] = torch.cumsum(lengths, dim=0)
(
output_lengths,
output_offsets,
unique_indices,
reverse_index,
) = torch.ops.fbgemm.jagged_unique_indices(
hash_size_cum_sum, hash_size_offsets, offsets, indices
)
# Check hash size cumsum to offsets function
output_hash_size_offsets_list = hash_size_cumsum_to_offsets(
hash_size_cum_sum.tolist()
)
self.assertEqual(output_hash_size_offsets_list, hash_size_offsets_list)
# Compute hash size cumsum and offsets based on KJT offsets and indices
(
inferred_hash_size_cum_sum,
inferred_hash_size_offsets,
) = torch.ops.fbgemm.jagged_hash_size_cumsum(offsets, indices, B)
(
output_lengths_inf,
output_offsets_inf,
unique_indices_inf,
reverse_index_inf,
) = torch.ops.fbgemm.jagged_unique_indices(
inferred_hash_size_cum_sum, inferred_hash_size_offsets, offsets, indices
)
self.assertTrue(torch.equal(output_lengths, output_lengths_inf))
self.assertTrue(torch.equal(output_offsets, output_offsets_inf))
self.assertTrue(torch.equal(unique_indices, unique_indices_inf))
self.assertTrue(torch.equal(reverse_index, reverse_index_inf))
unique_linearized_indices = torch.unique(linearized_indices, sorted=True)
self.assertTrue(unique_linearized_indices.numel() == unique_indices.numel())
unique_indices_list = unique_indices.tolist()
reverse_index_list = reverse_index.tolist()
for i in range(len(reverse_index_list)):
pos = reverse_index_list[i]
self.assertTrue(unique_indices_list[pos] == indices_list[i])
input_offsets_list = offsets.tolist()
output_offsets_list = output_offsets.tolist()
for i in range(F):
input_start = input_offsets_list[i * B]
input_end = input_offsets_list[(i + 1) * B]
output_start = output_offsets_list[i * B]
output_end = output_offsets_list[(i + 1) * B]
for each_offset in range(input_start, input_end):
pos = reverse_index_list[each_offset]
self.assertTrue((output_start <= pos) and (pos < output_end))
@unittest.skipIf(*gpu_unavailable)
@given(
B=st.integers(min_value=100, max_value=200),
F=st.integers(min_value=50, max_value=100),
max_length=st.integers(min_value=5, max_value=10),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_jagged_unique_indices_multi_keys(
self,
B: int, # Batch size
F: int, # The number of features
max_length: int, # The maximum value of pooling factor
) -> None:
hash_size_list = []
lengths_list = []
indices_list = []
linearized_indices_list = []
MAX_HASH_SIZE = 10
for _ in range(F):
# We generate a small hash size to increase index duplication
hash_size = random.randint(3, 6)
self.assertTrue(hash_size <= MAX_HASH_SIZE)
masked_hash_size = MAX_HASH_SIZE if random.randint(1, 3) == 3 else 0
hash_size_list.append(masked_hash_size)
for _ in range(B):
length = random.randint(0, max_length)
lengths_list.append(length)
if length > 0:
indices = np.random.randint(0, hash_size, size=length)
linearized_indices = indices + sum(hash_size_list[:-1])
indices_list.extend(indices)
linearized_indices_list.extend(linearized_indices)
device = torch.device("cuda")
dtype = torch.int64
hash_size = torch.as_tensor(hash_size_list, dtype=dtype, device=device)
lengths = torch.as_tensor(lengths_list, dtype=dtype, device=device)
indices = torch.as_tensor(indices_list, dtype=dtype, device=device)
linearized_indices = torch.as_tensor(
linearized_indices_list, dtype=dtype, device=device
)
hash_size_cum_sum = torch.zeros(F + 1, dtype=dtype, device=device)
hash_size_cum_sum[1:] = torch.cumsum(hash_size, dim=0)
offsets = torch.zeros(F * B + 1, dtype=dtype, device=device)
offsets[1:] = torch.cumsum(lengths, dim=0)
# Compute hash size offsets based on hash size cumsum to dedup
# indices from multiple keys
hash_size_cum_sum_list = hash_size_cum_sum.tolist()
hash_size_offsets_list = hash_size_cumsum_to_offsets(hash_size_cum_sum_list)
hash_size_offsets = torch.as_tensor(
hash_size_offsets_list, dtype=dtype, device=device
)
(
_, # output lengths
_, # output offsets
unique_indices,
reverse_index,
) = torch.ops.fbgemm.jagged_unique_indices(
hash_size_cum_sum, hash_size_offsets, offsets, indices
)
unique_linearized_indices = torch.unique(linearized_indices, sorted=True)
self.assertTrue(unique_linearized_indices.numel() == unique_indices.numel())
unique_indices_list = unique_indices.tolist()
reverse_index_list = reverse_index.tolist()
for i in range(len(reverse_index_list)):
pos = reverse_index_list[i]
self.assertTrue(unique_indices_list[pos] == indices_list[i])
@unittest.skipIf(*gpu_unavailable)
@given(
B=st.integers(min_value=100, max_value=200),
F=st.integers(min_value=50, max_value=100),
)
@settings(verbosity=Verbosity.verbose, max_examples=2, deadline=None)
def test_jagged_unique_indices_empty(
self,
B: int, # Batch size
F: int, # The number of features
) -> None:
hash_size_cumsum_list = [0] + list(itertools.accumulate([10] * F))
hash_size_offsets_list = [0] + list(itertools.accumulate([1] * F))
offsets_list = [0] * (B * F + 1)
indices_list = []
device = torch.device("cuda")
dtype = torch.int64
hash_size_cumsum = torch.as_tensor(
hash_size_cumsum_list, device=device, dtype=dtype
)
hash_size_offsets = torch.as_tensor(
hash_size_offsets_list, device=device, dtype=dtype
)
offsets = torch.as_tensor(offsets_list, device=device, dtype=dtype)
indices = torch.as_tensor(indices_list, device=device, dtype=dtype)
(
output_lengths,
output_offsets,
unique_indices,
reverse_index,
) = torch.ops.fbgemm.jagged_unique_indices(
hash_size_cumsum, hash_size_offsets, offsets, indices
)
# The output should be empty since there are no input indices
self.assertEqual(unique_indices.numel(), 0)
self.assertEqual(reverse_index.numel(), 0)
self.assertEqual(torch.sum(output_lengths).item(), 0)
self.assertEqual(torch.sum(output_offsets).item(), 0)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import unittest
import hypothesis.strategies as st
import torch
from hypothesis import given, settings, Verbosity
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import gpu_unavailable
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:merge_pooled_embeddings")
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:merge_pooled_embeddings_cpu"
)
from fbgemm_gpu.test.test_utils import gpu_unavailable
open_source = False
@unittest.skipIf(*gpu_unavailable)
@unittest.skipIf(open_source, "Not supported in open source yet")
class MergePooledEmbeddingsTest(unittest.TestCase):
@given(
num_ads=st.integers(min_value=1, max_value=10),
embedding_dimension=st.integers(min_value=1, max_value=32),
ads_tables=st.integers(min_value=1, max_value=32),
num_gpus=st.integers(min_value=1, max_value=torch.cuda.device_count()),
non_default_stream=st.booleans(),
r=st.randoms(use_true_random=False),
dim=st.integers(min_value=0, max_value=1),
)
    # Instantiating up to 8 CUDA contexts can take a long time, so cap the number of examples.
@settings(verbosity=Verbosity.verbose, max_examples=40, deadline=None)
def test_merge(
self,
num_ads,
embedding_dimension,
ads_tables,
num_gpus,
non_default_stream,
r,
dim: int,
) -> None:
dst_device = r.randint(0, num_gpus - 1)
torch.cuda.set_device(dst_device)
ad_ds = [embedding_dimension * ads_tables for _ in range(num_gpus)]
batch_indices = torch.zeros(num_ads).long().cuda()
pooled_ad_embeddings = [
torch.randn(
num_ads, ad_d, dtype=torch.float16, device=torch.device(f"cuda:{i}")
)
for i, ad_d in enumerate(ad_ds)
]
r.shuffle(pooled_ad_embeddings)
streams = [torch.cuda.Stream(device=i) for i in range(num_gpus)]
import contextlib
uncat_size = batch_indices.size(0) if dim == 1 else ad_ds[0]
with contextlib.ExitStack() as stack:
if non_default_stream:
for stream in streams:
stack.enter_context(torch.cuda.stream(stream))
output = torch.ops.fbgemm.merge_pooled_embeddings(
pooled_ad_embeddings, uncat_size, batch_indices.device, dim
)
def ref(pooled_ad_embeddings, batch_indices):
return torch.cat([p.cpu() for p in pooled_ad_embeddings], dim=dim)
output_ref = ref(pooled_ad_embeddings, batch_indices)
output_cpu = torch.ops.fbgemm.merge_pooled_embeddings(
[pe.cpu() for pe in pooled_ad_embeddings],
uncat_size,
batch_indices.cpu().device,
dim,
)
self.assertEqual(output.device, torch.device(f"cuda:{dst_device}"))
torch.testing.assert_close(output_ref, output.cpu())
torch.testing.assert_close(output_ref, output_cpu)
@given(
num_inputs=st.integers(min_value=1, max_value=10),
num_gpus=st.integers(min_value=1, max_value=torch.cuda.device_count()),
r=st.randoms(use_true_random=False),
)
    # Instantiating up to 8 CUDA contexts can take a long time, so cap the number of examples.
@settings(verbosity=Verbosity.verbose, max_examples=40, deadline=None)
def test_all_to_one_device(
self,
num_inputs,
num_gpus,
r,
) -> None:
dst_device = torch.device(f"cuda:{r.randint(0, num_gpus - 1)}")
with torch.cuda.device(dst_device):
inputs = [torch.randn(10, 20) for _ in range(num_inputs)]
cuda_inputs = [
input.to(f"cuda:{i % num_gpus}") for i, input in enumerate(inputs)
]
cuda_outputs = torch.ops.fbgemm.all_to_one_device(cuda_inputs, dst_device)
for i, o in zip(inputs, cuda_outputs):
self.assertEqual(o.device, dst_device)
torch.testing.assert_close(o.cpu(), i)
def test_merge_pooled_embeddings_cpu_with_different_target_device(self) -> None:
uncat_size = 2
pooled_embeddings = [torch.ones(uncat_size, 4), torch.ones(uncat_size, 8)]
output_meta = torch.ops.fbgemm.merge_pooled_embeddings(
pooled_embeddings,
uncat_size,
torch.device("meta"),
1,
)
self.assertFalse(output_meta.is_cpu)
self.assertTrue(output_meta.is_meta)
@given(
num_inputs=st.integers(min_value=1, max_value=10),
num_gpus=st.integers(min_value=1, max_value=torch.cuda.device_count()),
r=st.randoms(use_true_random=False),
)
    # Instantiating up to 8 CUDA contexts can take a long time, so cap the number of examples.
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_sum_reduce_to_one(
self,
num_inputs,
num_gpus,
r,
) -> None:
dst_device = torch.device(f"cuda:{r.randint(0, num_gpus - 1)}")
with torch.cuda.device(dst_device):
inputs = [torch.randn(10, 20) for _ in range(num_inputs)]
cuda_inputs = [
input.to(f"cuda:{i % num_gpus}") for i, input in enumerate(inputs)
]
cuda_output = torch.ops.fbgemm.sum_reduce_to_one(cuda_inputs, dst_device)
self.assertEqual(cuda_output.device, dst_device)
torch.testing.assert_close(
cuda_output.cpu(), torch.stack(inputs).sum(dim=0)
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import copy
import math
import pickle
import random
import unittest
from itertools import accumulate
from typing import Any, List, Optional, Tuple, Union
import fbgemm_gpu
import hypothesis.strategies as st
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import (
EmbOptimType as OptimType,
FP8QuantizationConfig,
SparseType,
)
from fbgemm_gpu.split_embedding_optimizer_ops import (
SplitEmbeddingArgs,
SplitEmbeddingOptimizerParams,
SplitEmbeddingRowwiseAdagrad,
)
from fbgemm_gpu.split_embedding_utils import (
b_indices,
fake_quantize_embs,
generate_requests,
get_table_batched_offsets_from_dense,
quantize_embs,
round_up,
to_device,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
EmbeddingLocation,
PoolingMode,
RecordCacheMetrics,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
rounded_row_size_in_bytes,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
CounterBasedRegularizationDefinition,
CounterWeightDecayMode,
DEFAULT_ASSOC,
DenseTableBatchedEmbeddingBagsCodegen,
GradSumDecay,
INT8_EMB_ROW_DIM_OFFSET,
LearningRateMode,
SplitTableBatchedEmbeddingBagsCodegen,
TailIdThreshold,
WeightDecayMode,
)
from hypothesis import assume, given, HealthCheck, settings, Verbosity
from hypothesis.strategies import composite
from torch import Tensor
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from test_utils import gpu_available, gpu_unavailable, TEST_WITH_ROCM
else:
from fbgemm_gpu.test.test_utils import (
gpu_available,
gpu_unavailable,
TEST_WITH_ROCM,
)
MAX_EXAMPLES = 40
# For long running tests reduce the number of iterations to reduce timeout errors.
MAX_EXAMPLES_LONG_RUNNING = 15
@composite
# pyre-ignore
def get_nbit_weights_ty(draw) -> Optional[SparseType]:
"""
    Return None if a mix of weight types should be used; otherwise return a specific SparseType.
"""
mixed_weights_ty = draw(st.booleans())
if mixed_weights_ty:
return None
return draw(
st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.FP8,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
)
)
def gen_mixed_B_batch_sizes(B: int, T: int) -> Tuple[List[List[int]], List[int]]:
num_ranks = np.random.randint(low=1, high=4)
low = max(int(0.25 * B), 1)
high = int(B)
if low == high:
Bs_rank_feature = [[B] * num_ranks for _ in range(T)]
else:
Bs_rank_feature = [
np.random.randint(low=low, high=high, size=num_ranks).tolist()
for _ in range(T)
]
Bs = [sum(Bs_feature) for Bs_feature in Bs_rank_feature]
return Bs_rank_feature, Bs
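# NOTE (illustrative): example output of gen_mixed_B_batch_sizes (randomized in
# practice). With B=8, T=2 and num_ranks=2, one possible draw is
#   Bs_rank_feature = [[3, 6], [2, 7]]   (per-feature, per-rank local batch sizes)
#   Bs              = [9, 9]             (per-feature global batch sizes)
# where each local batch size is drawn from [max(B // 4, 1), B)
# (the degenerate low == high case is handled separately above).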
def format_ref_tensors_in_mixed_B_layout(
ref_tensors: List[torch.Tensor], Bs_rank_feature: List[List[int]]
) -> torch.Tensor:
# Relayout the reference tensor
# Jagged dimension: (rank, table, local batch)
num_ranks = len(Bs_rank_feature[0])
split_tensors = [[] for _ in range(num_ranks)] # shape (rank, table)
for t, ref_tensor in enumerate(ref_tensors):
assert ref_tensor.shape[0] == sum(Bs_rank_feature[t])
tensors = ref_tensor.split(Bs_rank_feature[t])
for r, tensor in enumerate(tensors):
split_tensors[r].append(tensor.flatten())
concat_list = []
for r in range(num_ranks):
concat_list += split_tensors[r]
return torch.cat(concat_list, dim=0)
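# NOTE (illustrative): a small worked example of the mixed-B relayout performed
# above (numbers are made up). With two tables and two ranks,
#   Bs_rank_feature = [[2, 1], [1, 2]]
# the per-table reference outputs are split per rank and re-concatenated as
#   [table0 rank0 (2 rows), table1 rank0 (1 row),
#    table0 rank1 (1 row),  table1 rank1 (2 rows)]
# flattened into a single 1-D tensor, i.e. jagged over (rank, table, local batch).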
class SplitTableBatchedEmbeddingsTest(unittest.TestCase):
def execute_forward_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
pooling_mode: PoolingMode,
use_cpu: bool,
output_dtype: SparseType,
use_experimental_tbe: bool,
) -> None:
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
# NOTE: CPU does not support FP16.
assume(not (use_cpu and weights_precision == SparseType.FP16))
# NOTE: weighted operation can be done only for SUM.
assume(pooling_mode == PoolingMode.SUM or not weighted)
        # NOTE: no-bag (PoolingMode.NONE) ops only work on GPUs and do not support mixed dimensions
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
# TODO: Support these cases
assume(
not mixed_B
or (
weights_precision != SparseType.INT8
and output_dtype != SparseType.INT8
and not use_cpu
and not use_cache
and pooling_mode != PoolingMode.NONE
)
)
emb_op = SplitTableBatchedEmbeddingBagsCodegen
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
            # Unreachable unless a new PoolingMode is added without being handled above
raise RuntimeError("Unknown PoolingMode!")
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if not mixed_B:
Bs = [B] * T
Bs_rank_feature = [[0]]
else:
Bs_rank_feature, Bs = gen_mixed_B_batch_sizes(B, T)
compute_device = ComputeDevice.CUDA
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
compute_device = ComputeDevice.CPU
elif TEST_WITH_ROCM:
# ROCm managed memory allocation is under development
managed = [EmbeddingLocation.DEVICE] * T
elif use_cache:
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.INT8:
for t in range(T):
bs[t].weight.data.copy_(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
bs[t].weight.data
)
)
)
if weights_precision == SparseType.FP16:
bs = [b.half() for b in bs]
# Generate indices
xs = [
to_device(torch.randint(low=0, high=e, size=(b, L)), use_cpu)
for e, b in zip(Es, Bs)
]
# Generate positional weights
xws = [to_device(torch.randn(size=(b, L)), use_cpu) for b in Bs]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16:
xws = [xw.half() for xw in xws]
# Run baseline
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
if do_pooling:
if mixed_B:
f = format_ref_tensors_in_mixed_B_layout(fs, Bs_rank_feature)
else:
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
else:
f = torch.cat(fs, dim=0).view(-1, D)
# Create a TBE op
cc = emb_op(
embedding_specs=[
(
E,
D,
EmbeddingLocation(M),
compute_device,
)
for (E, D, M) in zip(Es, Ds, managed)
],
weights_precision=weights_precision,
optimizer=OptimType.EXACT_ROWWISE_ADAGRAD
if mixed_B
else OptimType.EXACT_SGD,
learning_rate=0.05,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
output_dtype=output_dtype,
use_experimental_tbe=use_experimental_tbe,
)
        # NOTE: script the module to verify that it is TorchScript-compatible
cc = torch.jit.script(cc)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(
bs[t].weight
if weights_precision != SparseType.INT8
else torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(bs[t].weight)
)
x = torch.cat([x.contiguous().flatten() for x in xs], dim=0)
xw = torch.cat([xw.contiguous().flatten() for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(
x, L, sum(Bs), use_cpu
)
batch_size_per_feature_per_rank = Bs_rank_feature if mixed_B else None
# Run TBE
fc2 = (
cc(
indices,
offsets,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
if not weighted
else cc(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
)
# Compare results: f = baseline, fc2 = TBE
tolerance = (
1.0e-5
if weights_precision == SparseType.FP32 and output_dtype == SparseType.FP32
else 8.0e-3
)
torch.testing.assert_close(
fc2.float(), f.float(), atol=tolerance, rtol=tolerance
)
def test_forward_cpu_int8(
self,
) -> None:
weights_precision = SparseType.INT8
use_cpu = True
T = random.randint(1, 10)
D = random.randint(2, min(256, int(2048 / T)))
B = random.randint(1, min(128, int(2048 / T / D)))
L = random.randint(0, min(20, int(2048 / T / D / B)))
log_E = random.randint(3, 5)
use_cache = False
        # cache_algorithm does not matter since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
)
mixed = False
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
SparseType.FP32,
False, # use_experimental_tbe
)
def test_forward_cpu_fp32(
self,
) -> None:
weights_precision = SparseType.FP32
use_cpu = True
T = random.randint(1, 10)
D = random.randint(2, min(256, int(2048 / T)))
B = random.randint(1, min(128, int(2048 / T / D)))
L = random.randint(0, min(20, int(2048 / T / D / B)))
log_E = random.randint(3, 5)
use_cache = False
        # cache_algorithm does not matter since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
)
mixed = False
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
SparseType.FP32,
False, # use_experimental_tbe
)
@unittest.skipIf(*gpu_unavailable)
def test_forward_gpu_no_cache_int8(
self,
) -> None:
weights_precision = SparseType.INT8
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = False
        # cache_algorithm does not matter since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
)
if pooling_mode == PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
SparseType.FP32,
False, # use_experimental_tbe
)
@unittest.skipIf(*gpu_unavailable)
@given(
use_experimental_tbe=st.booleans() if not TEST_WITH_ROCM else st.just(False),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_no_cache_fp16(
self,
use_experimental_tbe: bool,
) -> None:
weights_precision = SparseType.FP16
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = False
        # cache_algorithm does not matter since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
+ ([PoolingMode.NONE] if not use_experimental_tbe else [])
)
if pooling_mode == PoolingMode.NONE:
mixed = False
mixed_B = False
else:
mixed = random.choice([True, False])
mixed_B = (
random.choice([True, False]) if not use_experimental_tbe else False
)
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
SparseType.FP32,
use_experimental_tbe,
)
@unittest.skipIf(*gpu_unavailable)
@given(
use_experimental_tbe=st.booleans() if not TEST_WITH_ROCM else st.just(False),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_no_cache_fp32(
self,
use_experimental_tbe: bool,
) -> None:
weights_precision = SparseType.FP32
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = False
        # cache_algorithm does not matter since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
+ ([PoolingMode.NONE] if not use_experimental_tbe else [])
)
if pooling_mode == PoolingMode.NONE:
mixed = False
mixed_B = False
else:
mixed = random.choice([True, False])
mixed_B = (
random.choice([True, False]) if not use_experimental_tbe else False
)
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
SparseType.FP32,
use_experimental_tbe,
)
@unittest.skipIf(*gpu_unavailable)
@given(
cache_algorithm=st.sampled_from(CacheAlgorithm),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_uvm_cache_int8(
self,
cache_algorithm: CacheAlgorithm,
) -> None:
weights_precision = SparseType.INT8
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = True
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
)
output_dtype = random.choice(
[
SparseType.FP32,
SparseType.FP16,
]
)
if pooling_mode == PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
output_dtype,
False, # use_experimental_tbe
)
@unittest.skipIf(*gpu_unavailable)
@given(
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_experimental_tbe=st.booleans() if not TEST_WITH_ROCM else st.just(False),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_uvm_cache_fp16(
self,
cache_algorithm: CacheAlgorithm,
use_experimental_tbe: bool,
) -> None:
weights_precision = SparseType.FP16
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = True
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
+ ([PoolingMode.NONE] if not use_experimental_tbe else [])
)
output_dtype = random.choice(
[
SparseType.FP32,
SparseType.FP16,
]
)
if pooling_mode == PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
output_dtype,
use_experimental_tbe,
)
@unittest.skipIf(*gpu_unavailable)
@given(
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_experimental_tbe=st.booleans() if not TEST_WITH_ROCM else st.just(False),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_uvm_cache_fp32(
self,
cache_algorithm: CacheAlgorithm,
use_experimental_tbe: bool,
) -> None:
weights_precision = SparseType.FP32
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = True
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
+ ([PoolingMode.NONE] if not use_experimental_tbe else [])
)
output_dtype = random.choice(
[
SparseType.FP32,
SparseType.FP16,
]
)
if pooling_mode == PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
output_dtype,
use_experimental_tbe,
)
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
output_dtype=st.sampled_from([SparseType.FP16, SparseType.INT8]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much],
)
def test_forward_fused_pooled_emb_quant(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
output_dtype: SparseType,
) -> None:
Ds = [
round_up(np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)), 4)
for _ in range(T)
]
E = int(10**log_E)
Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
op = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E,
D,
EmbeddingLocation.DEVICE,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
output_dtype=output_dtype,
device=torch.cuda.current_device(),
)
op_ref = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E,
D,
EmbeddingLocation.DEVICE,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
output_dtype=SparseType.FP32,
device=torch.cuda.current_device(),
)
# sync weights between two ops
split_weights = op.split_embedding_weights()
ref_split_weights = op_ref.split_embedding_weights()
for t in range(T):
split_weights[t].data.copy_(ref_split_weights[t])
requests = generate_requests(2, B, T, L, min(Es), reuse=0.1)
for indices, offsets, _ in requests:
lowp_pooled_output = op(
indices=indices,
offsets=offsets,
)
fp32_pooled_output = op_ref(
indices=indices,
offsets=offsets,
)
lowp_pooled_emb_split = [
d + 8 if output_dtype == SparseType.INT8 else d for d in op.dims
]
lowp_pooled_output_per_table = torch.split(
lowp_pooled_output, lowp_pooled_emb_split, dim=1
)
deq_lowp_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(t.contiguous())
if output_dtype == SparseType.INT8
else t.float()
for t in lowp_pooled_output_per_table
]
fp32_pooled_output_per_table = torch.split(
fp32_pooled_output, op.dims, dim=1
)
dq_fp32_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
t.contiguous()
).contiguous()
)
if output_dtype == SparseType.INT8
else t.half().float()
for t in fp32_pooled_output_per_table
]
cat_deq_lowp_pooled_output = torch.cat(
deq_lowp_pooled_output_per_table, dim=1
)
cat_dq_fp32_pooled_output = torch.cat(
dq_fp32_pooled_output_per_table, dim=1
)
torch.testing.assert_close(
cat_deq_lowp_pooled_output, cat_dq_fp32_pooled_output
)
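    # Same fused output-quantization check as above, but for the int-nbit (inference)
    # TBE op with quantized weight types and FP16/BF16/INT8 outputs.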
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
                # FIXME: INT2 causes a large numerical error in this test
# SparseType.INT2,
]
),
output_dtype=st.sampled_from(
[
SparseType.FP16,
SparseType.BF16,
SparseType.INT8,
# SparseType.INT4,
]
)
if not TEST_WITH_ROCM
else st.sampled_from(
[
SparseType.FP16,
# The counterparts of __nv_bfloat16 and __nv_bfloat162 are not supported on ROCm
SparseType.INT8,
# SparseType.INT4,
]
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much],
)
def test_nbit_forward_fused_pooled_emb_quant(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_ty: SparseType,
output_dtype: SparseType,
) -> None:
D_alignment = max(weights_ty.align_size() for t in range(T))
D_alignment = max(D_alignment, output_dtype.align_size())
D = round_up(D, D_alignment)
# BF16 output only works for CUDA device sm80+ (e.g., A100)
        assume(
            (
                torch.cuda.is_available()
                and torch.cuda.get_device_capability() >= (8, 0)
            )
            or output_dtype != SparseType.BF16
        )
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
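        # NOTE: the randomly drawn Ds above are discarded; every table uses the same D below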
Ds = [D] * T
E = int(10**log_E)
Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
weights_ty_list = [weights_ty] * T
managed = [EmbeddingLocation.DEVICE] * T
op = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
output_dtype=output_dtype,
device=torch.cuda.current_device(),
)
# Initialize the random weights for int nbit table split embedding bag
op.fill_random_weights()
op_ref = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
output_dtype=SparseType.FP32,
device=torch.cuda.current_device(),
)
# Initialize the random weights for int nbit table split embedding bag
op_ref.fill_random_weights()
# sync weights between two ops
split_weights = op.split_embedding_weights()
ref_split_weights = op_ref.split_embedding_weights()
for t in range(T):
(weights, scale_shift) = split_weights[t]
(ref_weights, ref_scale_shift) = ref_split_weights[t]
self.assertEqual(weights.size(), ref_weights.size())
element_size = weights_ty_list[t].bit_rate() / 8.0
rand_tensor = torch.rand(
ref_weights.shape[0], int(ref_weights.shape[1] / element_size)
)
rand_weights, rand_scale_shift = quantize_embs(
rand_tensor, weights_ty_list[t]
)
ref_weights.copy_(rand_weights)
weights.copy_(ref_weights)
if rand_scale_shift is not None:
self.assertIsNotNone(scale_shift)
self.assertIsNotNone(ref_scale_shift)
ref_scale_shift.copy_(rand_scale_shift)
scale_shift.copy_(ref_scale_shift)
requests = generate_requests(1, B, T, L, min(Es), reuse=0.1)
for indices, offsets, _ in requests:
lowp_pooled_output = op(
indices=indices.int(),
offsets=offsets.int(),
)
fp32_pooled_output = op_ref(
indices=indices.int(),
offsets=offsets.int(),
)
lowp_pooled_emb_split = [
d + 8 if output_dtype == SparseType.INT8 else d for d in Ds
]
lowp_pooled_output_per_table = torch.split(
lowp_pooled_output, lowp_pooled_emb_split, dim=1
)
deq_lowp_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(t.contiguous())
if output_dtype == SparseType.INT8
else t.float()
for t in lowp_pooled_output_per_table
]
fp32_pooled_output_per_table = torch.split(fp32_pooled_output, Ds, dim=1)
dq_fp32_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
t.contiguous()
).contiguous()
).contiguous()
if output_dtype == SparseType.INT8
else t.half().float()
for t in fp32_pooled_output_per_table
]
cat_deq_lowp_pooled_output = torch.cat(
deq_lowp_pooled_output_per_table, dim=1
)
cat_dq_fp32_pooled_output = torch.cat(
dq_fp32_pooled_output_per_table, dim=1
)
torch.testing.assert_close(
cat_deq_lowp_pooled_output,
cat_dq_fp32_pooled_output,
rtol=1e-2,
atol=1e-2,
equal_nan=True,
)
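    # Check that split_embedding_weights_with_scale_bias(split_scale_bias_mode=2) returns
    # the same weights as split_embedding_weights() and that the separated scale and bias
    # match the two FP16 halves of the packed scale_shift buffer.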
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
output_dtype=st.sampled_from(
[
SparseType.FP16,
SparseType.BF16,
SparseType.INT8,
]
)
if not TEST_WITH_ROCM
else st.sampled_from(
[
SparseType.FP16,
# The counterparts of __nv_bfloat16 and __nv_bfloat162 are not supported on ROCm
SparseType.INT8,
]
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much],
)
def test_nbit_split_embedding_weights_with_scale_and_bias(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_ty: SparseType,
output_dtype: SparseType,
) -> None:
D_alignment = max(weights_ty.align_size() for t in range(T))
D_alignment = max(D_alignment, output_dtype.align_size())
D = round_up(D, D_alignment)
# BF16 output only works for CUDA device sm80+ (e.g., A100)
        assume(
            (
                torch.cuda.is_available()
                and torch.cuda.get_device_capability() >= (8, 0)
            )
            or output_dtype != SparseType.BF16
        )
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
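        # NOTE: the randomly drawn Ds above are discarded; every table uses the same D below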
Ds = [D] * T
E = int(10**log_E)
Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
weights_ty_list = [weights_ty] * T
managed = [EmbeddingLocation.DEVICE] * T
op = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
output_dtype=output_dtype,
device=torch.cuda.current_device(),
)
# Initialize the random weights for int nbit table split embedding bag
op.fill_random_weights()
# sync weights between two ops
split_weights = op.split_embedding_weights()
split_weights_with_scale_bias = op.split_embedding_weights_with_scale_bias(
split_scale_bias_mode=2
)
for t in range(T):
(weights, scale_bias) = split_weights[t]
(weights2, scale, bias) = split_weights_with_scale_bias[t]
torch.testing.assert_close(weights2, weights)
if scale is None:
self.assertIsNone(scale_bias)
self.assertIsNone(bias)
else:
torch.testing.assert_close(
scale.cpu(),
torch.tensor(
scale_bias[:, : scale_bias.size(1) // 2]
.contiguous()
.cpu()
.numpy()
.view(np.float16)
),
)
torch.testing.assert_close(
bias.cpu(),
torch.tensor(
scale_bias[:, scale_bias.size(1) // 2 :]
.contiguous()
.cpu()
.numpy()
.view(np.float16)
),
)
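    # Backward test for the dense TBE op: compares outputs and weight gradients against
    # per-table nn.EmbeddingBag/nn.Embedding references, then checks per-sample-weight
    # gradients and runs torch.autograd.gradcheck on a SUM-pooled op.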
@given(
T=st.integers(min_value=1, max_value=3),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=32),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=10),
weights_precision=st.sampled_from([SparseType.FP16, SparseType.FP32]),
weighted=st.booleans(),
mixed=st.booleans(),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=10,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_dense( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# NOTE: torch.autograd.gradcheck() is too time-consuming for CPU version
# so we have to limit (T * B * L * D)!
assume(not use_cpu or T * B * L * D <= 2048)
assume(pooling_mode == PoolingMode.SUM or not weighted)
assume(not (use_cpu and weights_precision == SparseType.FP16))
        # No-bag (PoolingMode.NONE) ops only work on GPUs and support neither mixed nor weighted
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
assume(not weighted or pooling_mode != PoolingMode.NONE)
emb_op = DenseTableBatchedEmbeddingBagsCodegen
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2 * E)) for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=False), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=False), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16:
bs = [b.half() for b in bs]
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(e), size=(B, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for e in Es
]
if long_segments and L > 0 and weights_precision != SparseType.FP16:
for x in xs:
x[:, 0] = 0
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16:
xws = [xw.half() for xw in xws]
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
# pyre-fixme[16]: `Optional` has no attribute `view`.
grad_weights = torch.cat([b.weight.grad.view(-1) for b in bs])
if weights_precision == SparseType.FP16 and not use_cpu:
grad_weights = grad_weights.half()
cc = emb_op(
embedding_specs=[(E, D) for (E, D) in zip(Es, Ds)],
pooling_mode=pooling_mode,
use_cpu=use_cpu,
weights_precision=weights_precision,
output_dtype=output_dtype,
)
if do_pooling:
            # NOTE: check that the module is TorchScript-compatible
cc = torch.jit.script(cc)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
if do_pooling:
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
else:
f = torch.cat(fs, dim=0).view(-1, D)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=5.0e-3
if weights_precision == SparseType.FP16 or output_dtype == SparseType.FP16
else 1.0e-5,
rtol=5.0e-3
if weights_precision == SparseType.FP16 or output_dtype == SparseType.FP16
else 1.0e-5,
)
if do_pooling:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0)
fc2.backward(goc)
torch.testing.assert_close(
cc.weights.grad,
grad_weights,
atol=5.0e-3
if weights_precision == SparseType.FP16 or output_dtype == SparseType.FP16
else 1.0e-4,
rtol=5.0e-3
if weights_precision == SparseType.FP16 or output_dtype == SparseType.FP16
else 1.0e-4,
)
cc = DenseTableBatchedEmbeddingBagsCodegen(
[(E, D) for (E, D) in zip(Es, Ds)],
# NOTE: only SUM pooling can work with per_sample_weights!
pooling_mode=PoolingMode.SUM,
use_cpu=use_cpu,
)
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
# NOTE: GPU version of DenseTableBatchedEmbeddingBagsCodegen doesn't support double.
cc = cc.double()
per_sample_weights = per_sample_weights.double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
y = cc(indices, offsets, per_sample_weights)
y.sum().backward()
# pyre-fixme[16]: `Optional` has no attribute `clone`.
indice_weight_grad_all = per_sample_weights.grad.clone().cpu()
T_ = len(xws)
feature_requires_grad = to_device(
torch.tensor(np.random.choice([0, 1], replace=True, size=(T_,))).int(),
use_cpu,
)
per_sample_weights = per_sample_weights.detach().clone()
per_sample_weights.requires_grad = True
y = cc(
indices,
offsets,
per_sample_weights,
feature_requires_grad=feature_requires_grad,
)
y.sum().backward()
indice_weight_grad_mask = per_sample_weights.grad.clone().cpu()
for t in range(T_):
if feature_requires_grad[t]:
torch.testing.assert_close(
indice_weight_grad_mask.view(T_, B, L)[t],
indice_weight_grad_all.view(T_, B, L)[t],
)
else:
torch.testing.assert_close(
indice_weight_grad_mask.view(T_, B, L)[t],
torch.zeros_like(indice_weight_grad_mask.view(T_, B, L)[t]),
)
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
# NOTE: GPU version of DenseTableBatchedEmbeddingBagsCodegen doesn't support double.
cc = cc.double()
per_sample_weights = per_sample_weights.double()
else:
cc = cc.float()
per_sample_weights = per_sample_weights.float()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
torch.autograd.gradcheck(
cc, (indices, offsets, per_sample_weights), eps=1e-2, atol=1e-3, rtol=1e-3
)
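    # Backward tests for OptimType.NONE, where the TBE op exposes gradients instead of
    # applying an update; both tests delegate to execute_backward_none_ below.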
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_precision=st.sampled_from([SparseType.FP16, SparseType.FP32]),
weighted=st.booleans(),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
output_dtype=st.sampled_from([SparseType.FP16, SparseType.FP32]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_none(self, **kwargs: Any) -> None:
self.execute_backward_none_(**kwargs)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_precision=st.sampled_from([SparseType.FP16, SparseType.FP32]),
weighted=st.booleans(),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
output_dtype=st.sampled_from([SparseType.FP16, SparseType.FP32]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_none_with_rowwise_adagrad(self, **kwargs: Any) -> None:
self.execute_backward_none_(optimizer=OptimType.EXACT_ROWWISE_ADAGRAD, **kwargs)
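    # Shared body for the OptimType.NONE backward tests. With optimizer=None the sparse
    # gradient on cc.weights_dev is compared against the reference modules' gradients;
    # with an optimizer, the rows updated by an external SplitEmbeddingRowwiseAdagrad step
    # are compared against a reference TBE op that applies the optimizer internally.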
def execute_backward_none_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
long_segments: bool,
pooling_mode: PoolingMode,
output_dtype: SparseType,
optimizer: Optional[OptimType] = None,
) -> None:
use_cpu = False
mixed = False
use_cache = False
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(not (use_cpu and weights_precision == SparseType.FP16))
        # No-bag (PoolingMode.NONE) ops only work on GPUs and support neither mixed nor weighted
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
assume(not weighted or pooling_mode != PoolingMode.NONE)
assume(pooling_mode == PoolingMode.SUM or not weighted)
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = ComputeDevice.CUDA
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
compute_device = ComputeDevice.CPU
elif TEST_WITH_ROCM:
# ROCm managed memory allocation is under development
managed = [EmbeddingLocation.DEVICE] * T
elif use_cache:
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16:
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(Es[t]), size=(B, L)).astype(np.int64)
),
use_cpu,
)
for t in feature_table_map
]
if long_segments and L > 0:
for x in xs:
x[:, 0] = 0
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(len(xs))]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16:
xws = [xw.half() for xw in xws]
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=use_cpu)
embedding_specs = [
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
]
# Hyperparameters in case optimizer is not None
lr = 0.5
eps = 0.2
stochastic_rounding = random.choice([True, False])
if optimizer is None:
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos: Union[List[Tensor], Tensor] = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
else:
bs_ = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=embedding_specs,
optimizer=optimizer,
feature_table_map=feature_table_map,
weights_precision=weights_precision,
pooling_mode=pooling_mode,
output_dtype=output_dtype,
learning_rate=lr,
eps=eps,
stochastic_rounding=stochastic_rounding,
)
for t in range(T):
bs_.split_embedding_weights()[t].data.copy_(bs[t].weight)
fs = (
bs_(indices, offsets)
if not weighted
else bs_(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
)
)
gos: Union[List[Tensor], Tensor] = torch.rand_like(fs)
fs.backward(gos)
cc = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=embedding_specs,
optimizer=OptimType.NONE,
feature_table_map=feature_table_map,
weights_precision=weights_precision,
pooling_mode=pooling_mode,
output_dtype=output_dtype,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
total_unique_indices = 0
# Compute number of unique indices
for t in range(len(feature_table_map)):
start = offsets[t * B]
end = offsets[(t + 1) * B]
uniq_indices = indices[start:end].unique()
total_unique_indices += uniq_indices.numel()
fc2 = (
cc(indices, offsets, total_unique_indices=total_unique_indices)
if not weighted
else cc(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
total_unique_indices=total_unique_indices,
)
)
if optimizer is None:
assert type(gos) is list
if do_pooling:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0)
else:
assert type(gos) is Tensor
goc = gos.clone()
fc2.backward(goc)
if optimizer is not None:
params = SplitEmbeddingOptimizerParams(weights_dev=cc.weights_dev)
embedding_args = SplitEmbeddingArgs(
weights_placements=cc.weights_placements,
weights_offsets=cc.weights_offsets,
max_D=cc.max_D,
)
optim = SplitEmbeddingRowwiseAdagrad(
params,
embedding_args,
embedding_specs,
feature_table_map,
learning_rate=lr,
eps=eps,
stochastic_rounding=stochastic_rounding,
)
optim.step()
if use_cache:
cc.flush()
if optimizer is None:
test_tensor = cc.weights_dev.grad
weight_grads = []
for t in range(T):
grad = bs[t].weight.grad
# Check grad to suppress pyre error
assert grad is not None
weight_grads.append(grad)
ref_grad = torch.concat(weight_grads, dim=0).to_sparse().coalesce()
ref_tensor = (
ref_grad.half() if weights_precision == SparseType.FP16 else ref_grad
)
else:
indices = cc.weights_dev.grad._indices().flatten()
# Select only the part in the table that is updated
test_tensor = torch.index_select(cc.weights_dev.view(-1, D), 0, indices)
ref_tensor = torch.index_select(bs_.weights_dev.view(-1, D), 0, indices)
tolerance = (
1.0e-2
if long_segments
else (
1.0e-4
if weights_precision == SparseType.FP32
and output_dtype == SparseType.FP32
else 1.0e-2
)
)
torch.testing.assert_close(
test_tensor,
ref_tensor,
atol=tolerance,
rtol=tolerance,
)
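    # Shared body for the SGD backward tests: runs the reference EmbeddingBag forward and
    # backward, applies one manual SGD step, and checks that the TBE op configured with
    # OptimType.EXACT_SGD produces the same updated weights.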
def execute_backward_sgd_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(not (use_cpu and weights_precision == SparseType.FP16))
        # No-bag (PoolingMode.NONE) ops only work on GPUs and support neither mixed nor weighted
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
assume(not weighted or pooling_mode != PoolingMode.NONE)
assume(pooling_mode == PoolingMode.SUM or not weighted)
# TODO: Support these cases
assume(
not mixed_B
or (
weights_precision != SparseType.INT8
and output_dtype != SparseType.INT8
and not use_cpu
and not use_cache
and pooling_mode != PoolingMode.NONE
)
)
emb_op = SplitTableBatchedEmbeddingBagsCodegen
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if not mixed_B:
Bs = [B] * T
else:
low = max(int(0.25 * B), 1)
high = int(B)
if low == high:
Bs = [B] * T
else:
Bs = [np.random.randint(low=low, high=high) for _ in range(T)]
compute_device = ComputeDevice.CUDA
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
compute_device = ComputeDevice.CPU
elif TEST_WITH_ROCM:
# ROCm managed memory allocation is under development
managed = [EmbeddingLocation.DEVICE] * T
elif use_cache:
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16:
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
table_to_replicate = T // 2
# pyre-fixme[6]: For 2nd param expected `Embedding` but got
# `Union[Embedding, EmbeddingBag]`.
bs.insert(table_to_replicate, bs[table_to_replicate])
feature_table_map.insert(table_to_replicate, table_to_replicate)
num_features = len(feature_table_map)
if not mixed_B:
Bs = [B] * num_features
Bs_rank_feature = [[0]]
else:
Bs_rank_feature, Bs = gen_mixed_B_batch_sizes(B, num_features)
# Generate indices
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(Es[t]), size=(b, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for t, b in zip(feature_table_map, Bs)
]
if long_segments and L > 0:
for x in xs:
x[:, 0] = 0
# Generate positional weights
xws = [to_device(torch.randn(size=(b, L)), use_cpu) for b in Bs]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16:
xws = [xw.half() for xw in xws]
# Run baseline's forward
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
# Generate gradients
gos = [torch.randn_like(f) for f in fs]
# Run baseline's backward
[f.backward(go) for (f, go) in zip(fs, gos)]
# do SGD update
lr = 0.05
del bs[table_to_replicate]
# pyre-fixme[58]: `*` is not supported for operand types
# `Optional[torch._tensor.Tensor]` and `float`.
new_weights = [(b.weight - b.weight.grad * lr) for b in bs]
# Create a TBE op
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
optimizer=OptimType.EXACT_SGD,
feature_table_map=feature_table_map,
learning_rate=lr,
weights_precision=weights_precision,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
output_dtype=output_dtype,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.contiguous().flatten() for x in xs], dim=0)
xw = torch.cat([xw.contiguous().flatten() for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(
x, L, sum(Bs), use_cpu=use_cpu
)
batch_size_per_feature_per_rank = Bs_rank_feature if mixed_B else None
# Run TBE's forward
fc2 = (
cc(
indices,
offsets,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
if not weighted
else cc(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
)
# Generate gradients
if do_pooling:
if mixed_B:
goc = format_ref_tensors_in_mixed_B_layout(gos, Bs_rank_feature)
else:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0)
# Run TBE's backward
fc2.backward(goc)
if use_cache:
cc.flush()
for t in range(T):
torch.testing.assert_close(
cc.split_embedding_weights()[t],
new_weights[t].half()
if weights_precision == SparseType.FP16 and not use_cpu
else new_weights[t],
atol=1.0e-2
if long_segments
else (5.0e-3 if weights_precision == SparseType.FP16 else 1.0e-5),
rtol=1.0e-1
if long_segments
else (2.0e-2 if weights_precision == SparseType.FP16 else 1.0e-5),
)
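    # Hypothesis-driven wrapper around execute_backward_sgd_ with FP32 outputs.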
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_precision=st.sampled_from([SparseType.FP16, SparseType.FP32]),
weighted=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_sgd( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
) -> None:
self.execute_backward_sgd_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B if not use_cpu else False,
use_cache,
cache_algorithm,
long_segments,
pooling_mode,
use_cpu,
SparseType.FP32, # output_dtype
)
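    # SGD backward test with very large batch sizes (up to 256 * 1024) to exercise
    # extremely long segments in the backward kernel.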
@given(
D=st.integers(min_value=2, max_value=10),
        # 256 * 1024 exercises a case where num_ctas_for_run needs to be capped
        # at the number of SMs (H100 SXM5 has 132 SMs and the default seglen
        # per CTA is 1024)
B=st.sampled_from([1152, 256 * 1024]),
L=st.integers(min_value=1, max_value=4),
weighted=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_sgd_really_long_segments( # noqa C901
self,
D: int,
B: int,
L: int,
weighted: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
) -> None:
self.execute_backward_sgd_(
2, # T
D,
B,
1, # log_E,
L,
SparseType.FP32, # weights_precision
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
True, # long_segments
PoolingMode.SUM, # pooling_mode
False, # use_cpu
SparseType.FP32, # output_dtype
)
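    # Shared body for the Adagrad backward tests: checks the optimizer state and the
    # updated weights against a manual Adagrad update of nn.EmbeddingBag references, then
    # uses a second zero-learning-rate op for gradcheck and per-sample-weight checks.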
def execute_backward_adagrad_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
pooling_mode: PoolingMode,
use_cpu: bool,
output_dtype: SparseType,
weight_decay_mode: WeightDecayMode = WeightDecayMode.NONE,
) -> None:
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: torch.autograd.gradcheck() is too time-consuming for CPU version
# so we have to limit (T * B * L * D)!
assume(not use_cpu or T * B * L * D <= 1024)
assume(not (use_cpu and weights_precision == SparseType.FP16))
assume(
pooling_mode == PoolingMode.SUM or not weighted
        )
        # No-bag (PoolingMode.NONE) ops only work on GPUs and support neither mixed nor weighted
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
assume(not weighted or pooling_mode != PoolingMode.NONE)
# TODO: Support these cases
assume(
not mixed_B
or (
weights_precision != SparseType.INT8
and output_dtype != SparseType.INT8
and not use_cpu
and not use_cache
and pooling_mode != PoolingMode.NONE
)
)
emb_op = SplitTableBatchedEmbeddingBagsCodegen
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
# stochastic rounding only implemented for rowwise
assume(not stochastic_rounding or row_wise)
# only row-wise supports caching
assume(row_wise or not use_cache)
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if not mixed_B:
Bs = [B] * T
else:
low = max(int(0.25 * B), 1)
high = int(B)
if low == high:
Bs = [B] * T
else:
Bs = [np.random.randint(low=low, high=high) for _ in range(T)]
compute_device = ComputeDevice.CUDA
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
compute_device = ComputeDevice.CPU
elif TEST_WITH_ROCM:
# ROCm managed memory allocation is under development
managed = [EmbeddingLocation.DEVICE] * T
elif use_cache:
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16:
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
        # autograd with a shared (replicated) embedding table only works with exact optimizers
table_to_replicate = T // 2
# pyre-fixme[6]: For 2nd param expected `Embedding` but got
# `Union[Embedding, EmbeddingBag]`.
bs.insert(table_to_replicate, bs[table_to_replicate])
feature_table_map.insert(table_to_replicate, table_to_replicate)
num_features = len(feature_table_map)
if not mixed_B:
Bs = [B] * num_features
Bs_rank_feature = [[0]]
else:
Bs_rank_feature, Bs = gen_mixed_B_batch_sizes(B, num_features)
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(Es[t]), size=(b, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for t, b in zip(feature_table_map, Bs)
]
xws = [to_device(torch.randn(size=(b, L)), use_cpu) for b in Bs]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16 and not use_cpu:
xws = [xw.half() for xw in xws]
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
        # do Adagrad update
lr = 0.5
eps = 0.2
optimizer = (
OptimType.EXACT_ROWWISE_ADAGRAD if row_wise else OptimType.EXACT_ADAGRAD
)
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
feature_table_map=feature_table_map,
optimizer=optimizer,
learning_rate=lr,
eps=eps,
weights_precision=weights_precision,
stochastic_rounding=stochastic_rounding,
pooling_mode=pooling_mode,
output_dtype=output_dtype,
)
del bs[table_to_replicate]
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.contiguous().flatten() for x in xs], dim=0)
xw = torch.cat([xw.contiguous().flatten() for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(
x, L, sum(Bs), use_cpu=use_cpu
)
batch_size_per_feature_per_rank = Bs_rank_feature if mixed_B else None
fc2 = (
cc(
indices,
offsets,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
if not weighted
else cc(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
)
if do_pooling:
if mixed_B:
goc = format_ref_tensors_in_mixed_B_layout(gos, Bs_rank_feature)
else:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0)
fc2.backward(goc)
cc.flush()
split_optimizer_states = cc.split_optimizer_states()
assert len(split_optimizer_states) == T
get_optimizer_states = None
if row_wise:
            # get_optimizer_state must be implemented for row-wise optimizers
get_optimizer_states = cc.get_optimizer_state()
assert len(get_optimizer_states) == T
tolerance = (
1.0e-4
if weights_precision == SparseType.FP32 and output_dtype == SparseType.FP32
else 1.0e-2
)
for t in range(T):
expected_keys = {"sum"}
if row_wise and weight_decay_mode == WeightDecayMode.COUNTER:
(m1, c1, c2) = split_optimizer_states[t]
expected_keys.update(
[
"prev_iter",
"row_counter",
]
)
else:
(m1,) = split_optimizer_states[t]
if get_optimizer_states is not None:
optimizer_states_dict = get_optimizer_states[t]
assert set(optimizer_states_dict.keys()) == expected_keys
# pyre-fixme[16]: `Optional` has no attribute `float`.
ref_optimizer_state = bs[t].weight.grad.float().cpu().to_dense().pow(2)
torch.testing.assert_close(
m1.float().cpu(),
ref_optimizer_state.mean(dim=1) if row_wise else ref_optimizer_state,
atol=tolerance,
rtol=tolerance,
)
for t in range(T):
            # optimizer state: squared gradients per element (non-row-wise) or aggregated per row (row-wise)
if row_wise and weight_decay_mode == WeightDecayMode.COUNTER:
(m1, c1, c2) = split_optimizer_states[t]
else:
(m1,) = split_optimizer_states[t]
torch.testing.assert_close(
cc.split_embedding_weights()[t].float().cpu(),
torch.addcdiv(
bs[t].weight.float().cpu(),
value=-lr,
tensor1=bs[t].weight.grad.float().cpu().to_dense(),
tensor2=m1.float()
.sqrt_()
.add_(eps)
.view(Es[t], 1 if row_wise else Ds[t])
.cpu(),
),
atol=tolerance,
rtol=tolerance,
)
if use_cpu:
D_gradcheck = (D_gradcheck + 15) // 16 * 4
else:
D_gradcheck = D_gradcheck * 4
cc = emb_op(
embedding_specs=[
(E, D_gradcheck, M, compute_device) for (E, M) in zip(Es, managed)
],
feature_table_map=feature_table_map,
optimizer=optimizer,
learning_rate=0.0,
eps=eps,
weights_precision=weights_precision,
stochastic_rounding=stochastic_rounding,
# NOTE: only SUM pooling can work with per_sample_weights!
pooling_mode=PoolingMode.SUM,
output_dtype=output_dtype,
)
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
# NOTE: GPU version of SplitTableBatchedEmbeddingBagsCodegen doesn't support double.
cc = cc.double()
per_sample_weights = per_sample_weights.double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
torch.autograd.gradcheck(
cc,
(
indices,
offsets,
per_sample_weights,
None,
batch_size_per_feature_per_rank,
),
)
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
per_sample_weights = per_sample_weights.double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
y = cc(
indices,
offsets,
per_sample_weights,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
y.sum().backward()
# pyre-fixme[16]: `Optional` has no attribute `clone`.
indice_weight_grad_all = per_sample_weights.grad.clone().cpu()
T_ = len(xws)
feature_requires_grad = to_device(
torch.tensor(np.random.choice([0, 1], replace=True, size=(T_,))).int(),
use_cpu,
)
per_sample_weights = per_sample_weights.detach().clone()
per_sample_weights.requires_grad = True
y = cc(
indices,
offsets,
per_sample_weights,
feature_requires_grad=feature_requires_grad,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
y.sum().backward()
indice_weight_grad_mask = per_sample_weights.grad.clone().cpu()
torch.cuda.synchronize()
acc_B = 0
for t in range(T_):
B = Bs[t]
table_indice_weight_grad_mask = indice_weight_grad_mask[
acc_B : acc_B + B * L
]
table_indice_weight_grad_all = indice_weight_grad_all[acc_B : acc_B + B * L]
acc_B += B * L
if feature_requires_grad[t]:
torch.testing.assert_close(
table_indice_weight_grad_mask,
table_indice_weight_grad_all,
)
else:
torch.testing.assert_close(
table_indice_weight_grad_mask,
torch.zeros_like(table_indice_weight_grad_mask),
)
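    # The six tests below enumerate weights precision (FP16/FP32) and pooling mode
    # (SUM/MEAN/NONE) for execute_backward_adagrad_.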
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP16),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp16_pmSUM( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# VBE is supported in rowwise_adagrad only
if not row_wise:
mixed_B = False
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
mixed_B,
use_cache,
cache_algorithm,
PoolingMode.SUM,
use_cpu,
output_dtype,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP16),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp16_pmMEAN( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# VBE is supported in rowwise_adagrad only
if not row_wise:
mixed_B = False
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
mixed_B,
use_cache,
cache_algorithm,
PoolingMode.MEAN,
use_cpu,
output_dtype,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP16),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp16_pmNONE( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
False, # mixed_B
use_cache,
cache_algorithm,
PoolingMode.NONE,
use_cpu,
output_dtype,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP32),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp32_pmSUM( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# VBE is supported in rowwise_adagrad only
if not row_wise:
mixed_B = False
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
mixed_B,
use_cache,
cache_algorithm,
PoolingMode.SUM,
use_cpu,
output_dtype,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP32),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp32_pmMEAN( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# VBE is supported in rowwise_adagrad only
if not row_wise:
mixed_B = False
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
mixed_B,
use_cache,
cache_algorithm,
PoolingMode.MEAN,
use_cpu,
output_dtype,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP32),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp32_pmNONE( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
False, # mixed_B
use_cache,
cache_algorithm,
PoolingMode.NONE,
use_cpu,
output_dtype,
)
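    # Build a MANAGED_CACHING TBE op and a DEVICE-only reference op with identical
    # weights; returns both ops plus min(Es) and sum(Ds) for request generation.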
def _generate_cache_tbes(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
cache_algorithm: CacheAlgorithm = CacheAlgorithm.LRU,
prefetch_pipeline: bool = False,
use_int_weight: bool = False,
) -> Tuple[
SplitTableBatchedEmbeddingBagsCodegen,
SplitTableBatchedEmbeddingBagsCodegen,
int,
int,
]:
lr = 1.0 if use_int_weight else 0.02
E = int(10**log_E)
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = EmbeddingLocation.DEVICE if d < average_D else managed[t]
cc_ref = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
D,
EmbeddingLocation.DEVICE,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
stochastic_rounding=False,
prefetch_pipeline=False,
learning_rate=lr,
)
cc = SplitTableBatchedEmbeddingBagsCodegen(
[(E, D, M, ComputeDevice.CUDA) for (E, D, M) in zip(Es, Ds, managed)],
cache_algorithm=cache_algorithm,
stochastic_rounding=False,
prefetch_pipeline=prefetch_pipeline,
learning_rate=lr,
)
if use_int_weight:
min_val = -20
max_val = +20
for param in cc_ref.split_embedding_weights():
p = torch.randint(
int(min_val),
int(max_val) + 1,
size=param.shape,
device=param.device,
)
param.data.copy_(p)
for t in range(T):
self.assertEqual(
cc.split_embedding_weights()[t].size(),
cc_ref.split_embedding_weights()[t].size(),
)
cc.split_embedding_weights()[t].data.copy_(
cc_ref.split_embedding_weights()[t]
)
return (cc, cc_ref, min(Es), sum(Ds))
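    # Run a few forward/backward iterations through the cached op and the reference op
    # and check that the outputs and the flushed weights agree.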
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=1, max_value=20),
mixed=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_pipeline(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
cache_algorithm: CacheAlgorithm,
) -> None:
cc, cc_ref, min_Es, sum_Ds = self._generate_cache_tbes(
T, D, B, log_E, L, mixed, cache_algorithm
)
iters = 3
requests = generate_requests(iters, B, T, L, min_Es, reuse=0.1)
grad_output = torch.randn(B, sum_Ds).cuda()
for indices, offsets, _ in requests:
output = cc(indices, offsets)
output_ref = cc_ref(indices, offsets)
torch.testing.assert_close(output, output_ref)
output.backward(grad_output)
output_ref.backward(grad_output)
cc.flush()
for t in range(T):
torch.testing.assert_close(
cc.split_embedding_weights()[t], cc_ref.split_embedding_weights()[t]
)
def _test_cache_prefetch_pipeline( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
prefetch_location: str,
prefetch_stream: Optional[torch.cuda.Stream],
) -> None:
"""
test cache prefetch pipeline with prefetch_pipeline=True.
prefetch_location can be "before_fwd" or "between_fwd_bwd",
where the TBE prefetch(batch_{i+1}) is called before forward(batch_i)
or in between of forward(batch_i) and backward(batch_i), respectively.
If prefetch_stream is not None, the TBE prefetch function will use this stream.
In addition, we make the TBE weights initialized as integer values, learning_rate
as integer value, and gradients as integer values so that the test is more stable.
"""
assert prefetch_location in ["before_fwd", "between_fwd_bwd"]
cc, cc_ref, min_Es, sum_Ds = self._generate_cache_tbes(
T, D, B, log_E, L, mixed, CacheAlgorithm.LRU, True, True
)
iters = 5
requests = generate_requests(iters, B, T, L, min_Es, reuse=0.1)
grad_output = (
torch.randint(
low=-10,
high=10,
size=(B, sum_Ds),
)
.float()
.cuda()
)
torch.cuda.synchronize() # make sure TBEs and inputs are ready
self.assertTrue(torch.all(cc.lxu_cache_locking_counter == 0))
cur_stream: torch.cuda.Stream = torch.cuda.current_stream()
req_iter = iter(requests)
batch_i = next(req_iter)
batch_ip1 = None
output, output_ref = None, None
def _prefetch(
cc: SplitTableBatchedEmbeddingBagsCodegen,
batch: Optional[Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]],
) -> None:
if not batch:
return
context_stream = prefetch_stream if prefetch_stream else cur_stream
stream = cur_stream if prefetch_stream else None
indices, offsets, _ = batch
with torch.cuda.stream(context_stream):
cc.prefetch(indices, offsets, stream)
_prefetch(cc, batch_i)
while batch_i:
indices, offsets, _ = batch_i
batch_ip1 = next(req_iter, None)
if prefetch_stream:
cur_stream.wait_stream(prefetch_stream)
if prefetch_location == "before_fwd":
_prefetch(cc, batch_ip1)
output = cc(indices, offsets)
if prefetch_location == "between_fwd_bwd":
_prefetch(cc, batch_ip1)
output.backward(grad_output)
batch_i = batch_ip1
batch_ip1 = None
cc.flush()
for indices, offsets, _ in requests:
output_ref = cc_ref(indices, offsets)
output_ref.backward(grad_output)
for t in range(T):
torch.testing.assert_close(
cc.split_embedding_weights()[t], cc_ref.split_embedding_weights()[t]
)
torch.testing.assert_close(output, output_ref)
self.assertTrue(torch.all(cc.lxu_cache_locking_counter == 0))
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=1, max_value=20),
mixed=st.booleans(),
prefetch_location=st.sampled_from(["before_fwd", "between_fwd_bwd"]),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_prefetch_pipeline(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
prefetch_location: str,
) -> None:
self._test_cache_prefetch_pipeline(
T,
D,
B,
log_E,
L,
mixed,
prefetch_location,
prefetch_stream=None,
)
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=1, max_value=20),
mixed=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_prefetch_pipeline_stream_1(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
) -> None:
self._test_cache_prefetch_pipeline(
T,
D,
B,
log_E,
L,
mixed,
prefetch_location="before_fwd",
prefetch_stream=torch.cuda.Stream(),
)
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=1, max_value=20),
mixed=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_prefetch_pipeline_stream_2(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
) -> None:
self._test_cache_prefetch_pipeline(
T,
D,
B,
log_E,
L,
mixed,
prefetch_location="between_fwd_bwd",
prefetch_stream=torch.cuda.Stream(),
)
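    # Shared body for the optimizer-specific backward tests: builds nn.EmbeddingBag
    # references, runs their backward pass, and configures the hyperparameters for the
    # TBE optimizer under test.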
def execute_backward_optimizers_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
mixed_B: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
weight_decay_mode: WeightDecayMode = WeightDecayMode.L2,
uvm_non_rowwise_momentum: bool = False,
) -> None:
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(
not use_cpu
or optimizer
in [
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_SGD,
]
)
assume(pooling_mode == PoolingMode.SUM or not weighted)
# No-bag (sequence) ops only work on GPUs and do not support mixed dims or per-sample weights
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
assume(not weighted or pooling_mode != PoolingMode.NONE)
assume(not mixed_B or (not use_cpu and pooling_mode != PoolingMode.NONE))
emb_op = SplitTableBatchedEmbeddingBagsCodegen
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if not mixed_B:
Bs = [B] * T
Bs_rank_feature = [[0]]
else:
Bs_rank_feature, Bs = gen_mixed_B_batch_sizes(B, T)
compute_device = ComputeDevice.CUDA
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
compute_device = ComputeDevice.CPU
elif TEST_WITH_ROCM:
# ROCm managed memory allocation is under development
managed = [EmbeddingLocation.DEVICE] * T
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(e), size=(b, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for (e, b) in zip(Es, Bs)
]
if long_segments and L > 0:
for x, e in zip(xs, Es):
x[:, 0] = np.random.randint(low=0, high=e)
xws = [to_device(torch.randn(size=(b, L)), use_cpu) for b in Bs]
xws_acc_type = copy.deepcopy(xws)
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
# Set up optimizer hyperparameters for the update
optimizer_kwargs = {"learning_rate": 0.5}
(lr, eps, beta1, beta2, weight_decay, momentum, eta) = (
0.5,
1e-4,
0.9,
0.99,
0.01,
0.9,
0.01,
)
counter_based_regularization: CounterBasedRegularizationDefinition
if optimizer == OptimType.EXACT_ADAGRAD:
optimizer_kwargs["eps"] = eps
if optimizer == OptimType.EXACT_ROWWISE_ADAGRAD:
optimizer_kwargs["eps"] = eps
optimizer_kwargs["weight_decay"] = weight_decay
optimizer_kwargs["weight_decay_mode"] = weight_decay_mode
if weight_decay_mode == WeightDecayMode.COUNTER:
counter_based_regularization = CounterBasedRegularizationDefinition(
counter_weight_decay_mode=CounterWeightDecayMode.DECOUPLE,
counter_halflife=20000,
adjustment_iter=24000,
adjustment_ub=0.1,
learning_rate_mode=LearningRateMode.TAIL_ID_LR_DECREASE,
grad_sum_decay=GradSumDecay.NO_DECAY,
tail_id_threshold=TailIdThreshold(val=1000, is_ratio=False),
)
optimizer_kwargs[
"counter_based_regularization"
# pyre-fixme[6]: Expected `float` for 2nd param but got `CounterBasedRegularizationDefinition`.
] = counter_based_regularization
if optimizer == OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD:
optimizer_kwargs["eps"] = eps
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.ADAM):
optimizer_kwargs["eps"] = eps
optimizer_kwargs["beta1"] = beta1
optimizer_kwargs["beta2"] = beta2
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer in (OptimType.PARTIAL_ROWWISE_LAMB, OptimType.LAMB):
optimizer_kwargs["eps"] = eps
optimizer_kwargs["beta1"] = beta1
optimizer_kwargs["beta2"] = beta2
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer == OptimType.LARS_SGD:
optimizer_kwargs["weight_decay"] = weight_decay
optimizer_kwargs["momentum"] = momentum
optimizer_kwargs["eta"] = eta
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
optimizer=optimizer,
pooling_mode=pooling_mode,
uvm_non_rowwise_momentum=uvm_non_rowwise_momentum,
# pyre-fixme[6]: Expected `CacheAlgorithm` for 5th param but got `float`.
**optimizer_kwargs,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.contiguous().flatten() for x in xs], dim=0)
xw = torch.cat([xw.contiguous().flatten() for xw in xws_acc_type], dim=0)
batch_size_per_feature_per_rank = Bs_rank_feature if mixed_B else None
(indices, offsets) = get_table_batched_offsets_from_dense(
x, L, sum(Bs), use_cpu=use_cpu
)
fc2 = (
cc(
indices,
offsets,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
if not weighted
else cc(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
)
if do_pooling:
if mixed_B:
goc = format_ref_tensors_in_mixed_B_layout(gos, Bs_rank_feature)
else:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0)
fc2.backward(goc)
cc.flush()
split_optimizer_states = cc.split_optimizer_states()
self.assertEqual(len(split_optimizer_states), T)
split_weights = cc.split_embedding_weights()
get_optimizer_states = None
try:
get_optimizer_states = cc.get_optimizer_state()
assert len(get_optimizer_states) == T
except NotImplementedError:
assert optimizer not in (
OptimType.ADAM,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_LAMB,
OptimType.EXACT_SGD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
OptimType.EXACT_ADAGRAD,
)
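# Reference (rowwise) Adagrad check: m1_ref holds grad^2 (row-wise mean for the
# rowwise variant) and the update is w_new = w - lr * grad / (sqrt(m1_ref) + eps),
# with the weight-decay term applied according to weight_decay_mode.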
if optimizer in (OptimType.EXACT_ROWWISE_ADAGRAD, OptimType.EXACT_ADAGRAD):
rowwise = optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
for t in range(T):
row_counter: Optional[torch.Tensor] = None
freq: Optional[torch.Tensor] = None
iter_: int = -1
if rowwise and weight_decay_mode == WeightDecayMode.COUNTER:
(m1, prev_iter, row_counter) = split_optimizer_states[t]
else:
(m1,) = split_optimizer_states[t]
# to_dense on GPU is non-deterministic due to atomics used in
# coalescing and floating point non-associativity.
# pyre-fixme[16]: `Optional` has no attribute `cpu`.
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
if rowwise and not use_cpu:
# We need to skip when using cpu because use_fbgemm (https://fburl.com/code/12131iub)
# is true and the template code (https://fburl.com/code/1kctlup3) is not executed.
if weight_decay_mode == WeightDecayMode.L2:
dense_cpu_grad += weight_decay * bs[t].weight.cpu()
elif weight_decay_mode == WeightDecayMode.COUNTER:
iter_ = int(cc.iter.item())
(
dense_cpu_grad,
row_counter,
freq,
) = self.get_grad_from_counter_adagrad(
dense_cpu_grad,
bs[t].weight.cpu(),
counter_based_regularization,
row_counter.cpu(),
prev_iter.cpu(),
iter_,
weight_decay,
)
m1_ref = (
dense_cpu_grad.pow(2)
if not rowwise
else dense_cpu_grad.pow(2).mean(dim=1)
)
torch.testing.assert_close(
m1.float().index_select(dim=0, index=xs[t].view(-1)).cpu(),
m1_ref.float().index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
weights_new = split_weights[t]
denom = (
torch.sqrt(
m1_ref if not rowwise else m1_ref.view(m1_ref.numel(), 1)
)
+ eps
)
if rowwise and not use_cpu:
if weight_decay_mode == WeightDecayMode.DECOUPLE:
weights_ref = bs[t].weight.cpu() - lr * (
dense_cpu_grad / denom + weight_decay * bs[t].weight.cpu()
)
elif weight_decay_mode == WeightDecayMode.L2:
# pyre-fixme[58]: `/` is not supported for operand types `float`
# and `Tensor`.
weights_ref = bs[t].weight.cpu() - lr * dense_cpu_grad / denom
elif weight_decay_mode == WeightDecayMode.COUNTER:
max_counter = cc.max_counter.item()
weights_ref = self.get_wts_from_counter_adagrad(
dense_cpu_grad,
bs[t].weight.cpu(),
denom,
counter_based_regularization,
row_counter,
# pyre-fixme[6]: Expected `Tensor` for 6th param but got `Optional[Tensor]`
freq,
max_counter,
iter_,
eps,
lr,
weight_decay,
)
else:
# pyre-fixme[58]: `/` is not supported for operand types `float`
# and `Tensor`.
weights_ref = bs[t].weight.cpu() - lr * dense_cpu_grad / denom
# TODO: why is tolerance off here?
torch.testing.assert_close(
weights_new.index_select(dim=0, index=xs[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-2,
rtol=1.0e-2,
)
optimizer_states_dict = get_optimizer_states[t]
expected_keys = {"sum"}
if rowwise and weight_decay_mode == WeightDecayMode.COUNTER:
expected_keys.update(["prev_iter", "row_counter"])
assert set(optimizer_states_dict.keys()) == expected_keys
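# Reference check for EXACT_ROWWISE_WEIGHTED_ADAGRAD: the L2 weight-decay term is
# folded into the gradient, the row-wise grad^2 mean is scaled by
# lambda = sqrt(iter + 1), and the update is
# w_new = w - lr * lambda * grad / (m1_ref^(1/3) + eps).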
if optimizer == OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD:
for t in range(T):
(m1,) = split_optimizer_states[t]
# to_dense on GPU is non-deterministic due to atomics used in
# coalescing and floating point non-associativity.
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
dense_cpu_grad += weight_decay * bs[t].weight.cpu()
iter_ = cc.iter.item()
lambda_ = (iter_ + 1) ** 0.5
m1_ref = dense_cpu_grad.pow(2).mean(dim=1)
m1_ref *= lambda_
torch.testing.assert_close(
m1.float().index_select(dim=0, index=xs[t].view(-1)).cpu(),
m1_ref.float().index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
weights_new = split_weights[t]
weights_ref = bs[t].weight.cpu() - lr * lambda_ * dense_cpu_grad / (
# pyre-fixme[58]: `/` is not supported for operand types `float`
# and `Tensor`.
torch.pow(m1_ref.view(m1_ref.numel(), 1), 1.0 / 3)
+ eps
)
torch.testing.assert_close(
weights_new.index_select(dim=0, index=xs[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
if get_optimizer_states is not None:
optimizer_states_dict = get_optimizer_states[t]
assert set(optimizer_states_dict.keys()) == {"sum"}
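# Reference Adam check: m1_ref = (1 - beta1) * grad and m2_ref = (1 - beta2) * grad^2
# (row-wise mean for PARTIAL_ROWWISE_ADAM); after bias correction,
# w_new = w - lr * m_hat / (sqrt(v_hat) + eps) - lr * weight_decay * w.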
if optimizer in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.ADAM):
rowwise = optimizer == OptimType.PARTIAL_ROWWISE_ADAM
for t in range(T):
(m1, m2) = split_optimizer_states[t]
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
m2_ref = (
dense_cpu_grad.pow(2)
if not rowwise
else dense_cpu_grad.pow(2).mean(dim=1)
) * (1.0 - beta2)
torch.testing.assert_close(m2.cpu(), m2_ref, atol=1.0e-4, rtol=1.0e-4)
m1_ref = dense_cpu_grad * (1.0 - beta1)
torch.testing.assert_close(m1.cpu(), m1_ref, atol=1.0e-4, rtol=1.0e-4)
iter_ = cc.iter.item()
v_hat_t = m2_ref / (1 - beta2**iter_)
v_hat_t = v_hat_t if not rowwise else v_hat_t.view(v_hat_t.numel(), 1)
m_hat_t = m1_ref / (1 - beta1**iter_)
weights_new = split_weights[t]
weights_ref = (
torch.addcdiv(
bs[t].weight.cpu(),
value=-lr,
tensor1=m_hat_t,
tensor2=v_hat_t.sqrt_().add_(eps),
)
- lr * weight_decay * bs[t].weight.cpu()
)
torch.testing.assert_close(
weights_new.index_select(dim=0, index=xs[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-3,
rtol=1.0e-3,
)
if get_optimizer_states is not None:
optimizer_states_dict = get_optimizer_states[t]
assert set(optimizer_states_dict.keys()) == {
"exp_avg",
"exp_avg_sq",
}
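# Reference LAMB check: rtw = m_hat / (sqrt(v_hat) + eps) + weight_decay * w, and the
# per-row trust ratio ||w|| / ||rtw|| scales the update:
# w_new = w - lr * true_ratio * rtw.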
if optimizer in (OptimType.PARTIAL_ROWWISE_LAMB, OptimType.LAMB):
rowwise = optimizer == OptimType.PARTIAL_ROWWISE_LAMB
for t in range(T):
(m1, m2) = split_optimizer_states[t]
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
m2_ref = (
dense_cpu_grad.pow(2)
if not rowwise
else dense_cpu_grad.pow(2).mean(dim=1)
) * (1.0 - beta2)
torch.testing.assert_close(m2.cpu(), m2_ref, atol=1.0e-4, rtol=1.0e-4)
m1_ref = dense_cpu_grad * (1.0 - beta1)
torch.testing.assert_close(m1.cpu(), m1_ref, atol=1.0e-4, rtol=1.0e-4)
iter_ = cc.iter.item()
v_hat_t = m2_ref / (1 - beta2**iter_)
v_hat_t = v_hat_t if not rowwise else v_hat_t.view(v_hat_t.numel(), 1)
m_hat_t = m1_ref / (1 - beta1**iter_)
rtw = (m_hat_t / (torch.sqrt(v_hat_t) + eps)) + weight_decay * bs[
t
].weight.cpu()
true_ratio = torch.linalg.norm(bs[t].weight, dim=1, ord=2).view(
m1.shape[0], 1
).cpu() / torch.linalg.norm(rtw, dim=1, ord=2).view(m1.shape[0], 1)
weights_new = split_weights[t]
weights_ref = bs[t].weight.cpu() - lr * true_ratio * rtw
torch.testing.assert_close(
weights_new.index_select(dim=0, index=xs[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-3,
rtol=1.0e-3,
)
if get_optimizer_states is not None:
optimizer_states_dict = get_optimizer_states[t]
assert set(optimizer_states_dict.keys()) == {
"exp_avg",
"exp_avg_sq",
}
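# Reference LARS-SGD check: the per-row adaptive rate is
# adjusted_lr = lr * eta * ||w|| / (||grad|| + weight_decay * ||w||),
# the momentum state holds adjusted_lr * (grad + weight_decay * w), and
# w_new = w - m1_ref.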
if optimizer == OptimType.LARS_SGD:
for t in range(T):
(m1,) = split_optimizer_states[t]
weight_norm = (
torch.linalg.norm(bs[t].weight, dim=1, ord=2)
.view(m1.shape[0], 1)
.cpu()
)
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
grad_norm = torch.linalg.norm(dense_cpu_grad, dim=1, ord=2).view(
m1.shape[0], 1
)
adjusted_lr = (
lr * eta * weight_norm / (grad_norm + weight_decay * weight_norm)
)
m1_ref = adjusted_lr * (
dense_cpu_grad + weight_decay * bs[t].weight.cpu()
)
torch.testing.assert_close(
m1.index_select(dim=0, index=xs[t].view(-1)).cpu(),
# pyre-fixme[16]: `float` has no attribute `index_select`.
m1_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
weights_new = split_weights[t]
weights_ref = bs[t].weight.cpu() - m1_ref
torch.testing.assert_close(
weights_new.index_select(dim=0, index=xs[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
def get_grad_from_counter_adagrad(
self,
dense_cpu_grad: torch.Tensor,
weights: torch.Tensor,
counter_based_regularization: CounterBasedRegularizationDefinition,
row_counter: torch.Tensor,
prev_iter: torch.Tensor,
iter_: int,
weight_decay: float,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
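# CPU reproduction of the counter-based regularization used by rowwise Adagrad:
# the per-row counter decays with the configured half-life,
# freq = counter_halflife / row_counter, and for L2 mode the term
# freq * weight_decay * weights is folded into the gradient.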
row_counter = row_counter.view(row_counter.numel(), 1)
prev_iter = prev_iter.view(prev_iter.numel(), 1)
freq = torch.ones_like(row_counter)
counter_weight_decay_mode = (
counter_based_regularization.counter_weight_decay_mode
)
counter_halflife = counter_based_regularization.counter_halflife
l2_wd = 1.0 if counter_weight_decay_mode == CounterWeightDecayMode.L2 else 0.0
if counter_halflife > 0:
counter_log_rho = math.log(2.0) / counter_halflife
# if an id occurs multiple times in a batch, iter_delta = 1
iter_delta = torch.where(prev_iter == 0.0, 1.0, iter_ * 1.0 - prev_iter)
prev_iter = iter_ * torch.ones_like(prev_iter)
row_counter = 1.0 + torch.exp(-iter_delta * counter_log_rho) * row_counter
freq = torch.tensor([counter_halflife]) / row_counter
dense_cpu_grad += l2_wd * freq * weight_decay * weights
return dense_cpu_grad, row_counter, freq
def get_wts_from_counter_adagrad(
self,
dense_cpu_grad: torch.Tensor,
weights: torch.Tensor,
denom: torch.Tensor,
counter_based_regularization: CounterBasedRegularizationDefinition,
row_counter: torch.Tensor,
freq: torch.Tensor,
max_counter: float,
iter_: int,
eps: float,
learning_rate: float,
weight_decay: float,
) -> torch.Tensor:
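# Applies the counter-based learning-rate adjustment: rows whose counter exceeds the
# tail-id threshold get their lr / denom multiplier rescaled (or replaced, for
# COUNTER_SGD) according to learning_rate_mode, weight decay enters through
# exp_reg_correction, and the update is
# w_new = exp_reg_correction * w - adjusted_multiplier * grad.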
counter_weight_decay_mode = (
counter_based_regularization.counter_weight_decay_mode
)
counter_halflife = counter_based_regularization.counter_halflife
tail_id_threshold_val = counter_based_regularization.tail_id_threshold.val
if counter_based_regularization.tail_id_threshold.is_ratio:
tail_id_threshold_val = math.floor(tail_id_threshold_val * max_counter)
learning_rate_mode = counter_based_regularization.learning_rate_mode
adjustment_iter = counter_based_regularization.adjustment_iter
adjustment_ub = counter_based_regularization.adjustment_ub
multiplier = torch.tensor([learning_rate]) / denom
adjusted_multiplier = multiplier
exp_reg_correction = torch.ones_like(row_counter)
if counter_halflife > 0:
if adjustment_iter <= 0 or (
adjustment_iter > 0 and iter_ > adjustment_iter
):
if learning_rate_mode == LearningRateMode.TAIL_ID_LR_INCREASE:
adjusted_multiplier = torch.where(
row_counter > tail_id_threshold_val,
multiplier
* torch.maximum(
torch.minimum(
torch.pow(
torch.tensor([max_counter]) / (row_counter + 1.0),
adjustment_ub,
),
torch.Tensor([10.0]),
),
torch.Tensor([1.0]),
),
multiplier,
)
elif learning_rate_mode == LearningRateMode.TAIL_ID_LR_DECREASE:
adjusted_multiplier = torch.where(
row_counter > tail_id_threshold_val,
multiplier
* torch.minimum(
torch.maximum(
torch.pow(
(row_counter + 1.0) / max_counter,
adjustment_ub,
),
torch.Tensor([0.1]),
),
torch.Tensor([1.0]),
),
multiplier,
)
elif learning_rate_mode == LearningRateMode.COUNTER_SGD:
adjusted_multiplier = torch.where(
row_counter > tail_id_threshold_val,
torch.Tensor([learning_rate])
/ (torch.sqrt(adjustment_ub * row_counter) + eps),
multiplier,
)
if counter_weight_decay_mode == CounterWeightDecayMode.DECOUPLE:
exp_reg_correction = 1.0 - freq * weight_decay * learning_rate
elif counter_weight_decay_mode == CounterWeightDecayMode.L2:
exp_reg_correction = 1.0 - freq * weight_decay * multiplier
weights = exp_reg_correction * weights - adjusted_multiplier * dense_cpu_grad
return weights
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.sampled_from(
[
OptimType.ADAM,
OptimType.PARTIAL_ROWWISE_ADAM,
]
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
uvm_non_rowwise_momentum=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_adam( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
uvm_non_rowwise_momentum: bool,
) -> None:
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
False, # mixed_B
optimizer,
long_segments,
pooling_mode,
use_cpu,
uvm_non_rowwise_momentum=uvm_non_rowwise_momentum,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=2, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
optimizer=st.sampled_from(
[
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
]
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
weight_decay_mode=st.sampled_from(
[
WeightDecayMode.L2,
WeightDecayMode.DECOUPLE,
# temporarily disabled due to a test error to unblock release
# will fix in a follow-up diff
# WeightDecayMode.COUNTER,
]
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_adagrad( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
mixed_B: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
weight_decay_mode: WeightDecayMode,
) -> None:
if (
pooling_mode == PoolingMode.NONE
or optimizer != OptimType.EXACT_ROWWISE_ADAGRAD
):
mixed_B = False
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
mixed_B,
optimizer,
long_segments,
pooling_mode,
use_cpu,
weight_decay_mode,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.sampled_from(
[
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_LAMB,
]
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_lamb( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
) -> None:
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
False, # mixed_B
optimizer,
long_segments,
pooling_mode,
use_cpu,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.just(OptimType.LARS_SGD),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_lars( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
) -> None:
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
False, # mixed_B
optimizer,
long_segments,
pooling_mode,
use_cpu,
)
def execute_nbit_forward_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
pooling_mode: PoolingMode,
weights_ty: SparseType,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
use_array_for_index_remapping: bool,
do_pruning: bool,
mixed_weights_ty: bool,
output_dtype: SparseType,
) -> None:
# NOTE: weighted operation can be done only for SUM.
assume(pooling_mode == PoolingMode.SUM or not weighted)
assume(not mixed or pooling_mode != PoolingMode.NONE)
mode = "sum"
do_pooling = True
if pooling_mode == PoolingMode.SUM:
mode = "sum"
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
else:
mode = "sum"
do_pooling = False
E = int(10**log_E)
if not mixed_weights_ty:
weights_ty_list = [weights_ty] * T
else:
weights_ty_list = [
np.random.choice(
[
SparseType.FP32,
SparseType.FP16,
SparseType.FP8,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
)
for _ in range(T)
]
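# Each embedding row must occupy a whole number of bytes, so D is rounded up to the
# alignment implied by the lowest bit-rate weight type (e.g. INT4 rows align to
# multiples of 2 elements, INT2 rows to multiples of 4).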
D_alignment = max(
1 if ty.bit_rate() % 8 == 0 else int(8 / ty.bit_rate())
for ty in weights_ty_list
)
D = round_up(D, D_alignment)
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
Ds = [min(D, 128) for D in Ds]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
elif use_cache:
managed = [
EmbeddingLocation.MANAGED_CACHING,
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
# Fix exponent bias to 7 for now (TODO: Randomize it from a range of integers)
if SparseType.FP8 in weights_ty_list:
fp8_config = FP8QuantizationConfig(random.choice([4, 5]), 7)
has_fp8_weight = True
else:
has_fp8_weight = False
xs = [to_device(torch.randint(low=0, high=e, size=(B, L)), use_cpu) for e in Es]
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
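# When pruning is emulated, build a random remapping from the original id space into
# the dense table (index_remappings_array); the same mapping is applied below to the
# inputs of the reference nn.EmbeddingBag / nn.Embedding lookups.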
if do_pruning:
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(
x, use_cpu=use_cpu
)
# generate index_remapping
dense_indices = torch.randint(low=0, high=E, size=(T, B, L)).view(-1).int()
original_E = E
current_device = "cpu" if use_cpu else torch.cuda.current_device()
indices = indices.view(-1).int()
offsets = offsets.view(-1).int()
# generate index_remapping done
# Initialize and insert Array index remapping based data structure
index_remappings_array = []
for t in range(T):
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
indice_t = (indices.view(T, B, L))[t].long().view(-1).to(current_device)
dense_indice_t = (
(dense_indices.view(T, B, L))[t].view(-1)
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
.to(current_device)
)
index_remappings_array_t = torch.tensor(
[-1] * original_E,
dtype=torch.int32,
device=current_device,
)
index_remappings_array_t[indice_t] = dense_indice_t
index_remappings_array.append(index_remappings_array_t.cpu())
else:
index_remappings_array = [torch.arange(E, dtype=torch.int32) for E in Es]
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(
x, use_cpu=use_cpu
)
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
pooling_mode=pooling_mode,
index_remapping=index_remappings_array if B != 0 else None,
device="cpu" if use_cpu else torch.cuda.current_device(),
cache_algorithm=cache_algorithm,
use_array_for_index_remapping=use_array_for_index_remapping,
output_dtype=output_dtype,
fp8_exponent_bits=fp8_config.get("exponent_bits")
if has_fp8_weight
else None,
fp8_exponent_bias=fp8_config.get("exponent_bias")
if has_fp8_weight
else None,
)
# Initialize the random weights for int nbit table split embedding bag
cc.fill_random_weights()
# NOTE: test that the module is TorchScript-compatible!
cc = torch.jit.script(cc)
for t in range(T):
(weights, scale_shift) = cc.split_embedding_weights()[t]
if scale_shift is not None:
(E, R) = scale_shift.shape
self.assertEqual(R, 4)
if weights_ty_list[t] == SparseType.INT2:
scales = np.random.uniform(0.1, 1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
if weights_ty_list[t] == SparseType.INT4:
scales = np.random.uniform(0.01, 0.1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
if weights_ty_list[t] == SparseType.INT8:
scales = np.random.uniform(0.001, 0.01, size=(E,)).astype(
np.float16
)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
scale_shift[:, :] = torch.tensor(
np.stack([scales, shifts], axis=1).astype(np.float16).view(np.uint8)
)
fake_quantize_embs(
weights,
scale_shift,
bs[t].weight.detach(),
weights_ty_list[t],
use_cpu=False,
# pyre-fixme[61]: `fp8_config` is undefined, or not always defined.
fp8_config=fp8_config if has_fp8_weight else None,
)
if not use_cpu:
fc2 = (
cc(indices.int(), offsets.int())
if not weighted
else cc(indices.int(), offsets.int(), xw.contiguous().view(-1))
)
else:
cc = cc.cpu()
indices, offsets = indices.cpu(), offsets.cpu()
fc2 = (
cc(indices.int(), offsets.int())
if not weighted
else cc(indices.int(), offsets.int(), xw.contiguous().view(-1).cpu())
)
if do_pooling and B == 0:
self.assertEqual(fc2.size(), (0, cc.total_D))
return
new_indices = []
for t in range(T):
new_indices_t = torch.zeros([B, L], dtype=torch.int32)
for i in range(B):
for j in range(L):
old_index = xs[t][i, j]
new_index = index_remappings_array[t][old_index]
new_indices_t[i][j] = new_index
new_indices.append(new_indices_t)
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, new_indices)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, new_indices, xws)
]
)
if do_pooling:
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
else:
f = torch.cat(fs, dim=0).view(-1, D)
torch.testing.assert_close(
fc2.float().cpu(),
f.float().cpu(),
atol=1.0e-2,
rtol=1.0e-2,
)
@given(
nbit_weights_ty=get_nbit_weights_ty(),
use_array_for_index_remapping=st.booleans(),
do_pruning=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
)
def test_nbit_forward_cpu(
self,
nbit_weights_ty: Optional[SparseType],
use_array_for_index_remapping: bool,
do_pruning: bool,
) -> None:
use_cpu = True
T = random.randint(1, 50)
B = random.randint(0, 128)
L = random.randint(0, 32)
D = random.randint(2, 2048)
log_E = random.randint(2, 4)
use_cache = False
# cache_algorithm does not matter since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
)
mixed = random.choice([True, False])
if pooling_mode == PoolingMode.NONE:
nbit_weights_ty = random.choice(
[
SparseType.FP32,
SparseType.FP16,
# CPU sequence embedding does not support FP8/INT4/INT2 yet
# SparseType.FP8,
SparseType.INT8,
# SparseType.INT4,
# SparseType.INT2,
]
)
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
if nbit_weights_ty is None:
# weights_ty is ignored when mixed weight types are used.
weights_ty: SparseType = SparseType.INT8
mixed_weights_ty = True
else:
weights_ty: SparseType = nbit_weights_ty
mixed_weights_ty = False
output_dtype = random.choice(
(
[SparseType.BF16]
if weights_ty in [SparseType.INT4, SparseType.INT2]
else []
)
+ [SparseType.FP32, SparseType.FP16]
)
self.execute_nbit_forward_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
pooling_mode,
weights_ty,
use_cache,
cache_algorithm,
use_cpu,
use_array_for_index_remapping,
do_pruning,
mixed_weights_ty,
output_dtype,
)
@given(
nbit_weights_ty=get_nbit_weights_ty(),
use_array_for_index_remapping=st.booleans(),
do_pruning=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
)
def test_nbit_forward_cpu_bf16_out(
self,
nbit_weights_ty: Optional[SparseType],
use_array_for_index_remapping: bool,
do_pruning: bool,
) -> None:
use_cpu = True
T = random.randint(1, 50)
B = random.randint(0, 128)
L = random.randint(0, 32)
D = random.randint(2, 2048)
log_E = random.randint(2, 4)
use_cache = False
# cache_algorithm does not matter since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
)
mixed = random.choice([True, False])
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
if nbit_weights_ty is None:
# weights_ty is ignored when mixed weight types are used.
weights_ty: SparseType = SparseType.INT8
mixed_weights_ty = True
else:
weights_ty: SparseType = nbit_weights_ty
mixed_weights_ty = False
output_dtype = SparseType.BF16
self.execute_nbit_forward_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
pooling_mode,
weights_ty,
use_cache,
cache_algorithm,
use_cpu,
use_array_for_index_remapping,
do_pruning,
mixed_weights_ty,
output_dtype,
)
@unittest.skipIf(*gpu_unavailable)
@given(
nbit_weights_ty=get_nbit_weights_ty(),
use_array_for_index_remapping=st.booleans(),
do_pruning=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
)
def test_nbit_forward_gpu_no_cache(
self,
nbit_weights_ty: Optional[SparseType],
use_array_for_index_remapping: bool,
do_pruning: bool,
) -> None:
use_cpu = False
T = random.randint(1, 50)
B = random.randint(0, 128)
L = random.randint(0, 32)
D = random.randint(2, 2048)
log_E = random.randint(2, 4)
use_cache = False
# cache_algorithm does not matter since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
)
if pooling_mode == PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
if nbit_weights_ty is None:
# weights_ty is ignored when mixed weight types are used.
weights_ty: SparseType = SparseType.INT8
mixed_weights_ty = True
else:
weights_ty: SparseType = nbit_weights_ty
mixed_weights_ty = False
output_dtype = random.choice(
[SparseType.FP32, SparseType.FP16, SparseType.BF16]
)
self.execute_nbit_forward_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
pooling_mode,
weights_ty,
use_cache,
cache_algorithm,
use_cpu,
use_array_for_index_remapping,
do_pruning,
mixed_weights_ty,
output_dtype,
)
@unittest.skipIf(*gpu_unavailable)
@given(
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
emulate_pruning=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function(
self,
weights_ty: SparseType,
emulate_pruning: bool,
) -> None:
# TODO: support direct-mapped in int_nbit_split_embedding_uvm_caching_codegen_lookup_function
# This test is for int_nbit_split_embedding_uvm_caching_codegen_lookup_function.
# We run IntNBitTableBatchedEmbeddingBagsCodegen with UVM_CACHING, and then
# run int_nbit_split_embedding_uvm_caching_codegen_lookup_function with the
# exact same cache configuration. As both use the same logic, the result
# as well as cache state should match.
# Currently, int_nbit_split_embedding_uvm_caching_codegen_lookup_function supports only LRU.
cache_algorithm = CacheAlgorithm.LRU
associativity = 32 # Currently, hard-coded 32-way set associative.
current_device: torch.device = torch.device(torch.cuda.current_device())
T = random.randint(1, 5)
B = random.randint(1, 128)
L = random.randint(1, 20)
D = random.randint(2, 256)
log_E = random.randint(3, 5)
iters = 3
E = int(10**log_E)
D_alignment = (
1 if weights_ty.bit_rate() % 8 == 0 else int(8 / weights_ty.bit_rate())
)
D = round_up(D, D_alignment)
# Currently, int_nbit_split_embedding_uvm_caching_codegen_lookup_function supports only all UVM or all UVM_CACHING.
Ds = [D] * T
Es = [E] * T
managed_caching = [EmbeddingLocation.MANAGED_CACHING] * T
# Note both cc_ref and cc use caching.
cc_ref = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, D, weights_ty, M) for (E, D, M) in zip(Es, Ds, managed_caching)],
cache_algorithm=cache_algorithm,
)
cc_ref.fill_random_weights()
# cc is only for cache states; we test int_nbit_split_embedding_uvm_caching_codegen_lookup_function directly;
# hence, no need to synchronize cc's weights with cc_ref's.
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, D, weights_ty, M) for (E, D, M) in zip(Es, Ds, managed_caching)],
cache_algorithm=cache_algorithm,
)
cc.fill_random_weights()
# weights_placements for the all-UVM case.
managed_uvm = [EmbeddingLocation.MANAGED] * T
placement_uvm = torch.tensor(
managed_uvm, device=current_device, dtype=torch.int32
)
# Zero-size HBM cache for the all-UVM case.
zero_size_cache_weights = torch.zeros(
0, 0, device=current_device, dtype=torch.uint8
)
requests = generate_requests(
iters, B, T, L, min(Es), reuse=0.1, emulate_pruning=emulate_pruning
)
for indices, offsets, _ in requests:
indices = indices.int()
offsets = offsets.int()
output_ref = cc_ref(indices, offsets)
# int_nbit_split_embedding_uvm_caching_codegen_lookup_function for UVM_CACHING.
# using weights and other params from cc_ref, but
# cache states from cc.
output_uvm_caching = torch.ops.fbgemm.int_nbit_split_embedding_uvm_caching_codegen_lookup_function(
dev_weights=cc_ref.weights_host
if cc_ref.host_size > 0
else cc_ref.weights_dev,
uvm_weights=cc_ref.weights_uvm,
weights_placements=cc_ref.weights_placements,
weights_offsets=cc_ref.weights_offsets,
weights_tys=cc_ref.weights_tys,
D_offsets=cc_ref.D_offsets,
total_D=cc_ref.total_D,
max_int2_D=cc_ref.max_int2_D,
max_int4_D=cc_ref.max_int4_D,
max_int8_D=cc_ref.max_int8_D,
max_float16_D=cc_ref.max_float16_D,
max_float32_D=cc_ref.max_float32_D,
indices=indices,
offsets=offsets,
pooling_mode=int(cc_ref.pooling_mode),
indice_weights=None,
output_dtype=cc_ref.output_dtype,
lxu_cache_weights=cc.lxu_cache_weights, # cc, not cc_ref.
lxu_cache_locations=torch.empty(0, dtype=torch.int32).fill_(-1),
row_alignment=cc_ref.row_alignment,
max_float8_D=cc_ref.max_float8_D,
fp8_exponent_bits=cc_ref.fp8_exponent_bits,
fp8_exponent_bias=cc_ref.fp8_exponent_bias,
# Additional args for UVM_CACHING: using cc, not cc_ref.
cache_hash_size_cumsum=cc.cache_hash_size_cumsum,
total_cache_hash_size=cc.total_cache_hash_size,
cache_index_table_map=cc.cache_index_table_map,
lxu_cache_state=cc.lxu_cache_state,
lxu_state=cc.lxu_state,
)
torch.testing.assert_close(output_uvm_caching, output_ref, equal_nan=True)
# Cache state: both paths use the same logic, but way assignment within an associative
# cache set can be arbitrary. We compare the sum along ways in each set instead of
# expecting an exact tensor match.
cache_weights_ref = torch.reshape(
cc_ref.lxu_cache_weights,
[-1, associativity],
)
cache_weights = torch.reshape(cc.lxu_cache_weights, [-1, associativity])
torch.testing.assert_close(
torch.sum(cache_weights_ref, 1),
torch.sum(cache_weights, 1),
equal_nan=True,
)
torch.testing.assert_close(
torch.sum(cc.lxu_cache_state, 1),
torch.sum(cc_ref.lxu_cache_state, 1),
equal_nan=True,
)
# lxu_state can be different as time_stamp values can be different.
# we check the entries with max value.
max_timestamp_ref = torch.max(cc_ref.lxu_state)
max_timestamp_uvm_caching = torch.max(cc.lxu_state)
x = cc_ref.lxu_state == max_timestamp_ref
y = cc.lxu_state == max_timestamp_uvm_caching
torch.testing.assert_close(torch.sum(x, 1), torch.sum(y, 1))
# int_nbit_split_embedding_uvm_caching_codegen_lookup_function for UVM.
output_uvm = torch.ops.fbgemm.int_nbit_split_embedding_uvm_caching_codegen_lookup_function(
dev_weights=cc_ref.weights_host
if cc_ref.host_size > 0
else cc_ref.weights_dev,
uvm_weights=cc_ref.weights_uvm,
weights_placements=placement_uvm, # all UVM weights placement.
weights_offsets=cc_ref.weights_offsets,
weights_tys=cc_ref.weights_tys,
D_offsets=cc_ref.D_offsets,
total_D=cc_ref.total_D,
max_int2_D=cc_ref.max_int2_D,
max_int4_D=cc_ref.max_int4_D,
max_int8_D=cc_ref.max_int8_D,
max_float16_D=cc_ref.max_float16_D,
max_float32_D=cc_ref.max_float32_D,
indices=indices,
offsets=offsets,
pooling_mode=int(cc_ref.pooling_mode),
indice_weights=None,
output_dtype=cc_ref.output_dtype,
lxu_cache_weights=zero_size_cache_weights, # empty HBM cache.
lxu_cache_locations=torch.empty(0, dtype=torch.int32).fill_(-1),
row_alignment=cc_ref.row_alignment,
max_float8_D=cc_ref.max_float8_D,
fp8_exponent_bits=cc_ref.fp8_exponent_bits,
fp8_exponent_bias=cc_ref.fp8_exponent_bias,
# Additional args for UVM_CACHING; not needed for UVM.
cache_hash_size_cumsum=None,
total_cache_hash_size=None,
cache_index_table_map=None,
lxu_cache_state=None,
lxu_state=None,
)
torch.testing.assert_close(output_uvm, output_ref, equal_nan=True)
@unittest.skipIf(*gpu_unavailable)
@given(
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
cache_algorithm=st.sampled_from(CacheAlgorithm),
associativity=st.sampled_from([1, DEFAULT_ASSOC]),
do_pruning=st.booleans(),
use_array_for_index_remapping=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_forward_uvm_cache(
self,
weights_ty: SparseType,
cache_algorithm: CacheAlgorithm,
associativity: int,
do_pruning: bool,
use_array_for_index_remapping: bool,
) -> None:
assume(cache_algorithm == CacheAlgorithm.LRU or associativity != 1)
T = random.randint(1, 5)
B = random.randint(1, 128)
L = random.randint(1, 20)
D = random.randint(2, 256)
log_E = random.randint(3, 5)
mixed = random.choice([True, False])
iters = 3
E = int(10**log_E)
D_alignment = (
1 if weights_ty.bit_rate() % 8 == 0 else int(8 / weights_ty.bit_rate())
)
D = round_up(D, D_alignment)
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = EmbeddingLocation.DEVICE if d < average_D else managed[t]
index_remapping = None
pruning_hash_load_factor = 0.5
if do_pruning:
current_device = torch.cuda.current_device()
index_remapping = []
for E in Es:
# For each table, keep the first half of rows as-is; the rest are treated as
# pruned (-1).
remapping = list(range(0, E // 2)) + [-1] * (E - E // 2)
remapping_t = torch.tensor(
remapping,
dtype=torch.int32,
device=current_device,
)
index_remapping.append(remapping_t)
cc_ref = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
D,
weights_ty,
EmbeddingLocation.DEVICE,
)
for (E, D) in zip(Es, Ds)
],
index_remapping=index_remapping,
use_array_for_index_remapping=use_array_for_index_remapping,
pruning_hash_load_factor=pruning_hash_load_factor,
)
cc_ref.fill_random_weights()
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, D, weights_ty, M) for (E, D, M) in zip(Es, Ds, managed)],
cache_algorithm=cache_algorithm,
cache_assoc=associativity,
index_remapping=index_remapping,
use_array_for_index_remapping=use_array_for_index_remapping,
pruning_hash_load_factor=pruning_hash_load_factor,
)
cc.fill_random_weights()
split_weights = cc.split_embedding_weights()
ref_split_weights = cc_ref.split_embedding_weights()
for t in range(T):
(weights, scale_shift) = split_weights[t]
(ref_weights, ref_scale_shift) = ref_split_weights[t]
self.assertEqual(weights.size(), ref_weights.size())
weights.copy_(ref_weights)
if ref_scale_shift is not None:
scale_shift.copy_(ref_scale_shift)
requests = generate_requests(iters, B, T, L, min(Es), reuse=0.1)
for indices, offsets, _ in requests:
indices = indices.int()
offsets = offsets.int()
output = cc(indices, offsets)
output_ref = cc_ref(indices, offsets)
torch.testing.assert_close(output, output_ref, equal_nan=True)
@given(
T=st.integers(min_value=1, max_value=5),
B=st.integers(min_value=1, max_value=8),
L=st.integers(min_value=0, max_value=8),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
use_cpu_hashtable=st.booleans(),
use_array_for_index_remapping=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_pruning(
self,
T: int,
B: int,
L: int,
use_cpu: bool,
use_cpu_hashtable: bool,
use_array_for_index_remapping: bool,
) -> None:
E = int(1000)
LOAD_FACTOR = 0.8
pruning_ratio = 0.5
capacities = [int(B * L / LOAD_FACTOR) + 1 for _ in range(T)]
original_E = int(E / (1.0 - pruning_ratio))
# Ensure original_E is large enough relative to B * L so that unique indices can be drawn
assume(original_E > B * L)
current_device = "cpu" if use_cpu else torch.cuda.current_device()
if use_cpu_hashtable:
assume(use_cpu)
indices = torch.randint(low=0, high=original_E, size=(T, B, L))
for t in range(T):
while (
torch.unique(
indices[t], return_counts=False, return_inverse=False
).numel()
!= indices[t].numel()
):
indices[t] = torch.randint(low=0, high=original_E, size=(B, L))
indices = indices.view(-1).int()
dense_indices = torch.randint(low=0, high=E, size=(T, B, L)).view(-1).int()
offsets = torch.tensor([L * b_t for b_t in range(B * T + 1)]).int()
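# The hash table stores one (key, value) pair per row and is sized so that each
# table stays at LOAD_FACTOR occupancy; -1 marks an empty slot.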
# Initialize and insert Hashmap index remapping based data structure
hash_table = torch.empty(
(sum(capacities), 2),
dtype=torch.int32,
)
hash_table[:, :] = -1
hash_table_offsets = torch.tensor([0] + np.cumsum(capacities).tolist()).long()
torch.ops.fbgemm.pruned_hashmap_insert(
indices, dense_indices, offsets, hash_table, hash_table_offsets
)
if use_cpu_hashtable:
ht = torch.classes.fbgemm.PrunedMapCPU()
ht.insert(indices, dense_indices, offsets, T)
# Initialize and insert Array index remapping based data structure
index_remappings_array = torch.tensor(
[-1] * original_E * T,
dtype=torch.int32,
device=current_device,
)
index_remappings_array_offsets = torch.empty(
T + 1,
dtype=torch.int64,
# pyre-fixme[6]: For 3rd param expected `Union[None, str, device]` but
# got `Union[int, str]`.
device=current_device,
)
index_remappings_array_offsets[0] = 0
for t in range(T):
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int, str]`.
indice_t = (indices.view(T, B, L))[t].long().view(-1).to(current_device)
dense_indice_t = (
(dense_indices.view(T, B, L))[t].view(-1)
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
.to(current_device)
)
selected_indices = torch.add(indice_t, t * original_E)[:E]
index_remappings_array[selected_indices] = dense_indice_t
index_remappings_array_offsets[t + 1] = (
index_remappings_array_offsets[t] + original_E
)
# Move data when using device
if not use_cpu:
(
indices,
dense_indices,
offsets,
hash_table,
hash_table_offsets,
index_remappings_array,
index_remappings_array_offsets,
) = (
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
indices.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
dense_indices.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
offsets.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
hash_table.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
hash_table_offsets.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
index_remappings_array.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
index_remappings_array_offsets.to(current_device),
)
# Lookup
if use_cpu_hashtable:
dense_indices_ = ht.lookup(indices, offsets)
elif not use_array_for_index_remapping: # hashmap based pruning
dense_indices_ = torch.ops.fbgemm.pruned_hashmap_lookup(
indices, offsets, hash_table, hash_table_offsets
)
else: # array based pruning
dense_indices_ = torch.ops.fbgemm.pruned_array_lookup(
indices,
offsets,
index_remappings_array,
index_remappings_array_offsets,
)
# Validate the lookup result
torch.testing.assert_close(dense_indices, dense_indices_)
# For array-based pruning, arbitrarily large indices would be out of bounds; we rely
# on the bounds checker to ensure that indices stay within the boundary.
if not use_array_for_index_remapping:
# now, use a value that does not exist in the original set of indices
# and so should be pruned out.
indices[:] = np.iinfo(np.int32).max
if use_cpu_hashtable:
dense_indices_ = ht.lookup(indices, offsets)
elif not use_array_for_index_remapping: # hashmap based pruning
dense_indices_ = torch.ops.fbgemm.pruned_hashmap_lookup(
indices, offsets, hash_table, hash_table_offsets
)
else: # array based pruning
dense_indices_ = torch.ops.fbgemm.pruned_array_lookup(
indices,
offsets,
index_remappings_array,
index_remappings_array_offsets,
)
torch.testing.assert_close(dense_indices.clone().fill_(-1), dense_indices_)
@given(
L=st.integers(min_value=0, max_value=16),
H=st.integers(min_value=512, max_value=1024),
S=st.integers(min_value=0, max_value=128),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_update_function(self, L: int, H: int, S: int) -> None:
# Generate synthetic data
linear_cache_indices_cpu = torch.randint(L, H, (S,))
lxu_cache_locations_cpu = torch.clone(linear_cache_indices_cpu)
indices = [True if np.random.rand() < 0.5 else False for _ in range(S)]
lxu_cache_locations_cpu[indices] = -1
cache_miss_ids = torch.clone(linear_cache_indices_cpu)
cache_miss_ids[lxu_cache_locations_cpu != -1] = -2
# Calculate the correct output
unique_cache_miss_ids = torch.unique(cache_miss_ids)
expect_out = sum(unique_cache_miss_ids >= 0)
linear_cache_indices = to_device(
torch.tensor(linear_cache_indices_cpu, dtype=torch.int64), use_cpu=False
)
lxu_cache_locations = to_device(
torch.tensor(lxu_cache_locations_cpu, dtype=torch.int32), use_cpu=False
)
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
emb_op = SplitTableBatchedEmbeddingBagsCodegen
cc = emb_op(
embedding_specs=[
(
E,
D,
EmbeddingLocation.MANAGED_CACHING,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
record_cache_metrics=RecordCacheMetrics(True, False),
)
cc._update_cache_miss_counter(lxu_cache_locations, linear_cache_indices)
(
cache_miss_forward_count,
unique_cache_miss_count,
) = cc.get_cache_miss_counter().cpu()
self.assertEqual(unique_cache_miss_count, expect_out)
self.assertLessEqual(cache_miss_forward_count, unique_cache_miss_count)
@given(N=st.integers(min_value=1, max_value=8))
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_miss_counter(self, N: int) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
emb_op = SplitTableBatchedEmbeddingBagsCodegen
cc = emb_op(
embedding_specs=[
(
E,
D,
EmbeddingLocation.MANAGED_CACHING,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
record_cache_metrics=RecordCacheMetrics(True, True),
)
# Create fake input data and the target output
xs = []
x1 = torch.Tensor([[[1], [1]], [[3], [4]]])
x1 = to_device(torch.tensor(x1, dtype=torch.int64), use_cpu=False)
x2 = torch.Tensor([[[2], [1]], [[3], [4]]])
x2 = to_device(torch.tensor(x2, dtype=torch.int64), use_cpu=False)
x3 = torch.Tensor([[[5], [6]], [[7], [8]]])
x3 = to_device(torch.tensor(x3, dtype=torch.int64), use_cpu=False)
xs.append(x1)
xs.append(x2)
xs.append(x3)
target_counter_list = [[1, 3], [2, 4], [3, 8]]
target_tablewise_cache_miss_list = [[1, 2], [2, 2], [4, 4]]
for x, t_counter, t_tablewise_cache_miss in zip(
xs, target_counter_list, target_tablewise_cache_miss_list
):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
cc(indices, offsets)
(
cache_miss_forward_count,
unique_cache_miss_count,
) = cc.get_cache_miss_counter().cpu()
tablewise_cache_miss = cc.get_table_wise_cache_miss().cpu()
self.assertEqual(cache_miss_forward_count, t_counter[0])
self.assertEqual(unique_cache_miss_count, t_counter[1])
for i in range(len(tablewise_cache_miss)):
self.assertEqual(tablewise_cache_miss[i], t_tablewise_cache_miss[i])
@given(N=st.integers(min_value=1, max_value=2))
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_stb_uvm_cache_stats(self, N: int) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
emb_op = SplitTableBatchedEmbeddingBagsCodegen
cc = emb_op(
embedding_specs=[
(
E,
D,
EmbeddingLocation.MANAGED_CACHING,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
gather_uvm_cache_stats=True,
)
x = torch.Tensor([[[1], [1]], [[3], [4]]])
x = to_device(torch.tensor(x, dtype=torch.int64), use_cpu=False)
for _ in range(N):
indices, offsets = get_table_batched_offsets_from_dense(x, use_cpu=False)
cc.reset_cache_states()
cc.reset_uvm_cache_stats()
cc(indices, offsets)
(
n_calls,
n_requested_indices,
n_unique_indices,
n_unique_misses,
n_conflict_unique_misses,
n_conflict_misses,
) = cc.get_uvm_cache_stats()
self.assertEqual(n_calls, 1)
self.assertEqual(n_requested_indices, len(indices))
self.assertEqual(n_unique_indices, len(set(indices.tolist())))
self.assertEqual(n_unique_misses, len(set(indices.tolist())))
self.assertEqual(n_conflict_unique_misses, 0)
self.assertEqual(n_conflict_misses, 0)
@unittest.skipIf(*gpu_unavailable)
@given(
L=st.integers(min_value=0, max_value=16),
H=st.integers(min_value=512, max_value=1024),
S=st.integers(min_value=0, max_value=128),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_cache_update_function(self, L: int, H: int, S: int) -> None:
# Generate synthetic data
linear_cache_indices_cpu = torch.randint(L, H, (S,))
lxu_cache_locations_cpu = torch.clone(linear_cache_indices_cpu)
indices = [True if np.random.rand() < 0.5 else False for _ in range(S)]
lxu_cache_locations_cpu[indices] = -1
cache_miss_ids = torch.clone(linear_cache_indices_cpu)
cache_miss_ids[lxu_cache_locations_cpu != -1] = -2
# Calculate the correct output
unique_cache_miss_ids = torch.unique(cache_miss_ids)
expect_out = sum(unique_cache_miss_ids >= 0)
linear_cache_indices = linear_cache_indices_cpu.to(torch.int32).cuda()
lxu_cache_locations = lxu_cache_locations_cpu.to(torch.int32).cuda()
expected_unique_access = len(torch.unique(linear_cache_indices_cpu))
expected_total_access = len(linear_cache_indices_cpu)
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
record_cache_metrics=RecordCacheMetrics(True, False),
)
cc.fill_random_weights()
cc._update_cache_miss_counter(lxu_cache_locations, linear_cache_indices)
(
cache_miss_forward_count,
unique_cache_miss_count,
unique_access_count,
total_access_count,
) = cc.get_cache_miss_counter().cpu()
self.assertEqual(unique_cache_miss_count, expect_out)
self.assertLessEqual(cache_miss_forward_count, unique_cache_miss_count)
self.assertEqual(unique_access_count, expected_unique_access)
self.assertEqual(total_access_count, expected_total_access)
@unittest.skipIf(*gpu_unavailable)
@given(N=st.integers(min_value=1, max_value=8))
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_cache_miss_counter(self, N: int) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
record_cache_metrics=RecordCacheMetrics(True, True),
)
cc.fill_random_weights()
# Create fake input data and the target output
x1 = torch.Tensor([[[1], [1]], [[3], [4]]]).cuda()
x2 = torch.Tensor([[[2], [1]], [[3], [4]]]).cuda()
x3 = torch.Tensor([[[5], [6]], [[7], [8]]]).cuda()
xs = [x1, x2, x3]
target_counter_list = [[1, 3], [2, 4], [3, 8]]
target_tablewise_cache_miss_list = [[1, 2], [2, 2], [4, 4]]
for x, t_counter, t_tablewise_cache_miss in zip(
xs, target_counter_list, target_tablewise_cache_miss_list
):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
cc(indices.int(), offsets.int())
(
cache_miss_forward_count,
unique_cache_miss_count,
_,
_,
) = cc.get_cache_miss_counter().cpu()
tablewise_cache_miss = cc.get_table_wise_cache_miss().cpu()
self.assertEqual(cache_miss_forward_count, t_counter[0])
self.assertEqual(unique_cache_miss_count, t_counter[1])
for i in range(len(tablewise_cache_miss)):
self.assertEqual(tablewise_cache_miss[i], t_tablewise_cache_miss[i])
@unittest.skipIf(*gpu_unavailable)
@given(
N=st.integers(min_value=1, max_value=8),
dtype=st.sampled_from([SparseType.INT8, SparseType.INT4, SparseType.INT2]),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_uvm_cache_stats(self, N: int, dtype: SparseType) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
dtype,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
gather_uvm_cache_stats=True,
)
cc.fill_random_weights()
# Create fake input data and the target output
x1 = torch.Tensor([[[1], [1]], [[3], [4]]]).cuda()
x2 = torch.Tensor([[[2], [1]], [[3], [4]]]).cuda()
x3 = torch.Tensor([[[5], [6]], [[7], [8]]]).cuda()
xs = [x1, x2, x3]
# num_unique_indices, num_unique_misses
        # Note that these are cumulative over calls, and "unique" is per batch.
target_counter_list = [[3, 3], [4, 4], [4, 8]]
num_calls_expected = 0
        num_indices_expected = 0
num_unique_indices_expected = 0
for x, t_counter in zip(xs, target_counter_list):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
num_calls_expected = num_calls_expected + 1
                num_indices_expected = num_indices_expected + len(indices)
cc(indices.int(), offsets.int())
(
num_calls,
num_indices,
num_unique_indices,
num_unique_misses,
num_conflict_unique_miss,
num_conflict_miss,
) = cc.get_uvm_cache_stats().cpu()
                # Note: num_unique_indices is a cumulative stat.
num_unique_indices_expected = num_unique_indices_expected + t_counter[0]
self.assertEqual(num_calls, num_calls_expected)
                self.assertEqual(num_indices, num_indices_expected)
self.assertEqual(num_unique_indices, num_unique_indices_expected)
self.assertEqual(num_unique_misses, t_counter[1])
self.assertEqual(num_conflict_unique_miss, 0)
self.assertEqual(num_conflict_miss, 0)
T = 1 # for simplicity
Ds = [D] * T
Es = [E] * T
cc1 = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
gather_uvm_cache_stats=True,
cache_sets=1, # Only one set.
)
cc1.fill_random_weights()
        associativity = DEFAULT_ASSOC  # 32 for NVIDIA / 64 for AMD.
repetition = 17
        indices1 = torch.Tensor(
            [[list(range(0, associativity))] * repetition]
        ).cuda()  # 0, 1, ..., 31.
        indices2 = torch.Tensor(
            [[list(range(0, associativity + 1))] * repetition]
        ).cuda()  # 0, 1, ..., 31, 32.
        indices3 = torch.Tensor(
            [[list(range(0, associativity + 10))] * repetition]
        ).cuda()  # 0, 1, ..., 31, 32, ..., 41.
# num_conflict_unique_miss, num_conflict_miss
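        # With a single cache set of DEFAULT_ASSOC ways, only `associativity` rows fit:
        # indices2 adds one extra index repeated 17 times (1 unique conflict miss,
        # 17 conflict misses per call) and indices3 adds ten extras (10 unique, 170 total).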
expected = [[0, 0], [1, 17], [10, 170]]
for x, e in zip((indices1, indices2, indices3), expected):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
cc1(indices.int(), offsets.int())
(
_,
_,
_,
_,
num_conflict_unique_miss,
num_conflict_miss,
) = cc1.get_uvm_cache_stats().cpu()
self.assertEqual(num_conflict_unique_miss, e[0])
self.assertEqual(num_conflict_miss, e[1])
cc1.reset_uvm_cache_stats()
@unittest.skipIf(*gpu_unavailable)
@given(
N=st.integers(min_value=1, max_value=8),
dtype=st.sampled_from([SparseType.INT8, SparseType.INT4, SparseType.INT2]),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_direct_mapped_uvm_cache_stats(
self, N: int, dtype: SparseType
) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
dtype,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
gather_uvm_cache_stats=True,
cache_assoc=1, # Direct Mapped
)
cc.fill_random_weights()
# Create fake input data and the target output
x1 = torch.Tensor([[[1], [1]], [[3], [4]]]).cuda()
x2 = torch.Tensor([[[2], [1]], [[3], [4]]]).cuda()
x3 = torch.Tensor([[[5], [6]], [[7], [8]]]).cuda()
xs = [x1, x2, x3]
# num_unique_indices, num_unique_misses
        # Note that these are cumulative over calls, and "unique" is per batch.
target_counter_list = [[3, 3], [4, 4], [4, 8]]
num_calls_expected = 0
        num_indices_expected = 0
num_unique_indices_expected = 0
for x, t_counter in zip(xs, target_counter_list):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
num_calls_expected = num_calls_expected + 1
                num_indices_expected = num_indices_expected + len(indices)
cc(indices.int(), offsets.int())
(
num_calls,
num_indices,
num_unique_indices,
num_unique_misses,
num_conflict_unique_miss,
num_conflict_miss,
) = cc.get_uvm_cache_stats().cpu()
                # Note: num_unique_indices is a cumulative stat.
num_unique_indices_expected = num_unique_indices_expected + t_counter[0]
self.assertEqual(num_calls, num_calls_expected)
                self.assertEqual(num_indices, num_indices_expected)
self.assertEqual(num_unique_indices, 0) # N/A for Direct Mapped
self.assertEqual(num_unique_misses, 0) # N/A for Direct Mapped
self.assertEqual(
num_conflict_unique_miss, t_counter[1]
) # number of actually inserted rows for Direct Mapped
self.assertEqual(num_conflict_miss, 0)
T = 1 # for simplicity
Ds = [D] * T
Es = [E] * T
cc1 = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
gather_uvm_cache_stats=True,
cache_sets=1, # Only one set.
cache_assoc=1, # Direct Mapped
)
cc1.fill_random_weights()
        associativity = 1  # Direct-Mapped
repetition = 17
        indices1 = torch.Tensor(
            [[list(range(0, associativity))] * repetition]
        ).cuda()  # no conflict misses
        indices2 = torch.Tensor(
            [[list(range(0, associativity + 1))] * repetition]
        ).cuda()  # 1 * 17 conflict misses per request
        indices3 = torch.Tensor(
            [[list(range(0, associativity + 10))] * repetition]
        ).cuda()  # 10 * 17 conflict misses per request
# num_conflict_unique_miss, num_conflict_miss
expected = [[1, 0], [1, 17], [1, 170]]
accum_num_conflict_miss = 0
for x, e in zip((indices1, indices2, indices3), expected):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
cc1(indices.int(), offsets.int())
(
_,
_,
_,
_,
num_conflict_unique_miss,
num_conflict_miss,
) = cc1.get_uvm_cache_stats().cpu()
                # For direct-mapped caches, this represents the number of actually inserted rows.
self.assertEqual(num_conflict_unique_miss, e[0])
accum_num_conflict_miss += e[1]
self.assertEqual(num_conflict_miss, accum_num_conflict_miss)
@given(
T=st.integers(min_value=1, max_value=64),
B=st.integers(min_value=1, max_value=64),
max_L=st.integers(min_value=1, max_value=64),
bounds_check_mode=st.sampled_from(
[
BoundsCheckMode.FATAL,
BoundsCheckMode.WARNING,
BoundsCheckMode.IGNORE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
weighted=st.booleans(),
dtype=st.sampled_from(
[
torch.int64,
torch.int32,
]
),
mixed_B=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_bounds_check( # noqa C901
self,
T: int,
B: int,
max_L: int,
bounds_check_mode: BoundsCheckMode,
use_cpu: bool,
weighted: bool,
dtype: torch.dtype,
mixed_B: bool,
) -> None:
# use_cpu does not support mixed_B
if use_cpu and mixed_B:
mixed_B = False
rows_per_table = torch.tensor(
np.random.randint(low=1, high=1000, size=(T,))
).long()
if not mixed_B:
Bs = [B] * T
else:
low = max(int(0.25 * B), 1)
high = int(B)
if low == high:
Bs = [B] * T
else:
Bs = [np.random.randint(low=low, high=high) for _ in range(T)]
B_offsets = [0] + list(accumulate(Bs))
Ls = np.random.randint(low=0, high=max_L, size=(B_offsets[-1],))
indices = [
np.random.randint(
low=0,
high=rows_per_table[t],
size=sum(Ls[B_offsets[t] : B_offsets[t + 1]]),
)
for t in range(T)
]
indices = torch.tensor(np.concatenate(indices, axis=0)).to(dtype)
weights = (
torch.rand(indices.shape, dtype=torch.float, device=indices.device)
if weighted
else None
)
offsets = torch.tensor([0] + np.cumsum(Ls.flatten()).tolist()).to(dtype)
warning = torch.tensor([0]).long()
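        # bounds_check_indices repairs out-of-bounds entries in place (out-of-bounds
        # indices are zeroed later in this test) and, in WARNING mode, counts them in
        # this warning tensor.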
if mixed_B:
B_offsets = torch.tensor(B_offsets, device="cuda", dtype=torch.int32)
max_B = max(Bs)
else:
B_offsets = None
max_B = -1
self.assertEqual(indices.numel(), np.sum(Ls).item())
self.assertEqual(offsets[-1], np.sum(Ls).item())
if not use_cpu:
indices, offsets, rows_per_table, warning = (
indices.cuda(),
offsets.cuda(),
rows_per_table.cuda(),
warning.cuda(),
)
if weighted:
# pyre-fixme[16]: `Optional` has no attribute `cuda`.
weights = weights.cuda()
indices_copy = indices.clone()
offsets_copy = offsets.clone()
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
B_offsets=B_offsets,
max_B=max_B,
)
        # We don't modify indices when they are in bounds.
torch.testing.assert_close(indices_copy, indices)
indices[:] = torch.iinfo(dtype).max
if bounds_check_mode != BoundsCheckMode.FATAL:
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
B_offsets=B_offsets,
max_B=max_B,
)
torch.testing.assert_close(indices, torch.zeros_like(indices))
if bounds_check_mode == BoundsCheckMode.WARNING:
self.assertEqual(warning.item(), indices.numel())
else:
if use_cpu and indices.numel():
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
B_offsets=B_offsets,
max_B=max_B,
)
# It would be nice to test the CUDA implementation of BoundsCheckMode==FATAL,
# but the device assert kills the CUDA context and requires a process restart,
# which is a bit inconvenient.
# test offsets bound errors
indices = indices_copy.clone()
offsets = offsets_copy.clone()
if offsets.numel() > 0:
offsets[0] = -100
if offsets.numel() > 1:
offsets[-1] += 100
if bounds_check_mode != BoundsCheckMode.FATAL:
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
B_offsets=B_offsets,
max_B=max_B,
)
if offsets.numel() > 0:
self.assertEqual(offsets[0].item(), 0)
if offsets.numel() > 1:
self.assertEqual(offsets[-1].item(), indices.numel())
if bounds_check_mode == BoundsCheckMode.WARNING:
# -1 because when we have 2 elements in offsets, we have only 1
# warning for the pair.
self.assertGreaterEqual(warning.item(), min(2, offsets.numel() - 1))
else:
if use_cpu and indices.numel():
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
)
        # Test the offsets.size(0) != B * T + 1 case. Here we test with T >= 2.
        # If T == 1, we will always get an even division.
        # (This does not apply to mixed_B = True.)
if not mixed_B and T >= 2:
indices = indices_copy.clone()
offsets = offsets_copy.clone()
offsets = torch.cat(
(
offsets,
torch.tensor(
[indices.numel()] * (T - 1),
dtype=offsets.dtype,
device=offsets.device,
),
),
dim=0,
)
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
)
# test weights.size(0) != indices.size(0) case
weights = torch.rand(
(indices.size(0) + 1,), dtype=torch.float, device=indices.device
)
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
B_offsets=B_offsets,
max_B=max_B,
)
def test_pickle(self) -> None:
tensor_queue = torch.classes.fbgemm.TensorQueue(torch.empty(0))
pickled = pickle.dumps(tensor_queue)
unpickled = pickle.loads(pickled) # noqa: F841
@unittest.skipIf(*gpu_unavailable)
def test_linearize_cache_indices(self) -> None:
indices = torch.tensor(
[10, 2, 3, 7, 1, 4, 5, 9, 2, 7, 6, 8, 5, 1, 0, 4],
dtype=torch.int,
device="cuda",
)
pruned_indices = torch.tensor(
[10, -1, 3, 7, 1, 4, -1, 9, 2, -1, 6, 8, 5, 1, -1, 4],
dtype=torch.int,
device="cuda",
)
equal_offsets = torch.tensor([0, 4, 8, 12, 16], dtype=torch.int, device="cuda")
varying_offsets = torch.tensor(
[0, 1, 3, 6, 8, 10, 14, 15, 16], dtype=torch.int, device="cuda"
)
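        # linearize_cache_indices maps each index to cache_hash_size_cumsum[t] + index
        # for its table t; indices of uncached tables (cumsum entry == -1) and pruned
        # indices (< 0) are mapped to the total hash size (the last cumsum entry), as
        # the expected tensors below illustrate (e.g. index 4 of table 1 -> 12 + 4 = 16).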
# Testing equal sized tables.
cache_hash_size_cumsum_0 = torch.tensor([0, 12, 24, 36, 48]).cuda()
linear_cache_indices_0 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_0, indices, equal_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_0.cpu(),
torch.tensor(
[10, 2, 3, 7, 13, 16, 17, 21, 26, 31, 30, 32, 41, 37, 36, 40],
dtype=torch.int,
),
)
)
# Testing partially cached tables.
cache_hash_size_cumsum_1 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_1 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_1, indices, equal_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_1.cpu(),
torch.tensor(
[10, 2, 3, 7, 13, 16, 17, 21, 36, 36, 36, 36, 29, 25, 24, 28],
dtype=torch.int,
),
)
)
# Testing batched with varying pooling factor.
cache_hash_size_cumsum_2 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_2 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_2, indices, varying_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_2.cpu(),
torch.tensor(
[10, 2, 3, 19, 13, 16, 17, 21, 36, 36, 36, 36, 36, 36, 24, 28],
dtype=torch.int,
),
)
)
# Testing when multiple features share the same table.
cache_hash_size_cumsum_3 = torch.tensor([0, 0, 12, 12, 24]).cuda()
linear_cache_indices_3 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_3, indices, varying_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_3.cpu(),
torch.tensor(
[10, 2, 3, 7, 1, 4, 5, 9, 14, 19, 18, 20, 17, 13, 12, 16],
dtype=torch.int,
),
)
)
# Testing equal sized tables + pruned indices
cache_hash_size_cumsum_4 = torch.tensor([0, 12, 24, 36, 48]).cuda()
linear_cache_indices_4 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_4, pruned_indices, equal_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_4.cpu(),
torch.tensor(
[10, 48, 3, 7, 13, 16, 48, 21, 26, 48, 30, 32, 41, 37, 48, 40],
dtype=torch.int,
),
)
)
# Testing batched with varying pooling factor + pruned indices
cache_hash_size_cumsum_5 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_5 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_5, pruned_indices, varying_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_5.cpu(),
torch.tensor(
[10, 36, 3, 19, 13, 16, 36, 21, 36, 36, 36, 36, 36, 36, 36, 28],
dtype=torch.int,
),
)
)
@unittest.skipIf(*gpu_unavailable)
def test_linearize_cache_indices_from_row_idx(self) -> None:
update_row_indices = torch.tensor(
[10, 2, 3, 7, 1, 4, 5, 9, 2, 7, 6, 8, 5, 1, 0, 4],
dtype=torch.int,
device="cuda",
)
update_table_indices = torch.tensor(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3],
dtype=torch.int,
device="cuda",
)
varying_update_table_indices = torch.tensor(
[0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3],
dtype=torch.int,
device="cuda",
)
# Testing equal sized tables.
cache_hash_size_cumsum_0 = torch.tensor([0, 12, 24, 36, 48]).cuda()
linear_cache_indices_0 = torch.ops.fbgemm.linearize_cache_indices_from_row_idx(
cache_hash_size_cumsum_0,
update_table_indices,
update_row_indices,
)
self.assertTrue(
torch.equal(
linear_cache_indices_0.cpu(),
torch.tensor(
[10, 2, 3, 7, 13, 16, 17, 21, 26, 31, 30, 32, 41, 37, 36, 40],
dtype=torch.int,
),
)
)
# Testing partially cached tables.
cache_hash_size_cumsum_1 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_1 = torch.ops.fbgemm.linearize_cache_indices_from_row_idx(
cache_hash_size_cumsum_1,
update_table_indices,
update_row_indices,
)
self.assertTrue(
torch.equal(
linear_cache_indices_1.cpu(),
torch.tensor(
[10, 2, 3, 7, 13, 16, 17, 21, 36, 36, 36, 36, 29, 25, 24, 28],
dtype=torch.int,
),
)
)
# Testing batched with varying pooling factor.
cache_hash_size_cumsum_2 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_2 = torch.ops.fbgemm.linearize_cache_indices_from_row_idx(
cache_hash_size_cumsum_2,
varying_update_table_indices,
update_row_indices,
)
self.assertTrue(
torch.equal(
linear_cache_indices_2.cpu(),
torch.tensor(
[10, 2, 3, 19, 13, 16, 17, 21, 36, 36, 36, 36, 36, 36, 24, 28],
dtype=torch.int,
),
)
)
@unittest.skipIf(*gpu_unavailable)
@given(
associativity=st.sampled_from([1, DEFAULT_ASSOC]),
)
@settings(deadline=None)
def test_lxu_cache_lookup(self, associativity: int) -> None:
max_index: int = 8000
# Use single cache set to avoid dealing with cache set hash algorithm.
lxu_cache_state_gpu = (
torch.arange(associativity, dtype=torch.int64).unsqueeze(0).cuda()
)
# Testing all miss.
linear_cache_indices_0 = (
torch.tensor([32, 33, 34, 35, 36, 100, 1000, 1725])
if associativity <= 32
else torch.tensor([64, 65, 66, 67, 68, 100, 1000, 1725])
).cuda()
lxu_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices_0, lxu_cache_state_gpu, max_index
)
torch.testing.assert_close(
lxu_locations,
torch.full_like(lxu_locations, -1),
)
# Testing all hits.
cache_indices_1 = torch.randint(0, associativity, (associativity,))
linear_cache_indices_1 = cache_indices_1.cuda()
lxu_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices_1, lxu_cache_state_gpu, max_index
)
torch.testing.assert_close(
lxu_locations.cpu(),
cache_indices_1.int(),
)
# Testing mixture.
miss_cache_indices_0 = torch.randint(associativity, max_index // 2, (10,))
hit_cache_indices_0 = torch.randint(0, associativity, (8,))
miss_cache_indices_1 = torch.randint(max_index // 2, max_index, (16,))
hit_cache_indices_1 = torch.randint(0, associativity, (8,))
linear_cache_indices_2 = torch.cat(
[
miss_cache_indices_0,
hit_cache_indices_0,
miss_cache_indices_1,
hit_cache_indices_1,
]
).cuda()
lxu_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices_2, lxu_cache_state_gpu, max_index
)
expected_result = torch.cat(
[
torch.full_like(miss_cache_indices_0, -1),
hit_cache_indices_0,
torch.full_like(miss_cache_indices_1, -1),
hit_cache_indices_1,
]
).int()
torch.testing.assert_close(
lxu_locations.cpu(),
expected_result,
)
@unittest.skipIf(*gpu_unavailable)
@given(
cache_sets=st.integers(min_value=10, max_value=300),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_lxu_cache_locking_counter_decrement(
self,
cache_sets: int,
) -> None:
warp_size = DEFAULT_ASSOC
N = cache_sets * warp_size
lxu_cache_locking_counter = torch.randint(
low=1,
high=3,
size=[cache_sets, warp_size],
device="cuda",
dtype=torch.int32,
)
counter_ref = lxu_cache_locking_counter.tolist()
lxu_cache_locations_list = []
lxu_cache_locations_set = set()
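        # The reference counter decrements each distinct non-negative location exactly
        # once; negative locations (cache misses) are skipped. The op under test is
        # expected to match this reference.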
for _ in range(3 * N):
location = random.randrange(-1, N)
lxu_cache_locations_list.append(location)
lxu_cache_locations_set.add(location)
for idx in lxu_cache_locations_set:
if idx >= 0:
q, r = idx // warp_size, idx % warp_size
counter_ref[q][r] -= 1
counter_ref = torch.tensor(counter_ref, device="cuda", dtype=torch.int32)
lxu_cache_locations = torch.tensor(
lxu_cache_locations_list, device="cuda", dtype=torch.int32
)
torch.ops.fbgemm.lxu_cache_locking_counter_decrement(
lxu_cache_locking_counter, lxu_cache_locations
)
self.assertTrue(torch.equal(lxu_cache_locking_counter, counter_ref))
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=64),
log_E=st.integers(min_value=2, max_value=3),
N=st.integers(min_value=0, max_value=50),
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
output_dtype=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
test_internal=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_embedding_inplace_update(
self,
T: int, # num of embedding tables
D: int, # embedding dim
log_E: int, # embedding table row number
N: int, # num of update rows per table
weights_ty: SparseType,
output_dtype: SparseType,
use_cpu: bool,
test_internal: bool, # test with OSS op or internal customized op
) -> None:
D_alignment = max(weights_ty.align_size(), output_dtype.align_size())
D = round_up(D, D_alignment)
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
E = int(10**log_E)
Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
row_alignment = 1 if use_cpu else 16
current_device = "cpu" if use_cpu else torch.cuda.current_device()
location = EmbeddingLocation.HOST if use_cpu else EmbeddingLocation.DEVICE
weights_ty_list = [weights_ty] * T
if open_source:
test_internal = False
# create two embedding bag op with random weights
locations = [location] * T
op = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
("", E, D, W_TY, L)
for (E, D, W_TY, L) in zip(Es, Ds, weights_ty_list, locations)
],
output_dtype=output_dtype,
device=current_device,
)
op.fill_random_weights()
op_ref = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
("", E, D, W_TY, L)
for (E, D, W_TY, L) in zip(Es, Ds, weights_ty_list, locations)
],
output_dtype=output_dtype,
device=current_device,
)
op_ref.fill_random_weights()
# randomly generate update table and row indices
update_table_indices = []
update_table_indices2 = []
update_row_indices = []
update_row_indices2 = []
for t in range(T):
n = np.random.randint(low=0, high=N) if N > 0 else 0
if n == 0:
continue
update_table_indices.append(t)
update_row_id_list = random.sample(range(Es[t]), n)
update_row_indices.append(update_row_id_list)
update_table_indices2.extend([t] * n)
update_row_indices2.extend(update_row_id_list)
# generate update tensor based on weights from "op_ref" embedding table
update_weights_list = []
ref_split_weights = op_ref.split_embedding_weights(split_scale_shifts=False)
update_weight_size = sum(
[
rounded_row_size_in_bytes(
Ds[t],
weights_ty_list[t],
row_alignment,
)
for t in update_table_indices2
]
)
update_weights_tensor2 = torch.randint(
low=0,
high=255,
size=(update_weight_size,),
dtype=torch.uint8,
device=current_device,
)
update_offsets = 0
for i in range(len(update_table_indices)):
table_idx = update_table_indices[i]
(ref_weights, _) = ref_split_weights[table_idx]
D_bytes = rounded_row_size_in_bytes(
Ds[table_idx], weights_ty_list[table_idx], row_alignment
)
update_weights = []
for row_idx in update_row_indices[i]:
update_weights.append(ref_weights[row_idx].tolist())
update_weights_tensor2[
update_offsets : update_offsets + D_bytes
] = ref_weights[row_idx]
update_offsets += D_bytes
update_weights_tensor = torch.tensor(
update_weights,
device=current_device,
dtype=torch.uint8,
)
update_weights_list.append(update_weights_tensor)
# run inplace update on "op" embedding table
if not test_internal:
# Test scatter_ based OSS solution
op.embedding_inplace_update(
update_table_indices,
update_row_indices,
update_weights_list,
)
else:
# Test customized op
op.embedding_inplace_update_internal(
update_table_indices2,
update_row_indices2,
update_weights_tensor2,
)
# verify weights are equal with "op_ref" for the updated rows in "op"
split_weights = op.split_embedding_weights(split_scale_shifts=False)
for i in range(len(update_table_indices)):
t = update_table_indices[i]
for r in update_row_indices[i]:
(weights, _) = split_weights[t]
(ref_weights, _) = ref_split_weights[t]
self.assertEqual(weights.size(), ref_weights.size())
torch.testing.assert_close(
weights[r],
ref_weights[r],
rtol=1e-2,
atol=1e-2,
equal_nan=True,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
log_E=st.integers(min_value=2, max_value=3),
weights_precision=st.sampled_from(
[SparseType.FP16, SparseType.FP32, SparseType.INT8]
),
mixed=st.booleans(),
use_cache=st.booleans(),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
num_indices_per_table=st.integers(min_value=1, max_value=5),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES,
deadline=None,
)
def test_reset_embedding_weight_momentum(
self,
T: int,
D: int,
log_E: int,
weights_precision: SparseType,
mixed: bool,
use_cache: bool,
output_dtype: SparseType,
num_indices_per_table: int,
) -> None:
emb_op = SplitTableBatchedEmbeddingBagsCodegen
E = int(10**log_E)
D = D * 4
Ds: List[int] = []
Es: List[int] = []
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = ComputeDevice.CUDA
if use_cache:
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
optimizer = OptimType.EXACT_ROWWISE_ADAGRAD
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
optimizer=optimizer,
weights_precision=weights_precision,
output_dtype=output_dtype,
)
pruned_indices: List[int] = []
pruned_indices_offsets: List[int] = [0]
logical_table_ids: List[int] = []
buffer_ids: List[int] = []
for i in range(len(Es)):
indices = [
np.random.randint(low=1, high=int(Es[i] - 2))
for _ in range(num_indices_per_table)
]
pruned_indices += indices
pruned_indices_offsets.append(
pruned_indices_offsets[i] + num_indices_per_table
)
logical_table_ids.append(i)
buffer_ids.append(i)
pruned_indices_tensor = to_device(
torch.tensor(pruned_indices, dtype=torch.int64, requires_grad=False), False
)
pruned_indices_offsets_tensor = to_device(
torch.tensor(
pruned_indices_offsets, dtype=torch.int64, requires_grad=False
),
False,
)
logical_table_ids_tensor = to_device(
torch.tensor(logical_table_ids, dtype=torch.int32, requires_grad=False),
False,
)
buffer_ids_tensor = to_device(
torch.tensor(buffer_ids, dtype=torch.int32, requires_grad=False), False
)
momentum1: List[Tensor] = [
s for (s,) in cc.split_optimizer_states()
] # List[rows]
weight: List[Tensor] = cc.split_embedding_weights() # List[(rows, dim)]
for t in range(T):
momentum1[t].fill_(1)
weight[t].fill_(1)
def check_weight_momentum(v: int) -> None:
for i in range(len(pruned_indices)):
logical_id = i // num_indices_per_table
table_momentum1 = momentum1[logical_id]
table_weight = weight[logical_id]
dim = Ds[logical_id]
expected_row_momentum1 = to_device(
torch.tensor(v, dtype=torch.float32), False
)
expected_row_weight = to_device(
torch.tensor([v] * dim, dtype=weights_precision.as_dtype()),
False,
)
pruned_index = pruned_indices[i]
row_weight = table_weight[pruned_index]
if weights_precision == SparseType.INT8:
row_weight = row_weight[:-INT8_EMB_ROW_DIM_OFFSET]
self.assertEqual(table_momentum1[pruned_index], expected_row_momentum1)
torch.testing.assert_close(
row_weight,
expected_row_weight,
rtol=0,
atol=0,
equal_nan=True,
)
check_weight_momentum(1)
cc.reset_embedding_weight_momentum(
pruned_indices_tensor,
pruned_indices_offsets_tensor,
logical_table_ids_tensor,
buffer_ids_tensor,
)
check_weight_momentum(0)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import hypothesis.strategies as st
import numpy as np
import torch
from hypothesis import given, settings, Verbosity
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import gpu_unavailable
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
from fbgemm_gpu.test.test_utils import gpu_unavailable
MAX_EXAMPLES = 20
class LayoutTransformOpsTest(unittest.TestCase):
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
D=st.integers(min_value=2, max_value=20),
W=st.integers(min_value=1, max_value=20),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_recat_embedding_grad_output(self, B: int, T: int, D: int, W: int) -> None:
num_features_per_rank = np.random.randint(low=1, high=20, size=(W,)).tolist()
grad_output = torch.randn(B, sum(num_features_per_rank), D).float().cuda()
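        # Reference output: split the gradient per rank along the feature dim and
        # flatten each chunk; recat_embedding_grad_output should produce the same
        # layout in a single call.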
grad_outputs_by_rank = grad_output.split(num_features_per_rank, dim=1)
sharded_grad_output = torch.cat(
[
grad_output_by_rank.contiguous().view(-1)
for grad_output_by_rank in grad_outputs_by_rank
],
dim=0,
)
sharded_grad_output_impl = torch.ops.fbgemm.recat_embedding_grad_output(
grad_output, num_features_per_rank
)
torch.testing.assert_close(
sharded_grad_output_impl.cpu(), sharded_grad_output.cpu()
)
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]
@given(
B=st.integers(min_value=1, max_value=20),
W=st.integers(min_value=1, max_value=20),
cuda=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_recat_embedding_grad_output_mixed_D(
self, B: int, W: int, cuda: bool
) -> None:
num_features_per_rank = np.random.randint(low=1, high=20, size=(W,)).tolist()
global_T = sum(num_features_per_rank)
mixed_D_list = np.random.randint(low=1, high=10, size=(global_T,))
grad_output = torch.randn(B, sum(mixed_D_list)).float().cuda()
if cuda:
grad_output = grad_output.cuda()
num_feature_offsets_list = torch.tensor(
[0] + np.cumsum(num_features_per_rank).tolist()
)
dim_sum_per_rank = [
sum(
mixed_D_list[
num_feature_offsets_list[i] : num_feature_offsets_list[i + 1]
]
)
for i in range(W)
]
grad_outputs_by_rank = grad_output.split(dim_sum_per_rank, dim=1)
sharded_grad_output = torch.cat(
[
grad_output_by_rank.contiguous().view(-1)
for grad_output_by_rank in grad_outputs_by_rank
],
dim=0,
)
sharded_grad_output_impl = torch.ops.fbgemm.recat_embedding_grad_output_mixed_D(
grad_output, dim_sum_per_rank
)
torch.testing.assert_close(
sharded_grad_output_impl.cpu(), sharded_grad_output.cpu()
)
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]
@given(
B=st.integers(min_value=1, max_value=20),
W=st.integers(min_value=1, max_value=20),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_recat_embedding_grad_output_mixed_D_batch(self, B: int, W: int) -> None:
num_features_per_rank = np.random.randint(low=1, high=20, size=(W,)).tolist()
global_T = sum(num_features_per_rank)
mixed_D_list = np.random.randint(low=1, high=10, size=(global_T,))
grad_output = torch.randn(B, sum(mixed_D_list)).float().cuda()
num_feature_offsets_list = torch.tensor(
[0] + np.cumsum(num_features_per_rank).tolist()
)
dim_sum_per_rank = [
sum(
mixed_D_list[
num_feature_offsets_list[i] : num_feature_offsets_list[i + 1]
]
)
for i in range(W)
]
dim_sum_per_rank_tensor = torch.cuda.LongTensor(dim_sum_per_rank)
cumsum_dim_sum_per_rank_tensor = torch.cuda.LongTensor(
np.cumsum([0] + dim_sum_per_rank)[:-1]
)
grad_outputs_by_rank = grad_output.split(dim_sum_per_rank, dim=1)
sharded_grad_output = torch.cat(
[
grad_output_by_rank.contiguous().view(-1)
for grad_output_by_rank in grad_outputs_by_rank
],
dim=0,
)
sharded_grad_output_impl = (
torch.ops.fbgemm.recat_embedding_grad_output_mixed_D_batch(
grad_output.cuda(),
dim_sum_per_rank_tensor.cuda(),
cumsum_dim_sum_per_rank_tensor.cuda(),
)
)
torch.testing.assert_close(
sharded_grad_output_impl.cpu(), sharded_grad_output.cpu()
)
num_features_per_rank = np.random.randint(low=1, high=20, size=(W,)).tolist()
global_T = sum(num_features_per_rank)
mixed_D_list = np.random.randint(low=1, high=10, size=(global_T,))
grad_output = torch.randn(B, sum(mixed_D_list)).float().cuda()
num_feature_offsets_list = torch.tensor(
[0] + np.cumsum(num_features_per_rank).tolist()
)
dim_sum_per_rank = [
sum(
mixed_D_list[
num_feature_offsets_list[i] : num_feature_offsets_list[i + 1]
]
)
for i in range(W)
]
dim_sum_per_rank_tensor = torch.cuda.LongTensor(dim_sum_per_rank)
cumsum_dim_sum_per_rank_tensor = torch.cuda.LongTensor(
np.cumsum([0] + dim_sum_per_rank)[:-1]
)
grad_outputs_by_rank = grad_output.split(dim_sum_per_rank, dim=1)
sharded_grad_output = torch.cat(
[
grad_output_by_rank.contiguous().view(-1)
for grad_output_by_rank in grad_outputs_by_rank
],
dim=0,
)
sharded_grad_output_impl = (
torch.ops.fbgemm.recat_embedding_grad_output_mixed_D_batch(
grad_output, dim_sum_per_rank_tensor, cumsum_dim_sum_per_rank_tensor
)
)
torch.testing.assert_close(
sharded_grad_output_impl.cpu(), sharded_grad_output.cpu()
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
"""Check Python source code contains Meta copyright header
"""
from __future__ import annotations
import os
import sys
import click
def process_header(header, comment):
lines = header.split("\n")
new_lines = []
for line in lines:
if line is None or line == "":
new_lines.append(comment)
else:
new_lines.append(comment + " " + line)
return "\n".join(new_lines) + "\n"
HEADER = """Copyright (c) Meta Platforms, Inc. and affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
HEADER_lines = HEADER.splitlines()[1:]
PY_HEADER = process_header(HEADER, "#")
CPP_HEADER = process_header(HEADER, "//")
def dfs(root_path: str) -> list[str]:
"""DFS source code tree to find python files missing header
Parameters
----------
root_path : str
root source directory path
Returns
-------
list[str]
        list of files missing the header
"""
ret = []
for root, _, files in os.walk(root_path, topdown=False):
for name in files:
path = os.path.join(root, name)
if path.endswith(".py"):
with open(path) as fi:
src = fi.read()
flag = True
for line in HEADER_lines:
if line not in src:
flag = False
break
if not flag:
ret.append(path)
return ret
def fix_header(file_list: list[str]) -> None:
"""Adding Meta header to to source files
Parameters
----------
file_list : list[str]
        list of files missing the header
"""
for path in file_list:
src = ""
with open(path) as fi:
src = fi.read()
with open(path, "w") as fo:
fo.write(PY_HEADER)
fo.write(src)
@click.command()
@click.option(
"--path", help="Root directory of source to be checked", required=True, type=str
)
@click.option(
"--fixit", default=False, help="Fix missing header", required=False, type=bool
)
def check_header(path, fixit):
ret = dfs(path)
if len(ret) == 0:
sys.exit(0)
print("Need to add Meta header to the following files.")
print("----------------File List----------------")
for line in ret:
print(line)
print("-----------------------------------------")
if fixit:
fix_header(ret)
sys.exit(1)
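# Example invocation (illustrative; the script filename is an assumption):
#   python check_copyright_header.py --path ./fbgemm_gpu --fixit True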
if __name__ == "__main__":
check_header()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import subprocess
import sys
import pytorch_sphinx_theme
for dir_i in os.listdir("../.."):
if dir_i == "fbgemm_gpu":
continue
possible_dir = os.path.join("../..", dir_i)
if os.path.isdir(possible_dir):
sys.path.insert(0, possible_dir)
# Doxygen
subprocess.call("doxygen Doxyfile.in", shell=True)
# -- Project information -----------------------------------------------------
highlight_language = "c++"
project = "fbgemm"
copyright = "2022, FBGEMM team"
author = "FBGEMM team"
# The full version, including alpha/beta/rc tags
release = "0.1.2"
# breathe_projects_source = {"auto": ("../src/", ["auto_function.h", "auto_class.h"])}
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.napoleon", "sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
extensions = ["sphinx.ext.intersphinx", "breathe", "sphinx.ext.autodoc"]
intersphinx_mapping = {"pytorch": ("https://pytorch.org/docs/master", None)}
# Setup absolute paths for communicating with breathe / exhale where
# items are expected / should be trimmed by.
# This file is {repo_root}/docs/cpp/source/conf.py
breathe_projects = {"fbgemm_gpu": "../build/xml/", "codegen": "../build/xml/codegen/"}
breathe_default_project = "fbgemm_gpu"
# Tell sphinx what the primary language being documented is.
primary_domain = "cpp"
# Tell sphinx what the pygments highlight language should be.
highlight_language = "cpp"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
"pytorch_project": "fbgemm",
"collapse_navigation": True,
"analytics_id": "UA-117752657-2",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa F401
from typing import Optional
try:
# Internal
from .embedding_common_code_generator import *
except ImportError:
# OSS
from embedding_common_code_generator import *
def _generate(**kwargs: Any) -> None:
gen_args = kwargs["args"]
kwargs["args"] = gen_args["cuda"]
optimizer = kwargs.get("optimizer")
# Generate cuda host code
template = env.get_template("embedding_optimizer_split_template.cu")
write(
f"gen_embedding_optimizer_{optimizer}_split_cuda.cu", template.render(**kwargs)
)
# Generate host code
template = env.get_template("embedding_optimizer_split_host_template.cpp")
write(f"gen_embedding_optimizer_{optimizer}_split.cpp", template.render(**kwargs))
template = env.get_template("embedding_optimizer_split_kernel_template.cu")
write(
f"gen_embedding_optimizer_{optimizer}_split_kernel.cu",
template.render(**kwargs),
)
# Generates Python invoker for CUDA
template = env.get_template("split_embedding_optimizer_codegen.template")
write(
f"split_embedding_optimizer_{optimizer}.py",
template.render(is_fbcode=args.is_fbcode, **kwargs),
)
# Generate optimizer kernel
template = env.get_template("embedding_optimizer_split_device_kernel_template.cuh")
write(
f"gen_embedding_optimizer_{optimizer}_split_device_kernel.cuh",
template.render(**kwargs),
)
def generate(**kwargs: Any) -> None:
_generate(
optimizer_class_name="".join(
[optim.capitalize() for optim in kwargs["optimizer"].split("_")]
),
**kwargs,
)
def optimizer_codegen(
install_dir: Optional[str] = None, is_fbcode: Optional[bool] = None
) -> None:
if install_dir is not None and len(install_dir) != 0:
args.install_dir = install_dir
if is_fbcode is not None:
args.is_fbcode = is_fbcode
# Generate optimizers
generate(**(rowwise_adagrad()))
def main() -> None:
optimizer_codegen()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa F401
import re
from typing import Optional
try:
# Internal
from .embedding_common_code_generator import *
except ImportError:
# OSS
from embedding_common_code_generator import *
def generate_backward_embedding_cuda(
template_filepath: str,
optimizer: str,
filename_format: str,
kwargs: Dict[str, Any],
) -> None:
if not kwargs.get("has_gpu_support"):
return
template = env.get_template(template_filepath)
vbe_options = [True, False] if kwargs.get("has_vbe_support") else [False]
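    # The condition below mirrors the supported combinations: nobag kernels are only
    # generated for the unweighted, non-VBE case, and dense kernels never get VBE.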
for weighted in [True, False]:
for nobag in [True, False]:
for vbe in vbe_options:
if (not nobag or (not weighted and not vbe)) and (
not kwargs.get("dense") or not vbe
):
wdesc = f"{ 'weighted' if weighted else 'unweighted' }{ '_nobag' if nobag else '' }{ '_vbe' if vbe else '' }"
filename = filename_format.format(optimizer, wdesc)
write(
filename,
template.render(
weighted=weighted,
nobag=nobag,
vbe=vbe,
is_index_select=False,
**kwargs,
),
)
print(f"[Backward Split] [{optimizer}]: {filename}")
def generate(**kwargs: Any) -> None:
optimizer = kwargs.get("optimizer")
gen_args = kwargs["args"]
#
# Generate GPU variants of the operators
#
kwargs["args"] = gen_args["cuda"]
# Generate the backward splits
generate_backward_embedding_cuda(
"embedding_backward_split_template.cu",
optimizer,
"gen_embedding_backward_{}_split_{}_cuda.cu",
kwargs,
)
# Generate the cta_per_row kernels for the backward splits
generate_backward_embedding_cuda(
"embedding_backward_split_kernel_cta_template.cu",
optimizer,
"gen_embedding_backward_{}_split_{}_kernel_cta.cu",
kwargs,
)
# Generate the warp_per_row kernels for the backward splits
generate_backward_embedding_cuda(
"embedding_backward_split_kernel_warp_template.cu",
optimizer,
"gen_embedding_backward_{}_split_{}_kernel_warp.cu",
kwargs,
)
# Generate optimizer kernel
template = env.get_template("embedding_optimizer_split_device_kernel_template.cuh")
filename = f"gen_embedding_optimizer_{optimizer}_split_device_kernel.cuh"
write(filename, template.render(**kwargs))
# Generate the backward splits (non-dense)
# We generate only the API to preserve the backward compatibility if
# has_gpu_support=True
if not kwargs.get("dense"):
template = env.get_template("embedding_backward_split_host_template.cpp")
filename = f"gen_embedding_backward_split_{optimizer}.cpp"
write(filename, template.render(**kwargs))
print(f"[Backward Split] [{optimizer}]: {filename}")
if kwargs.get("has_cpu_support") or kwargs.get("has_gpu_support"):
# Generates Python invoker for CUDA + CPU
template = env.get_template(
"split_embedding_codegen_lookup_invoker.template"
)
filename = f"lookup_{optimizer}.py"
write(filename, template.render(is_fbcode=args.is_fbcode, **kwargs))
print(f"[Backward Split] [{optimizer}]: {filename}")
#
# Generate CPU variants of the operators
#
kwargs["args"] = gen_args["cpu"]
# Generate the backward splits
if kwargs.get("has_cpu_support"):
is_approx = "approx" in optimizer
template = (
env.get_template("embedding_backward_split_cpu_approx_template.cpp")
if is_approx
else env.get_template("embedding_backward_split_cpu_template.cpp")
)
filename = f"gen_embedding_backward_{optimizer}_split_cpu.cpp"
write(filename, template.render(**kwargs))
print(f"[Backward Split] [{optimizer}]: {filename}")
# Generate the backward splits (non-dense)
if not kwargs.get("dense"):
template = env.get_template("embedding_backward_split_host_cpu_template.cpp")
filename = f"gen_embedding_backward_split_{optimizer}_cpu.cpp"
write(filename, template.render(**kwargs))
print(f"[Backward Split] [{optimizer}]: {filename}")
# Format the helper call used to generate PackedTensorAccessors
def make_pta_acc_format(pta_str_list: List[str], func_name: str) -> List[str]:
new_str_list = []
for pta_str in pta_str_list:
if "packed_accessor" in pta_str:
match = re.search(
r"([a-zA-z0-9_]*)[.]packed_accessor([3|6][2|4])<(.*)>\(\)", pta_str
)
assert match is not None and len(match.groups()) == 3
tensor, acc_nbits, args = match.groups()
if "acc_type" in args:
match = re.search("at::acc_type<([a-zA-Z_]*), true>", args)
assert match is not None and len(match.groups()) == 1
new_type = match.group(1)
args = re.sub("at::acc_type<[a-zA-Z_]*, true>", new_type, args)
func_name_suffix = "_ACC_TYPE"
else:
func_name_suffix = ""
new_str_list.append(
f"{func_name}{func_name_suffix}({tensor}, {args}, {acc_nbits})"
)
else:
new_str_list.append(pta_str)
return new_str_list
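# For illustration, with func_name="kernel", the accessor string
# 'grad.packed_accessor64<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits>()'
# becomes 'kernel_ACC_TYPE(grad, cache_t, 1, at::RestrictPtrTraits, 64)'.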
def replace_pta_namespace(pta_str_list: List[str]) -> List[str]:
return [
pta_str.replace("at::PackedTensorAccessor", "pta::PackedTensorAccessor")
for pta_str in pta_str_list
]
def backward_indices() -> None:
template = env.get_template("embedding_backward_split_indice_weights_template.cu")
src_cu = template.render()
write("gen_embedding_backward_split_indice_weights_codegen_cuda.cu", src_cu)
src_cu = template.render(dense=True)
write("gen_embedding_backward_dense_indice_weights_codegen_cuda.cu", src_cu)
def backward_dense() -> None:
generate(
optimizer="dense",
dense=True,
args=make_args(
[
(FLOAT, "unused"),
]
),
split_precomputation=split_precomputation,
split_weight_update=split_weight_update,
split_post_update="",
split_weight_update_cpu=split_weight_update_cpu,
has_cpu_support=False,
has_gpu_support=True,
has_vbe_support=False,
)
def generate_forward_embedding_cuda(
template_filepath: str,
filename_format: str,
dense_options: List[bool],
nobag_options: List[bool],
vbe_options: List[bool],
) -> None:
template = env.get_template(template_filepath)
for dense in dense_options:
for weighted in [True, False]:
for nobag in nobag_options:
for vbe in vbe_options:
if (not nobag or (not weighted and not vbe)) and (
not dense or not vbe
):
dense_desc = f"{ 'dense' if dense else 'split'}"
weight_desc = f"{ 'weighted' if weighted else 'unweighted' }"
nobag_desc = f"{ '_nobag' if nobag else '' }"
vbe_desc = f"{ '_vbe' if vbe else '' }"
desc = (
f"{ dense_desc }_{ weight_desc }{ nobag_desc }{ vbe_desc }"
)
filename = filename_format.format(desc)
write(
filename,
template.render(
dense=dense,
weighted=weighted,
nobag=nobag,
vbe=vbe,
is_index_select=False,
),
)
print(f"[Forward Split]: {filename}")
def forward_split() -> None:
# Generate the forward splits
generate_forward_embedding_cuda(
"embedding_forward_split_template.cu",
"gen_embedding_forward_{}_codegen_cuda.cu",
dense_options=[True, False],
nobag_options=[False], # nobag is not used
vbe_options=[True, False],
)
# Generate the kernels for the forward splits
generate_forward_embedding_cuda(
"embedding_forward_split_kernel_template.cu",
"gen_embedding_forward_{}_kernel.cu",
dense_options=[True, False],
nobag_options=[True, False],
vbe_options=[True, False],
)
# Generate the kernels for the forward splits v2
generate_forward_embedding_cuda(
"embedding_forward_split_kernel_v2_template.cu",
"gen_embedding_forward_{}_v2_kernel.cu",
dense_options=[False], # dense is not supported
nobag_options=[False], # nobag is not supported
vbe_options=[False], # vbe is not supported
)
# Generate the small kernels (for nobag only) for the forward splits
template = env.get_template(
"embedding_forward_split_kernel_nobag_small_template.cu"
)
for dense in [True, False]:
wdesc = f"{ 'dense' if dense else 'split' }"
filename = f"gen_embedding_forward_{wdesc}_unweighted_nobag_kernel_small.cu"
write(filename, template.render(dense=dense, is_index_select=False))
print(f"[Forward Split]: {filename}")
# TODO: Separate this function into another codegen script
def index_select() -> None:
kwargs = make_args([(FLOAT, "unused")])
kwargs["args"] = kwargs["cuda"]
for templ_file, gen_file in [
(
"embedding_forward_split_template.cu",
"gen_batch_index_select_dim0_forward_codegen_cuda.cu",
),
(
"embedding_forward_split_kernel_template.cu",
"gen_batch_index_select_dim0_forward_kernel.cu",
),
(
"embedding_forward_split_kernel_nobag_small_template.cu",
"gen_batch_index_select_dim0_forward_kernel_small.cu",
),
(
"embedding_backward_split_template.cu",
"gen_batch_index_select_dim0_backward_codegen_cuda.cu",
),
(
"embedding_backward_split_kernel_cta_template.cu",
"gen_batch_index_select_dim0_backward_kernel_cta.cu",
),
(
"embedding_backward_split_kernel_warp_template.cu",
"gen_batch_index_select_dim0_backward_kernel_warp.cu",
),
]:
template = env.get_template(templ_file)
write(
gen_file,
template.render(
weighted=False,
dense=True,
vbe=False,
nobag=True,
is_index_select=True,
**kwargs,
),
)
template = env.get_template("embedding_backward_split_grad_template.cu")
write("gen_embedding_backward_split_grad.cu", template.render())
def forward_quantized() -> None:
@dataclass
class template_instance_params:
output_rows_per_thread: str
input_rows_in_flight: str
min_128b_rows: str
max_128b_rows: str
@dataclass
class elem_type:
enum_name: str
cpp_type_name: str
primitive_type: str
bit_width: int
template_params: List[template_instance_params]
type_map = {
"FP32": elem_type(
"FP32",
"float",
"FP",
32,
[
template_instance_params(*map(str, (2, 4, 0, 4))),
template_instance_params(*map(str, (2, 2, 4, 16))),
template_instance_params(*map(str, (1, 1, 16, 32))),
template_instance_params(*map(str, (1, 1, 32, 64))),
],
),
"FP16": elem_type(
"FP16",
"__half2",
"FP",
16,
[
template_instance_params(*map(str, (2, 8, 0, 2))),
template_instance_params(*map(str, (2, 8, 2, 4))),
template_instance_params(*map(str, (2, 4, 4, 8))),
template_instance_params(*map(str, (2, 2, 8, 16))),
template_instance_params(*map(str, (2, 1, 16, 32))),
],
),
"FP8": elem_type(
"FP8",
"uint32_t",
"FP",
8,
[
template_instance_params(*map(str, (2, 8, 0, 1))),
template_instance_params(*map(str, (2, 4, 1, 2))),
template_instance_params(*map(str, (2, 4, 2, 4))),
template_instance_params(*map(str, (2, 4, 4, 8))),
template_instance_params(*map(str, (2, 2, 4, 8))),
],
),
"INT8": elem_type(
"INT8",
"uint32_t",
"INT",
8,
[
template_instance_params(*map(str, (2, 8, 0, 1))),
template_instance_params(*map(str, (2, 4, 1, 2))),
template_instance_params(*map(str, (2, 4, 2, 4))),
template_instance_params(*map(str, (2, 4, 4, 8))),
template_instance_params(*map(str, (2, 2, 8, 16))),
],
),
"INT4": elem_type(
"INT4",
"uint32_t",
"INT",
4,
[
template_instance_params(*map(str, (4, 8, 0, 1))),
template_instance_params(*map(str, (2, 8, 1, 2))),
template_instance_params(*map(str, (1, 4, 2, 4))),
template_instance_params(*map(str, (1, 4, 4, 8))),
],
),
"INT2": elem_type(
"INT2",
"uint32_t",
"INT",
2,
[
template_instance_params(*map(str, (2, 16, 0, 1))),
template_instance_params(*map(str, (2, 8, 1, 2))),
template_instance_params(*map(str, (2, 8, 2, 4))),
],
),
}
# Generate the CUDA nbit (kernel) templates
template = env.get_template(
"embedding_forward_quantized_split_nbit_kernel_template.cu"
)
for weighted in [True, False]:
for nobag in [True, False]:
if not nobag or not weighted:
for emb_weight_type in type_map.values():
wdesc = f"{ 'weighted' if weighted else 'unweighted' }{ '_nobag' if nobag else '' }"
filename = f"gen_embedding_forward_quantized_split_nbit_kernel_{ wdesc }_{ emb_weight_type.enum_name.lower() }_codegen_cuda.cu"
write(
filename,
template.render(
weighted=weighted,
nobag=nobag,
emb_weight_type=emb_weight_type,
),
)
print(f"[Forward Quantized]: {filename}")
# Generate the CUDA nbit (host) templates
template = env.get_template(
"embedding_forward_quantized_split_nbit_host_template.cu"
)
for weighted in [True, False]:
for nobag in [True, False]:
if not nobag or not weighted:
wdesc = f"{ 'weighted' if weighted else 'unweighted' }{ '_nobag' if nobag else '' }"
filename = f"gen_embedding_forward_quantized_split_nbit_host_{ wdesc }_codegen_cuda.cu"
write(
filename,
template.render(weighted=weighted, nobag=nobag, type_map=type_map),
)
print(f"[Forward Quantized]: {filename}")
# Generate the CPU templates
template = env.get_template("embedding_forward_quantized_cpu_template.cpp")
for weighted in [True, False]:
filename = f"gen_embedding_forward_quantized_{ 'weighted' if weighted else 'unweighted' }_codegen_cpu.cpp"
write(filename, template.render(weighted=weighted, type_map=type_map))
print(f"[Forward Quantized]: {filename}")
def backward_grad() -> None:
# Generate the common grad functions
template = env.get_template("embedding_backward_split_grad_template.cu")
write("gen_embedding_backward_split_grad.cu", template.render())
def backward_indices() -> None:
template = env.get_template("embedding_backward_split_indice_weights_template.cu")
src_cu = template.render()
write("gen_embedding_backward_split_indice_weights_codegen_cuda.cu", src_cu)
src_cu = template.render(dense=True)
write("gen_embedding_backward_dense_indice_weights_codegen_cuda.cu", src_cu)
def backward_dense() -> None:
generate(
optimizer="dense",
dense=True,
args=make_args(
[
(FLOAT, "unused"),
]
),
has_cpu_support=True,
has_gpu_support=True,
has_vbe_support=False,
)
def gen__init__py() -> None:
template = env.get_template("__init__.template")
src_py = template.render()
write("__init__.py", src_py)
def emb_codegen(
install_dir: Optional[str] = None, is_fbcode: Optional[bool] = None
) -> None:
if install_dir is not None and len(install_dir) != 0:
args.install_dir = install_dir
if is_fbcode is not None:
args.is_fbcode = is_fbcode
backward_grad()
# Generate forwards and specialized backwards
backward_indices()
backward_dense()
forward_quantized()
forward_split()
# Generate backwards and optimizers
generate(**(adagrad()))
generate(**(adam()))
generate(**(lamb()))
generate(**(lars_sgd()))
generate(**(partial_rowwise_adam()))
generate(**(partial_rowwise_lamb()))
generate(**(rowwise_adagrad()))
generate(**(approx_rowwise_adagrad()))
generate(**(rowwise_adagrad_with_weight_decay()))
generate(**(approx_rowwise_adagrad_with_weight_decay()))
generate(**(rowwise_adagrad_with_counter()))
generate(**(approx_rowwise_adagrad_with_counter()))
generate(**(rowwise_weighted_adagrad()))
generate(**(sgd()))
generate(**(approx_sgd()))
generate(**(none_optimizer()))
# Generate index_select ops using TBE backend
index_select()
gen__init__py()
def main() -> None:
emb_codegen()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa F401
import argparse
import os
import re
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import jinja2
args: argparse.Namespace
_: List[str]
TENSOR: int
INT_TENSOR: int
LONG_TENSOR: int
INT: int
FLOAT: int
parser = argparse.ArgumentParser()
# By default the source template files are in the same folder as
# embedding_backward_code_generator.py;
# The install dir is by default the same as the current folder.
parser.add_argument("--install_dir", default=".", help="where to put generated file")
parser.add_argument("--opensource", action="store_false", dest="is_fbcode")
parser.add_argument("--is_rocm", action="store_true")
args, _ = parser.parse_known_args()
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
)
# Upper Limit of "max_embedding_dim (max_D)":
# BT_block_size * sizeof(float) * 4 * kWarpSize * {{ kMaxVecsPerThread }}
# needs to be smaller than the allocated shared memory size (2/3 of 96 KB
# on V100 and 160 KB on A100).
# BT_block_size * 4 * 4 * 32 * (max_D // 128) <= 64 * 1024 (V100) or 96 * 1024 (A100)
# Since BT_block_size >= 1, max_D <= 16K (V100) or 24K (A100).
# Note that if we increase max_D, it will increase the compilation time significantly.
env.globals["max_embedding_dim"] = 1024
# An optimization for ROCm
env.globals["items_per_warp"] = 128 if args.is_rocm is False else 256
env.globals["dense"] = False
def write(filename: str, s: str) -> None:
with open(os.path.join(args.install_dir, filename), "w") as f:
f.write(s)
def _arg_constructor(
type: str, name: str, gpu: bool = True, precision: int = 32
) -> str:
return (
f"{name}.packed_accessor{precision}<{type}, 1, at::RestrictPtrTraits>()"
if gpu
else f"{name}.accessor<{type}, 1>()"
)
def _arg(
type: str,
name: str,
gpu: bool = True,
precision: int = 32,
pass_by_ref: bool = False,
) -> str:
ref = "&" if pass_by_ref else ""
return (
f"at::PackedTensorAccessor{precision}<{type}, 1, at::RestrictPtrTraits>{ref} {name}"
if gpu
else f"at::TensorAccessor<{type}, 1>{ref} {name}"
)
def acc_cache_tensor_arg_constructor(name: str, gpu: bool = True) -> str:
return _arg_constructor(
"at::acc_type<" + ("cache_t" if gpu else "scalar_t") + ", true>",
name,
gpu=gpu,
precision=64,
)
def acc_cache_tensor_arg(name: str, gpu: bool = True, pass_by_ref: bool = False) -> str:
return _arg(
"at::acc_type<" + ("cache_t" if gpu else "scalar_t") + ", true>",
name,
gpu=gpu,
precision=64,
pass_by_ref=pass_by_ref,
)
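# For illustration, acc_cache_tensor_arg_constructor("grad") renders
# 'grad.packed_accessor64<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits>()'
# and acc_cache_tensor_arg("grad") renders
# 'at::PackedTensorAccessor64<at::acc_type<cache_t, true>, 1, at::RestrictPtrTraits> grad'.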
def long_tensor_arg_constructor(name: str, gpu: bool = True) -> str:
return _arg_constructor("int64_t", name, gpu=gpu)
def long_tensor_arg(name: str, gpu: bool = True, pass_by_ref: bool = False) -> str:
return _arg("int64_t", name, gpu=gpu, pass_by_ref=pass_by_ref)
def int_tensor_arg_constructor(name: str, gpu: bool = True) -> str:
return _arg_constructor("int32_t", name, gpu=gpu)
def int_tensor_arg(name: str, gpu: bool = True, pass_by_ref: bool = False) -> str:
return _arg("int32_t", name, gpu=gpu, pass_by_ref=pass_by_ref)
def tensor_arg(name: str) -> str:
return f"Tensor {name}"
def double_arg(name: str, default: float = 0.0) -> str:
return f"double {name} = {default}"
def double_arg_no_default(name: str) -> str:
return f"double {name}"
def float_arg(name: str, default: float = 0.0) -> str:
return f"float {name} = {default}"
def float_arg_no_default(name: str) -> str:
return f"float {name}"
def int64_arg(name: str, default: int = 0) -> str:
return f"int64_t {name} = {default}"
def int64_arg_no_default(name: str) -> str:
return f"int64_t {name}"
def int_arg(name: str, default: int = 0) -> str:
return f"int {name} = {default}"
# Format the macro call to generate pta::PackedTensorAccessors
def make_pta_acc_format(pta_str_list: List[str], func_name: str) -> List[str]:
new_str_list = []
for pta_str in pta_str_list:
if "packed_accessor" in pta_str:
match = re.search(
r"([a-zA-z0-9_]*)[.]packed_accessor([3|6][2|4])<(.*)>\(\)", pta_str
)
assert match is not None and len(match.groups()) == 3
tensor, acc_nbits, args = match.groups()
if "acc_type" in args:
match = re.search("at::acc_type<([a-zA-Z_]*), true>", args)
assert match is not None and len(match.groups()) == 1
new_type = match.group(1)
args = re.sub("at::acc_type<[a-zA-Z_]*, true>", new_type, args)
macro_name = "MAKE_PTA_ACC_WITH_NAME"
else:
macro_name = "MAKE_PTA_WITH_NAME"
args = args.replace(", at::RestrictPtrTraits", "")
new_str_list.append(
f"{macro_name}({func_name}, {tensor}, {args}, {acc_nbits})"
)
else:
new_str_list.append(pta_str)
return new_str_list
def replace_pta_namespace(pta_str_list: List[str]) -> List[str]:
return [
pta_str.replace("at::PackedTensorAccessor", "pta::PackedTensorAccessor")
for pta_str in pta_str_list
]
env.filters["make_pta_acc_format"] = make_pta_acc_format
env.filters["replace_pta_namespace"] = replace_pta_namespace
@dataclass
class Args:
split_kernel_args: List[str]
split_kernel_args_no_defaults: List[str]
split_kernel_arg_constructors: List[str]
split_cpu_kernel_args: List[str]
split_cpu_kernel_arg_constructors: List[str]
split_function_args: List[str]
split_function_args_no_defaults: List[str]
split_saved_tensors: List[str]
split_tensors: List[str]
saved_data: List[Tuple[str, str]]
split_function_arg_names: List[str]
split_function_schemas: List[str]
split_variables: List[str]
split_ref_kernel_args: List[str]
TENSOR, INT_TENSOR, LONG_TENSOR, INT, FLOAT = range(5)
def make_args(
arg_spec: List[Union[Tuple[int, str], Tuple[int, str, Union[float, int]]]]
) -> Dict[str, Any]:
def make_kernel_arg(
ty: int, name: str, default: Union[int, float, None], pass_by_ref: bool = False
) -> str:
return {
TENSOR: lambda x: acc_cache_tensor_arg(x, pass_by_ref=pass_by_ref),
INT_TENSOR: lambda x: int_tensor_arg(x, pass_by_ref=pass_by_ref),
LONG_TENSOR: lambda x: long_tensor_arg(x, pass_by_ref=pass_by_ref),
INT: (lambda x: int64_arg(x, default=int(default)))
if default is not None
else int64_arg_no_default,
FLOAT: (lambda x: float_arg(x, default=default))
if default is not None
else float_arg_no_default,
}[ty](name)
def make_kernel_arg_constructor(ty: int, name: str) -> str:
return {
TENSOR: acc_cache_tensor_arg_constructor,
INT_TENSOR: int_tensor_arg_constructor,
LONG_TENSOR: long_tensor_arg_constructor,
INT: lambda x: x,
FLOAT: lambda x: x,
}[ty](name)
def make_cpu_kernel_arg(ty: int, name: str, default: Union[int, float]) -> str:
return {
TENSOR: lambda x: acc_cache_tensor_arg(x, gpu=False),
INT_TENSOR: lambda x: int_tensor_arg(x, gpu=False),
LONG_TENSOR: lambda x: long_tensor_arg(x, gpu=False),
INT: lambda x: int64_arg(x, default=int(default)),
FLOAT: lambda x: float_arg(x, default=default),
}[ty](name)
def make_cpu_kernel_arg_constructor(ty: int, name: str) -> str:
return {
TENSOR: lambda x: acc_cache_tensor_arg_constructor(x, gpu=False),
INT_TENSOR: lambda x: int_tensor_arg_constructor(x, gpu=False),
LONG_TENSOR: lambda x: long_tensor_arg_constructor(x, gpu=False),
INT: lambda x: x,
FLOAT: lambda x: x,
}[ty](name)
def make_function_arg(
ty: int, name: str, default: Optional[Union[int, float]]
) -> str:
return {
TENSOR: tensor_arg,
INT_TENSOR: tensor_arg,
LONG_TENSOR: tensor_arg,
INT: (lambda x: int64_arg(x, default=int(default)))
if default is not None
else int64_arg_no_default,
FLOAT: (lambda x: double_arg(x, default=default))
if default is not None
else double_arg_no_default,
}[ty](name)
def make_function_schema_arg(ty: int, name: str, default: Union[int, float]) -> str:
return {
TENSOR: tensor_arg,
INT_TENSOR: tensor_arg,
LONG_TENSOR: tensor_arg,
INT: lambda x: int_arg(x, default=int(default)),
FLOAT: lambda x: float_arg(x, default=default),
}[ty](name)
def make_ivalue_cast(ty: int) -> str:
return {INT: "toInt", FLOAT: "toDouble"}[ty]
def make_args_for_compute_device(
split_arg_spec: List[Tuple[int, str, Union[int, float]]]
) -> Args:
return Args(
split_kernel_args=[
make_kernel_arg(ty, name, default)
for (ty, name, default) in split_arg_spec
],
split_kernel_args_no_defaults=[
make_kernel_arg(ty, name, None) for (ty, name, _) in split_arg_spec
],
split_kernel_arg_constructors=[
make_kernel_arg_constructor(ty, name)
for (ty, name, default) in split_arg_spec
],
split_cpu_kernel_args=[
make_cpu_kernel_arg(ty, name, default)
for (ty, name, default) in split_arg_spec
],
split_cpu_kernel_arg_constructors=[
make_cpu_kernel_arg_constructor(ty, name)
for (ty, name, default) in split_arg_spec
],
split_function_args=[
make_function_arg(ty, name, default)
for (ty, name, default) in split_arg_spec
],
split_function_args_no_defaults=[
make_function_arg(ty, name, None)
for (ty, name, default) in split_arg_spec
],
split_tensors=[
name for (ty, name, default) in augmented_arg_spec if ty == TENSOR
],
split_saved_tensors=[
name
for (ty, name, default) in split_arg_spec
if ty in (TENSOR, INT_TENSOR, LONG_TENSOR)
],
saved_data=[
(name, make_ivalue_cast(ty))
for (ty, name, default) in augmented_arg_spec
if ty != TENSOR
],
split_function_arg_names=[name for (ty, name, default) in split_arg_spec],
split_function_schemas=[
make_function_schema_arg(ty, name, default)
for (ty, name, default) in split_arg_spec
],
split_variables=["Variable()" for _ in split_arg_spec],
split_ref_kernel_args=[
make_kernel_arg(ty, name, default, pass_by_ref=True)
for (ty, name, default) in split_arg_spec
],
)
DEFAULT_ARG_VAL = 0
augmented_arg_spec = [
item if len(item) == 3 else (*item, DEFAULT_ARG_VAL) for item in arg_spec
]
split_arg_spec = []
for (ty, arg, default) in augmented_arg_spec:
if ty in (FLOAT, INT):
split_arg_spec.append((ty, arg, default))
else:
assert ty == TENSOR
split_arg_spec.extend(
[
(TENSOR, f"{arg}_host", default),
(INT_TENSOR, f"{arg}_placements", default),
(LONG_TENSOR, f"{arg}_offsets", default),
]
)
cpu = make_args_for_compute_device(split_arg_spec)
split_arg_spec = []
for (ty, arg, default) in augmented_arg_spec:
if ty in (FLOAT, INT):
split_arg_spec.append((ty, arg, default))
else:
assert ty == TENSOR
split_arg_spec.extend(
[
(TENSOR, f"{arg}_dev", default),
(TENSOR, f"{arg}_uvm", default),
(INT_TENSOR, f"{arg}_placements", default),
(LONG_TENSOR, f"{arg}_offsets", default),
]
)
cuda = make_args_for_compute_device(split_arg_spec)
return {"cpu": cpu, "cuda": cuda}
def adagrad() -> Dict[str, Any]:
split_weight_update = """
Vec4T<cache_t> m_t(&momentum1[idx * D + d]);
m_t.acc.x += grad.acc.x * grad.acc.x;
m_t.acc.y += grad.acc.y * grad.acc.y;
m_t.acc.z += grad.acc.z * grad.acc.z;
m_t.acc.w += grad.acc.w * grad.acc.w;
m_t.store(&momentum1[idx * D + d]);
weight_new.acc.x -= learning_rate * grad.acc.x / (sqrtf(m_t.acc.x) + eps);
weight_new.acc.y -= learning_rate * grad.acc.y / (sqrtf(m_t.acc.y) + eps);
weight_new.acc.z -= learning_rate * grad.acc.z / (sqrtf(m_t.acc.z) + eps);
weight_new.acc.w -= learning_rate * grad.acc.w / (sqrtf(m_t.acc.w) + eps);
"""
split_weight_update_cpu = """
for (int64_t d = 0; d < D; ++d) {
momentum1_host[embedding_begin + d] +=
grad_buffer[d] * grad_buffer[d];
host_weights_data[embedding_begin + d] -=
learning_rate * grad_buffer[d] /
(sqrt(momentum1_host[embedding_begin + d]) + eps);
}
"""
return {
"optimizer": "adagrad",
"args": make_args(
[(TENSOR, "momentum1"), (FLOAT, "eps"), (FLOAT, "learning_rate")]
),
"split_precomputation": "",
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": True,
"has_gpu_support": True,
"has_vbe_support": False,
}
def table_info_precomputation(momentum_prefix: str = "momentum1") -> str:
template = """
// table_begin -> (E, D, {momentum_prefix}_row_begin).
std::map<int64_t, std::tuple<int64_t, int64_t, int64_t>> table_info_map;
for (int64_t t = 0; t < T; ++t) {
const auto D = D_offsets_data[t + 1] - D_offsets_data[t];
const auto table_begin = weights_offsets_data[t];
const auto {momentum_prefix}_row_begin = {momentum_prefix}_offsets_data[t];
table_info_map[table_begin] = std::make_tuple(0, D, {momentum_prefix}_row_begin);
}
int64_t previous_table_begin = host_weights.numel();
// NOTE: table_info_map is sorted by table_begin!
for (auto it = table_info_map.rbegin(); it != table_info_map.rend(); ++it) {
const auto D = std::get<1>(it->second);
// Calculates number of rows of each table.
std::get<0>(it->second) = (previous_table_begin - it->first) / D;
previous_table_begin = it->first;
}
"""
return template.replace("{momentum_prefix}", momentum_prefix)
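# Illustrative sketch only (never called by the generator): the helper above is a
# plain textual substitution on the C++ template, so a different momentum prefix
# simply renames the *_row_begin / *_offsets_data identifiers.
def _example_table_info_precomputation() -> None:
    cpp = table_info_precomputation(momentum_prefix="momentum2")
    assert "momentum2_row_begin" in cpp and "momentum2_offsets_data" in cpp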
def rowwise_adagrad() -> Dict[str, Any]:
split_weight_update = """
weight_new.acc.x = correction * weight_new.acc.x - multiplier * grad.acc.x;
weight_new.acc.y = correction * weight_new.acc.y - multiplier * grad.acc.y;
weight_new.acc.z = correction * weight_new.acc.z - multiplier * grad.acc.z;
weight_new.acc.w = correction * weight_new.acc.w - multiplier * grad.acc.w;
"""
split_post_update = """
if (max_norm > 0.0) {
CUDA_KERNEL_ASSERT(!(std::is_same<emb_t, uint8_t>::value && !cache_weights)); // not supported for uint8 yet
// compute weight norm
at::acc_type<cache_t, true> weight_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
weight_sum_square += weight_new.acc.x * weight_new.acc.x + weight_new.acc.y * weight_new.acc.y + weight_new.acc.z * weight_new.acc.z + weight_new.acc.w * weight_new.acc.w;
}
const at::acc_type<cache_t, true> weight_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(weight_sum_square, shfl_sync_mask));
// scale by max_norm if weight_norm exceeds max_norm
if (threadIdx.x == 0) {
multiplier = weight_norm > max_norm ? max_norm / weight_norm : 1.0f;
}
multiplier = SHFL_SYNC(multiplier, 0);
if (weight_norm > max_norm) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
weight_new.acc.x *= multiplier;
weight_new.acc.y *= multiplier;
weight_new.acc.z *= multiplier;
weight_new.acc.w *= multiplier;
weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if embedding is not int8
}
}
}
"""
split_precomputation = """
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
auto gx = grad_sum[i].acc.x;
auto gy = grad_sum[i].acc.y;
auto gz = grad_sum[i].acc.z;
auto gw = grad_sum[i].acc.w;
if (weight_decay_mode == 1) {
// L2 regularization
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row_template.load(d, qparams_template);
gx += weight_decay * weight.acc.x;
gy += weight_decay * weight.acc.y;
gz += weight_decay * weight.acc.z;
gw += weight_decay * weight.acc.w;
}
g_local_sum_square += gx * gx + gy * gy + gz * gz + gw * gw;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square, shfl_sync_mask) / D;
at::acc_type<cache_t, true> multiplier;
at::acc_type<cache_t, true> correction;
if (threadIdx.x == 0) {
at::acc_type<cache_t, true> new_sum_square_grads = momentum1[idx] + g_avg_square;
momentum1[idx] = new_sum_square_grads;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
if (weight_decay_mode == 1) {
// L2 regularization
correction = 1.0 - multiplier * weight_decay;
} else if (weight_decay_mode == 2) {
// Decoupled weight decay
correction = 1.0 - learning_rate * weight_decay;
} else {
// default value
correction = 1.0;
}
}
multiplier = SHFL_SYNC(multiplier, 0);
correction = SHFL_SYNC(correction, 0);
"""
split_weight_update_cpu = """
at::acc_type<grad_t, true> g_local_sum_square = 0.0;
for (int64_t d = 0; d < D; ++d) {
auto grad = grad_buffer[d];
if (weight_decay_mode == 1) {
// L2 regularization
grad += weight_decay * host_weights_data[embedding_begin + d];
}
g_local_sum_square += grad * grad;
}
auto g_avg_square = g_local_sum_square / D;
at::acc_type<grad_t, true> new_sum_square_grads = momentum1_host[momentum1_offsets_data[feature_begin] + idx] + g_avg_square;
momentum1_host[momentum1_offsets_data[feature_begin] + idx] = new_sum_square_grads;
at::acc_type<grad_t, true> multiplier;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
at::acc_type<grad_t, true> correction;
if (weight_decay_mode == 1) {
// L2 regularization
correction = 1.0 - multiplier * weight_decay;
} else if (weight_decay_mode == 2) {
// Decoupled weight decay
correction = 1.0 - learning_rate * weight_decay;
} else {
// default value
correction = 1.0;
}
for (int64_t d = 0; d < D; ++d) {
host_weights_data[embedding_begin + d] = correction * host_weights_data[embedding_begin + d] - grad_buffer[d] * multiplier;
}
"""
return {
"optimizer": "rowwise_adagrad",
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "weight_decay_mode", 0),
(FLOAT, "max_norm", 0.0),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": split_post_update,
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": True,
"has_gpu_support": True,
"has_vbe_support": True,
}
def approx_rowwise_adagrad() -> Dict[str, Any]:
rowwise_adagrad_args = rowwise_adagrad()
approx_split_weight_update = """
// dummy computation to avoid unused variable warning
weight_new.fma_(grad, -multiplier);
assert(false); // approx rowwise AdaGrad is not supported on GPU
"""
return {
"optimizer": "approx_rowwise_adagrad",
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "weight_decay_mode", 0),
]
),
"split_precomputation": rowwise_adagrad_args["split_precomputation"],
"split_weight_update": approx_split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": rowwise_adagrad_args["split_weight_update_cpu"],
"has_cpu_support": False,
"has_gpu_support": False,
"has_vbe_support": False,
}
def rowwise_adagrad_with_weight_decay() -> Dict[str, Any]:
split_weight_update = """
weight_new.acc.x = correction * weight_new.acc.x - multiplier * grad.acc.x;
weight_new.acc.y = correction * weight_new.acc.y - multiplier * grad.acc.y;
weight_new.acc.z = correction * weight_new.acc.z - multiplier * grad.acc.z;
weight_new.acc.w = correction * weight_new.acc.w - multiplier * grad.acc.w;
"""
split_precomputation = """
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
auto gx = grad_sum[i].acc.x;
auto gy = grad_sum[i].acc.y;
auto gz = grad_sum[i].acc.z;
auto gw = grad_sum[i].acc.w;
if (weight_decay_mode == 1) {
// L2 regularization
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row_template.load(d, qparams_template);
gx += weight_decay * weight.acc.x;
gy += weight_decay * weight.acc.y;
gz += weight_decay * weight.acc.z;
gw += weight_decay * weight.acc.w;
}
g_local_sum_square += gx * gx + gy * gy + gz * gz + gw * gw;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square, shfl_sync_mask) / D;
at::acc_type<cache_t, true> multiplier;
at::acc_type<cache_t, true> correction;
if (threadIdx.x == 0) {
at::acc_type<cache_t, true> new_sum_square_grads = momentum1[idx] + g_avg_square;
momentum1[idx] = new_sum_square_grads;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
if (weight_decay_mode == 1) {
// L2 regularization
correction = 1.0 - multiplier * weight_decay;
} else if (weight_decay_mode == 2) {
// Decoupled weight decay
correction = 1.0 - learning_rate * weight_decay;
} else {
// default value
correction = 1.0;
}
}
multiplier = SHFL_SYNC(multiplier, 0);
correction = SHFL_SYNC(correction, 0);
"""
split_weight_update_cpu = """
at::acc_type<grad_t, true> g_local_sum_square = 0.0;
for (int64_t d = 0; d < D; ++d) {
auto grad = grad_buffer[d];
if (weight_decay_mode == 1) {
// L2 regularization
grad += weight_decay * host_weights_data[embedding_begin + d];
}
g_local_sum_square += grad * grad;
}
auto g_avg_square = g_local_sum_square / D;
at::acc_type<grad_t, true> new_sum_square_grads = momentum1_host[momentum1_offsets_data[feature_begin] + idx] + g_avg_square;
momentum1_host[momentum1_offsets_data[feature_begin] + idx] = new_sum_square_grads;
at::acc_type<grad_t, true> multiplier;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
at::acc_type<grad_t, true> correction;
if (weight_decay_mode == 1) {
// L2 regularization
correction = 1.0 - multiplier * weight_decay;
} else if (weight_decay_mode == 2) {
// Decoupled weight decay
correction = 1.0 - learning_rate * weight_decay;
} else {
// default value
correction = 1.0;
}
for (int64_t d = 0; d < D; ++d) {
host_weights_data[embedding_begin + d] = correction * host_weights_data[embedding_begin + d] - grad_buffer[d] * multiplier;
}
"""
return {
"optimizer": "rowwise_adagrad_with_weight_decay",
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "weight_decay_mode", 0),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def approx_rowwise_adagrad_with_weight_decay() -> Dict[str, Any]:
rowwise_adagrad_with_weight_decay_args = rowwise_adagrad_with_weight_decay()
approx_split_weight_update = """
// dummy computation to avoid unused variable warning
weight_new.fma_(grad, -multiplier);
assert(false); // approx rowwise AdaGrad is not supported on GPU
"""
return {
"optimizer": "approx_rowwise_adagrad_with_weight_decay",
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "weight_decay_mode", 0),
]
),
"split_precomputation": rowwise_adagrad_with_weight_decay_args[
"split_precomputation"
],
"split_weight_update": approx_split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": rowwise_adagrad_with_weight_decay_args[
"split_weight_update_cpu"
],
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def rowwise_adagrad_with_counter() -> Dict[str, Any]:
split_weight_update = """
weight_new.acc.x = (exp_reg_correction * weight_new.acc.x - adjusted_multiplier * grad.acc.x);
weight_new.acc.y = (exp_reg_correction * weight_new.acc.y - adjusted_multiplier * grad.acc.y);
weight_new.acc.z = (exp_reg_correction * weight_new.acc.z - adjusted_multiplier * grad.acc.z);
weight_new.acc.w = (exp_reg_correction * weight_new.acc.w - adjusted_multiplier * grad.acc.w);
"""
split_precomputation = """
at::acc_type<cache_t, true> freq = 1.0;
at::acc_type<cache_t, true> l2_wd = 0.0;
at::acc_type<cache_t, true> tail_id_threshold_val = tail_id_threshold;
CUDA_KERNEL_ASSERT(max_counter > 0.0); // avoid divide by zero error
if (is_tail_id_thresh_ratio == 1){
tail_id_threshold_val = floorf(tail_id_threshold * max_counter);
}
if (counter_halflife > 0 && threadIdx.x == 0) {
// if id occurs multiple times in a batch, iter_delta=1
const auto iter_delta = prev_iter[idx] == 0 ? 1.0 : iter * 1.0 - prev_iter[idx];
prev_iter[idx] = iter * 1.0;
const auto counter_log_rho = logf(2.0) / counter_halflife;
row_counter[idx] = 1.0 + expf(-iter_delta * counter_log_rho) * row_counter[idx];
freq = counter_halflife / row_counter[idx];
if (weight_decay_mode == 1) {
// L2 regularization
l2_wd = 1.0;
}
}
freq = SHFL_SYNC(freq, 0);
l2_wd = SHFL_SYNC(l2_wd, 0);
tail_id_threshold_val = SHFL_SYNC(tail_id_threshold_val, 0);
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row_template.load(d, qparams_template);
auto gx = grad_sum[i].acc.x + l2_wd * freq * weight_decay * weight.acc.x;
auto gy = grad_sum[i].acc.y + l2_wd * freq * weight_decay * weight.acc.y;
auto gz = grad_sum[i].acc.z + l2_wd * freq * weight_decay * weight.acc.z;
auto gw = grad_sum[i].acc.w + l2_wd * freq * weight_decay * weight.acc.w;
g_local_sum_square += gx * gx + gy * gy + gz * gz + gw * gw;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square, shfl_sync_mask) / D;
at::acc_type<cache_t, true> multiplier;
at::acc_type<cache_t, true> adjusted_multiplier;
at::acc_type<cache_t, true> exp_reg_correction;
if (threadIdx.x == 0) {
at::acc_type<cache_t, true> new_sum_square_grads = momentum1[idx] + g_avg_square;
momentum1[idx] = new_sum_square_grads;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
adjusted_multiplier = multiplier;
if ( learning_rate_mode >=0 ) {
if (adjustment_iter <= 0 || (adjustment_iter > 0 && iter > adjustment_iter)) {
if (row_counter[idx] > tail_id_threshold_val) {
if ( learning_rate_mode == 0 ) {
adjusted_multiplier = multiplier * max(min(powf(max_counter/(row_counter[idx] + 1.0), adjustment_ub), 10.0), 1.0);
} else if ( learning_rate_mode == 1 ) {
adjusted_multiplier = multiplier * min(max(powf((row_counter[idx] + 1.0)/max_counter, adjustment_ub), 0.1), 1.0);
} else if (learning_rate_mode == 2) {
adjusted_multiplier = learning_rate / (sqrtf(adjustment_ub*row_counter[idx]) + eps);
}
}
}
}
exp_reg_correction = 1.0;
if (adjustment_iter <= 0 || (adjustment_iter > 0 && iter > adjustment_iter)) {
if (weight_decay_mode == 2) {
// Decoupled weight decay
exp_reg_correction = 1.0 - freq * weight_decay * learning_rate;
} else if (weight_decay_mode == 1) {
// L2 regularization (coupled wd)
exp_reg_correction = 1.0 - freq * weight_decay * multiplier;
}
}
}
multiplier = SHFL_SYNC(multiplier, 0);
adjusted_multiplier = SHFL_SYNC(adjusted_multiplier, 0);
exp_reg_correction = SHFL_SYNC(exp_reg_correction, 0);
"""
split_weight_update_cpu = """
at::acc_type<grad_t, true> g_local_sum_square = 0.0;
for (int64_t d = 0; d < D; ++d) {
g_local_sum_square += grad_buffer[d] * grad_buffer[d];
}
auto g_avg_square = g_local_sum_square / D;
auto offset_idx = momentum1_offsets_data[feature_begin] + idx;
at::acc_type<grad_t, true> new_sum_square_grads = momentum1_host[offset_idx] + g_avg_square;
momentum1_host[offset_idx] = new_sum_square_grads;
at::acc_type<grad_t, true> multiplier;
multiplier = learning_rate / (sqrtf(new_sum_square_grads) + eps);
const auto iter_delta = iter * 1.0 - prev_iter_host[offset_idx];
prev_iter_host[offset_idx] = iter * 1.0;
const auto exp_reg = 1.0 / (weight_decay * multiplier + 1.0);
const auto exp_reg_correction = powf(exp_reg, iter_delta);
for (int64_t d = 0; d < D; ++d) {
const auto weight = host_weights_data[embedding_begin + d];
host_weights_data[embedding_begin + d] = exp_reg_correction * weight - exp_reg * multiplier * grad_buffer[d];
}
"""
return {
"optimizer": "rowwise_adagrad_with_counter",
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "prev_iter"),
(TENSOR, "row_counter"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "iter"),
(INT, "counter_halflife", -1),
(INT, "adjustment_iter", -1),
(FLOAT, "adjustment_ub", 1.0),
(INT, "learning_rate_mode", -1),
(INT, "weight_decay_mode", 1),
(INT, "grad_sum_decay", -1),
(FLOAT, "max_counter"),
(FLOAT, "tail_id_threshold", 0.0),
(INT, "is_tail_id_thresh_ratio", 0),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": True,
"has_gpu_support": True,
"has_vbe_support": False,
}
def approx_rowwise_adagrad_with_counter() -> Dict[str, Any]:
rowwise_adagrad_with_counter_args = rowwise_adagrad_with_counter()
approx_split_weight_update = """
// dummy computation to avoid unused variable warning
weight_new.fma_(grad, -multiplier);
assert(false); // approx rowwise AdaGrad is not supported on GPU
"""
return {
"optimizer": "approx_rowwise_adagrad_with_counter",
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "prev_iter"),
(TENSOR, "row_counter"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay", 0.0),
(INT, "iter"),
(INT, "counter_halflife", -1),
(INT, "adjustment_iter", -1),
(FLOAT, "adjustment_ub", 1.0),
(INT, "learning_rate_mode", -1),
(INT, "weight_decay_mode", 1),
(INT, "grad_sum_decay", -1),
(FLOAT, "max_counter"),
(FLOAT, "tail_id_threshold", 0.0),
(INT, "is_tail_id_thresh_ratio", 0),
]
),
"split_precomputation": rowwise_adagrad_with_counter_args[
"split_precomputation"
],
"split_weight_update": approx_split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": rowwise_adagrad_with_counter_args[
"split_weight_update_cpu"
],
"has_cpu_support": False,
"has_gpu_support": False,
"has_vbe_support": False,
}
def rowwise_weighted_adagrad() -> Dict[str, Any]:
split_weight_update = """
weight_new.acc.x = correction * weight_new.acc.x - multiplier * grad.acc.x;
weight_new.acc.y = correction * weight_new.acc.y - multiplier * grad.acc.y;
weight_new.acc.z = correction * weight_new.acc.z - multiplier * grad.acc.z;
weight_new.acc.w = correction * weight_new.acc.w - multiplier * grad.acc.w;
"""
split_precomputation = """
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row_template.load(d, qparams_template);
auto gx = grad_sum[i].acc.x + weight_decay * weight.acc.x;
auto gy = grad_sum[i].acc.y + weight_decay * weight.acc.y;
auto gz = grad_sum[i].acc.z + weight_decay * weight.acc.z;
auto gw = grad_sum[i].acc.w + weight_decay * weight.acc.w;
g_local_sum_square += gx * gx + gy * gy + gz * gz + gw * gw;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square, shfl_sync_mask) / D;
at::acc_type<cache_t, true> multiplier;
at::acc_type<cache_t, true> correction;
if (threadIdx.x == 0) {
at::acc_type<cache_t, true> lambda = sqrtf(iter + 1);
at::acc_type<cache_t, true> new_sum_square_grads = momentum1[idx] + lambda * g_avg_square;
momentum1[idx] = new_sum_square_grads;
multiplier = learning_rate * lambda / (cbrtf(new_sum_square_grads) + eps);
correction = 1.0 - multiplier * weight_decay;
}
multiplier = SHFL_SYNC(multiplier, 0);
correction = SHFL_SYNC(correction, 0);
"""
split_weight_update_cpu = """
// weight_decay not supported for cpu version
at::acc_type<grad_t, true> g_local_sum_square = 0.0;
for (int64_t d = 0; d < D; ++d) {
g_local_sum_square += grad_buffer[d] * grad_buffer[d];
}
auto g_avg_square = g_local_sum_square / D;
at::acc_type<grad_t, true> lambda = sqrtf(iter + 1);
at::acc_type<grad_t, true> new_sum_square_grads = momentum1_host[momentum1_offsets_data[feature_begin] + idx] + lambda * g_avg_square;
momentum1_host[momentum1_offsets_data[feature_begin] + idx] = new_sum_square_grads;
at::acc_type<grad_t, true> multiplier;
multiplier = learning_rate * lambda / (cbrtf(new_sum_square_grads) + eps);
for (int64_t d = 0; d < D; ++d) {
host_weights_data[embedding_begin + d] -= grad_buffer[d] * multiplier;
}
"""
return {
"optimizer": "rowwise_weighted_adagrad",
"is_experimental_optimizer": True,
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "eps"),
(FLOAT, "learning_rate"),
(FLOAT, "weight_decay"),
(INT, "iter"),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": True,
"has_gpu_support": True,
"has_vbe_support": False,
}
def sgd() -> Dict[str, Any]:
split_weight_update = """
weight_new.fma_(grad, -learning_rate);
"""
split_weight_update_cpu = """
for (int64_t d = 0; d < D; ++d) {
host_weights_data[embedding_begin + d] -= learning_rate * grad_buffer[d];
}
"""
return {
"optimizer": "sgd",
"args": make_args([(FLOAT, "learning_rate")]),
"split_precomputation": "",
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": True,
"has_gpu_support": True,
"has_vbe_support": True,
}
def approx_sgd() -> Dict[str, Any]:
sgd_args = sgd()
approx_split_weight_update = """
// approx_sgd not supported for GPU.
// Just do the same thing as exact sgd to avoid unused variable warning.
weight_new.fma_(grad, -learning_rate);
assert(false); // approx SGD is not supported on GPU
"""
return {
"optimizer": "approx_sgd",
"args": make_args([(FLOAT, "learning_rate")]),
"split_precomputation": "",
"split_weight_update": approx_split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": sgd_args["split_weight_update_cpu"],
"has_cpu_support": False,
"has_gpu_support": False,
"has_vbe_support": False,
}
def lamb() -> Dict[str, Any]:
split_precomputation = """
at::acc_type<cache_t, true> weight_sum_sq = 0.0;
at::acc_type<cache_t, true> rtw_sum_sq = 0.0;
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
qparams = weight_row.load_qparams();
}
#pragma unroll 1
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row.load(d, qparams);
Vec4T<at::acc_type<cache_t, true>> m1(&momentum1[idx * D + d]);
m1.acc.x = beta1 * m1.acc.x + (1.0 - beta1) * grad_sum[i].acc.x;
m1.acc.y = beta1 * m1.acc.y + (1.0 - beta1) * grad_sum[i].acc.y;
m1.acc.z = beta1 * m1.acc.z + (1.0 - beta1) * grad_sum[i].acc.z;
m1.acc.w = beta1 * m1.acc.w + (1.0 - beta1) * grad_sum[i].acc.w;
m1.store(&momentum1[idx * D + d]);
Vec4T<at::acc_type<cache_t, true>> m2(&momentum2[idx * D + d]);
m2.acc.x = beta2 * m2.acc.x + (1.0 - beta2) * grad_sum[i].acc.x * grad_sum[i].acc.x;
m2.acc.y = beta2 * m2.acc.y + (1.0 - beta2) * grad_sum[i].acc.y * grad_sum[i].acc.y;
m2.acc.z = beta2 * m2.acc.z + (1.0 - beta2) * grad_sum[i].acc.z * grad_sum[i].acc.z;
m2.acc.w = beta2 * m2.acc.w + (1.0 - beta2) * grad_sum[i].acc.w * grad_sum[i].acc.w;
m2.store(&momentum2[idx * D + d]);
// now, we are finished with grad_sum. We can *reuse* grad_sum to store r_t + weight_decay * weight;
grad_sum[i].acc.x = (m1.acc.x / (1.0 - powf(beta1, iter))) / (sqrtf((m2.acc.x / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight.acc.x;
grad_sum[i].acc.y = (m1.acc.y / (1.0 - powf(beta1, iter))) / (sqrtf((m2.acc.y / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight.acc.y;
grad_sum[i].acc.z = (m1.acc.z / (1.0 - powf(beta1, iter))) / (sqrtf((m2.acc.z / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight.acc.z;
grad_sum[i].acc.w = (m1.acc.w / (1.0 - powf(beta1, iter))) / (sqrtf((m2.acc.w / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight.acc.w;
weight_sum_sq += weight.acc.x * weight.acc.x + weight.acc.y * weight.acc.y + weight.acc.z * weight.acc.z + weight.acc.w * weight.acc.w;
rtw_sum_sq += grad_sum[i].acc.x * grad_sum[i].acc.x + grad_sum[i].acc.y * grad_sum[i].acc.y + grad_sum[i].acc.z * grad_sum[i].acc.z + grad_sum[i].acc.w * grad_sum[i].acc.w;
}
const auto weight_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(weight_sum_sq, shfl_sync_mask));
const auto rtw_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(rtw_sum_sq, shfl_sync_mask));
const auto true_ratio = weight_norm / rtw_norm;
"""
split_weight_update = """
weight_new.fma_(grad, -learning_rate * true_ratio);
"""
split_weight_update_cpu = ""
return {
"optimizer": "lamb",
"is_experimental_optimizer": True,
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "momentum2"),
(FLOAT, "learning_rate"),
(FLOAT, "eps"),
(FLOAT, "beta1"),
(FLOAT, "beta2"),
(FLOAT, "weight_decay"),
(INT, "iter"),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def partial_rowwise_lamb() -> Dict[str, Any]:
split_precomputation = """
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
g_local_sum_square += grad_sum[i].acc.x * grad_sum[i].acc.x +
grad_sum[i].acc.y * grad_sum[i].acc.y +
grad_sum[i].acc.z * grad_sum[i].acc.z +
grad_sum[i].acc.w * grad_sum[i].acc.w;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square, shfl_sync_mask) / D;
at::acc_type<cache_t, true> m2;
if (threadIdx.x == 0) {
m2 = beta2 * momentum2[idx] + (1.0 - beta2) * g_avg_square;
momentum2[idx] = m2;
}
m2 = SHFL_SYNC(m2, 0);
at::acc_type<cache_t, true> m2_hat = 1.0 / (sqrtf((m2 / (1.0 - powf(beta2, iter)))) + eps);
at::acc_type<cache_t, true> weight_sum_sq = 0.0;
at::acc_type<cache_t, true> rtw_sum_sq = 0.0;
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
qparams = weight_row.load_qparams();
}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t, true>> m1(&momentum1[idx * D + d]);
m1.acc.x = beta1 * m1.acc.x + (1.0 - beta1) * grad_sum[i].acc.x;
m1.acc.y = beta1 * m1.acc.y + (1.0 - beta1) * grad_sum[i].acc.y;
m1.acc.z = beta1 * m1.acc.z + (1.0 - beta1) * grad_sum[i].acc.z;
m1.acc.w = beta1 * m1.acc.w + (1.0 - beta1) * grad_sum[i].acc.w;
m1.store(&momentum1[idx * D + d]);
// now, we are finished with grad_sum. We can *reuse* grad_sum to store r_t + weight_decay * weight;
Vec4T<at::acc_type<cache_t, true>> weight = weight_row.load(d, qparams);
grad_sum[i].acc.x = (m1.acc.x / (1.0 - powf(beta1, iter))) * m2_hat + weight_decay * weight.acc.x;
grad_sum[i].acc.y = (m1.acc.y / (1.0 - powf(beta1, iter))) * m2_hat + weight_decay * weight.acc.y;
grad_sum[i].acc.z = (m1.acc.z / (1.0 - powf(beta1, iter))) * m2_hat + weight_decay * weight.acc.z;
grad_sum[i].acc.w = (m1.acc.w / (1.0 - powf(beta1, iter))) * m2_hat + weight_decay * weight.acc.w;
weight_sum_sq += weight.acc.x * weight.acc.x + weight.acc.y * weight.acc.y + weight.acc.z * weight.acc.z + weight.acc.w * weight.acc.w;
rtw_sum_sq += grad_sum[i].acc.x * grad_sum[i].acc.x + grad_sum[i].acc.y * grad_sum[i].acc.y + grad_sum[i].acc.z * grad_sum[i].acc.z + grad_sum[i].acc.w * grad_sum[i].acc.w;
}
const auto weight_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(weight_sum_sq));
const auto rtw_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(rtw_sum_sq));
const auto true_ratio = weight_norm / rtw_norm;
"""
split_weight_update = """
weight_new.fma_(grad, -learning_rate * true_ratio);
"""
split_weight_update_cpu = "" # TODO
return {
"optimizer": "partial_rowwise_lamb",
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "momentum2"),
(FLOAT, "learning_rate"),
(FLOAT, "eps"),
(FLOAT, "beta1"),
(FLOAT, "beta2"),
(FLOAT, "weight_decay"),
(INT, "iter"),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def adam() -> Dict[str, Any]:
split_weight_update = """
Vec4T<cache_t> m_t(&momentum1[idx * D + d]);
m_t.acc.x *= beta1;
m_t.acc.y *= beta1;
m_t.acc.z *= beta1;
m_t.acc.w *= beta1;
m_t.fma_(grad, 1.0 - beta1);
m_t.store(&momentum1[idx * D + d]);
Vec4T<cache_t> v_t(&momentum2[idx * D + d]);
v_t.acc.x *= beta2;
v_t.acc.y *= beta2;
v_t.acc.z *= beta2;
v_t.acc.w *= beta2;
grad.acc.x *= grad.acc.x;
grad.acc.y *= grad.acc.y;
grad.acc.z *= grad.acc.z;
grad.acc.w *= grad.acc.w;
v_t.fma_(grad, 1.0 - beta2);
v_t.store(&momentum2[idx * D + d]);
weight_new.acc.x -= learning_rate * (m_t.acc.x / (1.0 - powf(beta1, iter)) / (sqrtf((v_t.acc.x / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight_new.acc.x);
weight_new.acc.y -= learning_rate * (m_t.acc.y / (1.0 - powf(beta1, iter)) / (sqrtf((v_t.acc.y / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight_new.acc.y);
weight_new.acc.z -= learning_rate * (m_t.acc.z / (1.0 - powf(beta1, iter)) / (sqrtf((v_t.acc.z / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight_new.acc.z);
weight_new.acc.w -= learning_rate * (m_t.acc.w / (1.0 - powf(beta1, iter)) / (sqrtf((v_t.acc.w / (1.0 - powf(beta2, iter)))) + eps) + weight_decay * weight_new.acc.w);
"""
split_weight_update_cpu = "" # TODO
return {
"optimizer": "adam",
"is_experimental_optimizer": True,
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "momentum2"),
(FLOAT, "learning_rate"),
(FLOAT, "eps"),
(FLOAT, "beta1"),
(FLOAT, "beta2"),
(FLOAT, "weight_decay"),
(INT, "iter"),
]
),
"split_precomputation": "",
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def partial_rowwise_adam() -> Dict[str, Any]:
split_precomputation = """
at::acc_type<cache_t, true> g_local_sum_square = 0.0;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
g_local_sum_square += grad_sum[i].acc.x * grad_sum[i].acc.x +
grad_sum[i].acc.y * grad_sum[i].acc.y +
grad_sum[i].acc.z * grad_sum[i].acc.z +
grad_sum[i].acc.w * grad_sum[i].acc.w;
}
const at::acc_type<cache_t, true> g_avg_square =
warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(g_local_sum_square) / D;
at::acc_type<cache_t, true> v_hat_t;
if (threadIdx.x == 0) {
at::acc_type<cache_t, true> v_t = momentum2[idx] * beta2 + g_avg_square * (1.0 - beta2);
momentum2[idx] = v_t;
v_hat_t = v_t / (1.0 - powf(beta2, iter));
}
v_hat_t = SHFL_SYNC(v_hat_t, 0);
"""
split_weight_update = """
Vec4T<cache_t> m_t(&momentum1[idx * D + d]);
m_t.acc.x *= beta1;
m_t.acc.y *= beta1;
m_t.acc.z *= beta1;
m_t.acc.w *= beta1;
m_t.fma_(grad, 1.0 - beta1);
m_t.store(&momentum1[idx * D + d]);
weight_new.acc.x -= learning_rate * (m_t.acc.x / (1.0 - powf(beta1, iter)) / (sqrtf(v_hat_t) + eps) + weight_decay * weight_new.acc.x);
weight_new.acc.y -= learning_rate * (m_t.acc.y / (1.0 - powf(beta1, iter)) / (sqrtf(v_hat_t) + eps) + weight_decay * weight_new.acc.y);
weight_new.acc.z -= learning_rate * (m_t.acc.z / (1.0 - powf(beta1, iter)) / (sqrtf(v_hat_t) + eps) + weight_decay * weight_new.acc.z);
weight_new.acc.w -= learning_rate * (m_t.acc.w / (1.0 - powf(beta1, iter)) / (sqrtf(v_hat_t) + eps) + weight_decay * weight_new.acc.w);
"""
split_weight_update_cpu = "" # TODO
return {
"optimizer": "partial_rowwise_adam",
"args": make_args(
[
(TENSOR, "momentum1"),
(TENSOR, "momentum2"),
(FLOAT, "learning_rate"),
(FLOAT, "eps"),
(FLOAT, "beta1"),
(FLOAT, "beta2"),
(FLOAT, "weight_decay"),
(INT, "iter"),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def lars_sgd() -> Dict[str, Any]:
split_precomputation = """
at::acc_type<cache_t, true> weight_sum_sq = 0.0;
at::acc_type<cache_t, true> grad_sum_sq = 0.0;
auto weight_row = WeightRow<emb_t, cache_t, at::acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
float2 qparams;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
qparams = weight_row.load_qparams();
}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kThreadGroupSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kThreadGroupSize * i + threadIdx.x * 4;
Vec4T<at::acc_type<cache_t,true>> weight = weight_row.load(d, qparams);
weight_sum_sq += weight.acc.x * weight.acc.x + weight.acc.y * weight.acc.y + weight.acc.z * weight.acc.z + weight.acc.w * weight.acc.w;
grad_sum_sq += grad_sum[i].acc.x * grad_sum[i].acc.x + grad_sum[i].acc.y * grad_sum[i].acc.y + grad_sum[i].acc.z * grad_sum[i].acc.z + grad_sum[i].acc.w * grad_sum[i].acc.w;
}
const auto weight_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(weight_sum_sq));
const auto grad_norm =
sqrtf(warpReduceAllSum<at::acc_type<cache_t, true>, kThreadGroupSize>(grad_sum_sq));
const at::acc_type<cache_t, true> adjusted_lr = learning_rate * eta * weight_norm / (grad_norm + weight_decay * weight_norm);
"""
split_weight_update = """
Vec4T<cache_t> m1(&momentum1[idx * D + d]);
m1.acc.x = momentum * m1.acc.x + adjusted_lr * (grad.acc.x + weight_decay * weight_new.acc.x);
m1.acc.y = momentum * m1.acc.y + adjusted_lr * (grad.acc.y + weight_decay * weight_new.acc.y);
m1.acc.z = momentum * m1.acc.z + adjusted_lr * (grad.acc.z + weight_decay * weight_new.acc.z);
m1.acc.w = momentum * m1.acc.w + adjusted_lr * (grad.acc.w + weight_decay * weight_new.acc.w);
m1.store(&momentum1[idx * D + d]);
weight_new.acc.x -= m1.acc.x;
weight_new.acc.y -= m1.acc.y;
weight_new.acc.z -= m1.acc.z;
weight_new.acc.w -= m1.acc.w;
"""
split_weight_update_cpu = "" # TODO
return {
"optimizer": "lars_sgd",
"is_experimental_optimizer": True,
"args": make_args(
[
(TENSOR, "momentum1"),
(FLOAT, "learning_rate"),
(FLOAT, "eta"),
(FLOAT, "momentum"),
(FLOAT, "weight_decay"),
]
),
"split_precomputation": split_precomputation,
"split_weight_update": split_weight_update,
"split_post_update": "",
"split_weight_update_cpu": split_weight_update_cpu,
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
def none_optimizer() -> Dict[str, Any]:
return {
"optimizer": "none",
"dense": False,
"args": make_args(
[
(INT, "total_hash_size"),
(INT, "total_unique_indices"),
]
),
# Generate only GPU code
"has_cpu_support": False,
"has_gpu_support": True,
"has_vbe_support": False,
}
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple
import torch
from torch import nn
class SplitEmbeddingOptimizerParams(NamedTuple):
weights_dev: nn.Parameter
# TODO: Enable weights_uvm and weights_lxu_cache support
# weights_uvm: nn.Parameter
# weights_lxu_cache: nn.Parameter
class SplitEmbeddingArgs(NamedTuple):
weights_placements: torch.Tensor
weights_offsets: torch.Tensor
max_D: int
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import NamedTuple, Optional
import torch
class VBEMetadata(NamedTuple):
B_offsets: Optional[torch.Tensor]
output_offsets_feature_rank: Optional[torch.Tensor]
B_offsets_rank_per_feature: Optional[torch.Tensor]
max_B_feature_rank: int = -1
max_B: int = -1
output_size: int = -1
class CommonArgs(NamedTuple):
placeholder_autograd_tensor: torch.Tensor
dev_weights: torch.Tensor
host_weights: torch.Tensor
uvm_weights: torch.Tensor
lxu_cache_weights: torch.Tensor
weights_placements: torch.Tensor
weights_offsets: torch.Tensor
D_offsets: torch.Tensor
total_D: int
max_D: int
hash_size_cumsum: torch.Tensor
total_hash_size_bits: int
indices: torch.Tensor
offsets: torch.Tensor
pooling_mode: int
indice_weights: Optional[torch.Tensor]
feature_requires_grad: Optional[torch.Tensor]
lxu_cache_locations: torch.Tensor
output_dtype: int
vbe_metadata: VBEMetadata
is_experimental: bool
class OptimizerArgs(NamedTuple):
stochastic_rounding: bool
gradient_clipping: bool
max_gradient: float
learning_rate: float
eps: float
beta1: float
beta2: float
weight_decay: float
weight_decay_mode: int
eta: float
momentum: float
counter_halflife: int
adjustment_iter: int
adjustment_ub: float
learning_rate_mode: int
grad_sum_decay: int
tail_id_threshold: float
is_tail_id_thresh_ratio: int
total_hash_size: int # Required for OptimType.NONE
class Momentum(NamedTuple):
dev: torch.Tensor
host: torch.Tensor
uvm: torch.Tensor
offsets: torch.Tensor
placements: torch.Tensor
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import subprocess
def configureDoxyfile(input_dir, output_dir):
with open("Doxyfile.in", "r") as file:
filedata = file.read()
filedata = filedata.replace("@DOXYGEN_INPUT_DIR@", input_dir)
filedata = filedata.replace("@DOXYGEN_OUTPUT_DIR@", output_dir)
with open("Doxyfile", "w") as file:
file.write(filedata)
# Check if we're running on Read the Docs' servers
read_the_docs_build = os.environ.get("READTHEDOCS", None) == "True"
breathe_projects = {}
if read_the_docs_build:
input_dir = "../include/fbgemm"
output_dir = "build"
configureDoxyfile(input_dir, output_dir)
subprocess.call("doxygen", shell=True)
breathe_projects["fbgemm"] = output_dir + "/xml"
# -- Project information -----------------------------------------------------
project = "FBGEMM"
copyright = "2020, Facebook Inc."
author = "Facebook Inc."
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# ...
extensions = ["breathe"]
# ...
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Breathe Configuration
breathe_default_project = "FBGEMM"
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## This is a helper script that generates simple Caffe2 models.
from caffe2.proto import caffe2_pb2
from caffe2.python import utils
# Define a weights network
weights = caffe2_pb2.NetDef()
weights.name = "init"
op = caffe2_pb2.OperatorDef()
op.type = "fake_data_provider"
op.output.extend(["data"])
weights.op.extend([op])
weights.external_output.extend(op.output)
op = caffe2_pb2.OperatorDef()
op.type = "GivenTensorFill"
op.output.extend(["fc_w"])
op.arg.extend([utils.MakeArgument("shape", [1, 4])])
op.arg.extend([utils.MakeArgument("values", [1.0 for i in range(4)])])
weights.op.extend([op])
weights.external_output.extend(op.output)
op = caffe2_pb2.OperatorDef()
op.type = "GivenTensorFill"
op.output.extend(["fc_b"])
op.arg.extend([utils.MakeArgument("shape", [1, 4])])
op.arg.extend([utils.MakeArgument("values", [1.0 for i in range(4)])])
weights.op.extend([op])
weights.external_output.extend(op.output)
# Define an inference net
net = caffe2_pb2.NetDef()
net.name = "predict"
op = caffe2_pb2.OperatorDef()
op.type = "fake_operator"
op.input.extend(["data"])
op.output.extend(["fake_out"])
net.op.extend([op])
op = caffe2_pb2.OperatorDef()
op.type = "FC"
op.input.extend(["fake_out"])
op.input.extend(["fc_w"])
op.input.extend(["fc_b"])
op.output.extend(["fc_out"])
net.op.extend([op])
op = caffe2_pb2.OperatorDef()
op.type = "Relu"
op.input.extend(["fc_out"])
op.output.extend(["relu_out"])
net.op.extend([op])
# Relu out is what we want
net.external_output.extend(op.output)
# We want DCE to remove this one
op = caffe2_pb2.OperatorDef()
op.type = "useless_operator"
op.input.extend(["fake_out"])
op.output.extend(["useless_out"])
net.op.extend([op])
with open("predictNet.pb", "wb") as f:
f.write(net.SerializeToString())
with open("initNet.pb", "wb") as f:
f.write(weights.SerializeToString())
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace
from google.protobuf import text_format
def fix_tensor_fills(init_net_file):
init_net_pb = open(init_net_file, "rb").read()
init_net = caffe2_pb2.NetDef()
init_net.ParseFromString(init_net_pb)
for op in init_net.op:
if any("indices" in x for x in op.output):
op.type = "GivenTensorInt64Fill"
elif any("lengths" in x for x in op.output):
op.type = "GivenTensorIntFill"
open(init_net_file + "txt", "w").write(text_format.MessageToString(init_net))
open(init_net_file, "wb").write(init_net.SerializeToString())
def read_init_net_pbtxt(init_net_file):
init_net_txt = open(init_net_file, "r").read()
init_net = caffe2_pb2.NetDef()
text_format.Merge(init_net_txt, init_net)
return init_net
def read_init_net(init_net_file):
init_net_pb = open(init_net_file, "rb").read()
init_net = caffe2_pb2.NetDef()
init_net.ParseFromString(init_net_pb)
return init_net
def read_predict_net(predict_net_file):
predict_net_txt = open(predict_net_file, "r").read()
predict_net = caffe2_pb2.NetDef()
predict_net.name = "the_model"
text_format.Merge(predict_net_txt, predict_net)
return predict_net
def run(predict_net, init_net):
workspace.ResetWorkspace()
workspace.RunNetOnce(init_net)
workspace.CreateNet(predict_net)
workspace.RunNet(predict_net.name)
out = workspace.FetchBlob(predict_net.external_output[0])
print(out)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("predict_net", default="predict_net.pbtxt", nargs="?")
parser.add_argument("init_net", default="init_net.pb", nargs="?")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
init_net = read_init_net(args.init_net)
predict_net = read_predict_net(args.predict_net)
run(predict_net, init_net)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch.onnx
import torchvision
from torch.autograd import Variable
# Export ONNX model from PyTorch
# Refer to https://pytorch.org/docs/stable/onnx.html
class PyTorchPretrainedModel:
def __init__(self, model_name):
self.model_name = model_name
method_to_call = getattr(torchvision.models, self.model_name)
self.model = method_to_call(pretrained=True)
self.model_parameters_num = len(list(self.model.state_dict()))
def export_onnx_model(
self, input_name, output_name, batch_size, model_path, verbose
):
dummy_input = Variable(torch.randn(batch_size, 3, 224, 224))
input_names = [input_name] + [
"learned_%d" % i for i in range(self.model_parameters_num)
]
output_names = [output_name]
torch.onnx.export(
self.model,
dummy_input,
model_path,
verbose=verbose,
input_names=input_names,
output_names=output_names,
)
if __name__ == "__main__":
# For more pretrained model in PyTorch, refer to:
# https://pytorch.org/docs/stable/torchvision/models.html
parser = argparse.ArgumentParser("ONNX model exported from PyTorch.")
parser.add_argument("--model_name", type=str, default="resnet18")
parser.add_argument("--model_path", type=str, default="resnet18.onnx")
parser.add_argument("--model_input_name", type=str, default="data")
parser.add_argument("--model_output_name", type=str, default="output")
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--verbose", action="store_true")
args = parser.parse_args()
pytorch_model = PyTorchPretrainedModel(args.model_name)
pytorch_model.export_onnx_model(
args.model_input_name,
args.model_output_name,
args.batch_size,
args.model_path,
args.verbose,
)
|
#!/usr/bin/env python3
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import re
from collections import defaultdict
from operator import attrgetter, itemgetter
import numpy
def formatUs(time):
"""Format human readable time (input in us)."""
if time < 1000:
return f"{time:.2f} us"
time = time / 1000
if time < 1000:
return f"{time:.2f} ms"
time = time / 1000
return f"{time:.2f} s"
class Event:
"""Class to hold TraceEvents, matches glow::TraceEvent."""
def __init__(self, name, start, end, optype):
self.name = name
self.start = start
self.end = end
self.optype = optype
self.children = []
self.child_time = 0
def __repr__(self):
return f"Event({self.name}, {self.start}, {self.end}, {self.optype})"
def printTree(self, tabs):
"""Pretty print the tree."""
indent = tabs * "\t"
print(f"{indent}{self.name} ({self.optype})")
for c in self.children:
c.printTree(tabs + 1)
def totalOverlap(self, event):
"""Returns True if this Event completely incloses the provided event."""
return self.start <= event.start and self.end >= event.end
def addChild(self, event):
"""Add an enclosed event."""
self.children.append(event)
def updateChildTime(self):
"""Determine the total time cost of all children."""
self.child_time = 0
for child in self.children:
child.updateChildTime()
self.child_time += child.end - child.start
def selfTime(self):
"""Return this Event's time cost above the sum of its children."""
return (self.end - self.start) - self.child_time
def loadEvents(filename, runtimeEvents, fixedEvent, skip):
"""Load the json trace file and create Events."""
trace = None
with open(filename) as f:
trace = json.load(f)
events = []
partialEvents = {}
for line in trace:
if "name" in line:
name = line["name"]
evtype = line["ph"]
start = int(line["ts"])
optype = "runtime"
if "args" in line:
if "type" in line["args"]:
optype = line["args"]["type"]
elif "kind" in line["args"]:
optype = line["args"]["kind"]
# If we're looking for a single event, skip others.
if (
fixedEvent
and not re.match(fixedEvent, name)
and not re.match(fixedEvent, optype)
):
continue
# if we're not including runtime events, skip them.
if not fixedEvent and not runtimeEvents and optype == "runtime":
continue
# If we're skipping some number of events, skip them.
if skip > 0:
skip = skip - 1
continue
end = 0
if evtype == "X":
end = start + int(line["dur"])
events.append(Event(name, start, end, optype))
elif evtype == "B":
partialEvents[name] = Event(name, start, end, optype)
elif evtype == "E":
                if name not in partialEvents:
# This is a bug in Glow tracing, but ignore for now.
continue
ev = partialEvents[name]
ev.end = start
events.append(ev)
return events
def stackEvents(events):
"""Find all enclosed events and move them to be children. Returns a tree of Events
where parents completely enclose the timeline of their children."""
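    # For example (illustrative timestamps), an event spanning [0, 100] that
    # encloses events [10, 40] and [50, 90] becomes their parent; its selfTime()
    # is then 100 - (30 + 40) = 30.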
# Ensure events are sorted by time.
events = sorted(events, key=attrgetter("end"), reverse=True)
events = sorted(events, key=attrgetter("start"))
result = []
lastEvent = None
for ev in events:
# If ev is enclosed by the previous event, add it as a child.
if lastEvent:
if lastEvent.totalOverlap(ev):
lastEvent.addChild(ev)
continue
# If we're closing the previous event, recursively stack its children.
if lastEvent.children:
lastEvent.children = stackEvents(lastEvent.children)
lastEvent.updateChildTime()
        # If not enclosed, it's a new top-level event, which may enclose other events.
lastEvent = ev
result.append(ev)
# Stack children of the last Event.
if lastEvent.children:
lastEvent.children = stackEvents(lastEvent.children)
lastEvent.updateChildTime()
return result
def dumpAccumulate(events, keyfunc, traceTime):
"""Accumulate Event durations by a key produced by keyfunc. Keyfunc is a lambda which
takes an Event as a parameter."""
nameMap = defaultdict(list)
for ev in events:
name = keyfunc(ev)
nameMap[name].append(ev.selfTime())
layers = []
for (name, times) in nameMap.items():
layers.append(
(name, len(times), numpy.mean(times), numpy.std(times), numpy.sum(times))
)
# Iterate sorted by total time.
for (name, num, mean, stddev, total) in sorted(
layers, key=itemgetter(4), reverse=True
):
mean = formatUs(mean)
stddev = formatUs(stddev)
pc = (total / traceTime) * 100
total = formatUs(total)
print(
f"{name} {num} events, mean: {mean}, stddev: {stddev}, total: {total} ({pc:.2f}%)"
)
print()
print()
def main():
parser = argparse.ArgumentParser(description="process trace json")
parser.add_argument("filename", type=str, help="filename for trace file to load")
parser.add_argument(
"--layers", action="store_true", help="aggregate and display by layer names"
)
parser.add_argument(
"--kinds", action="store_true", help="aggregate and display by op kind"
)
parser.add_argument("--runtime", action="store_true", help="include runtime events")
parser.add_argument(
"--summarize", action="store_true", help="print a summary of the trace"
)
parser.add_argument(
"--event", type=str, default="", help="restrict events matching this regex"
)
parser.add_argument(
"--skip",
type=int,
default=0,
help="skip a number of events matching conditions",
)
args = parser.parse_args()
events = loadEvents(args.filename, args.runtime, args.event, args.skip)
if not events:
return
# Stack events so we can determine selfTime.
stacked = stackEvents(events)
# Ensure events are sorted by startTime.
stacked = sorted(stacked, key=attrgetter("start"))
totalTime = stacked[-1].end - stacked[0].start
coveredTime = 0
for ev in stacked:
coveredTime += ev.end - ev.start
if args.layers:
dumpAccumulate(events, lambda ev: f"{ev.name} ({ev.optype})", coveredTime)
if args.kinds:
dumpAccumulate(events, lambda ev: ev.optype, coveredTime)
if args.event:
dumpAccumulate(events, lambda ev: f"{ev.name} ({ev.optype})", coveredTime)
if args.summarize:
print("Total time of trace:", formatUs(totalTime))
print("Time covered by events:", formatUs(coveredTime))
print("Unattributed time:", formatUs(totalTime - coveredTime))
if __name__ == "__main__":
main()
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This was mostly taken from a tutorial from Caffe2:
# caffe2/blob/master/caffe2/python/tutorials/py_gen/MNIST.py
# It currently allows training either LeNet or an MLP on MNIST. The resulting
# pre-trained protobuf files can then be loaded into Glow and run.
import os
import shutil
import caffe2.python.predictor.predictor_exporter as pe
import numpy as np
from caffe2.python import brew, core, model_helper, optimizer, workspace
from caffe2.python.predictor import mobile_exporter
# If you would like to see some really detailed initializations,
# you can change --caffe2_log_level=0 to --caffe2_log_level=-1
core.GlobalInit(["caffe2", "--caffe2_log_level=0"])
print("Necessities imported!")
# If True, a more complicated convolutional model is used
# If False, a multilayer perceptron model is used
USE_LENET_MODEL = True
# This section preps your image and test set in an lmdb database
def DownloadResource(url, path):
"""Downloads resources from s3 by url and unzips them to the provided path"""
    import io
    import zipfile
    import requests
    print("Downloading... {} to {}".format(url, path))
    r = requests.get(url, stream=True)
    z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(path)
print("Completed download and extraction.")
current_folder = os.path.join(os.path.expanduser("~"), "caffe2_notebooks")
data_folder = os.path.join(current_folder, "tutorial_data", "mnist")
root_folder = os.path.join(current_folder, "tutorial_files", "tutorial_mnist")
db_missing = False
if not os.path.exists(data_folder):
os.makedirs(data_folder)
print("Your data folder was not found!! This was generated: {}".format(data_folder))
# Look for existing database: lmdb
# MNIST lmdb can be found here:
# https://download.caffe2.ai/databases/mnist-lmdb.zip
if os.path.exists(os.path.join(data_folder, "mnist-train-nchw-lmdb")):
print("lmdb train db found!")
else:
db_missing = True
if os.path.exists(os.path.join(data_folder, "mnist-test-nchw-lmdb")):
print("lmdb test db found!")
else:
db_missing = True
# attempt the download of the db if either was missing
if db_missing:
print("one or both of the MNIST lmbd dbs not found!!")
db_url = "http://download.caffe2.ai/databases/mnist-lmdb.zip"
try:
DownloadResource(db_url, data_folder)
except Exception as ex:
print(
"Failed to download dataset. Please download it manually from {}".format(
db_url
)
)
print(
"Unzip it and place the two database folders here: {}".format(data_folder)
)
raise ex
if os.path.exists(root_folder):
print("Looks like you ran this before, so we need to cleanup those old files...")
shutil.rmtree(root_folder)
os.makedirs(root_folder)
workspace.ResetWorkspace(root_folder)
print("training data folder:" + data_folder)
print("workspace root folder:" + root_folder)
def AddInput(model, batch_size, db, db_type):
# load the data
data_uint8, label = brew.db_input(
model,
blobs_out=["data_uint8", "label"],
batch_size=batch_size,
db=db,
db_type=db_type,
)
print(data_uint8._from_net)
# cast the data to float
data = model.Cast(data_uint8, "data", to=core.DataType.FLOAT)
# scale data from [0,255] down to [0,1]
data = model.Scale(data, data, scale=float(1.0 / 256))
# don't need the gradient for the backward pass
data = model.StopGradient(data, data)
return data, label
def AddMLPModel(model, data):
size = 28 * 28 * 1
sizes = [size, size * 2, size * 2, 10]
layer = data
for i in range(len(sizes) - 1):
layer = brew.fc(
model,
layer,
"dense_{}".format(i),
dim_in=sizes[i],
dim_out=sizes[i + 1],
use_cudnn=False,
)
layer = model.net.Relu(layer, "relu_{}".format(i), use_cudnn=False)
softmax = model.net.Softmax(layer, "softmax", use_cudnn=False)
return softmax
def AddLeNetModel(model, data):
"""
This part is the standard LeNet model: from data to the softmax prediction.
For each convolutional layer we specify dim_in - number of input channels
and dim_out - number or output channels. Also each Conv and MaxPool layer changes the
image size. For example, kernel of size 5 reduces each side of an image by 4.
While when we have kernel and stride sizes equal 2 in a MaxPool layer, it divides
each side in half.
"""
# Image size: 28 x 28 -> 24 x 24
conv1 = brew.conv(
model, data, "conv1", dim_in=1, dim_out=20, kernel=5, use_cudnn=False
)
# Image size: 24 x 24 -> 12 x 12
pool1 = model.net.MaxPool(conv1, "pool1", kernel=2, stride=2, use_cudnn=False)
# Image size: 12 x 12 -> 8 x 8
conv2 = brew.conv(
model, pool1, "conv2", dim_in=20, dim_out=50, kernel=5, use_cudnn=False
)
# Image size: 8 x 8 -> 4 x 4
pool2 = model.net.MaxPool(conv2, "pool2", kernel=2, stride=2, use_cudnn=False)
    # 50 * 4 * 4 is dim_out from the previous layer multiplied by the
    # 4 x 4 image size at this point
fc3 = brew.fc(model, pool2, "fc3", dim_in=50 * 4 * 4, dim_out=500, use_cudnn=False)
fc3 = model.net.Relu(fc3, "relu3", use_cudnn=False)
pred = brew.fc(model, fc3, "pred", 500, 10, use_cudnn=False)
softmax = model.net.Softmax(pred, "softmax", use_cudnn=False)
return softmax
def AddModel(model, data):
if USE_LENET_MODEL:
return AddLeNetModel(model, data)
else:
return AddMLPModel(model, data)
def AddAccuracy(model, softmax, label):
"""Adds an accuracy op to the model"""
accuracy = model.Accuracy([softmax, label], "accuracy", use_cudnn=False)
return accuracy
def AddTrainingOperators(model, softmax, label):
"""Adds training operators to the model."""
xent = model.LabelCrossEntropy([softmax, label], "xent", use_cudnn=False)
# compute the expected loss
loss = model.AveragedLoss(xent, "loss", use_cudnn=False)
# track the accuracy of the model
AddAccuracy(model, softmax, label)
# use the average loss we just computed to add gradient operators to the
# model
model.AddGradientOperators([loss])
optimizer.build_sgd(
model, base_learning_rate=0.1, policy="step", stepsize=1, gamma=0.999
)
arg_scope = {"order": "NCHW"}
train_model = model_helper.ModelHelper(name="mnist_train", arg_scope=arg_scope)
data, label = AddInput(
train_model,
batch_size=64,
db=os.path.join(data_folder, "mnist-train-nchw-lmdb"),
db_type="lmdb",
)
softmax = AddModel(train_model, data)
AddTrainingOperators(train_model, softmax, label)
test_model = model_helper.ModelHelper(
name="mnist_test", arg_scope=arg_scope, init_params=False
)
data, label = AddInput(
test_model,
batch_size=100,
db=os.path.join(data_folder, "mnist-test-nchw-lmdb"),
db_type="lmdb",
)
softmax = AddModel(test_model, data)
# Deployment model. We simply need the main AddModel part.
deploy_model = model_helper.ModelHelper(
name="mnist_deploy", arg_scope=arg_scope, init_params=False
)
AddModel(deploy_model, "data")
# The parameter initialization network only needs to be run once.
# Now all the parameter blobs are going to be initialized in the workspace.
workspace.RunNetOnce(train_model.param_init_net)
# overwrite=True allows you to run this cell several times and avoid errors
workspace.CreateNet(train_model.net, overwrite=True)
# Set the iterations number and track the accuracy & loss
total_iters = 200
accuracy = np.zeros(total_iters)
loss = np.zeros(total_iters)
print("The blobs in the workspace pre-train: {}".format(workspace.Blobs()))
# Now, we will manually run the network for 200 iterations.
for i in range(total_iters):
workspace.RunNet(train_model.net)
accuracy[i] = workspace.blobs["accuracy"]
loss[i] = workspace.blobs["loss"]
print("The blobs in the workspace post-train: {}".format(workspace.Blobs()))
# param_init_net here will only create a data reader
# Other parameters won't be re-created because we selected
# init_params=False before
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
test_accuracy = np.zeros(100)
for i in range(100):
workspace.RunNet(test_model.net.Proto().name)
test_accuracy[i] = workspace.FetchBlob("accuracy")
print("test_accuracy: %f" % test_accuracy.mean())
# construct the model to be exported
# the inputs/outputs of the model are manually specified.
pe_meta = pe.PredictorExportMeta(
predict_net=deploy_model.net.Proto(),
parameters=[str(b) for b in deploy_model.params],
inputs=["data"],
outputs=["softmax"],
)
# save the model to a file. Use minidb as the file format
pe.save_to_db("minidb", os.path.join(root_folder, "mnist_model.minidb"), pe_meta)
print("The deploy model is saved to: " + root_folder + "/mnist_model.minidb")
workspace.RunNetOnce(deploy_model.param_init_net)
init_net, predict_net = mobile_exporter.Export(
workspace, deploy_model.net, deploy_model.params
)
with open("init_net.pb", "wb") as f:
f.write(init_net.SerializeToString())
with open("predict_net.pb", "wb") as f:
f.write(predict_net.SerializeToString())
with open("predict_net.pbtxt", "wb") as f:
f.write(str(deploy_model.net.Proto()))
# Now we can load the model back and run the prediction to verify it works.
# we retrieve the last input data and use it in our prediction test
# before we reset the workspace
blob = workspace.FetchBlob("data")
# reset the workspace, to make sure the model is actually loaded
workspace.ResetWorkspace(root_folder)
# verify that all blobs are destroyed.
print("The blobs in the workspace after reset: {}".format(workspace.Blobs()))
# load the predict net
predict_net = pe.prepare_prediction_net(
os.path.join(root_folder, "mnist_model.minidb"), "minidb"
)
# verify that blobs are loaded back
print(
"The blobs in the workspace after loading the model: {}".format(workspace.Blobs())
)
# feed the previously saved data to the loaded model
workspace.FeedBlob("data", blob)
# predict
workspace.RunNetOnce(predict_net)
softmax = workspace.FetchBlob("softmax")
|
#!/usr/bin/env python
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import argparse
import array
import collections
import gzip
import os.path
import pickle
import sys
import tarfile
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
Dataset = collections.namedtuple("TargetItem", "filename, url, handler, dest_path")
# Load a file using the pickle module; the required parameters vary between
# Python versions.
def pickle_load(file):
if sys.version_info.major >= 3:
return pickle.load(file, encoding="bytes")
return pickle.load(file)
# A helper function to extract the MNIST dataset from the downloaded gzipped
# pickle file and split it into data and labels.
def handle_mnist(filename, dest_path):
print("Extracting {} ...".format(filename))
with gzip.open(filename, "rb") as file:
training_set, _, _ = pickle_load(file)
data, labels = training_set
images_file = open(os.path.join(dest_path, "mnist_images.bin"), "wb")
data.tofile(images_file)
images_file.close()
labels_file = open(os.path.join(dest_path, "mnist_labels.bin"), "wb")
L = array.array("B", labels)
L.tofile(labels_file)
labels_file.close()
def untar(filename, dest_path, member=None):
print("Extracting {} ...".format(filename))
tar = tarfile.open(filename, "r:gz")
if not member:
tar.extractall(dest_path)
else:
tar.extract(member, dest_path)
tar.close()
DATASETS = dict(
mnist=Dataset(
"mnist.pkl.gz",
"http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz",
handle_mnist,
".",
),
cifar10=Dataset(
"cifar-10.binary.tar.gz",
"http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz",
untar,
".",
),
ptb=Dataset(
"ptb.tgz",
"http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz",
untar,
"ptb",
),
fr2en=Dataset(
"fr2en.tar.gz",
"http://fb-glow-assets.s3.amazonaws.com/models/fr2en.tar.gz",
untar,
"fr2en",
),
)
DATASET_NAMES = list(DATASETS.keys())
CAFFE2_MODELS = [
"densenet121",
"inception_v1",
"inception_v2",
"lenet_mnist",
"resnet50",
"shufflenet",
"squeezenet",
"vgg19",
"zfnet512",
"bvlc_alexnet",
"en2gr",
"quant_resnet50",
]
ONNX_MODELS = [
"resnet50",
"vgg19",
"squeezenet",
"zfnet512",
"densenet121",
"shufflenet",
"inception_v1",
"inception_v2",
"bvlc_alexnet",
"lenet_mnist",
"googlenet_v1_slim",
"googlenet_v4_slim",
"resnet50_tf",
"emotion_ferplus",
"bvlc_reference_rcnn_ilsvrc13",
]
def report_download_progress(chunk_number, chunk_size, file_size):
if file_size != -1:
percent = min(1, (chunk_number * chunk_size) / file_size)
bar = "#" * int(64 * percent)
sys.stdout.write("\r0% |{:<64}| {}%".format(bar, int(percent * 100)))
def download(path, filename, url):
if not os.path.exists(path):
os.mkdir(path)
destFile = os.path.join(path, filename)
if os.path.exists(destFile):
print("{} already exists, skipping ...".format(filename))
else:
print("Downloading {} from {} ...".format(filename, url))
try:
urlretrieve(url, destFile, reporthook=report_download_progress)
except URLError:
print("Error downloading {}!".format(filename))
finally:
# Just a newline.
print()
def download_caffe2_models(outDir, models):
for modelname in models:
print("For model ", modelname)
for filename in ["predict_net.pbtxt", "predict_net.pb", "init_net.pb"]:
path = os.path.join(outDir, modelname)
url = "http://fb-glow-assets.s3.amazonaws.com/models/{}/{}".format(
modelname, filename
)
download(path, filename, url)
if modelname == "en2gr":
for filename in ["dst_dictionary.txt", "src_dictionary.txt"]:
path = os.path.join(outDir, "en2gr")
url = "http://fb-glow-assets.s3.amazonaws.com/models/en2gr/{}".format(
filename
)
download(path, filename, url)
return
def download_onnx_models(outDir, models):
for modelname in models:
if modelname in [
"resnet50",
"vgg19",
"squeezenet",
"zfnet512",
"densenet121",
"shufflenet",
]:
url = "https://s3.amazonaws.com/download.onnx/models/opset_6/{}.tar.gz".format(
modelname
)
filename = "{}.tar.gz".format(modelname)
download(outDir, filename, url)
untar(os.path.join(outDir, filename), outDir)
elif modelname in ["inception_v1", "inception_v2", "bvlc_alexnet"]:
url = "https://s3.amazonaws.com/download.onnx/models/opset_8/{}.tar.gz".format(
modelname
)
filename = "{}.tar.gz".format(modelname)
download(outDir, filename, url)
untar(os.path.join(outDir, filename), outDir)
elif modelname in ["lenet_mnist"]:
url = "http://fb-glow-assets.s3.amazonaws.com/models/{}.tar.gz".format(
modelname
)
filename = "{}.tar.gz".format(modelname)
download(outDir, filename, url)
untar(os.path.join(outDir, filename), outDir)
elif modelname in ["googlenet_v1_slim", "googlenet_v4_slim", "resnet50_tf"]:
url = "http://fb-glow-assets.s3.amazonaws.com/models/{}.onnx".format(
modelname
)
filename = "{}.onnx".format(modelname)
path = os.path.join(outDir, modelname)
download(path, filename, url)
elif modelname == "emotion_ferplus":
url = "https://onnxzoo.blob.core.windows.net/models/opset_8/emotion_ferplus/emotion_ferplus.tar.gz"
filename = "emotion_ferplus.tar.gz"
download(outDir, filename, url)
untar(os.path.join(outDir, filename), outDir, "emotion_ferplus/model.onnx")
elif modelname == "bvlc_reference_rcnn_ilsvrc13":
url = "https://s3.amazonaws.com/download.onnx/models/opset_8/bvlc_reference_rcnn_ilsvrc13.tar.gz"
filename = "bvlc_reference_rcnn_ilsvrc13.tar.gz"
download(outDir, filename, url)
untar(
os.path.join(outDir, filename),
outDir,
"bvlc_reference_rcnn_ilsvrc13/model.onnx",
)
return
def parse():
parser = argparse.ArgumentParser(description="Download datasets for Glow")
parser.add_argument("-d", "--datasets", nargs="+", choices=DATASET_NAMES)
parser.add_argument("-D", "--all-datasets", action="store_true")
parser.add_argument("-c", "--caffe2-models", nargs="+", choices=CAFFE2_MODELS)
parser.add_argument("-C", "--all-caffe2-models", action="store_true")
parser.add_argument("-o", "--onnx-models", nargs="+", choices=ONNX_MODELS)
parser.add_argument("-O", "--all-onnx-models", action="store_true")
parser.add_argument("-P", "--output-directory", default=".")
options = parser.parse_args()
if options.all_datasets:
datasets = DATASET_NAMES
elif options.datasets:
datasets = options.datasets
else:
datasets = []
if options.all_caffe2_models:
caffe2Models = CAFFE2_MODELS
elif options.caffe2_models:
caffe2Models = options.caffe2_models
else:
caffe2Models = []
if options.all_onnx_models:
onnxModels = ONNX_MODELS
elif options.onnx_models:
onnxModels = options.onnx_models
else:
onnxModels = []
return options.output_directory, datasets, caffe2Models, onnxModels
def main():
outDir, datasets, caffe2Models, onnxModels = parse()
if not os.path.exists(outDir):
os.mkdir(outDir)
outDir = os.path.join(".", outDir)
try:
for name in datasets:
dataset = DATASETS[name]
download(outDir, dataset.filename, dataset.url)
dataset.handler(
os.path.join(outDir, dataset.filename),
os.path.join(outDir, dataset.dest_path),
)
if datasets:
print("\n===Done with downloading datasets.\n\n")
if caffe2Models:
download_caffe2_models(outDir, caffe2Models)
print("===Done with downloading caffe2 models.\n\n")
if onnxModels:
download_onnx_models(outDir, onnxModels)
print("===Done with downloading onnx models.\n\n")
except KeyboardInterrupt:
print("Interrupted")
if __name__ == "__main__":
main()
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Load a pre-trained Caffe2 image classifier and run it on an image.
import argparse
import collections
import os
import time
import numpy as np
import skimage.io
from caffe2.python import workspace
print("Required modules imported.")
cmd_line_parser = argparse.ArgumentParser(
description="Run Caffe2 using provided models and inputs."
)
cmd_line_parser.add_argument(
"--image", "-i", required=True, help="Image to be processed by the neural network"
)
cmd_line_parser.add_argument(
"--directory",
"-d",
required=True,
help="Directory containing the network structure "
"<predict_net.pb> and weight <init_net.pb> files. "
"The model name is assumed to be the directory "
"name, and should correspond to a model from the "
"model_props (e.g. 'resnet50', 'lenet_mnist', "
"etc.). If the directory name is not the model "
"name, use --model-name (-m) to specify the name "
"of the supported model to use.",
)
cmd_line_parser.add_argument(
"--model-name", "-m", required=False, help="Name of the model to be used"
)
cmd_line_parser.add_argument(
"--image_mode",
required=False,
help="Image mode; one of '0to1', '0to256', or '128to127'",
)
cmd_line_parser.add_argument("--time", action="store_true")
cmd_line_parser.add_argument("--iterations", type=int, default=1)
args = cmd_line_parser.parse_args()
# 0to256 is the default input
def mode_0to256(x):
return x
def mode_0to1(x):
return x / 255
def mode_128to127(x):
return x - 128
Model = collections.namedtuple(
"Model", "blob_name, image_mode_op, image_size, num_color_channels"
)
model_props = dict(
densenet121=Model("data", mode_0to1, 224, 3),
inception_v1=Model("data", mode_128to127, 224, 3),
inception_v2=Model("data", mode_128to127, 224, 3), # unknown
resnet50=Model("gpu_0/data", mode_0to1, 224, 3),
shufflenet=Model("gpu_0/data", mode_0to1, 224, 3),
squeezenet=Model("data", mode_128to127, 224, 3),
vgg19=Model("data", mode_128to127, 224, 3),
zfnet512=Model("gpu_0/data", mode_0to256, 224, 3),
lenet_mnist=Model("data", mode_0to1, 28, 1),
resnext=Model("data", mode_0to1, 224, 3),
)
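# To run another supported network, an entry would be added above with its input
# blob name, preprocessing function, input image size, and channel count, e.g. a
# hypothetical entry: mymodel=Model("data", mode_0to1, 224, 3).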
MODEL = args.model_name
if MODEL is None:
MODEL = os.path.basename(os.path.normpath(args.directory))
if MODEL not in list(model_props.keys()):
print(
"Model " + MODEL + " is not supported. Specify --model-name (-m) if "
"it is not the base name of the directory containing pb files."
)
exit(1)
MODEL_ROOT = args.directory
IMAGE_LOCATION = args.image
img = skimage.img_as_ubyte(skimage.io.imread(IMAGE_LOCATION)).astype(np.float32)
image_shape = np.array(img).shape
print("Initial img shape: " + str(image_shape))
if img.shape[0] != img.shape[1] or img.shape[0] != model_props[MODEL].image_size:
print("Invalid image dimensions for model.")
exit(2)
num_dims = len(np.array(img).shape)
if num_dims != 3:
img = np.expand_dims(img, axis=num_dims)
img = img[:, :, : model_props[MODEL].num_color_channels]
# Create a zero initiated image.
transposed_image = np.zeros(
(
1,
model_props[MODEL].num_color_channels,
model_props[MODEL].image_size,
model_props[MODEL].image_size,
)
).astype(np.float32)
for w in range(0, model_props[MODEL].image_size):
for h in range(0, model_props[MODEL].image_size):
for c in range(0, model_props[MODEL].num_color_channels):
# WHC -> CWH, RGB -> BGR
transposed_image[0][model_props[MODEL].num_color_channels - c - 1][w][
h
] = model_props[MODEL].image_mode_op(img[w][h][c])
final_image = transposed_image
print("Shape of final_image: " + str(np.array(final_image).shape))
with open(MODEL_ROOT + "/init_net.pb", "rb") as f:
init_net = f.read()
with open(MODEL_ROOT + "/predict_net.pb", "rb") as f:
predict_net = f.read()
workspace.ResetWorkspace()
blob_name = model_props[MODEL].blob_name
workspace.FeedBlob(blob_name, final_image)
print("The blobs in the workspace after FeedBlob: {}".format(workspace.Blobs()))
# Create a predictor using the loaded model.
p = workspace.Predictor(init_net, predict_net)
start = time.time()
for i in range(0, args.iterations):
results = p.run([final_image])
end = time.time()
if args.time:
print(
"Wall time per iteration (s): {:0.4f}".format((end - start) / args.iterations)
)
max_idx = np.argmax(results[0][0])
sum_probability = sum(results[0][0])
print("Max index is {}".format(max_idx))
print(
"Predicted class at index {} with probability {}".format(
max_idx, results[0][0][max_idx]
)
)
print("Number of classes {}".format(len(results[0][0])))
print("Sum of probabilities is {}".format(sum_probability))
|
#!/usr/bin/env python3
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
# imagenet-process : Runs preprocessing of standard imagenet images
# to work with a pretrained model (e.g. resnet)
# through glow
# usage: python3 imagenet-process.py "images/*.JPEG" processed
import PIL.Image
import torchvision
parser = argparse.ArgumentParser(description="imagenet preprocessor")
parser.add_argument("input", metavar="input", help="glob to input images")
parser.add_argument(
"output", metavar="output", default="./", help="directory to put output images"
)
parser.add_argument("--normalize", action="store_true")
args = parser.parse_args()
# create the output dir if necessary
try:
os.makedirs(args.output, exist_ok=True)
except Exception as e:
print(e)
for ifn in glob.glob(args.input):
name, ext = os.path.splitext(ifn)
name = os.path.basename(name)
outputname = os.path.join(args.output, name + ".png")
print("processing", name, "as", outputname)
im = PIL.Image.open(ifn)
im = im.convert("RGB")
resize = torchvision.transforms.Compose(
[torchvision.transforms.Resize(256), torchvision.transforms.CenterCrop(224)]
)
processed_im = resize(im)
if args.normalize:
transform_fn = torchvision.transforms.Compose(
[
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
else:
transform_fn = torchvision.transforms.ToTensor()
processed_im = transform_fn(processed_im)
processed_im = processed_im.unsqueeze(0)
torchvision.utils.save_image(processed_im, outputname)
|
#!/usr/bin/env python3
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
import sys
import tempfile
import pexpect
import PIL.Image as Image
import torchvision
parser = argparse.ArgumentParser(
description="Glow image-classifier Driver for " "TopK ImageNet Calculation"
)
parser.add_argument(
"--validation-images-dir",
metavar="DIR",
required=True,
help="Path to the directory containing the validation set "
"of images. Subdirectories are expected to be organized "
"such that when sorted their index corresponds to their "
"label. For example, if the validation_images_dir contains "
"{'abc/', 'def/', 'ghi/'}, then this should correspond to "
"labels {0, 1, 2} respectively.",
)
parser.add_argument(
"--batch-size",
default=1,
type=int,
metavar="N",
help="Batch size for use with the model. The total number "
"of images in the validation_images_dir should be "
"divisible by the batch size.",
)
parser.add_argument(
"--only-resize-and-save",
default=False,
action="store_true",
help="Use to pre-process images "
"to 224x224. Saves the images to "
"the validation_images_dir/processed/",
)
parser.add_argument(
"--resize-input-images",
default=False,
action="store_true",
help="Resize and center-crop images " "at runtime to 224x224.",
)
parser.add_argument(
"--verbose", default=False, action="store_true", help="Verbose printing."
)
parser.add_argument(
"--image-classifier-cmd",
default="",
help="Command to use for running the image-classifier, "
"including the binary and all of its command lime "
"parameters.",
)
# Opens and returns an image located at @param path using the PIL loader.
def pil_loader(path):
# open path as file to avoid ResourceWarning
# (https://github.com/python-pillow/Pillow/issues/835)
with open(path, "rb") as img:
img = Image.open(img)
return img.convert("RGB")
# Opens and returns an image located at @param path using the accimage loader.
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
# Opens and returns an image located at @param path using either the accimage
# loader or PIL loader.
def default_image_loader(path):
if torchvision.get_image_backend() == "accimage":
return accimage_loader(path)
return pil_loader(path)
def get_sorted_img_subdirs(validation_images_dir):
img_dir_paths = []
for img_dir in os.listdir(validation_images_dir):
dir_path = os.path.join(validation_images_dir, img_dir)
if os.path.isdir(dir_path):
img_dir_paths.append(img_dir)
img_dir_paths.sort()
return img_dir_paths
# @returns two lists of the same length found in directory
# @param validation_images_dir; the first list contains paths to all images
# found, and the second list contains the corresponding label for each image.
def get_img_paths_and_labels(validation_images_dir):
img_subdirs = get_sorted_img_subdirs(validation_images_dir)
# Create lists holding paths to each image to be classified and the label
# for that image.
img_paths = []
img_labels = []
curr_label_idx = 0
for img_subdir in img_subdirs:
img_subdir_path = os.path.join(validation_images_dir, img_subdir)
for img in os.listdir(img_subdir_path):
full_img_path = os.path.join(img_subdir_path, img)
if os.path.isfile(full_img_path):
img_paths.append(full_img_path)
img_labels.append(curr_label_idx)
curr_label_idx = curr_label_idx + 1
return img_paths, img_labels
# Given an image located at @param img_path, transform the image
# and save it to the path @param path_to_new_img.
def resize_and_save_image(img_path, path_to_new_img):
# Load the image.
img = default_image_loader(img_path)
    # Resize and center-crop the image to 224x224.
transform_resize = torchvision.transforms.Compose(
[torchvision.transforms.Resize(256), torchvision.transforms.CenterCrop(224)]
)
resized_img = transform_resize(img)
resized_img.save(path_to_new_img, format="png")
# Used to pre-process an input set of images. Takes a string of a directory
# @param validation_images_dir and saves the cropped subset of the images in a
# subdirectory `processed/`, which must not yet exist.
def save_centered_cropped_dataset(validation_images_dir):
processed_validation_images_dir = os.path.join(validation_images_dir, "processed")
print(
"Saving centered cropped input images: %s" % (processed_validation_images_dir)
)
img_subdirs = get_sorted_img_subdirs(validation_images_dir)
try:
os.makedirs(processed_validation_images_dir)
except OSError:
sys.exit("New validation directory must not exist")
# Iterate over all labels subdirectories, loading, transforming and saving
# all images to the new location.
for img_subdir in img_subdirs:
orig_img_subdir_path = os.path.join(validation_images_dir, img_subdir)
processed_img_subdir_path = os.path.join(
processed_validation_images_dir, img_subdir
)
# Create a new subdirectory for the next label.
try:
os.makedirs(processed_img_subdir_path)
except OSError:
sys.exit("New label subdirectory somehow already existed.")
# Transform and save all images in this label subdirectory.
for orig_img_filename in os.listdir(orig_img_subdir_path):
orig_img_path = os.path.join(orig_img_subdir_path, orig_img_filename)
if os.path.isfile(orig_img_path):
processed_img_path = os.path.join(
processed_img_subdir_path, orig_img_filename
)
resize_and_save_image(orig_img_path, processed_img_path)
# @returns a list of strings (of length equal to the @param batch_size) which
# are paths to images to do inference on. @param img_paths is the set of all
# image paths, @param img_index is the next index to use in @param img_paths,
# and @param tmp_dir_name is the location of where to save the images if
# @param resize_input_images is true. Note that if @param resize_input_images is
# true, the same temporary image names are reused for every batch, so at most
# @param batch_size temporary images will ever exist in @param tmp_dir_name.
def get_curr_img_paths(
img_paths, img_index, batch_size, tmp_dir_name, resize_input_images
):
curr_img_paths = []
for batch_idx in range(batch_size):
img_path = img_paths[img_index + batch_idx]
# If we are resizing the image then we are going to save it to a
# temp location to read in later for inference.
if resize_input_images:
# Save the new image to the tmp directory. Note that these names are
# reused every call to get_curr_img_paths().
path_to_tmp_img = os.path.join(
tmp_dir_name, "tmp" + str(batch_idx) + ".png"
)
resize_and_save_image(img_path, path_to_tmp_img)
img_path = path_to_tmp_img
curr_img_paths.append(img_path)
return curr_img_paths
# Verifies that the @param image_classifier_cmd is well formatted via
# assertions.
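# A hypothetical command satisfying the checks below might look like:
#   image-classifier - -topk=5 -model-input-name=data -m=resnet50 -image-mode=0to1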
def verify_spawn_cmd(image_classifier_cmd):
split_cmd = image_classifier_cmd.split()
if "image-classifier" in split_cmd[0]:
assert "-" in split_cmd, "Streaming mode must be used."
assert "-topk=5" in split_cmd, "-topk=5 must be used."
assert any(
"-model-input-name=" in s for s in split_cmd
), "image-classifier requires -model-input-name to be specified."
assert any(
"-m=" in s for s in split_cmd
), "image-classifier requires -m to be specified"
assert any(
"-image-mode=" in s for s in split_cmd
), "image-classifier requires -image-mode to be specified"
# Prints the Top-1 and Top-5 accuracy given @param total_image_count, @param
# top1_count, and @param top5_count.
def print_topk_accuracy(total_image_count, top1_count, top5_count):
top1_accuracy = float(top1_count) / float(total_image_count)
top5_accuracy = float(top5_count) / float(total_image_count)
print("\tTop-1 accuracy: " + "{0:.4f}".format(top1_accuracy))
print("\tTop-5 accuracy: " + "{0:.4f}".format(top5_accuracy))
# Calculates and prints top-1 and top-5 accuracy for images located in
# subdirectories at @param validation_images_dir, given the command line
# parameters passed in to @param args.
def calculate_top_k(
validation_images_dir,
image_classifier_cmd,
batch_size,
resize_input_images,
verbose,
):
print("Calculating Top-1 and Top-5 accuracy...")
verify_spawn_cmd(image_classifier_cmd)
img_paths, img_labels = get_img_paths_and_labels(validation_images_dir)
total_image_count = len(img_paths)
assert (
total_image_count % batch_size == 0
), "Total number of images must be divisible by batch size"
if verbose:
print("Running image classifier with: " + image_classifier_cmd)
try:
        # Create a temporary directory to store the transformed image we
        # classify (if applicable) and the log of image-classifier output.
tmp_dir_name = tempfile.mkdtemp()
path_to_tmp_log = os.path.join(tmp_dir_name, "log.txt")
with open(path_to_tmp_log, "w") as fout:
classifier_proc = pexpect.spawn(
image_classifier_cmd, logfile=fout, timeout=None
)
if verbose:
print("Temp log located at: " + path_to_tmp_log)
prompt = "Enter image filenames to classify: "
top1_count = 0
top5_count = 0
# Process the images in batches as specified on the command line.
for img_index in range(0, total_image_count, batch_size):
curr_img_paths = get_curr_img_paths(
img_paths, img_index, batch_size, tmp_dir_name, resize_input_images
)
# Expect prompt from the image-classifier for the next image path.
classifier_proc.expect(prompt)
appended_paths = " ".join(curr_img_paths)
assert (
len(appended_paths) <= 1024
), "Line length is too long (max 1024): %r" % len(appended_paths)
# Send the paths to the image-classifier.
classifier_proc.sendline(appended_paths)
for batch_idx in range(batch_size):
# Now we expect the image-classifier's response with the label.
# The first line will include the path to the file, e.g.:
# File: tests/images/imagenet/cat_285.png
classifier_proc.expect(" File: " + curr_img_paths[batch_idx])
# All labels will be formatted like:
# Label-K1: 281 (probability: 0.7190)
top5_labels = []
for _ in range(5):
label_and_prob = classifier_proc.readline()
# Get the label from the line.
label = label_and_prob.split()[1]
top5_labels.append(int(label))
expected_label = img_labels[img_index + batch_idx]
if expected_label == top5_labels[0]:
top1_count += 1
if expected_label in top5_labels:
top5_count += 1
curr_completed_count = img_index + batch_size
if curr_completed_count % 100 == 0:
print(
"Finished image index %d out of %d"
% ((curr_completed_count, total_image_count))
)
if verbose:
print(" Current Top-1/5 accuracy:")
print_topk_accuracy(
curr_completed_count, top1_count, top5_count
)
else:
print("")
finally:
classifier_proc.close(force=True)
# Remove the temp directory we used to save the images and log.
shutil.rmtree(tmp_dir_name)
print(
"\nCompleted running; Final Top-1/5 accuracy across %d images:"
% (total_image_count)
)
print_topk_accuracy(total_image_count, top1_count, top5_count)
def main():
# Parse the recognized command line arguments into args.
args = parser.parse_args()
# Path to the directory containing the validation set of images.
# Subdirectories are expected to be organized such that when sorted their
# index corresponds to their label. For example, if the
# validation_images_dir contains {'abc/', 'def/', 'ghi/'}, then this should
# correspond to labels {0, 1, 2} respectively.
validation_images_dir = os.path.join(args.validation_images_dir)
assert os.path.exists(validation_images_dir), (
"Validation directory does not exist: " + validation_images_dir
)
# This is used solely to pre-process the input image set.
if args.only_resize_and_save:
save_centered_cropped_dataset(validation_images_dir)
return
calculate_top_k(
validation_images_dir,
args.image_classifier_cmd,
args.batch_size,
args.resize_input_images,
args.verbose,
)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import re
import sqlite3
import typing
from typing import Dict, List, Tuple
# Maintaining all nodes
NODES_MAP: Dict[str, "Node"] = {}
# Scope stack
SCOPE_STACK: List[str] = []
# Scope related information
scopeID = 0
class NodeNameAndKind(typing.NamedTuple):
"""A class that represents the named tuple of node name and kind."""
name: str
kind: str
class NodeValue(typing.NamedTuple):
"""A class that represents the named tuple of node and result number."""
node: "Node"
resNo: int
class Node:
"""A class that represents a node in the compute graph
Public attributes:
kindName_: str. The kind name.
name_: str. The node name.
inputs_: List[NodeValue]. Input node values.
users_: Dict['Node', int]. The users of this node.
"""
def __init__(self, kindName: str, name: str):
self.kindName_: str = kindName
self.name_: str = name
self.inputs_: List[NodeValue] = []
self.users_: Dict["Node", int] = {}
def __repr__(self):
return self.name_
def get_kind_name(self) -> str:
"""Gets the kind name."""
return self.kindName_
def get_name(self) -> str:
"""Gets the node name."""
return self.name_
def getNodeNameAndKind(self) -> NodeNameAndKind:
"""Gets the Name+Kind tuple."""
return (self.name_, self.kindName_)
def get_inputs(self) -> List[NodeValue]:
"""Gets the input node."""
return self.inputs_
def get_users(self) -> Dict["Node", int]:
"""Gets the user of this node."""
return self.users_
def add_user(self, u: "Node") -> None:
"""Adds one user of this node. Increment the number of uses of the user by 1."""
if u not in self.users_:
self.users_[u] = 0
self.users_[u] += 1
def remove_user(self, u: "Node") -> None:
"""Removes one use from the given user."""
if u in self.users_:
self.users_[u] -= 1
if self.users_[u] == 0:
del self.users_[u]
def has_no_uses(self) -> bool:
"""Returns True if the node has no uses."""
return len(self.users_) == 0
def set_input(self, nodeVal: NodeValue) -> None:
"""Adds one input node value."""
self.inputs_.append(nodeVal)
def replace_input(self, oldNodeVal: NodeValue, newNodeVal: NodeValue) -> None:
"""Replace one operand with another one.
Args:
oldNode: Node. Old operand node.
oldResNo: int. Old operand result number.
newNode: Node. New operand node.
newResNo: int. New operand result number.
"""
try:
self.inputs_.remove(oldNodeVal)
except ValueError:
print("Removed input value must already exist in the node's input list. ")
self.inputs_.append(newNodeVal)
def set_scope_of_creation(self, creationScopeName: str) -> None:
self.creationScopeName_ = creationScopeName
class DottyPrinter:
"""A class for generating the dotty graph file
Public attributes:
vertices_: List[str]. Vertices in the dotty file.
edges_: List[str]. Edges in the dotty file.
uniqueVertexMap_: Dict[Node, int]. A map for node with their unique index.
    uniqueVertexNo_: int. An incrementing number that represents the number of unique nodes in the graph.
colors_: List[str]. A list for colors for nodes in the dotty graph.
"""
def __init__(self, nodesMap: Dict[NodeNameAndKind, Node]):
self.nodesMap_ = nodesMap
self.vertices_: List[str] = []
self.edges_: List[str] = []
self.uniqueVertexMap_: Dict[Node, int] = {}
self.uniqueVertexNo_: int = 0
self.colors_: List[str] = [
"AliceBlue",
"CadetBlue1",
"Coral",
"DarkOliveGreen1",
"DarkSeaGreen1",
"GhostWhite",
"Khaki1",
"LavenderBlush1",
"LemonChiffon1",
"LightSkyBlue",
"MistyRose1",
"MistyRose2",
"PaleTurquoise2",
"PeachPuff1",
"PowderBlue",
"Salmon",
"Thistle1",
"Thistle3",
"Wheat1",
"Yellow2",
]
def get_unique_vertex_name(self, node: Node) -> str:
"""Get the unique vertex name given a Node object."""
if node not in self.uniqueVertexMap_:
self.uniqueVertexMap_[node] = self.uniqueVertexNo_
self.uniqueVertexNo_ += 1
return f"v{self.uniqueVertexMap_[node]}"
def dump_label(self, node: Node) -> str:
"""Returns the string for the label of the given node."""
labelStr = f"""{{ {{<Inputs>Inputs}}|
{{ {node.get_kind_name()}\lname: {node.get_name()} }}|
{{<Outputs>Outputs}} }}"""
return labelStr
def get_color(self, node: Node) -> str:
"""Returns the color for the given node."""
idx = hash(node.get_kind_name()) % len(self.colors_)
return self.colors_[idx]
def dump_node(self, node: Node) -> None:
"""Generates the dotty information for the given node."""
if not node:
return
nodeStr = f"""{self.get_unique_vertex_name(node)}[\n
\tlabel = \"{self.dump_label(node)}\"\n
\tshape = \"record\"\n
\tstyle=\"filled,rounded\"\n
\tfillcolor={self.get_color(node)}\n
penwidth = 2];\n"""
self.vertices_.append(nodeStr)
def visitNodes(self) -> None:
"""Visits all nodes in nodesMap_ and dump the dotty information for each node."""
for node in self.nodesMap_.values():
self.dump_node(node)
def visitEdges(self) -> None:
"""Visits all edges and dump the dotty information for each edge."""
for node in self.nodesMap_.values():
for nodeInput in node.get_inputs():
i = nodeInput[0]
if i.get_name() not in self.nodesMap_:
print(i.get_kind_name(), i.get_name())
edgeStr = self.get_unique_vertex_name(i) + ":Outputs -> "
edgeStr += self.get_unique_vertex_name(node) + ":Inputs"
self.edges_.append(edgeStr)
def dump_graph(self, dagName: str) -> None:
"""Visits the node graph and generates the dotty information."""
self.visitNodes()
self.visitEdges()
with open(f"{dagName}_dotty.dot", "w") as f:
f.write("digraph DAG {\n\trankdir=TB;\n")
for v in self.vertices_:
f.write(f"{v}\n")
for e in self.edges_:
f.write(f"{e};\n")
f.write("}")
def parse_args() -> Tuple[str, str, List[str]]:
"""Parse the arguments of this script."""
parser = argparse.ArgumentParser(description="Parse compilation log")
parser.add_argument("-f", "--log-file")
parser.add_argument("-d", "--db-file")
parser.add_argument("--dump-phases", nargs="+")
options = parser.parse_args()
if options.dump_phases:
dumpPhases = options.dump_phases
else:
dumpPhases = []
if options.db_file:
dbFile = options.db_file
else:
dbFile = "compilation_log_db.sqlite"
return dbFile, options.log_file, dumpPhases
def dump_dag(dagName: str) -> None:
"""A helper function to dump the DAG."""
dotty = DottyPrinter(NODES_MAP)
dotty.dump_graph(dagName)
def store_transformation_into_DB(
transID: int,
baseNode: Node,
addedNodes: List[Node],
replacedNodes: List[Node],
cursor: sqlite3.Cursor,
fullScopeName: str,
) -> None:
"""A helper function to store nodes transformations into database.
Args:
transID: int. The ID for this stored transformation.
baseNode: Node. The base node that changes its operands.
addedNodes: List[Node]. A list of added nodes in this transformation.
replacedNodes: List[Node]. A list of replaced nodes in this transformation.
cursor: sqlite3.Cursor. Cursor of the sqlite3 database.
fullScopeName: str. The full scope name of this transformation.
"""
cursor.execute(
"""INSERT INTO Log_Transformation VALUES (
?,
'OPERATOR_BASE',
?,
?,
?
)""",
(transID, baseNode.get_name(), baseNode.get_kind_name(), fullScopeName),
)
for an in addedNodes:
cursor.execute(
"""INSERT INTO Log_Transformation VALUES (
?,
'ADD_OPERAND',
?,
?,
?
)""",
(transID, an.get_name(), an.get_kind_name(), fullScopeName),
)
for rn in replacedNodes:
cursor.execute(
"""INSERT INTO Log_Transformation VALUES (
?,
'REMOVE_OPERAND',
?,
?,
?
)""",
(transID, rn.get_name(), rn.get_kind_name(), fullScopeName),
)
def find_all_replaced_nodes(replacedNode: Node) -> List[Node]:
"""Find all nodes that will lose user after the given node is removed.
After one node lost all its uses (e.g. after replaceAllUsesOfWith()), we go through
all of its parents to collect all nodes that will consequently lose all their uses.
Args:
replacedNode: Node. The node that just lost all uses.
"""
replacedNodeList = []
activeDCEList = [replacedNode]
while len(activeDCEList):
DCEnode = activeDCEList.pop()
replacedNodeList.append(DCEnode)
for nv in DCEnode.inputs_:
n = nv.node
if len(n.users_) <= 1:
activeDCEList.append(n)
return replacedNodeList
def init_db(sqliteFile: str) -> sqlite3.Connection:
"""Initialize a sqlite3 database connection."""
if os.path.isfile(sqliteFile):
os.remove(sqliteFile)
# Connect to database file.
conn = sqlite3.connect(sqliteFile)
cursor = conn.cursor()
cursor.execute(
"""CREATE TABLE Log_Transformation (
trans_id INTEGER,
operation_type VARCHAR(200),
node_name VARCHAR(200),
node_kind VARCHAR(200),
full_scope VARCHAR(200)
)"""
)
cursor.execute(
"""CREATE TABLE Log_Scope (
scope_id INTEGER,
scope_str VARCHAR(200),
full_scope_str VARCHAR(200)
)"""
)
cursor.execute(
"""CREATE TABLE Log_Node (
node_name VARCHAR(200),
node_kind VARCHAR(200),
create_scope_id INTEGER,
delete_scope_id INTEGER
)"""
)
cursor.execute(
"""CREATE TABLE Log_Node_Operation (
scope_id INTEGER,
operation VARCHAR(200),
node_name VARCHAR(200),
node_kind VARCHAR(200)
)"""
)
return conn
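# Once populated, the database can be inspected with the standard sqlite3 CLI,
# e.g. (using the default file name from parse_args()):
#   sqlite3 compilation_log_db.sqlite \
#     "SELECT * FROM Log_Transformation WHERE trans_id = 0;"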
def process(log: Dict, dumpPhases: List[str], conn: sqlite3.Connection) -> None:
"""Process all the log lines.
Extract their information and reconstruct the node graph. And dump DAGs at given compilation phases.
Args:
logLines: List[str]. All lines of compilation log.
dumpPhases: List[str]. The phase at which to dump the DAG.
conn: sqlite3.Connection. The connection to a sqlite3 database that will store all the transformation in the compilation lop.
"""
# DB related vars
cursor = conn.cursor()
# Record nodes transformation
replacedNodes: List[Node] = []
addedNodes: List[Node] = []
recordTransformation = False
stopRecordTranformationNames = {
"optimizeFunctionBeforeLowering",
"optimizeFunction",
}
transID = 0
def process_create(event: Dict) -> None:
global scopeID
createdNode = Node(event["kind"], event["create"])
createdNode.set_scope_of_creation(SCOPE_STACK[-1])
NODES_MAP[createdNode.get_name()] = createdNode
cursor.execute(
"""INSERT INTO Log_Node VALUES (
?,
?,
?,
?
)""",
(event["create"], event["kind"], scopeID, -1),
)
cursor.execute(
"""INSERT INTO Log_Node_Operation VALUES (
?,
'CREATE',
?,
?
)""",
(scopeID, event["create"], event["kind"]),
)
if len(event["inputs"]) == 0:
# there's no node input for Splat
assert event["kind"] in (
"Splat",
"Constant",
"Placeholder",
), "This node kind shouldn't have any inputs."
for i in event["inputs"]:
name, resNo = i.split(":", 1)
if name in NODES_MAP:
inputNode = NODES_MAP[name]
createdNode.set_input(NodeValue(inputNode, resNo))
inputNode.add_user(createdNode)
if recordTransformation:
addedNodes.append(createdNode)
def process_delete(event: Dict) -> None:
global scopeID
deletedNode = NODES_MAP[event["delete"]]
for inputNode in deletedNode.inputs_:
i = inputNode[0]
i.remove_user(deletedNode)
del NODES_MAP[deletedNode.get_name()]
cursor.execute(
"""UPDATE Log_Node
SET delete_scope_id=?
WHERE node_name=?
""",
(scopeID, event["delete"]),
)
cursor.execute(
"""INSERT INTO Log_Node_Operation VALUES (
?,
'DELETE',
?,
?
)""",
(scopeID, event["delete"], event["kind"]),
)
    def process_input_change(event: Dict) -> None:
        nonlocal transID, addedNodes, replacedNodes
        changedNode = NODES_MAP[event["input_change"]]
        # Ignore input changes to NONE; they only happen in the module destructor.
if event["after"] == "NONE":
return
prevNodeName, prevResNo = event["before"].split(":", 1)
newNodeName, newResNo = event["after"].split(":", 1)
prevNode = NODES_MAP[prevNodeName]
newNode = NODES_MAP[newNodeName]
# change the input of changedNode
changedNode.replace_input(
NodeValue(prevNode, prevResNo), NodeValue(newNode, newResNo)
)
prevNode.remove_user(changedNode)
newNode.add_user(changedNode)
# Record nodes transformation
if recordTransformation:
if prevNode.has_no_uses():
replacedNodes = find_all_replaced_nodes(prevNode)
            # Use the innermost scope on the stack as the transformation's scope name.
            store_transformation_into_DB(
                transID, changedNode, addedNodes, replacedNodes, cursor, SCOPE_STACK[-1]
            )
transID += 1
addedNodes = []
replacedNodes = []
    def process_scope(scopeName: str, phase: List) -> None:
        global scopeID
        nonlocal recordTransformation
if "::" in scopeName:
scopeName = scopeName.split("::", 1)[-1]
scopeID += 1
if scopeName in dumpPhases:
dump_dag(f"before_{scopeName}_{scopeID}")
if str(scopeID) in dumpPhases:
dump_dag(f"phase_{scopedID}")
SCOPE_STACK.append(scopeName)
# Start recording transformations.
if scopeName in stopRecordTranformationNames and len(SCOPE_STACK) == 2:
recordTransformation = True
# Update scope entrance in database
cursor.execute(
"""INSERT INTO Log_Scope VALUES (
?,
?,
?
)""",
(scopeID, "ENTER " + scopeName, "ENTER " + scopeName),
)
for ev in phase:
if "create" in ev:
process_create(ev)
elif "delete" in ev:
process_delete(ev)
elif "input_change" in ev:
process_input_change(ev)
else:
name, scope = list(ev.items())[0]
process_scope(name, scope)
# Stop recording transformations.
        if scopeName in stopRecordTranformationNames and len(SCOPE_STACK) == 2:
recordTransformation = False
# Update scope exit in database
cursor.execute(
"""INSERT INTO Log_Scope VALUES (
?,
?,
?
)""",
(scopeID, "EXIT " + scopeName, "EXIT " + name),
)
scopeID += 1
if scopeName in dumpPhases:
dump_dag(f"after_{scopeName}_{scopeID}")
if str(scopeID) in dumpPhases:
dump_dag(f"phase_{scopedID}")
SCOPE_STACK.pop()
print("Log Version:", log["version"])
process_scope("MODULE LOADER", log["passes"])
conn.commit()
def main():
dbFile, logFile, dumpPhases = parse_args()
    with open(logFile) as f:
        log = json.load(f)
with init_db(dbFile) as conn:
process(log, dumpPhases, conn)
return
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sqlite3
from typing import Dict, List
# A list of all filtered transformations.
TRANS_LIST: List["Transformation"] = []
# Mapping between added nodes and the transformation that adds these nodes.
NODES_ADDING_MAP: Dict[str, "Transformation"] = {}
class Transformation:
"""A class that represents the nodes transformation, e.g. lower,fold etc.
Public attributes:
addedNodes_: List[str]. Nodes added by this transformation.
removedNodes_: List[str]. Nodes removed by this transformation.
ancestors_: List['Transformation']. The ancestor transformation of current transformation.
scopeName_: str. The scope of current transformation.
transID_: str. The internal transformation ID in the database.
isDirectTrans_ :bool. Whether this transformation directly created/replaced the given nodeName that is passed to this script file.
"""
def __init__(self, transID: str):
self.addedNodes_: List[str] = []
self.removedNodes_: List[str] = []
self.ancestors_: List["Transformation"] = []
self.scopeName_: str = ""
self.transID_: str = transID
self.isDirectTrans_: bool = False
def appendAddedNode(self, nodeName: str) -> None:
"""Append the added nodes of this transformation."""
self.addedNodes_.append(nodeName)
def appendRemovedNode(self, nodeName: str) -> None:
"""Append the removed nodes of this transformation."""
self.removedNodes_.append(nodeName)
def addAncestor(self, ancestor: "Transformation") -> None:
"""Add ancestors of this transformation."""
self.ancestors_.append(ancestor)
def setBase(self, baseName: str) -> None:
"""Set the operator base of this node."""
self.baseNode_ = baseName
class DottyPrinter:
"""A class for generating the dotty graph file"""
def __init__(self, transList: List[Transformation]):
self.transList_ = transList
self.vertices_ = []
self.edges_ = []
def get_color(self, isDirectTrans: bool) -> str:
"""Returns the color for the given node."""
if isDirectTrans:
return "Yellow2"
else:
return "AliceBlue"
def dump_label(self, tran: Transformation) -> str:
"""Returns the string for the label of the given transformation."""
labelStr = (
rf"""{{ {{SCOPE:\l{tran.scopeName_} }}|{{ORIGINAL OPERAND CHAIN:\l\l"""
)
for rstr in tran.removedNodes_:
labelStr += rf"""{rstr}\l\l"""
labelStr += rf"}}| {{NEW OPERAND CHAIN:\l\l"
for astr in tran.addedNodes_:
labelStr += rf"""{astr}\l\l"""
labelStr += rf"}} |{{USER NODE: \l\l {tran.baseNode_}}} }}"
return labelStr
def dump_node(self, tran: Transformation) -> None:
"""Generates the dotty information for the given transformation."""
if not tran:
return
tranStr = f"""v{tran.transID_}[\n
\tlabel = \"{self.dump_label(tran)}\"\n
\tshape = \"record\"\n
\tstyle=\"filled,rounded\"\n
\tfillcolor={self.get_color(tran.isDirectTrans_)}\n
penwidth = 2];\n"""
self.vertices_.append(tranStr)
def visit_nodes(self) -> None:
"""Visits all transformation and dump the dotty information for each transformation."""
for tran in self.transList_:
self.dump_node(tran)
def visit_edges(self) -> None:
"""Visits all edges and dump the dotty information for each edge."""
for tran in self.transList_:
for anc in tran.ancestors_:
edgeStr = f"v{anc.transID_} -> v{tran.transID_}"
self.edges_.append(edgeStr)
def dump_graph(self, dottyFile: str) -> None:
"""Visits the graph and generates the dotty information."""
self.visit_nodes()
self.visit_edges()
with open(f"transformations_{dottyFile}.dot", "w") as f:
print(
f"\nWriting DAG info into dotty file transformations_{dottyFile}.dot ..."
)
f.write("digraph DAG {\n\trankdir=TB;\n")
for v in self.vertices_:
f.write(f"{v}\n")
for e in self.edges_:
f.write(f"{e};\n")
f.write("}")
def dump_dotty_DAG(dottyFile: str) -> None:
"""A helper function to dump the dotty file."""
dotty = DottyPrinter(TRANS_LIST)
dotty.dump_graph(dottyFile)
def init_db(sqliteFile: str) -> sqlite3.Connection:
"""Initialize a sqlite3 database connection."""
assert os.path.isfile(sqliteFile)
# Connect to database file.
return sqlite3.connect(sqliteFile)
def find_all_related_transformation(cursor: sqlite3.Cursor, transIDs: List[str]):
"""A recursive function that find all related transformations given a list of transformation IDs in the database.
Args:
cursor: sqlite3.Cursor. Cursor of current sqlite3 database connection.
transIDs: List[str]. A list of transformation IDs.
"""
transQueryStr = "(" + ", ".join(transIDs) + ")"
cursor.execute(
f"""
SELECT node_name
FROM Log_Transformation
WHERE trans_id in {transQueryStr} and operation_type in ('ADD_OPERAND', 'REMOVE_OPERAND')
GROUP BY node_name
"""
)
rows = cursor.fetchall()
nodesList = ["'" + r[0] + "'" for r in rows]
transQueryStr = "(" + ", ".join(nodesList) + ")"
cursor.execute(
f"""
SELECT trans_id
FROM Log_Transformation
WHERE node_name in {transQueryStr} and operation_type in ('ADD_OPERAND', 'REMOVE_OPERAND')
GROUP BY trans_id
"""
)
rows = cursor.fetchall()
newTransIDs = [str(r[0]) for r in rows]
if sorted(newTransIDs) != sorted(transIDs):
transIDs = find_all_related_transformation(cursor, newTransIDs)
return transIDs
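# A hedged illustration of the fixed-point search above (the IDs and node names
# below are made up): each pass maps the current transformation IDs to the node
# names they touched, then back to every transformation touching those nodes,
# until the ID set stops growing and is returned.
#   pass 1: ['12']        -> nodes {'n1', 'n2'}
#   pass 2: ['12', '15']  -> nodes {'n1', 'n2', 'n3'}
#   pass 3: ['12', '15']  -> unchanged, recursion terminates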
def filter_node_transformation(
nodeName: str, conn: sqlite3.Connection, verbose: bool, dottyFile: str
):
"""Filter out all node transformation that is related to the given node.
Args:
nodeName: str. The node name that is passed to this script.
conn: sqlite3.Connection. A sqlite3 database connection.
verbose: bool. Verbosity of the output.
dottyFile: str. Dotty file name.
"""
cursor = conn.cursor()
cursor.execute(
"""
SELECT trans_id
FROM Log_Transformation
WHERE node_name = ?
GROUP BY trans_id
""",
(nodeName,),
)
rows = cursor.fetchall()
directTransIDs = [str(r[0]) for r in rows]
transIDs = find_all_related_transformation(cursor, directTransIDs)
for tid in transIDs:
cursor.execute(
"""
SELECT *
FROM Log_Transformation
WHERE trans_id = ?
""",
(tid,),
)
rows = cursor.fetchall()
if len(rows):
tran = Transformation(tid)
if tid in directTransIDs:
tran.isDirectTrans_ = True
TRANS_LIST.append(tran)
tran.scopeName_ = rows[0][4].replace("glow::", "").replace("->", r" --\> ")
for r in rows:
opr_type, name, kind = r[1:4]
if opr_type == "ADD_OPERAND":
nodeKindAndName = kind + r" \l" + name
tran.appendAddedNode(nodeKindAndName)
NODES_ADDING_MAP[nodeKindAndName] = tran
elif opr_type == "REMOVE_OPERAND":
nodeKindAndName = kind + r" \l" + name
tran.appendRemovedNode(nodeKindAndName)
if nodeKindAndName in NODES_ADDING_MAP:
tran.addAncestor(NODES_ADDING_MAP[nodeKindAndName])
elif opr_type == "OPERATOR_BASE":
nodeKindAndName = kind + r" \l" + name
tran.setBase(nodeKindAndName)
def processOutDottyName(dottyStyleName):
return dottyStyleName.split(r"\l")[1]
def checkNodeInIt(tran, nodeName):
if nodeName == processOutDottyName(tran.baseNode_):
return True
for rn in tran.removedNodes_:
if nodeName == processOutDottyName(rn):
return True
for an in tran.addedNodes_:
if nodeName == processOutDottyName(an):
return True
return False
for tran in TRANS_LIST:
if not verbose:
if not checkNodeInIt(tran, nodeName):
continue
print(f"\n===============Transformation ID: {tran.transID_} ================")
print("Scope: " + tran.scopeName_.replace(r"\>", ">"))
if nodeName == processOutDottyName(tran.baseNode_):
print("USER NODE: \n(*)" + tran.baseNode_.replace(r"\l", " "))
else:
print("USER NODE: \n" + tran.baseNode_.replace(r"\l", " "))
print("------ Previous operands set:")
for rn in tran.removedNodes_:
if nodeName == processOutDottyName(rn):
print("\t(*)" + rn.replace(r"\l", " "))
else:
print("\t" + rn.replace(r"\l", " "))
print("------ New operands set:")
for an in tran.addedNodes_:
if nodeName == processOutDottyName(an):
print("\t(*)" + an.replace(r"\l", " "))
else:
print("\t" + an.replace(r"\l", " "))
dump_dotty_DAG(dottyFile)
conn.commit()
def stat_list_phases(conn, depth=0):
cursor = conn.cursor()
cursor.execute(
"""
SELECT *
FROM Log_Scope
ORDER BY scope_id
"""
)
rows = cursor.fetchall()
currDepth = 0
print("Phase ID \tPhase Name\n-------------------------\n")
for r in rows:
if "ENTER" in r[1]:
currDepth += 1
if currDepth <= depth or depth == 0:
print(r[0], "\t" * currDepth + r[1])
if "EXIT" in r[1]:
currDepth -= 1
assert currDepth >= 0
def stat_phases_summary(conn: sqlite3.Connection, startPhase: int, endPhase: int):
cursor = conn.cursor()
cursor.execute(
"""
SELECT lng.scope_id, ls.full_scope_str, lng.operation, lng.node_kind, COUNT(node_kind)
FROM Log_Node_Operation lng
LEFT JOIN Log_Scope ls
ON lng.scope_id = ls.scope_id
WHERE lng.scope_id >= ? AND lng.scope_id < ?
GROUP By lng.node_kind
ORDER BY lng.scope_id
""",
(startPhase, endPhase),
)
rows = cursor.fetchall()
print(f"---- Between phase {startPhase} and phase {endPhase}:\n")
summaryStrs = {}
for r in rows:
scope_id, scope, opr, kind, num = r
if scope_id not in summaryStrs:
summaryStrs[scope_id] = f"Phase {scope_id}: \n [{scope}]\n"
summaryStrs[scope_id] += f"\t {opr}D {num} {kind} nodes.\n"
for sid in summaryStrs:
print(summaryStrs[sid])
def stat_phase(conn: sqlite3.Connection, phaseId: int):
cursor = conn.cursor()
cursor.execute(
"""SELECT full_scope_str FROM Log_Scope WHERE scope_id=?""", (phaseId,)
)
rows = cursor.fetchall()
fullScope = rows[0][0]
cursor.execute(
"""
SELECT node_kind, COUNT(node_kind), COUNT(node_kind)*100.0/ (SELECT Count(*) FROM Log_Node WHERE create_scope_id < ? AND delete_scope_id >= ?)
FROM Log_Node
WHERE create_scope_id < ? AND delete_scope_id >= ?
GROUP By node_kind
ORDER BY COUNT(node_kind) DESC
""",
(phaseId, phaseId, phaseId, phaseId),
)
rows = cursor.fetchall()
print(f"=== At phase {phaseId} ({fullScope}): \n")
print(
"\t{:>4s} \t{:>12s} \t\t{:>2s}\n--------------------------------------------------------".format(
"Num", "Kind", "(Percentage)"
)
)
for r in rows:
kind, num, perc = r
print("\t{:>4d} \t{:>12s} \t\t({:>2f}%)".format(num, kind, round(perc, 2)))
def process():
"""Parse args and process this script."""
    parser = argparse.ArgumentParser(description="Filter compilation and optimization.")
parser.add_argument("--db-file")
parser.add_argument("--filter-target")
parser.add_argument("--filter-target-verbose")
parser.add_argument("--dotty-file")
parser.add_argument("--stat-list-phases", type=bool)
parser.add_argument("--stat-list-phases-depth", type=int)
parser.add_argument("--stat-phases-summary", type=int, nargs="+")
parser.add_argument("--stat-phase", type=int)
options = parser.parse_args()
assert options.db_file, "Please specify db file."
with init_db(options.db_file) as conn:
dottyFile = options.dotty_file if options.dotty_file else "dotty"
if options.filter_target:
filter_node_transformation(options.filter_target, conn, False, dottyFile)
if options.filter_target_verbose:
filter_node_transformation(
options.filter_target_verbose, conn, True, dottyFile
)
if options.stat_list_phases:
stat_list_phases(conn)
if options.stat_list_phases_depth:
stat_list_phases(conn, options.stat_list_phases_depth)
if options.stat_phases_summary:
assert len(options.stat_phases_summary) == 2
startPhase, endPhase = options.stat_phases_summary
stat_phases_summary(conn, startPhase, endPhase)
if options.stat_phase:
stat_phase(conn, options.stat_phase)
def main():
process()
if __name__ == "__main__":
main()
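# Example invocations (a hedged sketch; the script and database file names are
# placeholders, only the flags are taken from the argparse options above):
#   python compilation_filter.py --db-file compile_log.sqlite --filter-target my_node
#   python compilation_filter.py --db-file compile_log.sqlite --stat-list-phases true
#   python compilation_filter.py --db-file compile_log.sqlite --stat-phases-summary 3 10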
|
#!/usr/bin/env python
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import yaml
# Command line options.
parser = argparse.ArgumentParser(
usage="Helper script to print the histogram from a Glow YAML profile."
)
parser.add_argument(
"-f", "--file", dest="file", required=True, type=str, help="Profile YAML file path."
)
parser.add_argument(
"-n",
"--name",
dest="name",
required=True,
type=str,
help="Node value name to plot.",
)
parser.add_argument(
"-l",
"--log-scale",
dest="log_scale",
required=False,
default=False,
action="store_true",
help="Plot the histogram on a logarithmic scale (base 10).",
)
args = parser.parse_args()
# Get arguments.
profile = args.file
name = args.name
log_scale = args.log_scale
# Verify profile exists.
if not os.path.isfile(profile):
print('File "%s" not found!' % profile)
exit(1)
# Read YAML data.
print('Reading file "%s" ...' % profile)
data = None
with open(profile, "r") as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as err:
print(err)
# Search YAML entry for node value.
print('Searching node value name "%s" ...' % name)
entry = None
for item in data:
if item["nodeOutputName"] == name:
entry = item
if not entry:
print('Node value "%s" not found!' % name)
exit(1)
# Extract data.
hist_min = entry["min"]
hist_max = entry["max"]
histogram = np.array(entry["histogram"])
num_bins = len(histogram)
bin_width = (hist_max - hist_min) / num_bins
bin_centers = [(hist_min + idx * bin_width + bin_width / 2) for idx in range(num_bins)]
if log_scale:
histogram = np.log10(histogram)
histogram = np.maximum(histogram, np.zeros(histogram.shape))
# Plot histogram.
fig = plt.figure()
plt.plot(bin_centers, histogram)
plt.bar(bin_centers, histogram, bin_width)
fig.suptitle('Histogram for "%s" with range [%f, %f]' % (name, hist_min, hist_max))
plt.grid()
plt.xlabel("Range")
plt.ylabel("Bins [%s]" % ("Log Scale" if log_scale else "Linear Scale"))
plt.show()
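# Example invocation (a hedged sketch; the script and profile file names are
# placeholders, the flags come from the parser above):
#   python print_histogram.py -f profile.yaml -n "conv1:0" --log-scale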
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import torch
import torch.nn
import torch.onnx
from onnx import helper, TensorProto
# GRU enums
GRU_DIR_FORWARD = "forward"
GRU_DIR_REVERSE = "reverse"
GRU_DIR_BIDIRECTIONAL = "bidirectional"
GRU_DIRS = [GRU_DIR_FORWARD, GRU_DIR_REVERSE, GRU_DIR_BIDIRECTIONAL]
# ONNX utility
def make_init(name, type, tensor):
return helper.make_tensor(
name=name,
data_type=type,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
# Function to generate GRU ONNX test model
def gen_gru_onnx_test_model(
model_path,
seq_length,
batch_size,
hidden_size,
input_size,
direction,
has_bias,
has_sequence_lens,
has_initial_h,
linear_before_reset=False,
):
# Validate parameters
assert direction in GRU_DIRS, "ONNX GRU direction invalid!"
assert not has_sequence_lens, "ONNX GRU Variable sequence length not supported"
# Get number of directions
num_directions = 2 if (direction == GRU_DIR_BIDIRECTIONAL) else 1
# Tensor sizes
X_shape = [seq_length, batch_size, input_size]
W_shape = [num_directions, 3 * hidden_size, input_size]
R_shape = [num_directions, 3 * hidden_size, hidden_size]
B_shape = [num_directions, 6 * hidden_size]
sequence_lens_shape = [batch_size]
initial_h_shape = [num_directions, batch_size, hidden_size]
Y_shape = [seq_length, num_directions, batch_size, hidden_size]
# Generate random inputs (weights are assumed concatenated in ONNX format: z,r,h)
np.random.seed(1)
X = np.random.randn(*X_shape)
W = np.random.randn(*W_shape)
R = np.random.randn(*R_shape)
B = np.random.randn(*B_shape) if has_bias else np.zeros(B_shape)
sequence_lens = (
np.random.randint(1, seq_length, batch_size)
if has_sequence_lens
else np.tile(seq_length, batch_size)
)
initial_h = (
np.random.randn(*initial_h_shape)
if has_initial_h
else np.zeros(initial_h_shape)
)
# Function to get all the weight components for the given direction
def get_weights(dir_idx):
Wz = np.reshape(
W[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, input_size]
)
Wr = np.reshape(
W[dir_idx, 1 * hidden_size : 2 * hidden_size, :], [hidden_size, input_size]
)
Wh = np.reshape(
W[dir_idx, 2 * hidden_size : 3 * hidden_size, :], [hidden_size, input_size]
)
Rz = np.reshape(
R[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, hidden_size]
)
Rr = np.reshape(
R[dir_idx, 1 * hidden_size : 2 * hidden_size, :], [hidden_size, hidden_size]
)
Rh = np.reshape(
R[dir_idx, 2 * hidden_size : 3 * hidden_size, :], [hidden_size, hidden_size]
)
bWz = np.reshape(B[dir_idx, 0 * hidden_size : 1 * hidden_size], [hidden_size])
bWr = np.reshape(B[dir_idx, 1 * hidden_size : 2 * hidden_size], [hidden_size])
bWh = np.reshape(B[dir_idx, 2 * hidden_size : 3 * hidden_size], [hidden_size])
bRz = np.reshape(B[dir_idx, 3 * hidden_size : 4 * hidden_size], [hidden_size])
bRr = np.reshape(B[dir_idx, 4 * hidden_size : 5 * hidden_size], [hidden_size])
bRh = np.reshape(B[dir_idx, 5 * hidden_size : 6 * hidden_size], [hidden_size])
return Wz, Wr, Wh, Rz, Rr, Rh, bWz, bWr, bWh, bRz, bRr, bRh
# Function to get PyTorch weights (which are in the r,z,h order)
def get_torch_weights(dir_idx):
Wz, Wr, Wh, Rz, Rr, Rh, bWz, bWr, bWh, bRz, bRr, bRh = get_weights(dir_idx)
W_torch = np.concatenate((Wr, Wz, Wh), 0)
R_torch = np.concatenate((Rr, Rz, Rh), 0)
bW_torch = np.concatenate((bWr, bWz, bWh), 0)
bR_torch = np.concatenate((bRr, bRz, bRh), 0)
return (W_torch, R_torch, bW_torch, bR_torch)
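    # Hedged note on the reordering above: the ONNX blocks are stacked (z, r, h)
    # along the first axis (see W_shape/R_shape), while torch.nn.GRU stores its
    # gates in (r, z, h) order, hence the concatenate order (Wr, Wz, Wh).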
# ----------------------------------------- COMPUTE pyTORCH REFERENCE ----------------------------------------------
# Compute reference using Pytorch. Pytorch GRU has only forward/bidirectional so we will do the reverse GRU using
# a Pytorch forward GRU.
gru = torch.nn.GRU(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=True,
batch_first=False,
dropout=0,
bidirectional=(direction == GRU_DIR_BIDIRECTIONAL),
)
# Get GRU state dictionary
gru_state_dict = gru.state_dict()
# Assign forward weights
forwardEnabled = direction in [GRU_DIR_FORWARD, GRU_DIR_BIDIRECTIONAL]
if forwardEnabled:
forward_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(forward_dir_idx)
gru_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
gru_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
gru_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
gru_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
# Assign reverse weights
reverseEnabled = direction in [GRU_DIR_REVERSE, GRU_DIR_BIDIRECTIONAL]
if reverseEnabled:
if direction == GRU_DIR_REVERSE:
reverse_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
gru_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
gru_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
gru_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
gru_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
else:
reverse_dir_idx = 1
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
gru_state_dict["weight_ih_l0_reverse"] = torch.tensor(
W_torch, dtype=torch.float32
)
gru_state_dict["weight_hh_l0_reverse"] = torch.tensor(
R_torch, dtype=torch.float32
)
gru_state_dict["bias_ih_l0_reverse"] = torch.tensor(
bW_torch, dtype=torch.float32
)
gru_state_dict["bias_hh_l0_reverse"] = torch.tensor(
bR_torch, dtype=torch.float32
)
# Set GRU state dictionary
gru.load_state_dict(gru_state_dict, strict=True)
# Perform inference
X_torch = torch.tensor(X, dtype=torch.float32)
initial_h_torch = torch.tensor(initial_h, dtype=torch.float32)
if direction == GRU_DIR_REVERSE:
Y, next_h = gru(X_torch.flip([0]), initial_h_torch)
Y = Y.flip([0])
else:
Y, next_h = gru(X_torch, initial_h_torch)
# Reshape output to ONNX format [seq_length, num_directions, batch_size, hidden_size]
Y_ref = Y.detach().numpy()
Y_ref = np.reshape(Y_ref, [seq_length, batch_size, num_directions, hidden_size])
Y_ref = np.transpose(Y_ref, [0, 2, 1, 3])
# Reshape states to ONNX format
Y_h_ref = next_h.detach().numpy()
# --------------------------------------- COMPUTE PYTHON-NUMPY REFERENCE -------------------------------------------
# Create X slices
Xslices = list()
for t in range(seq_length):
Xslices.append(np.reshape(X[t, :, :], [batch_size, input_size]))
# Function to compute one GRU cell
def compute_gru(forward):
dir_idx = 0 if forward else (0 if direction == GRU_DIR_REVERSE else 1)
Wz, Wr, Wh, Rz, Rr, Rh, bWz, bWr, bWh, bRz, bRr, bRh = get_weights(dir_idx)
def f(x):
return 1 / (1 + np.exp(-x))
def g(x):
return np.tanh(x)
def mm(x, w):
return np.matmul(x, w.transpose())
Ht = np.reshape(initial_h[dir_idx, :, :], [batch_size, hidden_size])
Yslices = list()
for t in range(seq_length):
xt = Xslices[t] if forward else Xslices[seq_length - 1 - t]
zt = f(mm(xt, Wz) + bWz + mm(Ht, Rz) + bRz)
rt = f(mm(xt, Wr) + bWr + mm(Ht, Rr) + bRr)
if linear_before_reset:
htild = g(mm(xt, Wh) + bWh + rt * (mm(Ht, Rh) + bRh))
else:
htild = g(mm(xt, Wh) + bWh + mm(rt * Ht, Rh) + bRh)
Ht = (1 - zt) * htild + zt * Ht
Yslices.append(Ht)
return Yslices, Ht
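    # The cell above follows the standard GRU recurrence (a sketch of the math,
    # matching the code: f = sigmoid, g = tanh, mm(x, w) = x @ w.T):
    #   z_t  = f(x_t Wz^T + bWz + H_{t-1} Rz^T + bRz)
    #   r_t  = f(x_t Wr^T + bWr + H_{t-1} Rr^T + bRr)
    #   h~_t = g(x_t Wh^T + bWh + r_t * (H_{t-1} Rh^T + bRh))   # linear_before_reset
    #   h~_t = g(x_t Wh^T + bWh + (r_t * H_{t-1}) Rh^T + bRh)   # default
    #   H_t  = (1 - z_t) * h~_t + z_t * H_{t-1}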
Yslices = list()
Hslices = list()
# Compute forward GRU
forwardYslices = list()
if forwardEnabled:
Yt, Ht = compute_gru(True)
forwardYslices += Yt
Hslices.append(Ht)
# Compute reverse GRU
reverseYslices = list()
if reverseEnabled:
Yt, Ht = compute_gru(False)
reverseYslices += Yt
Hslices.append(Ht)
# Concatenate slices
for t in range(seq_length):
if forwardEnabled:
Yslices.append(forwardYslices[t])
if reverseEnabled:
Yslices.append(reverseYslices[seq_length - 1 - t])
Y_ref_np = np.concatenate(Yslices, 0).reshape(
[seq_length, num_directions, batch_size, hidden_size]
)
Y_h_ref_np = np.concatenate(Hslices, 0).reshape(
[num_directions, batch_size, hidden_size]
)
    # Use the numpy implementation when linear_before_reset is False; otherwise
    # check that the PyTorch and numpy references agree and keep the PyTorch one.
if linear_before_reset is False:
Y_ref = Y_ref_np
Y_h_ref = Y_h_ref_np
else:
assert (
np.max(np.abs(Y_ref - Y_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy GRU implementation"
assert (
np.max(np.abs(Y_h_ref - Y_h_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy GRU implementation"
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# Node inputs
node_inputs = [
"X",
"W",
"R",
"B" if has_bias else "",
"",
"initial_h" if has_initial_h else "",
]
# Node outputs
node_outputs = ["Y", "Y_h"]
# GRU node definition
gru_node_def = onnx.helper.make_node(
"GRU",
name="gru",
inputs=node_inputs,
outputs=node_outputs,
hidden_size=hidden_size,
direction=direction,
linear_before_reset=linear_before_reset,
)
# Error node definition
err_node_def = onnx.helper.make_node(
"Sub", name="error", inputs=["Y", "Y_ref"], outputs=["Y_err"]
)
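    # Design note: the exported graph subtracts the precomputed reference from the
    # GRU output, so a correct importer/backend should produce a near-zero "Y_err".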
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# GRU inputs
graph_input.append(helper.make_tensor_value_info("X", TensorProto.FLOAT, X_shape))
graph_input.append(helper.make_tensor_value_info("W", TensorProto.FLOAT, W_shape))
graph_input.append(helper.make_tensor_value_info("R", TensorProto.FLOAT, R_shape))
if has_bias:
graph_input.append(
helper.make_tensor_value_info("B", TensorProto.FLOAT, B_shape)
)
if has_sequence_lens:
graph_input.append(
helper.make_tensor_value_info(
"sequence_lens", TensorProto.INT32, sequence_lens_shape
)
)
if has_initial_h:
graph_input.append(
helper.make_tensor_value_info(
"initial_h", TensorProto.FLOAT, initial_h_shape
)
)
# Reference input
graph_input.append(
helper.make_tensor_value_info("Y_ref", TensorProto.FLOAT, Y_shape)
)
# GRU initializers
graph_init.append(make_init("X", TensorProto.FLOAT, X))
graph_init.append(make_init("W", TensorProto.FLOAT, W))
graph_init.append(make_init("R", TensorProto.FLOAT, R))
if has_bias:
graph_init.append(make_init("B", TensorProto.FLOAT, B))
if has_sequence_lens:
graph_init.append(make_init("sequence_lens", TensorProto.INT32, sequence_lens))
if has_initial_h:
graph_init.append(make_init("initial_h", TensorProto.FLOAT, initial_h))
# Reference initializer
graph_init.append(make_init("Y_ref", TensorProto.FLOAT, Y_ref))
# Graph outputs
graph_output.append(
helper.make_tensor_value_info("Y_err", TensorProto.FLOAT, Y_shape)
)
# Define graph (GraphProto)
graph_name = "gru_test"
graph_def = helper.make_graph(
[gru_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto)
model_def = helper.make_model(graph_def, producer_name="onnx-gru")
# Check model
onnx.checker.check_model(model_def)
# Print model
with open(model_path, "w") as f:
f.write(str(model_def))
# Forward GRU
gen_gru_onnx_test_model(
model_path="gruForward.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
linear_before_reset=False,
)
# Reverse GRU
gen_gru_onnx_test_model(
model_path="gruReverse.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="reverse",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
linear_before_reset=False,
)
# Bidirectional GRU
gen_gru_onnx_test_model(
model_path="gruBidirectional.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="bidirectional",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
linear_before_reset=False,
)
# Forward no bias GRU
gen_gru_onnx_test_model(
model_path="gruForwardNoBias.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=False,
has_sequence_lens=False,
has_initial_h=True,
linear_before_reset=False,
)
# Forward no state GRU
gen_gru_onnx_test_model(
model_path="gruForwardNoState.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=False,
linear_before_reset=False,
)
# Forward with linear before reset
gen_gru_onnx_test_model(
model_path="gruForwardLinearBeforeReset.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
linear_before_reset=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script that generates Caffe2 models.
# The generated model will be used for Caffe2 importer unittest:
# ./tests/unittests/caffe2ImporterTest.cpp
# Run $>python gen_caffe2_model.py to get the model files.
from caffe2.proto import caffe2_pb2
from caffe2.python import utils
from google.protobuf import text_format
# Define a weights network
weights = caffe2_pb2.NetDef()
weights.name = "init"
op = caffe2_pb2.OperatorDef()
op.type = "GivenTensorFill"
op.output.extend(["conv_w"])
op.arg.extend([utils.MakeArgument("shape", [1, 1, 2, 2])])
op.arg.extend([utils.MakeArgument("values", [1.0 for i in range(4)])])
weights.op.extend([op])
op = caffe2_pb2.OperatorDef()
op.type = "GivenTensorFill"
op.output.extend(["conv_b"])
op.arg.extend([utils.MakeArgument("shape", [1])])
op.arg.extend([utils.MakeArgument("values", [2.0 for i in range(1)])])
weights.op.extend([op])
weights.external_output.extend(op.output)
# Define an inference net
net = caffe2_pb2.NetDef()
net.name = "predict"
op = caffe2_pb2.OperatorDef()
op.type = "Conv"
op.input.extend(["data"])
op.input.extend(["conv_w"])
op.input.extend(["conv_b"])
op.arg.add().CopyFrom(utils.MakeArgument("kernel", 2))
op.arg.add().CopyFrom(utils.MakeArgument("stride", 1))
op.arg.add().CopyFrom(utils.MakeArgument("group", 1))
op.arg.add().CopyFrom(utils.MakeArgument("pad", 1))
op.output.extend(["conv_out"])
net.op.extend([op])
net.external_output.extend(op.output)
# Generate model in text format.
with open("predict_net.pbtxt", "w") as f:
f.write(text_format.MessageToString(net))
with open("init_net.pbtxt", "w") as f:
f.write(text_format.MessageToString(weights))
# Generate model in binary format.
with open("predict_net.pb", "wb") as f:
f.write(net.SerializeToString())
with open("init_net.pb", "wb") as f:
f.write(weights.SerializeToString())
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script that generates the TensorFlowLite models used
# for testing the Glow importer. The models will be generated in a local
# folder here 'tflite_models'. In order for the models to be used for unit
# testing the files must be copied in the folder:
# 'glow\tests\models\tfliteModels'
# To generate the models you need to run this script without arguments:
# python gen_tflite_models.py
# Python requirements: Python 3.6
# Python package requirements:
# TensorFlow 2.1.0
# Keras 2.3.1
# Numpy 1.16.2
# shutil, os (Python standard library) and other dependencies
import os
import shutil
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as keras_backend
import tensorflow.keras.layers as layers
from tensorflow.keras.models import Model
from tensorflow.python.tools import freeze_graph
# ----------------------------------------------------------------------------------------------------------------------
# UTILS
# ----------------------------------------------------------------------------------------------------------------------
# Temporary folder path.
TEMP_DIR = os.path.join(os.path.dirname(__file__), "temp")
# Output model folder.
OUT_DIR = os.path.join(os.path.dirname(__file__), "tflite_models")
# Clean temporary directory.
def clean_dir(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
os.mkdir(dir_path)
# Remove temporary directory.
def rm_dir(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
# Function to save a model in TensorFlowLite format.
def save_model(model, filename):
# Print status.
print('Saving model "%s" ...' % filename)
# Clean temporary folder.
clean_dir(TEMP_DIR)
# Get model inputs.
model_inputs_dict = dict()
model_inputs_array = []
for idx in range(len(model.inputs)):
model_inputs_dict["input_%d" % idx] = model.inputs[idx]
model_inputs_array.append(model.inputs[idx].op.name)
# Get model outputs.
model_outputs_dict = dict()
model_outputs_array = []
for idx in range(len(model.outputs)):
if idx == 0:
output_name = model.outputs[idx].op.name
else:
output_name = model.outputs[idx].name
model_outputs_dict[output_name] = model.outputs[idx]
model_outputs_array.append(output_name)
# Save TensorFlow checkpoint.
tf.saved_model.simple_save(
keras_backend.get_session(),
os.path.join(TEMP_DIR, "checkpoint"),
inputs=model_inputs_dict,
outputs=model_outputs_dict,
)
# Freeze TensorFlow graph.
freeze_graph.freeze_graph(
None,
None,
None,
None,
model.outputs[0].op.name,
None,
None,
os.path.join(TEMP_DIR, "model.pb"),
False,
"",
input_saved_model_dir=os.path.join(TEMP_DIR, "checkpoint"),
)
# Convert and save TensorFlowLite model.
converter = tf.lite.TFLiteConverter.from_frozen_graph(
os.path.join(TEMP_DIR, "model.pb"),
input_arrays=model_inputs_array,
output_arrays=model_outputs_array,
)
converter.dump_graphviz_video = False
converter.allow_custom_ops = True
tflite_model = converter.convert()
model_filename = os.path.join(OUT_DIR, filename)
if not model_filename.endswith(".tflite"):
model_filename += ".tflite"
    with open(model_filename, "wb") as fh:
        fh.write(tflite_model)
# Clean temporary folder.
rm_dir(TEMP_DIR)
# Function to save a tensor in binary format. In order for the GIT system
# to correctly recognize these files as binary we add a leading zero byte
# ('\x00') at the start of the file.
def save_tensor(tensor, filename):
byte_array = b"\x00" + tensor.tobytes(order="C")
with open(os.path.join(OUT_DIR, filename), "wb") as fh:
fh.write(byte_array)
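# Hedged read-back sketch for tensors written by save_tensor (the consumer must
# know the dtype and shape; "path" is a placeholder name):
#   raw = open(path, "rb").read()[1:]              # drop the leading '\x00' marker
#   tensor = np.frombuffer(raw, dtype=np.float32)  # then reshape as needed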
# Create output directory.
clean_dir(OUT_DIR)
# ----------------------------------------------------------------------------------------------------------------------
# Strided Slice
# ----------------------------------------------------------------------------------------------------------------------
def gen_strided_slice(
name,
input_shape,
begin,
end,
strides,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
):
# Create model.
inp = layers.Input(
name="input", batch_size=input_shape[0], shape=input_shape[1:], dtype=tf.float32
)
out = tf.strided_slice(
inp,
begin,
end,
strides,
begin_mask,
end_mask,
ellipsis_mask,
new_axis_mask,
shrink_axis_mask,
)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict([inp_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
# Basic test. Default strides are 1.
gen_strided_slice(
name="strided_slice_test0",
input_shape=(1, 2, 3),
begin=(0, 0, 0),
end=(1, 1, 1),
strides=(1, 1, 1),
)
# Test begin_mask. Ignore "begin" value for 2nd dimension and use value for maximum range.
gen_strided_slice(
name="strided_slice_test1",
input_shape=(1, 3, 4),
begin=(0, 2, 3),
end=(1, 3, 4),
strides=(1, 1, 1),
begin_mask=2,
)
# Test end_mask. Ignore "end" value for 2nd dimension and use value for maximum range.
gen_strided_slice(
name="strided_slice_test2",
input_shape=(1, 3, 4),
begin=(0, 0, 0),
end=(1, 1, 1),
strides=(1, 1, 1),
end_mask=2,
)
# Test begin_mask & end_mask. Ignore "begin"/"end" value for 2nd dimension and use values for maximum range.
gen_strided_slice(
name="strided_slice_test3",
input_shape=(1, 3, 4),
begin=(0, 1, 1),
end=(1, 2, 2),
strides=(1, 1, 1),
begin_mask=2,
end_mask=2,
)
# Test ellipsis_mask. Test access pattern [0, ..., 0] where the ellipsis position is marked as 0's for begin/end.
gen_strided_slice(
name="strided_slice_test4",
input_shape=(1, 3, 4),
begin=(0, 0, 0),
end=(1, 0, 1),
strides=(1, 1, 1),
begin_mask=0,
end_mask=0,
ellipsis_mask=2,
)
# Test new_axis_mask.
gen_strided_slice(
name="strided_slice_test5",
input_shape=(1, 3, 4),
begin=(0, 0, 0),
end=(1, 2, 3),
strides=(1, 1, 1),
new_axis_mask=2,
)
# Test shrink_axis_mask.
gen_strided_slice(
name="strided_slice_test6",
input_shape=(1, 3, 4),
begin=(0, 0, 0),
end=(1, 2, 3),
strides=(1, 1, 1),
shrink_axis_mask=2,
)
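# Note on the mask arguments used above (an added clarification): each mask is a
# bit mask over dimensions, so the value 2 (binary 010) selects the 2nd dimension
# in every test case.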
# ----------------------------------------------------------------------------------------------------------------------
# Select
# ----------------------------------------------------------------------------------------------------------------------
def gen_select_test(name, input_shape):
# Create model.
cond = layers.Input(
name="cond", batch_size=input_shape[0], shape=input_shape[1:], dtype=tf.bool
)
lhs = layers.Input(
name="lhs", batch_size=input_shape[0], shape=input_shape[1:], dtype=tf.float32
)
rhs = layers.Input(
name="rhs", batch_size=input_shape[0], shape=input_shape[1:], dtype=tf.float32
)
out = tf.where(cond, x=lhs, y=rhs)
model = Model(inputs=[cond, lhs, rhs], outputs=[out])
# Create data.
np.random.seed(0)
cond_tensor = np.random.randint(low=0, high=2, size=input_shape).astype(np.bool)
lhs_tensor = np.random.rand(*input_shape).astype(np.float32)
rhs_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict([cond_tensor, lhs_tensor, rhs_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(cond_tensor, name + ".inp0")
save_tensor(lhs_tensor, name + ".inp1")
save_tensor(rhs_tensor, name + ".inp2")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_select_test(name="select", input_shape=(1, 2, 3))
# ----------------------------------------------------------------------------------------------------------------------
# LogSoftmax
# ----------------------------------------------------------------------------------------------------------------------
def gen_log_softmax_test(name, input_shape, axis):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.nn.log_softmax(inp, axis=axis)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_log_softmax_test(name="log_softmax", input_shape=(1, 3), axis=-1)
# ----------------------------------------------------------------------------------------------------------------------
# GATHER ND
# ----------------------------------------------------------------------------------------------------------------------
def gen_gather_nd_test(name, data_shape, indices_shape):
# Create model.
data = layers.Input(
name="data", batch_size=data_shape[0], shape=data_shape[1:], dtype=tf.float32
)
indices = layers.Input(
name="indices",
batch_size=indices_shape[0],
shape=indices_shape[1:],
dtype=tf.int32,
)
out = tf.gather_nd(data, indices, batch_dims=0)
model = Model(inputs=[data, indices], outputs=[out])
# Create data.
np.random.seed(0)
data_tensor = np.random.rand(*data_shape).astype(np.float32)
indices_tensor = np.random.randint(
low=0, high=data_shape, size=indices_shape
).astype(np.int32)
out_tensor = model.predict([data_tensor, indices_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(data_tensor, name + ".inp0")
save_tensor(indices_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_gather_nd_test(name="gather_nd", data_shape=(2, 3, 4), indices_shape=(2, 3))
# ----------------------------------------------------------------------------------------------------------------------
# GATHER
# ----------------------------------------------------------------------------------------------------------------------
def gen_gather_test(name, data_shape, indices_shape, axis):
# Create model.
data = layers.Input(
name="data", batch_size=data_shape[0], shape=data_shape[1:], dtype=tf.float32
)
indices = layers.Input(
name="indices",
batch_size=indices_shape[0],
shape=indices_shape[1:],
dtype=tf.int32,
)
out = tf.gather(data, indices, axis=axis, batch_dims=0)
model = Model(inputs=[data, indices], outputs=[out])
# Create data.
np.random.seed(0)
data_tensor = np.random.rand(*data_shape).astype(np.float32)
indices_tensor = np.random.randint(data_shape[axis], size=indices_shape).astype(
np.int32
)
out_tensor = model.predict([data_tensor, indices_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(data_tensor, name + ".inp0")
save_tensor(indices_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_gather_test(
name="gather_axis0", data_shape=(1, 2, 3, 4), indices_shape=(1, 5), axis=0
)
gen_gather_test(
name="gather_axis1", data_shape=(1, 2, 3, 4), indices_shape=(1, 5), axis=1
)
# ----------------------------------------------------------------------------------------------------------------------
# CAST
# ----------------------------------------------------------------------------------------------------------------------
def gen_cast_test(name, input_shape, dtype):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.cast(inp, dtype=dtype)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_cast_test(name="cast_f32_to_int32", input_shape=(1, 1, 2, 12), dtype=tf.int32)
# ----------------------------------------------------------------------------------------------------------------------
# Logical operators
# ----------------------------------------------------------------------------------------------------------------------
def gen_unary_logical_operator_test(name, type):
# Create model.
inp = layers.Input(name="input1", batch_size=1, shape=2, dtype=tf.bool)
if type == "not":
out = tf.math.logical_not(inp)
else:
        print('Logical unary operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp], outputs=[out])
# Create data.
inp_tensor = np.array([[False, True]]).astype(bool)
out_tensor = model.predict([inp_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_unary_logical_operator_test(name="logical_not", type="not")
def gen_binary_logical_operator_test(name, type):
# Create model.
inp1 = layers.Input(name="input1", batch_size=1, shape=4, dtype=tf.bool)
inp2 = layers.Input(name="input2", batch_size=1, shape=4, dtype=tf.bool)
if type == "and":
out = tf.math.logical_and(inp1, inp2)
elif type == "or":
out = tf.math.logical_or(inp1, inp2)
else:
        print('Logical binary operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
inp1_tensor = np.array([[False, True, False, True]]).astype(bool)
inp2_tensor = np.array([[False, False, True, True]]).astype(bool)
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_binary_logical_operator_test(name="logical_and", type="and")
gen_binary_logical_operator_test(name="logical_or", type="or")
def gen_cmp_operator_test(name, type):
# Create model.
inp1 = layers.Input(name="input1", batch_size=1, shape=3)
inp2 = layers.Input(name="input2", batch_size=1, shape=3)
if type == "equal":
out = tf.math.equal(inp1, inp2)
elif type == "not_equal":
out = tf.math.not_equal(inp1, inp2)
elif type == "less":
out = tf.math.less(inp1, inp2)
elif type == "less_equal":
out = tf.math.less_equal(inp1, inp2)
elif type == "greater":
out = tf.math.greater(inp1, inp2)
elif type == "greater_equal":
out = tf.math.greater_equal(inp1, inp2)
else:
        print('Logical operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
inp1_tensor = np.array([[1.0, 1.0, -1.0]]).astype(np.float32)
inp2_tensor = np.array([[1.0, -1.0, 1.0]]).astype(np.float32)
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_cmp_operator_test(name="equal", type="equal")
gen_cmp_operator_test(name="not_equal", type="not_equal")
gen_cmp_operator_test(name="less", type="less")
gen_cmp_operator_test(name="less_equal", type="less_equal")
gen_cmp_operator_test(name="greater", type="greater")
gen_cmp_operator_test(name="greater_equal", type="greater_equal")
# ----------------------------------------------------------------------------------------------------------------------
# Unary operators
# ----------------------------------------------------------------------------------------------------------------------
def gen_unary_operator_test(name, type, input_shape):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
if type == "relu":
out = tf.nn.relu(inp)
elif type == "relu_n1to1":
out = tf.clip_by_value(inp, -1.0, 1.0)
elif type == "relu6":
out = tf.nn.relu6(inp)
elif type == "sigmoid":
out = tf.nn.sigmoid(inp)
elif type == "exp":
out = tf.exp(inp)
elif type == "log":
out = tf.math.log(inp)
elif type == "tanh":
out = tf.nn.tanh(inp)
elif type == "leaky_relu":
out = tf.nn.leaky_relu(inp, alpha=0.1)
elif type == "prelu":
out = layers.PReLU(alpha_initializer="random_uniform")(inp)
elif type == "square":
out = tf.math.square(inp)
elif type == "abs":
out = tf.math.abs(inp)
elif type == "neg":
out = tf.math.negative(inp)
elif type == "sqrt":
out = tf.math.sqrt(inp)
elif type == "rsqrt":
out = tf.math.rsqrt(inp)
elif type == "sin":
out = tf.math.sin(inp)
elif type == "cos":
out = tf.math.cos(inp)
elif type == "ceil":
out = tf.math.ceil(inp)
elif type == "round":
out = tf.math.round(inp)
elif type == "floor":
out = tf.math.floor(inp)
else:
        print('Unary operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.randn(*input_shape).astype(np.float32)
if type in ["log", "sqrt", "rsqrt"]:
inp_tensor = np.abs(inp_tensor) + 1
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_unary_operator_test(name="relu", type="relu", input_shape=(1, 10))
gen_unary_operator_test(name="relu_n1to1", type="relu_n1to1", input_shape=(1, 10))
gen_unary_operator_test(name="relu6", type="relu6", input_shape=(1, 10))
gen_unary_operator_test(name="sigmoid", type="sigmoid", input_shape=(1, 10))
gen_unary_operator_test(name="tanh", type="tanh", input_shape=(1, 10))
gen_unary_operator_test(name="exp", type="exp", input_shape=(1, 10))
gen_unary_operator_test(name="log", type="log", input_shape=(1, 10))
gen_unary_operator_test(name="leaky_relu", type="leaky_relu", input_shape=(1, 10))
gen_unary_operator_test(name="prelu", type="prelu", input_shape=(1, 10))
gen_unary_operator_test(name="square", type="square", input_shape=(1, 10))
gen_unary_operator_test(name="abs", type="abs", input_shape=(1, 10))
gen_unary_operator_test(name="neg", type="neg", input_shape=(1, 10))
gen_unary_operator_test(name="sqrt", type="sqrt", input_shape=(1, 10))
gen_unary_operator_test(name="rsqrt", type="rsqrt", input_shape=(1, 10))
gen_unary_operator_test(name="sin", type="sin", input_shape=(1, 10))
gen_unary_operator_test(name="cos", type="cos", input_shape=(1, 10))
gen_unary_operator_test(name="ceil", type="ceil", input_shape=(1, 10))
gen_unary_operator_test(name="round", type="round", input_shape=(1, 10))
gen_unary_operator_test(name="floor", type="floor", input_shape=(1, 10))
# ----------------------------------------------------------------------------------------------------------------------
# Binary operators
# ----------------------------------------------------------------------------------------------------------------------
def gen_binary_operator_test(name, type, input_shape):
# Create model.
inp1 = layers.Input(name="input1", batch_size=input_shape[0], shape=input_shape[1:])
inp2 = layers.Input(name="input2", batch_size=input_shape[0], shape=input_shape[1:])
if type == "add":
out = tf.math.add(inp1, inp2)
elif type == "mul":
out = tf.math.multiply(inp1, inp2)
elif type == "sub":
out = tf.math.subtract(inp1, inp2)
elif type == "div":
out = tf.math.divide(inp1, inp2)
elif type == "pow":
out = tf.math.pow(inp1, inp2)
elif type == "max":
out = tf.math.maximum(inp1, inp2)
elif type == "min":
out = tf.math.minimum(inp1, inp2)
else:
        print('Binary operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
np.random.seed(0)
inp1_tensor = np.random.rand(*input_shape).astype(np.float32)
inp2_tensor = np.random.rand(*input_shape).astype(np.float32)
if type == "pow":
inp1_tensor = np.abs(inp1_tensor) + 1
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_binary_operator_test(name="add", type="add", input_shape=(1, 10))
gen_binary_operator_test(name="mul", type="mul", input_shape=(1, 10))
gen_binary_operator_test(name="sub", type="sub", input_shape=(1, 10))
gen_binary_operator_test(name="div", type="div", input_shape=(1, 10))
gen_binary_operator_test(name="pow", type="pow", input_shape=(1, 10))
gen_binary_operator_test(name="max", type="max", input_shape=(1, 10))
gen_binary_operator_test(name="min", type="min", input_shape=(1, 10))
# ----------------------------------------------------------------------------------------------------------------------
# Binary broadcasted operators
# ----------------------------------------------------------------------------------------------------------------------
def gen_binary_broadcast_operator_test(name, type, shape_1, shape_2):
# Create model.
inp1 = layers.Input(name="input1", batch_size=shape_1[0], shape=shape_1[1:])
inp2 = layers.Input(name="input2", batch_size=shape_2[0], shape=shape_2[1:])
if type == "add":
out = tf.math.add(inp1, inp2)
elif type == "mul":
out = tf.math.multiply(inp1, inp2)
elif type == "sub":
out = tf.math.subtract(inp1, inp2)
elif type == "div":
out = tf.math.divide(inp1, inp2)
elif type == "max":
out = tf.math.maximum(inp1, inp2)
elif type == "min":
out = tf.math.minimum(inp1, inp2)
else:
print('Binary operator "%s" not supported!' % type)
exit(1)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
np.random.seed(0)
inp1_tensor = np.random.rand(*shape_1).astype(np.float32)
inp2_tensor = np.random.rand(*shape_2).astype(np.float32)
if type == "pow":
inp1_tensor = np.abs(inp1_tensor) + 1
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_binary_broadcast_operator_test(
name="add_broadcast", type="add", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
gen_binary_broadcast_operator_test(
name="mul_broadcast", type="mul", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
gen_binary_broadcast_operator_test(
name="sub_broadcast", type="sub", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
gen_binary_broadcast_operator_test(
name="div_broadcast", type="div", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
gen_binary_broadcast_operator_test(
name="max_broadcast", type="max", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
gen_binary_broadcast_operator_test(
name="min_broadcast", type="min", shape_1=(1, 5, 5, 3), shape_2=(1, 1, 1, 3)
)
# ----------------------------------------------------------------------------------------------------------------------
# Conv2D
# ----------------------------------------------------------------------------------------------------------------------
def gen_conv2d_test(
name, input_shape, filters, kernels, strides, padding, dilations, activation
):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.Conv2D(
filters=filters,
kernel_size=kernels,
strides=strides,
padding=padding,
dilation_rate=dilations,
activation=activation,
use_bias=True,
bias_initializer="random_normal",
)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_conv2d_test(
name="conv2d_valid",
input_shape=(1, 5, 5, 3),
filters=2,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="linear",
)
gen_conv2d_test(
name="conv2d_same",
input_shape=(1, 5, 5, 3),
filters=2,
kernels=(2, 3),
strides=(1, 1),
padding="same",
dilations=(1, 1),
activation="linear",
)
gen_conv2d_test(
name="conv2d_relu",
input_shape=(1, 5, 5, 3),
filters=2,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="relu",
)
# ----------------------------------------------------------------------------------------------------------------------
# DepthwiseConv2D
# ----------------------------------------------------------------------------------------------------------------------
def gen_depthwise_conv2d_test(
name,
input_shape,
depth_multiplier,
kernels,
strides,
padding,
dilations,
activation,
):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.DepthwiseConv2D(
kernel_size=kernels,
strides=strides,
padding=padding,
depth_multiplier=depth_multiplier,
dilation_rate=dilations,
activation=activation,
use_bias=True,
bias_initializer="random_normal",
)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_depthwise_conv2d_test(
name="depthwise_conv2d_c1_m1",
input_shape=(1, 5, 5, 1),
depth_multiplier=1,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="linear",
)
gen_depthwise_conv2d_test(
name="depthwise_conv2d_c1_m2",
input_shape=(1, 5, 5, 1),
depth_multiplier=2,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="linear",
)
gen_depthwise_conv2d_test(
name="depthwise_conv2d_c2_m1",
input_shape=(1, 5, 5, 2),
depth_multiplier=1,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="linear",
)
gen_depthwise_conv2d_test(
name="depthwise_conv2d_c2_m2",
input_shape=(1, 5, 5, 2),
depth_multiplier=2,
kernels=(2, 3),
strides=(1, 1),
padding="valid",
dilations=(1, 1),
activation="linear",
)
# ----------------------------------------------------------------------------------------------------------------------
# FullyConnected
# ----------------------------------------------------------------------------------------------------------------------
def gen_fully_connected_test(name, input_shape, out_channels, activation):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.Dense(
units=out_channels,
activation=activation,
use_bias=True,
bias_initializer="random_normal",
)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_fully_connected_test(
name="fully_connected", input_shape=(2, 5), out_channels=10, activation="linear"
)
# ----------------------------------------------------------------------------------------------------------------------
# MaxPool2D
# ----------------------------------------------------------------------------------------------------------------------
def gen_maxpool2d_test(name, input_shape, kernels, strides, padding):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.MaxPooling2D(pool_size=kernels, strides=strides, padding=padding)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_maxpool2d_test(
name="maxpool2d_valid",
input_shape=(1, 5, 5, 2),
kernels=(2, 3),
strides=(1, 1),
padding="valid",
)
gen_maxpool2d_test(
name="maxpool2d_same",
input_shape=(1, 5, 5, 2),
kernels=(2, 3),
strides=(1, 1),
padding="same",
)
# ----------------------------------------------------------------------------------------------------------------------
# AvgPool2D
# ----------------------------------------------------------------------------------------------------------------------
def gen_avgpool2d_test(name, input_shape, kernels, strides, padding):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.AveragePooling2D(pool_size=kernels, strides=strides, padding=padding)(
inp
)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_avgpool2d_test(
name="avgpool2d_valid",
input_shape=(1, 5, 5, 2),
kernels=(2, 3),
strides=(1, 1),
padding="valid",
)
gen_avgpool2d_test(
name="avgpool2d_same",
input_shape=(1, 5, 5, 2),
kernels=(2, 3),
strides=(1, 1),
padding="same",
)
# ----------------------------------------------------------------------------------------------------------------------
# Softmax
# ----------------------------------------------------------------------------------------------------------------------
def gen_softmax_test(name, input_shape, axis):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.Softmax(axis=axis)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_softmax_test(name="softmax", input_shape=(1, 3), axis=-1)
# ----------------------------------------------------------------------------------------------------------------------
# Transpose
# ----------------------------------------------------------------------------------------------------------------------
def gen_transpose_test(name, input_shape, perm):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = layers.Permute(perm)(inp)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_transpose_test(name="transpose", input_shape=(1, 2, 3), perm=(2, 1))
# ----------------------------------------------------------------------------------------------------------------------
# Slice
# ----------------------------------------------------------------------------------------------------------------------
def gen_slice_test(name, input_shape, begin, size):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.slice(inp, begin, size)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_slice_test(name="slice", input_shape=(1, 2, 3), begin=(0, 1, 2), size=(1, 1, 1))
gen_slice_test(
name="slice_neg_size", input_shape=(1, 2, 3), begin=(0, 1, 2), size=(1, 1, -1)
)
# ----------------------------------------------------------------------------------------------------------------------
# Reshape
# ----------------------------------------------------------------------------------------------------------------------
def gen_reshape_test(name, input_shape, shape):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.reshape(inp, shape)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_reshape_test(name="reshape", input_shape=(1, 2, 3), shape=(1, 6))
gen_reshape_test(name="reshape_neg_shape", input_shape=(1, 2, 3), shape=(1, -1))
# ----------------------------------------------------------------------------------------------------------------------
# Concat
# ----------------------------------------------------------------------------------------------------------------------
def gen_concat_test(name, input_shape, axis):
# Create model.
inp1 = layers.Input(name="input1", batch_size=input_shape[0], shape=input_shape[1:])
inp2 = layers.Input(name="input2", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.concat([inp1, inp2], axis)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
np.random.seed(0)
inp1_tensor = np.random.rand(*input_shape).astype(np.float32)
inp2_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_concat_test(name="concat", input_shape=(1, 2, 3), axis=1)
gen_concat_test(name="concat_neg_axis", input_shape=(1, 2, 3), axis=-1)
# ----------------------------------------------------------------------------------------------------------------------
# Split
# ----------------------------------------------------------------------------------------------------------------------
def gen_split_test(name, input_shape, axis, num_split):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
outs = tf.split(inp, num_or_size_splits=num_split, axis=axis)
model = Model(inputs=[inp], outputs=outs)
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensors = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
for idx in range(len(out_tensors)):
save_tensor(out_tensors[idx], name + (".out%d" % idx))
# Clear session.
keras_backend.clear_session()
gen_split_test(name="split", input_shape=(1, 9), axis=-1, num_split=3)
# ----------------------------------------------------------------------------------------------------------------------
# Pad
# ----------------------------------------------------------------------------------------------------------------------
def gen_pad_test(name, input_shape, pads):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.pad(inp, paddings=pads)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_pad_test(name="pad", input_shape=(1, 2, 2), pads=[[0, 0], [1, 2], [0, 3]])
# ----------------------------------------------------------------------------------------------------------------------
# ArgMin/ArgMax
# ----------------------------------------------------------------------------------------------------------------------
def gen_arg_min_max_test(name, type, input_shape, axis):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
if type == "min":
out = tf.math.argmin(inp, axis=axis)
else:
out = tf.math.argmax(inp, axis=axis)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_arg_min_max_test(name="arg_min", type="min", input_shape=(1, 2, 10), axis=2)
gen_arg_min_max_test(name="arg_max", type="max", input_shape=(1, 2, 10), axis=2)
# ----------------------------------------------------------------------------------------------------------------------
# Pack
# ----------------------------------------------------------------------------------------------------------------------
def gen_pack_test(name, input_shape, axis):
# Create model.
inp1 = layers.Input(name="input1", batch_size=input_shape[0], shape=input_shape[1:])
inp2 = layers.Input(name="input2", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.stack([inp1, inp2], axis=axis)
model = Model(inputs=[inp1, inp2], outputs=[out])
# Create data.
np.random.seed(0)
inp1_tensor = np.random.rand(*input_shape).astype(np.float32)
inp2_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict([inp1_tensor, inp2_tensor])
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp1_tensor, name + ".inp0")
save_tensor(inp2_tensor, name + ".inp1")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_pack_test(name="pack", input_shape=(2, 3, 4), axis=1)
# ----------------------------------------------------------------------------------------------------------------------
# Unpack
# ----------------------------------------------------------------------------------------------------------------------
def gen_unpack_test(name, input_shape, axis):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
outs = tf.unstack(inp, axis=axis)
model = Model(inputs=[inp], outputs=outs)
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensors = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
for idx in range(len(out_tensors)):
save_tensor(out_tensors[idx], name + (".out%d" % idx))
# Clear session.
keras_backend.clear_session()
gen_unpack_test(name="unpack", input_shape=(2, 3, 4), axis=1)
# ----------------------------------------------------------------------------------------------------------------------
# Mean
# ----------------------------------------------------------------------------------------------------------------------
def gen_mean_test(name, input_shape, axis, keep_dims):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.reduce_mean(inp, axis=axis, keepdims=keep_dims)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_mean_test(name="mean_keep_dims", input_shape=(1, 2, 10), axis=2, keep_dims=True)
gen_mean_test(name="mean_no_keep_dims", input_shape=(1, 2, 10), axis=2, keep_dims=False)
gen_mean_test(
name="mean_multiple_axis_keep_dims",
input_shape=(1, 2, 10),
axis=(1, 2),
keep_dims=True,
)
gen_mean_test(
name="mean_multiple_axis_no_keep_dims",
input_shape=(1, 2, 10),
axis=(1, 2),
keep_dims=False,
)
# ----------------------------------------------------------------------------------------------------------------------
# Tile
# ----------------------------------------------------------------------------------------------------------------------
def gen_tile_test(name, input_shape, tiles):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.tile(inp, multiples=tiles)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_tile_test(name="tile", input_shape=(1, 2, 3), tiles=[1, 3, 2])
# ----------------------------------------------------------------------------------------------------------------------
# RESIZE NEAREST
# ----------------------------------------------------------------------------------------------------------------------
def gen_resize_nearest_test(name, input_shape, output_shape):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.compat.v1.image.resize_nearest_neighbor(inp, size=output_shape)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_resize_nearest_test(
name="resize_nearest", input_shape=(1, 3, 4, 2), output_shape=(5, 7)
)
# ----------------------------------------------------------------------------------------------------------------------
# RESIZE BILINEAR
# ----------------------------------------------------------------------------------------------------------------------
def gen_resize_bilinear_test(name, input_shape, output_shape):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.compat.v1.image.resize_bilinear(inp, size=output_shape)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_resize_bilinear_test(
name="resize_bilinear", input_shape=(1, 3, 4, 2), output_shape=(5, 7)
)
# ----------------------------------------------------------------------------------------------------------------------
# SPACE TO DEPTH
# ----------------------------------------------------------------------------------------------------------------------
def gen_space_to_depth_test(name, input_shape, block_size):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.compat.v1.space_to_depth(inp, block_size=block_size)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_space_to_depth_test(name="space_to_depth", input_shape=(1, 2, 4, 3), block_size=2)
# ----------------------------------------------------------------------------------------------------------------------
# DEPTH TO SPACE
# ----------------------------------------------------------------------------------------------------------------------
# Note: Older versions of TensorFlow handle this operator as a custom op. This test is generated separately by manually
# editing the 'space_to_depth' test.
def gen_depth_to_space_test(name, input_shape, block_size):
# Create model.
inp = layers.Input(name="input", batch_size=input_shape[0], shape=input_shape[1:])
out = tf.nn.depth_to_space(inp, block_size=block_size)
model = Model(inputs=[inp], outputs=[out])
# Create data.
np.random.seed(0)
inp_tensor = np.random.rand(*input_shape).astype(np.float32)
out_tensor = model.predict(inp_tensor)
# Save model.
save_model(model, name)
# Save data.
save_tensor(inp_tensor, name + ".inp0")
save_tensor(out_tensor, name + ".out0")
# Clear session.
keras_backend.clear_session()
gen_depth_to_space_test(name="depth_to_space", input_shape=(1, 1, 2, 12), block_size=2)
|
#!/usr/bin/env python
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from PIL import Image
# This script is used to visualize memory allocations in the Glow compiler.
#
# Usage: ./visualize.py dump.txt
#
# The script will dump a sequence of bitmap files that can be combined into a
# video (for example: heap100123.bmp, heap100124.bmp, heap100125.bmp, ...).
#
# On macOS and Linux the following ImageMagick command will generate a GIF file:
# convert -delay 10 -loop 0 *.bmp video.gif
#
# The input file should contain a list of allocation/deallocation commands.
# Allocation commands (marked with the letter 'a') report the start address and
# the size of the buffer, and the deallocation commands (marked with the letter
# 'd') report the address of the buffer. You can generate these command lists
# by inserting printf calls into the Glow memory allocator.
#
# Example input:
# a 348864 20000
# a 368896 20000
# a 388928 20000
# a 408960 200000
# d 388928
# d 368896
# d 348864
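# A minimal sketch of a helper (not invoked anywhere in this script) that writes a
# tiny synthetic trace in the "a <address> <size>" / "d <address>" format described
# above, handy for trying out the visualizer without instrumenting the Glow
# allocator. The default file name is only an illustrative choice.
def write_synthetic_trace(path="synthetic_dump.txt"):
    events = [
        "a 0 4096",      # allocate 4096 bytes at address 0
        "a 4096 8192",   # allocate 8192 bytes right after the first buffer
        "d 0",           # free the first buffer
        "a 16384 2048",  # allocate 2048 bytes further up
        "d 4096",        # free the second buffer
        "d 16384",       # free the last buffer
    ]
    with open(path, "w") as f:
        f.write("\n".join(events) + "\n")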
with open(sys.argv[1]) as dump_file:
    lines = dump_file.read().split("\n")
canvas_size = 512
pixelsize = 8
img = Image.new("RGB", (canvas_size, canvas_size), "black")
pixels = img.load()
# Use this number to assign file names to frames in the video.
filename_counter = 10000000
# Maps from address to size
sizes = {}
color_index = 0
colors = [
(218, 112, 214),
(255, 182, 193),
(250, 235, 215),
(255, 250, 205),
(210, 105, 30),
(210, 180, 140),
(188, 143, 143),
(255, 240, 245),
(230, 230, 250),
(255, 255, 240),
]
def getColor():
global color_index
color_index += 1
return colors[color_index % len(colors)]
def setPixel(addr, color):
# Don't draw out-of-bounds pixels.
if addr >= canvas_size * canvas_size:
return
# Only draw pixels that are aligned to the block size.
if addr % pixelsize != 0:
return
# Draw large pixels.
    sx = addr % canvas_size
    sy = addr // canvas_size
    sx = sx // pixelsize
    sy = sy // pixelsize
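    # e.g. with canvas_size=512 and pixelsize=8, address 4096 maps to
    # (sx, sy) = (0, 1), i.e. the 8x8 block covering pixels x in 0..7, y in 8..15.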
for x in range(pixelsize):
for y in range(pixelsize):
pixels[sx * pixelsize + x, sy * pixelsize + y] = color
def saveFrame():
global filename_counter
filename_counter += 1
img.save("heap" + str(filename_counter) + ".bmp")
for line in lines:
tokens = line.split()
if len(tokens) < 1:
break
print(tokens)
if tokens[0] == "a":
frm = int(tokens[1])
sz = int(tokens[2])
sizes[frm] = sz
if frm + sz >= canvas_size * canvas_size:
continue
for i in range(sz):
setPixel(i + frm, (255, 255, 255)) # allocate
saveFrame()
cc = getColor()
for i in range(sz):
setPixel(i + frm, cc) # allocated
saveFrame()
if tokens[0] == "d":
frm = int(tokens[1])
sz = sizes[frm]
if frm + sz >= canvas_size * canvas_size:
continue
for i in range(sz):
setPixel(i + frm, (128, 0, 0)) # deallocate
saveFrame()
for i in range(sz):
setPixel(i + frm, (15, 15, 15)) # previously allocated
saveFrame()
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script that generates ONNX models.
# The generated model will be used for ONNX importer unittest:
# ./tests/unittests/onnxImporterTest.cpp
# Run $>python gen_onnx_model.py to get the ONNX model.
import numpy as np
import onnx
from onnx import AttributeProto, GraphProto, helper, TensorProto
# The protobuf definition can be found here:
# https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
W = np.array(
[[[[1.0, 1.0], [1.0, 1.0]]]] # (1, 1, 2, 2) tensor for convolution weights
).astype(np.float32)
B = np.array([2.0]).astype(np.float32)
# Convolution with padding. "data" represents the input data,
# which will be provided by ONNX importer unittests.
node_def = onnx.helper.make_node(
"Conv",
inputs=["data", "W", "B"],
outputs=["y"],
kernel_shape=[2, 2],
strides=[1, 1],
pads=[1, 1, 1, 1],
name="conv1",
)
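# With the 3x3 input, 2x2 kernel, stride 1 and one pixel of padding on every side,
# the output is (3 + 1 + 1 - 2) / 1 + 1 = 4 in each spatial dimension, which matches
# the [1, 1, 4, 4] shape declared for "y" in the graph below.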
weight_tensor = helper.make_tensor(
name="W", data_type=TensorProto.FLOAT, dims=(1, 1, 2, 2), vals=W.reshape(4).tolist()
)
bias_tensor = helper.make_tensor(
name="B", data_type=TensorProto.FLOAT, dims=(1,), vals=B.reshape(1).tolist()
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_def],
"test-model",
inputs=[
helper.make_tensor_value_info("data", TensorProto.FLOAT, [1, 1, 3, 3]),
helper.make_tensor_value_info("W", TensorProto.FLOAT, [1, 1, 2, 2]),
helper.make_tensor_value_info("B", TensorProto.FLOAT, [1]),
],
outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 1, 4, 4])],
)
graph_def.initializer.extend([weight_tensor])
graph_def.initializer.extend([bias_tensor])
graph_def.initializer[0].name = "W"
graph_def.initializer[1].name = "B"
# Create the model (ModelProto)
model_def = helper.make_model(graph_def, producer_name="onnx-conv")
with open("simpleConv.onnxtxt", "w") as f:
f.write(str(model_def))
onnx.checker.check_model(model_def)
print("The model is checked!")
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import tensorflow as tf
from onnx import helper, TensorProto
from tensorflow.python.ops import gen_audio_ops as audio_ops
# ONNX utility.
def make_init(name, dtype, tensor):
return helper.make_tensor(
name=name,
data_type=dtype,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
# Function to generate AudioSpectrogram ONNX test model.
def gen_spectrogram_onnx_test_model(
model_path, window_count, window_size, stride, magnitude_squared=True
):
# Tensor sizes.
input_length = window_size + (window_count - 1) * stride
fft_length = int(2 ** np.ceil(np.log2(window_size)))
input_shape = [1, input_length]
spectrogram_length = int(fft_length / 2 + 1)
spectrogram_shape = [window_count, spectrogram_length]
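    # For example, the two-window model generated below (window_count=2, window_size=640,
    # stride=320) gives input_length = 640 + 1 * 320 = 960, fft_length = 1024 (the next
    # power of two above 640) and spectrogram_length = 1024 / 2 + 1 = 513.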
# Generate random input data.
np.random.seed(1)
input_data = np.random.randn(*input_shape)
# ----------------------------------------- COMPUTE TensorFlow REFERENCE -------------------------------------------
# Define TensorFlow model.
tf_input = tf.constant(
input_data.reshape([input_length, 1]), name="input", dtype=tf.float32
)
tf_spectrogram = audio_ops.audio_spectrogram(
tf_input,
window_size=window_size,
stride=stride,
magnitude_squared=magnitude_squared,
)
# Run TensorFlow model and get reference output.
with tf.Session() as sess:
spectrogram_ref = sess.run(tf_spectrogram)
spectrogram_ref = np.reshape(spectrogram_ref, spectrogram_shape)
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# AudioSpectrogram node definition.
spectrogram_node_def = onnx.helper.make_node(
"AudioSpectrogram",
name="audio_spectrogram",
inputs=["input"],
outputs=["spectrogram"],
window_size=int(window_size),
stride=int(stride),
magnitude_squared=int(magnitude_squared),
)
# Error node definition.
err_node_def = onnx.helper.make_node(
"Sub",
name="error",
inputs=["spectrogram", "spectrogram_ref"],
outputs=["spectrogram_err"],
)
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# Graph inputs.
graph_input.append(
helper.make_tensor_value_info("input", TensorProto.FLOAT, input_shape)
)
graph_input.append(
helper.make_tensor_value_info(
"spectrogram_ref", TensorProto.FLOAT, spectrogram_shape
)
)
# Graph initializers.
graph_init.append(make_init("input", TensorProto.FLOAT, input_data))
graph_init.append(make_init("spectrogram_ref", TensorProto.FLOAT, spectrogram_ref))
# Graph outputs.
graph_output.append(
helper.make_tensor_value_info(
"spectrogram_err", TensorProto.FLOAT, spectrogram_shape
)
)
# Graph name.
graph_name = "audio_spectrogram_test"
# Define graph (GraphProto).
graph_def = helper.make_graph(
[spectrogram_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers.
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto).
model_def = helper.make_model(graph_def, producer_name="onnx-audio-spectrogram")
# Print model.
with open(model_path, "w") as f:
f.write(str(model_def))
# One window spectrogram.
gen_spectrogram_onnx_test_model(
model_path="audioSpectrogramOneWindow.onnxtxt",
window_count=1,
window_size=512,
stride=256,
magnitude_squared=True,
)
# Two window spectrogram.
gen_spectrogram_onnx_test_model(
model_path="audioSpectrogramTwoWindow.onnxtxt",
window_count=2,
window_size=640,
stride=320,
magnitude_squared=True,
)
# Magnitude non-squared.
gen_spectrogram_onnx_test_model(
model_path="audioSpectrogramNonSquared.onnxtxt",
window_count=1,
window_size=640,
stride=320,
magnitude_squared=False,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from caffe2.proto import caffe2_pb2
from google.protobuf import text_format
def read_model_from_file(path):
m = caffe2_pb2.NetDef()
with open(path, "rb") as f:
if ".pbtxt" in path:
text_format.Merge(f.read(), m)
else:
m.ParseFromString(f.read())
return m
def write_model_to_file(path, m):
with open(path, "wb") as f:
if ".pbtxt" in path:
f.write(text_format.MessageToString(m))
else:
f.write(m.SerializeToString())
# Perform dead code elimination on predict_net, removing any ops that are not
# needed to produce the values in predict_net.external_output. Also remove any
# ops in init_net that produce values no longer needed by predict_net.
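# Example invocation (the script and file names below are illustrative):
#   python model_dce.py --input_predict_net_path predict_net.pb \
#       --input_init_net_path init_net.pb \
#       --output_predict_net_path predict_net_dce.pb \
#       --output_init_net_path init_net_dce.pb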
def dce(init_net, predict_net):
num_predict_net_ops_original = len(predict_net.op)
num_predict_net_inputs_original = len(predict_net.external_input)
# Find the set of tensors used in the computation of the outputs.
live_predict_net_op_outputs = set(predict_net.external_output)
prev_num_live_predict_net_op_outputs = len(live_predict_net_op_outputs)
while True:
for op in predict_net.op:
for output_tensor in op.output:
if output_tensor in live_predict_net_op_outputs:
for input_tensor in op.input:
live_predict_net_op_outputs.add(input_tensor)
num_live_predict_net_op_outputs = len(live_predict_net_op_outputs)
if num_live_predict_net_op_outputs == prev_num_live_predict_net_op_outputs:
break
prev_num_live_predict_net_op_outputs = num_live_predict_net_op_outputs
# Find the ops that are required to compute the tensors used during
# computation of the outputs.
live_predict_net_ops = []
for op in predict_net.op:
for output_tensor in op.output:
if output_tensor in live_predict_net_op_outputs:
live_predict_net_ops.append(op)
# Delete all unused ops in predict_net.
num_predict_net_ops_eliminated = len(predict_net.op) - len(live_predict_net_ops)
del predict_net.op[:]
predict_net.op.extend(live_predict_net_ops)
# Find the set of all used inputs tensors in predict_net.
live_predict_net_op_inputs = set()
for op in predict_net.op:
for input_tensor in op.input:
live_predict_net_op_inputs.add(input_tensor)
# Find the set of used external_inputs.
live_predict_net_external_inputs = set()
for external_input in predict_net.external_input:
if external_input in live_predict_net_op_inputs:
live_predict_net_external_inputs.add(external_input)
# Delete unused external_inputs in predict_net.
num_predict_net_inputs_eliminated = len(predict_net.external_input) - len(
live_predict_net_external_inputs
)
del predict_net.external_input[:]
predict_net.external_input.extend(live_predict_net_external_inputs)
print(
"predict_net ops eliminated: {}/{}".format(
num_predict_net_ops_eliminated, num_predict_net_ops_original
)
)
print(
"predict_net external_inputs eliminated: {}/{}".format(
num_predict_net_inputs_eliminated, num_predict_net_inputs_original
)
)
    # Everything below pertains to removing unused ops and outputs from the init_net;
    # if no init_net was provided then stop here.
if init_net is None:
return
num_init_net_ops_original = len(init_net.op)
# Find the set of init_net ops with outputs needed by the init_net
live_init_net_ops = []
for op in init_net.op:
for output_tensor in op.output:
if output_tensor in live_predict_net_external_inputs:
live_init_net_ops.append(op)
# Eliminate dead init_net ops
num_init_net_ops_eliminated = len(init_net.op) - len(live_init_net_ops)
del init_net.op[:]
init_net.op.extend(live_init_net_ops)
# Update init_net external_outputs
live_init_net_op_outputs = set()
for op in init_net.op:
for output_tensor in op.output:
live_init_net_op_outputs.add(output_tensor)
live_init_net_external_outputs = set()
for output_tensor in init_net.external_output:
if output_tensor in live_init_net_op_outputs:
live_init_net_external_outputs.add(output_tensor)
del init_net.external_output[:]
init_net.external_output.extend(live_init_net_external_outputs)
print(
"init_net ops eliminated: {}/{}".format(
num_init_net_ops_eliminated, num_init_net_ops_original
)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Caffe2 model dead code elimination")
parser.add_argument("--input_init_net_path", type=str)
parser.add_argument("--input_predict_net_path", type=str, required=True)
parser.add_argument("--output_init_net_path", type=str)
parser.add_argument("--output_predict_net_path", type=str, required=True)
args = parser.parse_args()
predict_net = read_model_from_file(args.input_predict_net_path)
init_net = None
if args.input_init_net_path is not None:
init_net = read_model_from_file(args.input_init_net_path)
dce(init_net, predict_net)
write_model_to_file(args.output_predict_net_path, predict_net)
if args.output_init_net_path is not None:
write_model_to_file(args.output_init_net_path, init_net)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import torch
import torch.nn
import torch.onnx
from onnx import helper, TensorProto
# LSTM enums
LSTM_DIR_FORWARD = "forward"
LSTM_DIR_REVERSE = "reverse"
LSTM_DIR_BIDIRECTIONAL = "bidirectional"
LSTM_DIRS = [LSTM_DIR_FORWARD, LSTM_DIR_REVERSE, LSTM_DIR_BIDIRECTIONAL]
# ONNX utility
def make_init(name, type, tensor):
return helper.make_tensor(
name=name,
data_type=type,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
# Function to generate LSTM ONNX test model
def gen_lstm_onnx_test_model(
model_path,
seq_length,
batch_size,
hidden_size,
input_size,
direction,
has_bias,
has_sequence_lens,
has_initial_h,
has_initial_c,
has_peephole,
input_forget=False,
):
# Validate parameters
assert direction in LSTM_DIRS, "ONNX LSTM direction invalid!"
assert not has_sequence_lens, "ONNX LSTM Variable sequence length not supported"
# Get number of directions
num_directions = 2 if (direction == LSTM_DIR_BIDIRECTIONAL) else 1
# Tensor sizes
X_shape = [seq_length, batch_size, input_size]
W_shape = [num_directions, 4 * hidden_size, input_size]
R_shape = [num_directions, 4 * hidden_size, hidden_size]
B_shape = [num_directions, 8 * hidden_size]
sequence_lens_shape = [batch_size]
initial_h_shape = [num_directions, batch_size, hidden_size]
initial_c_shape = [num_directions, batch_size, hidden_size]
P_shape = [num_directions, 3 * hidden_size]
Y_shape = [seq_length, num_directions, batch_size, hidden_size]
# Generate random inputs (weights are assumed concatenated in ONNX format: i,o,f,c)
np.random.seed(1)
X = np.random.randn(*X_shape)
W = np.random.randn(*W_shape)
R = np.random.randn(*R_shape)
B = np.random.randn(*B_shape) if has_bias else np.zeros(B_shape)
sequence_lens = (
np.random.randint(1, seq_length, batch_size)
if has_sequence_lens
else np.tile(seq_length, batch_size)
)
initial_h = (
np.random.randn(*initial_h_shape)
if has_initial_h
else np.zeros(initial_h_shape)
)
initial_c = (
np.random.randn(*initial_c_shape)
if has_initial_c
else np.zeros(initial_c_shape)
)
P = np.random.randn(*P_shape) if has_peephole else np.zeros(P_shape)
# Function to get all the weight components for the given direction
def get_weights(dir_idx):
Wi = np.reshape(
W[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, input_size]
)
Wo = np.reshape(
W[dir_idx, 1 * hidden_size : 2 * hidden_size, :], [hidden_size, input_size]
)
Wf = np.reshape(
W[dir_idx, 2 * hidden_size : 3 * hidden_size, :], [hidden_size, input_size]
)
Wc = np.reshape(
W[dir_idx, 3 * hidden_size : 4 * hidden_size, :], [hidden_size, input_size]
)
Ri = np.reshape(
R[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, hidden_size]
)
Ro = np.reshape(
R[dir_idx, 1 * hidden_size : 2 * hidden_size, :], [hidden_size, hidden_size]
)
Rf = np.reshape(
R[dir_idx, 2 * hidden_size : 3 * hidden_size, :], [hidden_size, hidden_size]
)
Rc = np.reshape(
R[dir_idx, 3 * hidden_size : 4 * hidden_size, :], [hidden_size, hidden_size]
)
bWi = np.reshape(B[dir_idx, 0 * hidden_size : 1 * hidden_size], [hidden_size])
bWo = np.reshape(B[dir_idx, 1 * hidden_size : 2 * hidden_size], [hidden_size])
bWf = np.reshape(B[dir_idx, 2 * hidden_size : 3 * hidden_size], [hidden_size])
bWc = np.reshape(B[dir_idx, 3 * hidden_size : 4 * hidden_size], [hidden_size])
bRi = np.reshape(B[dir_idx, 4 * hidden_size : 5 * hidden_size], [hidden_size])
bRo = np.reshape(B[dir_idx, 5 * hidden_size : 6 * hidden_size], [hidden_size])
bRf = np.reshape(B[dir_idx, 6 * hidden_size : 7 * hidden_size], [hidden_size])
bRc = np.reshape(B[dir_idx, 7 * hidden_size : 8 * hidden_size], [hidden_size])
Pi = np.tile(P[dir_idx, 0 * hidden_size : 1 * hidden_size], (batch_size, 1))
Po = np.tile(P[dir_idx, 1 * hidden_size : 2 * hidden_size], (batch_size, 1))
Pf = np.tile(P[dir_idx, 2 * hidden_size : 3 * hidden_size], (batch_size, 1))
return (
Wi,
Wo,
Wf,
Wc,
Ri,
Ro,
Rf,
Rc,
bWi,
bWo,
bWf,
bWc,
bRi,
bRo,
bRf,
bRc,
Pi,
Po,
Pf,
)
# Function to get PyTorch weights (which are in the i, f, c, o order)
def get_torch_weights(dir_idx):
(
Wi,
Wo,
Wf,
Wc,
Ri,
Ro,
Rf,
Rc,
bWi,
bWo,
bWf,
bWc,
bRi,
bRo,
bRf,
bRc,
Pi,
Po,
Pf,
) = get_weights(dir_idx)
W_torch = np.concatenate((Wi, Wf, Wc, Wo), 0)
R_torch = np.concatenate((Ri, Rf, Rc, Ro), 0)
bW_torch = np.concatenate((bWi, bWf, bWc, bWo), 0)
bR_torch = np.concatenate((bRi, bRf, bRc, bRo), 0)
return (W_torch, R_torch, bW_torch, bR_torch)
# ----------------------------------------- COMPUTE pyTORCH REFERENCE ----------------------------------------------
    # Compute the reference using PyTorch. PyTorch's LSTM supports only the forward and bidirectional directions, so
    # the reverse LSTM is emulated by flipping the input sequence, running a forward LSTM, and flipping the output back.
lstm = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
bias=True,
batch_first=False,
dropout=0,
bidirectional=(direction == LSTM_DIR_BIDIRECTIONAL),
)
# Get LSTM state dictionary
lstm_state_dict = lstm.state_dict()
# Assign forward weights
forwardEnabled = direction in [LSTM_DIR_FORWARD, LSTM_DIR_BIDIRECTIONAL]
if forwardEnabled:
forward_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(forward_dir_idx)
lstm_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
lstm_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
lstm_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
lstm_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
# Assign reverse weights
reverseEnabled = direction in [LSTM_DIR_REVERSE, LSTM_DIR_BIDIRECTIONAL]
if reverseEnabled:
if direction == LSTM_DIR_REVERSE:
reverse_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
lstm_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
lstm_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
lstm_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
lstm_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
else:
reverse_dir_idx = 1
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
lstm_state_dict["weight_ih_l0_reverse"] = torch.tensor(
W_torch, dtype=torch.float32
)
lstm_state_dict["weight_hh_l0_reverse"] = torch.tensor(
R_torch, dtype=torch.float32
)
lstm_state_dict["bias_ih_l0_reverse"] = torch.tensor(
bW_torch, dtype=torch.float32
)
lstm_state_dict["bias_hh_l0_reverse"] = torch.tensor(
bR_torch, dtype=torch.float32
)
# Set LSTM state dictionary
lstm.load_state_dict(lstm_state_dict, strict=True)
# Perform inference
X_torch = torch.tensor(X, dtype=torch.float32)
initial_h_torch = torch.tensor(initial_h, dtype=torch.float32)
initial_c_torch = torch.tensor(initial_c, dtype=torch.float32)
if direction == LSTM_DIR_REVERSE:
Y, (next_h, next_c) = lstm(
X_torch.flip([0]), (initial_h_torch, initial_c_torch)
)
Y = Y.flip([0])
else:
Y, (next_h, next_c) = lstm(X_torch, (initial_h_torch, initial_c_torch))
# Reshape output to ONNX format [seq_length, num_directions, batch_size, hidden_size]
Y_ref = Y.detach().numpy()
Y_ref = np.reshape(Y_ref, [seq_length, batch_size, num_directions, hidden_size])
Y_ref = np.transpose(Y_ref, [0, 2, 1, 3])
# Reshape states to ONNX format
Y_h_ref = next_h.detach().numpy()
Y_c_ref = next_c.detach().numpy()
# --------------------------------------- COMPUTE PYTHON-NUMPY REFERENCE -------------------------------------------
# Create X slices
Xslices = list()
for t in range(seq_length):
Xslices.append(np.reshape(X[t, :, :], [batch_size, input_size]))
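    # The numpy reference below follows the cell equations implemented by compute_lstm,
    # i.e. the standard ONNX LSTM formulation with default activations, optional peephole
    # terms P, and the coupling i_t = 1 - f_t when input_forget is set:
    #   f_t = sigmoid(x_t @ Wf.T + bWf + H_{t-1} @ Rf.T + bRf + Pf * C_{t-1})
    #   i_t = sigmoid(x_t @ Wi.T + bWi + H_{t-1} @ Ri.T + bRi + Pi * C_{t-1})
    #   c~_t = tanh(x_t @ Wc.T + bWc + H_{t-1} @ Rc.T + bRc)
    #   C_t = f_t * C_{t-1} + i_t * c~_t
    #   o_t = sigmoid(x_t @ Wo.T + bWo + H_{t-1} @ Ro.T + bRo + Po * C_t)
    #   H_t = o_t * tanh(C_t)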
# Function to compute one LSTM cell
def compute_lstm(forward):
dir_idx = 0 if forward else (0 if direction == LSTM_DIR_REVERSE else 1)
(
Wi,
Wo,
Wf,
Wc,
Ri,
Ro,
Rf,
Rc,
bWi,
bWo,
bWf,
bWc,
bRi,
bRo,
bRf,
bRc,
Pi,
Po,
Pf,
) = get_weights(dir_idx)
def f(x):
return 1 / (1 + np.exp(-x))
def g(x):
return np.tanh(x)
def h(x):
return np.tanh(x)
def mm(x, w):
return np.matmul(x, w.transpose())
Ht = np.reshape(initial_h[dir_idx, :, :], [batch_size, hidden_size])
Ct = np.reshape(initial_c[dir_idx, :, :], [batch_size, hidden_size])
Yslices = list()
for t in range(seq_length):
xt = Xslices[t] if forward else Xslices[seq_length - 1 - t]
ft = f(mm(xt, Wf) + bWf + mm(Ht, Rf) + bRf + Pf * Ct)
if input_forget:
it = 1 - ft
else:
it = f(mm(xt, Wi) + bWi + mm(Ht, Ri) + bRi + Pi * Ct)
ctild = g(mm(xt, Wc) + bWc + mm(Ht, Rc) + bRc)
Ct = ft * Ct + it * ctild
ot = f(mm(xt, Wo) + bWo + mm(Ht, Ro) + bRo + Po * Ct)
Ht = ot * h(Ct)
Yslices.append(Ht)
return Yslices, Ht, Ct
Yslices = list()
Hslices = list()
Cslices = list()
# Compute forward LSTM
forwardYslices = list()
if forwardEnabled:
Yt, Ht, Ct = compute_lstm(True)
forwardYslices += Yt
Hslices.append(Ht)
Cslices.append(Ct)
# Compute reverse LSTM
reverseYslices = list()
if reverseEnabled:
Yt, Ht, Ct = compute_lstm(False)
reverseYslices += Yt
Hslices.append(Ht)
Cslices.append(Ct)
# Concatenate slices
for t in range(seq_length):
if forwardEnabled:
Yslices.append(forwardYslices[t])
if reverseEnabled:
Yslices.append(reverseYslices[seq_length - 1 - t])
Y_ref_np = np.concatenate(Yslices, 0).reshape(
[seq_length, num_directions, batch_size, hidden_size]
)
Y_h_ref_np = np.concatenate(Hslices, 0).reshape(
[num_directions, batch_size, hidden_size]
)
Y_c_ref_np = np.concatenate(Cslices, 0).reshape(
[num_directions, batch_size, hidden_size]
)
    # PyTorch does not support peepholes or input_forget, so use the numpy reference in those cases;
    # otherwise assert that the PyTorch and numpy references agree.
if has_peephole or input_forget:
Y_ref = Y_ref_np
Y_h_ref = Y_h_ref_np
Y_c_ref = Y_c_ref_np
else:
assert (
np.max(np.abs(Y_ref - Y_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy LSTM implementation"
assert (
np.max(np.abs(Y_h_ref - Y_h_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy LSTM implementation"
assert (
np.max(np.abs(Y_c_ref - Y_c_ref_np)) < 1e-6
), "Mismatch between Pytorch and Numpy LSTM implementation"
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# Node inputs
node_inputs = [
"X",
"W",
"R",
"B" if has_bias else "",
"",
"initial_h" if has_initial_h else "",
"initial_c" if has_initial_c else "",
"P" if has_peephole else "",
]
# Node outputs
node_outputs = ["Y", "Y_h", "Y_c"]
# LSTM node definition
lstm_node_def = onnx.helper.make_node(
"LSTM",
name="lstm",
inputs=node_inputs,
outputs=node_outputs,
hidden_size=hidden_size,
direction=direction,
input_forget=input_forget,
)
# Error node definition
err_node_def = onnx.helper.make_node(
"Sub", name="error", inputs=["Y", "Y_ref"], outputs=["Y_err"]
)
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# LSTM inputs
graph_input.append(helper.make_tensor_value_info("X", TensorProto.FLOAT, X_shape))
graph_input.append(helper.make_tensor_value_info("W", TensorProto.FLOAT, W_shape))
graph_input.append(helper.make_tensor_value_info("R", TensorProto.FLOAT, R_shape))
if has_bias:
graph_input.append(
helper.make_tensor_value_info("B", TensorProto.FLOAT, B_shape)
)
if has_sequence_lens:
graph_input.append(
helper.make_tensor_value_info(
"sequence_lens", TensorProto.INT32, sequence_lens_shape
)
)
if has_initial_h:
graph_input.append(
helper.make_tensor_value_info(
"initial_h", TensorProto.FLOAT, initial_h_shape
)
)
if has_initial_c:
graph_input.append(
helper.make_tensor_value_info(
"initial_c", TensorProto.FLOAT, initial_c_shape
)
)
if has_peephole:
graph_input.append(
helper.make_tensor_value_info("P", TensorProto.FLOAT, P_shape)
)
# Reference input
graph_input.append(
helper.make_tensor_value_info("Y_ref", TensorProto.FLOAT, Y_shape)
)
# LSTM initializers
graph_init.append(make_init("X", TensorProto.FLOAT, X))
graph_init.append(make_init("W", TensorProto.FLOAT, W))
graph_init.append(make_init("R", TensorProto.FLOAT, R))
if has_bias:
graph_init.append(make_init("B", TensorProto.FLOAT, B))
if has_sequence_lens:
graph_init.append(make_init("sequence_lens", TensorProto.INT32, sequence_lens))
if has_initial_h:
graph_init.append(make_init("initial_h", TensorProto.FLOAT, initial_h))
if has_initial_c:
graph_init.append(make_init("initial_c", TensorProto.FLOAT, initial_c))
if has_peephole:
graph_init.append(make_init("P", TensorProto.FLOAT, P))
# Reference initializer
graph_init.append(make_init("Y_ref", TensorProto.FLOAT, Y_ref))
# Graph outputs
graph_output.append(
helper.make_tensor_value_info("Y_err", TensorProto.FLOAT, Y_shape)
)
# Define graph (GraphProto)
graph_name = "lstm_test"
graph_def = helper.make_graph(
[lstm_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto)
model_def = helper.make_model(graph_def, producer_name="onnx-lstm")
# Check model
onnx.checker.check_model(model_def)
# Print model
with open(model_path, "w") as f:
f.write(str(model_def))
# Forward LSTM
gen_lstm_onnx_test_model(
model_path="lstmForward.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=False,
input_forget=False,
)
# Reverse LSTM
gen_lstm_onnx_test_model(
model_path="lstmReverse.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="reverse",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=False,
input_forget=False,
)
# Bidirectional LSTM
gen_lstm_onnx_test_model(
model_path="lstmBidirectional.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="bidirectional",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=False,
input_forget=False,
)
# Forward no bias LSTM
gen_lstm_onnx_test_model(
model_path="lstmForwardNoBias.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=False,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=False,
input_forget=False,
)
# Forward no state LSTM
gen_lstm_onnx_test_model(
model_path="lstmForwardNoState.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=False,
has_initial_c=False,
has_peephole=False,
input_forget=False,
)
# Forward with peephole LSTM
gen_lstm_onnx_test_model(
model_path="lstmForwardWithPeephole.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=True,
input_forget=False,
)
# Forward with input forget LSTM
gen_lstm_onnx_test_model(
model_path="lstmForwardInputForget.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
has_initial_c=True,
has_peephole=False,
input_forget=True,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import tensorflow as tf
from onnx import helper, TensorProto
from tensorflow.python.ops import gen_audio_ops as audio_ops
# ONNX utility.
def make_init(name, dtype, tensor):
return helper.make_tensor(
name=name,
data_type=dtype,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
# Function to generate MFCC ONNX test model.
def gen_mfcc_onnx_test_model(
model_path,
window_count,
window_size,
stride,
sample_rate,
lower_frequency_limit,
upper_frequency_limit,
filterbank_channel_count,
dct_coefficient_count,
):
# Tensor sizes.
input_length = window_size + (window_count - 1) * stride
fft_length = int(2 ** np.ceil(np.log2(window_size)))
input_shape = [1, input_length]
spectrogram_length = int(fft_length / 2 + 1)
spectrogram_shape = [window_count, spectrogram_length]
coefficients_shape = [window_count, dct_coefficient_count]
# Generate random input data.
np.random.seed(1)
input_data = np.random.randn(*input_shape)
# ----------------------------------------- COMPUTE TensorFlow REFERENCE -------------------------------------------
# Define TensorFlow model.
tf_input = tf.constant(
input_data.reshape([input_length, 1]), name="input", dtype=tf.float32
)
tf_spectrogram = audio_ops.audio_spectrogram(
tf_input, window_size=window_size, stride=stride, magnitude_squared=True
)
tf_mfcc = audio_ops.mfcc(
spectrogram=tf_spectrogram,
sample_rate=sample_rate,
upper_frequency_limit=upper_frequency_limit,
lower_frequency_limit=lower_frequency_limit,
filterbank_channel_count=filterbank_channel_count,
dct_coefficient_count=dct_coefficient_count,
)
# Run TensorFlow model and get spectrogram input.
with tf.Session() as sess:
spectrogram = sess.run(tf_spectrogram)
spectrogram = np.reshape(spectrogram, spectrogram_shape)
# Run TensorFlow model and get reference output coefficients.
with tf.Session() as sess:
coefficients_ref = sess.run(tf_mfcc)
coefficients_ref = np.reshape(coefficients_ref, coefficients_shape)
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# MFCC node definition.
mfcc_node_def = onnx.helper.make_node(
"MFCC",
name="mfcc",
inputs=["spectrogram"],
outputs=["coefficients"],
sample_rate=float(sample_rate),
lower_frequency_limit=float(lower_frequency_limit),
upper_frequency_limit=float(upper_frequency_limit),
filterbank_channel_count=int(filterbank_channel_count),
dct_coefficient_count=int(dct_coefficient_count),
)
# Error node definition.
err_node_def = onnx.helper.make_node(
"Sub",
name="error",
inputs=["coefficients", "coefficients_ref"],
outputs=["coefficients_err"],
)
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# Graph inputs.
graph_input.append(
helper.make_tensor_value_info(
"spectrogram", TensorProto.FLOAT, spectrogram_shape
)
)
graph_input.append(
helper.make_tensor_value_info(
"coefficients_ref", TensorProto.FLOAT, coefficients_shape
)
)
# Graph initializers.
graph_init.append(make_init("spectrogram", TensorProto.FLOAT, spectrogram))
graph_init.append(
make_init("coefficients_ref", TensorProto.FLOAT, coefficients_ref)
)
# Graph outputs.
graph_output.append(
helper.make_tensor_value_info(
"coefficients_err", TensorProto.FLOAT, coefficients_shape
)
)
# Graph name.
graph_name = "mfcc_test"
# Define graph (GraphProto).
graph_def = helper.make_graph(
[mfcc_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers.
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto).
model_def = helper.make_model(graph_def, producer_name="onnx-mfcc")
# Print model.
with open(model_path, "w") as f:
f.write(str(model_def))
# One window MFCC.
gen_mfcc_onnx_test_model(
model_path="mfccOneWindow.onnxtxt",
window_count=1,
window_size=640,
stride=320,
sample_rate=16000,
lower_frequency_limit=20,
upper_frequency_limit=4000,
filterbank_channel_count=40,
dct_coefficient_count=10,
)
# Two window MFCC.
gen_mfcc_onnx_test_model(
model_path="mfccTwoWindow.onnxtxt",
window_count=2,
window_size=512,
stride=256,
sample_rate=16000,
lower_frequency_limit=20,
upper_frequency_limit=4000,
filterbank_channel_count=40,
dct_coefficient_count=10,
)
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import onnx
import torch
import torch.nn
import torch.onnx
from onnx import helper, TensorProto
# RNN enums
RNN_DIR_FORWARD = "forward"
RNN_DIR_REVERSE = "reverse"
RNN_DIR_BIDIRECTIONAL = "bidirectional"
RNN_DIRS = [RNN_DIR_FORWARD, RNN_DIR_REVERSE, RNN_DIR_BIDIRECTIONAL]
# ONNX utility
def make_init(name, type, tensor):
return helper.make_tensor(
name=name,
data_type=type,
dims=tensor.shape,
vals=tensor.reshape(tensor.size).tolist(),
)
# Function to generate RNN ONNX test model
def gen_rnn_onnx_test_model(
model_path,
seq_length,
batch_size,
hidden_size,
input_size,
direction,
has_bias,
has_sequence_lens,
has_initial_h,
):
# Validate parameters
assert direction in RNN_DIRS, "ONNX RNN direction invalid!"
assert not has_sequence_lens, "ONNX RNN Variable sequence length not supported"
# Get number of directions
num_directions = 2 if (direction == RNN_DIR_BIDIRECTIONAL) else 1
# Tensor sizes
X_shape = [seq_length, batch_size, input_size]
W_shape = [num_directions, 1 * hidden_size, input_size]
R_shape = [num_directions, 1 * hidden_size, hidden_size]
B_shape = [num_directions, 2 * hidden_size]
sequence_lens_shape = [batch_size]
initial_h_shape = [num_directions, batch_size, hidden_size]
Y_shape = [seq_length, num_directions, batch_size, hidden_size]
    # Generate random inputs (a plain RNN has a single gate, so there is no gate concatenation to worry about)
np.random.seed(1)
X = np.random.randn(*X_shape)
W = np.random.randn(*W_shape)
R = np.random.randn(*R_shape)
B = np.random.randn(*B_shape) if has_bias else np.zeros(B_shape)
sequence_lens = (
np.random.randint(1, seq_length, batch_size)
if has_sequence_lens
else np.tile(seq_length, batch_size)
)
initial_h = (
np.random.randn(*initial_h_shape)
if has_initial_h
else np.zeros(initial_h_shape)
)
# Function to get all the weight components for the given direction
def get_weights(dir_idx):
Wi = np.reshape(
W[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, input_size]
)
Ri = np.reshape(
R[dir_idx, 0 * hidden_size : 1 * hidden_size, :], [hidden_size, hidden_size]
)
bWi = np.reshape(B[dir_idx, 0 * hidden_size : 1 * hidden_size], [hidden_size])
bRi = np.reshape(B[dir_idx, 1 * hidden_size : 2 * hidden_size], [hidden_size])
return (Wi, Ri, bWi, bRi)
    # Function to get the PyTorch weights (a plain RNN has a single gate, so no reordering is needed)
def get_torch_weights(dir_idx):
Wi, Ri, bWi, bRi = get_weights(dir_idx)
W_torch = Wi
R_torch = Ri
bW_torch = bWi
bR_torch = bRi
return (W_torch, R_torch, bW_torch, bR_torch)
# ----------------------------------------- COMPUTE pyTORCH REFERENCE ----------------------------------------------
    # Compute the reference using PyTorch. PyTorch's RNN supports only the forward and bidirectional directions, so
    # the reverse RNN is emulated by flipping the input sequence, running a forward RNN, and flipping the output back.
rnn = torch.nn.RNN(
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
nonlinearity="tanh",
bias=True,
batch_first=False,
dropout=0,
bidirectional=(direction == RNN_DIR_BIDIRECTIONAL),
)
# Get RNN state dictionary
rnn_state_dict = rnn.state_dict()
# Assign forward weights
forwardEnabled = direction in [RNN_DIR_FORWARD, RNN_DIR_BIDIRECTIONAL]
if forwardEnabled:
forward_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(forward_dir_idx)
rnn_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
rnn_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
rnn_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
rnn_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
# Assign reverse weights
reverseEnabled = direction in [RNN_DIR_REVERSE, RNN_DIR_BIDIRECTIONAL]
if reverseEnabled:
if direction == RNN_DIR_REVERSE:
reverse_dir_idx = 0
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
rnn_state_dict["weight_ih_l0"] = torch.tensor(W_torch, dtype=torch.float32)
rnn_state_dict["weight_hh_l0"] = torch.tensor(R_torch, dtype=torch.float32)
rnn_state_dict["bias_ih_l0"] = torch.tensor(bW_torch, dtype=torch.float32)
rnn_state_dict["bias_hh_l0"] = torch.tensor(bR_torch, dtype=torch.float32)
else:
reverse_dir_idx = 1
(W_torch, R_torch, bW_torch, bR_torch) = get_torch_weights(reverse_dir_idx)
rnn_state_dict["weight_ih_l0_reverse"] = torch.tensor(
W_torch, dtype=torch.float32
)
rnn_state_dict["weight_hh_l0_reverse"] = torch.tensor(
R_torch, dtype=torch.float32
)
rnn_state_dict["bias_ih_l0_reverse"] = torch.tensor(
bW_torch, dtype=torch.float32
)
rnn_state_dict["bias_hh_l0_reverse"] = torch.tensor(
bR_torch, dtype=torch.float32
)
# Set RNN state dictionary
rnn.load_state_dict(rnn_state_dict, strict=True)
# Perform inference
X_torch = torch.tensor(X, dtype=torch.float32)
initial_h_torch = torch.tensor(initial_h, dtype=torch.float32)
if direction == RNN_DIR_REVERSE:
Y, next_h = rnn(X_torch.flip([0]), initial_h_torch)
Y = Y.flip([0])
else:
Y, next_h = rnn(X_torch, initial_h_torch)
# Reshape output to ONNX format [seq_length, num_directions, batch_size, hidden_size]
Y_ref = Y.detach().numpy()
Y_ref = np.reshape(Y_ref, [seq_length, batch_size, num_directions, hidden_size])
Y_ref = np.transpose(Y_ref, [0, 2, 1, 3])
# Reshape states to ONNX format
Y_h_ref = next_h.detach().numpy()
# --------------------------------------- COMPUTE PYTHON-NUMPY REFERENCE -------------------------------------------
# Create X slices
Xslices = list()
for t in range(seq_length):
Xslices.append(np.reshape(X[t, :, :], [batch_size, input_size]))
# Function to compute one RNN cell
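    # (ONNX RNN cell with the default tanh activation: H_t = tanh(X_t @ Wi^T + bWi + H_{t-1} @ Ri^T + bRi);
    # the loop below evaluates exactly this, one time step per slice.)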
def compute_rnn(forward):
dir_idx = 0 if forward else (0 if direction == RNN_DIR_REVERSE else 1)
Wi, Ri, bWi, bRi = get_weights(dir_idx)
def f(x):
return np.tanh(x)
def mm(x, w):
return np.matmul(x, w.transpose())
Ht = np.reshape(initial_h[dir_idx, :, :], [batch_size, hidden_size])
Yslices = list()
for t in range(seq_length):
xt = Xslices[t] if forward else Xslices[seq_length - 1 - t]
Ht = f(mm(xt, Wi) + bWi + mm(Ht, Ri) + bRi)
Yslices.append(Ht)
return Yslices, Ht
Yslices = list()
Hslices = list()
# Compute forward RNN
forwardYslices = list()
if forwardEnabled:
Yt, Ht = compute_rnn(True)
forwardYslices += Yt
Hslices.append(Ht)
# Compute reverse RNN
reverseYslices = list()
if reverseEnabled:
Yt, Ht = compute_rnn(False)
reverseYslices += Yt
Hslices.append(Ht)
# Concatenate slices
for t in range(seq_length):
if forwardEnabled:
Yslices.append(forwardYslices[t])
if reverseEnabled:
Yslices.append(reverseYslices[seq_length - 1 - t])
Y_ref_np = np.concatenate(Yslices, 0).reshape(
[seq_length, num_directions, batch_size, hidden_size]
)
Y_h_ref_np = np.concatenate(Hslices, 0).reshape(
[num_directions, batch_size, hidden_size]
)
    # Compare the NumPy reference against the PyTorch implementation.
    assert (
        np.max(np.abs(Y_ref - Y_ref_np)) < 1e-6
    ), "Mismatch between PyTorch and NumPy RNN implementations"
    assert (
        np.max(np.abs(Y_h_ref - Y_h_ref_np)) < 1e-6
    ), "Mismatch between PyTorch and NumPy RNN implementations"
# ---------------------------------------------- NODE DEFINITION --------------------------------------------------
# Node inputs
node_inputs = [
"X",
"W",
"R",
"B" if has_bias else "",
"",
"initial_h" if has_initial_h else "",
]
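    # Empty strings mark omitted optional inputs (sequence_lens is never passed here), per the ONNX operator convention.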
# Node outputs
node_outputs = ["Y", "Y_h"]
# RNN node definition
rnn_node_def = onnx.helper.make_node(
"RNN",
name="rnn",
inputs=node_inputs,
outputs=node_outputs,
hidden_size=hidden_size,
direction=direction,
)
# Error node definition
err_node_def = onnx.helper.make_node(
"Sub", name="error", inputs=["Y", "Y_ref"], outputs=["Y_err"]
)
# --------------------------------------------- GRAPH DEFINITION --------------------------------------------------
graph_input = list()
graph_init = list()
graph_output = list()
# RNN inputs
graph_input.append(helper.make_tensor_value_info("X", TensorProto.FLOAT, X_shape))
graph_input.append(helper.make_tensor_value_info("W", TensorProto.FLOAT, W_shape))
graph_input.append(helper.make_tensor_value_info("R", TensorProto.FLOAT, R_shape))
if has_bias:
graph_input.append(
helper.make_tensor_value_info("B", TensorProto.FLOAT, B_shape)
)
if has_sequence_lens:
graph_input.append(
helper.make_tensor_value_info(
"sequence_lens", TensorProto.INT32, sequence_lens_shape
)
)
if has_initial_h:
graph_input.append(
helper.make_tensor_value_info(
"initial_h", TensorProto.FLOAT, initial_h_shape
)
)
# Reference input
graph_input.append(
helper.make_tensor_value_info("Y_ref", TensorProto.FLOAT, Y_shape)
)
# RNN initializers
graph_init.append(make_init("X", TensorProto.FLOAT, X))
graph_init.append(make_init("W", TensorProto.FLOAT, W))
graph_init.append(make_init("R", TensorProto.FLOAT, R))
if has_bias:
graph_init.append(make_init("B", TensorProto.FLOAT, B))
if has_sequence_lens:
graph_init.append(make_init("sequence_lens", TensorProto.INT32, sequence_lens))
if has_initial_h:
graph_init.append(make_init("initial_h", TensorProto.FLOAT, initial_h))
# Reference initializer
graph_init.append(make_init("Y_ref", TensorProto.FLOAT, Y_ref))
# Graph outputs
graph_output.append(
helper.make_tensor_value_info("Y_err", TensorProto.FLOAT, Y_shape)
)
# Define graph (GraphProto)
graph_name = "rnn_test"
graph_def = helper.make_graph(
[rnn_node_def, err_node_def],
graph_name,
inputs=graph_input,
outputs=graph_output,
)
# Set initializers
graph_def.initializer.extend(graph_init)
# --------------------------------------------- MODEL DEFINITION --------------------------------------------------
# Define model (ModelProto)
model_def = helper.make_model(graph_def, producer_name="onnx-rnn")
# Check model
onnx.checker.check_model(model_def)
    # Save the model in ONNX text format
with open(model_path, "w") as f:
f.write(str(model_def))
# Forward RNN
gen_rnn_onnx_test_model(
model_path="rnnForward.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
)
# Reverse RNN
gen_rnn_onnx_test_model(
model_path="rnnReverse.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="reverse",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
)
# Bidirectional RNN
gen_rnn_onnx_test_model(
model_path="rnnBidirectional.onnxtxt",
seq_length=2,
batch_size=5,
hidden_size=4,
input_size=3,
direction="bidirectional",
has_bias=True,
has_sequence_lens=False,
has_initial_h=True,
)
# Forward no bias RNN
gen_rnn_onnx_test_model(
model_path="rnnForwardNoBias.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=False,
has_sequence_lens=False,
has_initial_h=True,
)
# Forward no state RNN
gen_rnn_onnx_test_model(
model_path="rnnForwardNoState.onnxtxt",
seq_length=1,
batch_size=5,
hidden_size=4,
input_size=3,
direction="forward",
has_bias=True,
has_sequence_lens=False,
has_initial_h=False,
)
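# Optional sanity check (illustrative addition, not part of the original generator): the files above
# are written as ONNX text protos, so they can be parsed back with protobuf's text_format and
# re-validated before a test harness consumes them.
def verify_text_model(path):
    # protobuf is an onnx dependency, so this import is expected to be available
    from google.protobuf import text_format

    model = onnx.ModelProto()
    with open(path) as f:
        text_format.Parse(f.read(), model)
    onnx.checker.check_model(model)


verify_text_model("rnnForward.onnxtxt")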
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch_glow
def pytest_addoption(parser):
parser.addoption("--backend", action="store", default=None)
def pytest_sessionstart(session):
backend = session.config.getoption("--backend")
if backend:
torch_glow.setGlowBackend(backend)
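# Illustrative usage (assumption about the surrounding test setup): invoking the suite as
#   pytest --backend Interpreter
# makes every test in the session lower to the named Glow backend via setGlowBackend.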
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import os
import unittest
from contextlib import contextmanager
from copy import deepcopy
from io import BytesIO
import numpy as np
import torch
import torch_glow
from parameterized import parameterized
GLOW_FUSION_GROUP = "glow::FusionGroup"
SUBGRAPH_ATTR = "Subgraph"
BACKEND_NAME_KEY = "BACKEND_NAME"
INTERPRETER = "Interpreter"
DEFAULT_BACKEND = os.environ.get(BACKEND_NAME_KEY, "Interpreter")
def get_backend_name():
return os.environ.get(BACKEND_NAME_KEY, INTERPRETER)
@contextmanager
def ephemeral_torchglow_settings(
fp16=False,
backend=DEFAULT_BACKEND,
fusion=False,
blocklist=None,
accept_all_layouts=False,
):
old_fp16 = torch_glow.get_convert_to_fp16()
old_clip = torch_glow.get_clip_fp16()
old_convert_fused = torch_glow.get_convert_fused_to_fp16()
old_backend = torch_glow.getGlowBackendName()
old_blocklist = torch_glow.getFusionBlocklist()
old_fusion = torch_glow.getFusionPassEnabled()
try:
if fusion:
torch_glow.enableFusionPass_DO_NOT_USE_THIS()
else:
torch_glow.disableFusionPass()
if fp16:
torch_glow.enable_convert_to_fp16()
torch_glow.enable_convert_fused_to_fp16()
torch_glow.enable_clip_fp16()
else:
torch_glow.disable_convert_to_fp16()
torch_glow.disable_convert_fused_to_fp16()
torch_glow.disable_clip_fp16()
if blocklist is None:
torch_glow.clearFusionBlocklist()
else:
torch_glow.setFusionBlocklist(list(blocklist))
if accept_all_layouts:
torch_glow.enable_accept_all_layout()
else:
torch_glow.disable_accept_all_layout()
torch_glow.setGlowBackend(backend)
yield
finally:
torch_glow.enable_convert_to_fp16() if old_fp16 else torch_glow.disable_convert_to_fp16()
torch_glow.enable_clip_fp16() if old_clip else torch_glow.disable_clip_fp16()
torch_glow.enable_convert_fused_to_fp16() if old_convert_fused else torch_glow.disable_convert_fused_to_fp16()
torch_glow.enableFusionPass_DO_NOT_USE_THIS() if old_fusion else torch_glow.disableFusionPass()
torch_glow.setGlowBackend(old_backend)
torch_glow.setFusionBlocklist(old_blocklist)
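# Illustrative usage (comment only; the helpers below are the real call sites): settings are scoped so a
# test can flip fusion/fp16 without leaking state, e.g.
#   with ephemeral_torchglow_settings(fusion=True, fp16=False):
#       traced = torch.jit.trace(module, inputs)  # the Glow fuser is active only inside this block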
def check_skip(case):
backend = DEFAULT_BACKEND
supported = {INTERPRETER}
try:
supported = supported | case.supported_backends
except AttributeError:
pass
if backend not in supported:
case.skipTest("Skipping tests for backend: " + backend)
def assert_equivalent(
result1_name, result1, result2_name, result2, atol=5e-4, rtol=1e-3, use_eq=False
):
if isinstance(result1, tuple) or isinstance(result2, tuple):
assert isinstance(result1, tuple) and isinstance(result2, tuple)
assert len(result1) == len(result2)
return all(
assert_equivalent(
result1_name, a, result2_name, b, atol=atol, rtol=rtol, use_eq=use_eq
)
for a, b in zip(result1, result2)
)
elif result2.dtype == torch.bool:
diff = torch.eq(result1, result2)
if torch.all(diff):
return True
else:
error = f"Diff:{diff}\n"
raise AssertionError(error)
else:
matches = (
torch.equal(result1, result2)
if use_eq
else torch.allclose(result1, result2, rtol=rtol, atol=atol)
)
if matches:
return True
else:
diff = torch.abs(result1 - result2)
error = f"{result1_name} result:\n{result1}\n"
error += f"{result2_name} result:\n{result2}\n"
error += f"Diff:\n{diff}\n"
error += f"Max diff:\n{torch.max(diff)}"
raise AssertionError(error)
# Shared module-level default for skip_for_backends; kept immutable to avoid the
# mutable-default-argument lint warning.
DEFAULT_SKIP_BACKENDS_SET = frozenset()
def run_comparison_tests(
module,
inputs,
fusible_ops,
fp32vfp32_atol=5e-4,
fp32vfp32_rtol=1e-3,
fp32vfp16_atol=1e-2,
fp32vfp16_rtol=1e-2,
fp16vfp16_atol=5e-4,
fp16vfp16_rtol=1e-3,
fusion_blocklist=None,
scripted=False,
check_trace=True,
skip_for_backends=DEFAULT_SKIP_BACKENDS_SET,
skip_fp32_vs_fp16=False,
skip_to_glow=False, # Ugly hack, TODO: Remove
):
# tuplify inputs
if not isinstance(inputs, tuple):
inputs = (inputs,)
# Check that test is setup properly
if not isinstance(module, torch.nn.Module):
raise AssertionError("to_glow only supports nn.Modules")
if "Interpreter" in skip_for_backends:
raise AssertionError(
"Interpreter backend can't be skipped, skip entire test until Interpreter is supported"
)
    # If the current Glow backend is in the skip list, skip the test
other_backend = torch_glow.getGlowBackendName()
if other_backend in skip_for_backends:
raise unittest.SkipTest(f"backend {other_backend} not supported for this test")
# Get other Glow backend besides Interpreter to test if applicable
if other_backend == "Interpreter":
other_backend = None
if skip_to_glow and other_backend:
raise AssertionError(
f"to_glow must be used for non-interpreter backends, skip this test for {other_backend} backend until the test supports to_glow"
)
def prepare(m, inputs, fp16, backend, fusion):
""" "Helper to prepare a JIT module to run either on PyTorch or Glow"""
inputs = deepcopy(inputs)
def getJITModule():
m_jit = None
if scripted:
m_jit = torch.jit.script(m)
else:
m_jit = torch.jit.trace(m, inputs, check_trace=check_trace)
if scripted or not check_trace:
# run it once to activate the fuser if not run yet
m_jit(*inputs)
return m_jit
with torch.no_grad():
m_jit = None
if fusion:
with ephemeral_torchglow_settings(
fusion=True, fp16=fp16, backend=backend, blocklist=fusion_blocklist
):
m_jit = getJITModule()
assert_fused(
m_jit.graph_for(*(deepcopy(inputs))),
fusible_ops,
)
else:
m_jit = getJITModule()
if backend != "PyTorch": # to_glow
m_jit = torch_glow.lower(
model=m_jit,
example_inputs=inputs,
backend=backend,
convert_to_fp16=fp16,
)
return m_jit
    def compare(a_name, a, b_name, b, atol, rtol, use_eq=False):
        """Helper to compare two JIT modules; skip the comparison if either is None."""
        if a is None:
            print(f"Skipping {a_name} vs {b_name} because {a_name} was not computed")
            return
        if b is None:
            print(f"Skipping {a_name} vs {b_name} because {b_name} was not computed")
            return
        a_outputs = a(*deepcopy(inputs))
        b_outputs = b(*deepcopy(inputs))
        assert_equivalent(a_name, a_outputs, b_name, b_outputs, atol, rtol, use_eq)
# Prepare modules for testing
m_pytorch_fp32 = prepare(
module, inputs, fp16=False, backend="PyTorch", fusion=False
)
m_interpreter_fuser_fp32 = prepare(
module, inputs, fp16=False, backend="Interpreter", fusion=True
)
m_interpreter_fp32 = None
m_interpreter_fp16 = None
m_other_fp16 = None
if not skip_to_glow:
        m_interpreter_fp32 = prepare(
            module, inputs, fp16=False, backend="Interpreter", fusion=False
        )
        m_interpreter_fp16 = prepare(
            module, inputs, fp16=True, backend="Interpreter", fusion=False
        )
m_other_fp16 = None
if other_backend:
m_other_fp16 = prepare(
module, inputs, fp16=True, backend=other_backend, fusion=False
)
# JIT vs Interpreter, via to_glow, fp32-fp32
compare(
"m_pytorch_fp32",
m_pytorch_fp32,
"m_interpreter_fp32",
m_interpreter_fp32,
fp32vfp32_atol,
fp32vfp32_rtol,
)
# Interpreter vs Interpreter, via to_glow and fuser, fp32-fp32
compare(
"m_interpreter_fp32",
m_interpreter_fp32,
"m_interpreter_fuser_fp32",
m_interpreter_fuser_fp32,
fp32vfp32_atol,
fp32vfp32_rtol,
use_eq=True, # fuser and to_glow should match exactly
)
# Interpreter vs Other, via to_glow, fp16-fp16
compare(
"m_interpreter_fp16",
m_interpreter_fp16,
"m_other_fp16",
m_other_fp16,
fp16vfp16_atol,
fp16vfp16_rtol,
)
if not skip_fp32_vs_fp16:
# JIT vs Interpreter, via to_glow, fp32-fp16
compare(
"m_pytorch_fp32",
m_pytorch_fp32,
"m_interpreter_fp16",
m_interpreter_fp16,
fp32vfp16_atol,
fp32vfp16_rtol,
)
def compare_tracing_methods(
module,
*inputs,
atol=5e-4,
rtol=1e-3,
reference=None,
fusible_ops=None,
fusion_blocklist=None,
fp16=False,
scripted=False,
check_trace=True,
accept_all_layouts=False,
skip_to_glow=False, # Ugly hack, TODO: Remove
):
if not isinstance(module, torch.nn.Module):
raise AssertionError("to_glow only supports nn.Modules")
def trace(mod, ins):
if scripted:
return torch.jit.script(mod)
else:
return torch.jit.trace(mod, ins, check_trace=check_trace)
with torch.no_grad():
with ephemeral_torchglow_settings(
fusion=True,
fp16=fp16,
blocklist=fusion_blocklist,
accept_all_layouts=accept_all_layouts,
):
fusion_inputs = deepcopy(inputs)
fusion_trace = trace(module, fusion_inputs)
assert_fused(
fusion_trace.graph_for(*fusion_inputs),
(fusible_ops or []),
accept_any=fusible_ops is None,
)
fusion_result = fusion_trace(*fusion_inputs)
with ephemeral_torchglow_settings(
fusion=False, fp16=fp16, accept_all_layouts=accept_all_layouts
):
if scripted:
torchscript_result = module(*deepcopy(inputs))
else:
torchscript_inputs = deepcopy(inputs)
torchscript_trace = trace(module, torchscript_inputs)
torchscript_result = torchscript_trace(*torchscript_inputs)
with ephemeral_torchglow_settings(
fusion=False, fp16=fp16, accept_all_layouts=accept_all_layouts
):
if not skip_to_glow:
glow_inputs = deepcopy(inputs)
traced_module = trace(module, glow_inputs)
lowered_module = torch_glow.lower(
traced_module, glow_inputs, DEFAULT_BACKEND
)
glow_result = lowered_module(*glow_inputs)
if reference:
assert_equivalent(
"Reference",
reference,
"Glow fusion",
                fusion_result,
atol=atol,
rtol=rtol,
)
assert_equivalent(
"Reference",
reference,
"TorchScript",
torchscript_result,
atol=atol,
rtol=rtol,
)
if not skip_to_glow:
assert_equivalent(
"Reference", reference, "Glow", glow_result, atol=atol, rtol=rtol
)
# This is written out manually instead of using combinations in order to aid
# debugging. TODO: Clean up.
assert_equivalent(
"Glow fusion",
fusion_result,
"TorchScript",
torchscript_result,
atol=atol,
rtol=rtol,
)
if not skip_to_glow:
assert_equivalent(
"Glow fusion", fusion_result, "Glow", glow_result, atol=atol, rtol=rtol
)
assert_equivalent(
"TorchScript",
torchscript_result,
"Glow",
glow_result,
atol=atol,
rtol=rtol,
)
# Compilation test for glow lowering without executing.
# This is designed for use cases where the original graph contains placeholder operators.
def test_lowering(
module,
*inputs,
fusible_ops=None,
fusion_blocklist=None,
fp16=False,
scripted=False,
check_trace=True,
accept_all_layouts=False,
):
if not isinstance(module, torch.nn.Module):
raise AssertionError("to_glow only supports nn.Modules")
def trace(mod, ins):
if scripted:
return torch.jit.script(mod)
else:
return torch.jit.trace(mod, ins, check_trace=check_trace)
with torch.no_grad():
with ephemeral_torchglow_settings(
fusion=False, fp16=fp16, accept_all_layouts=accept_all_layouts
):
glow_inputs = deepcopy(inputs)
traced_module = trace(module, glow_inputs)
# If deferred weight loader is not set, it will throw a runtime exception
_lowered_module = torch_glow.lower(
traced_module, glow_inputs, DEFAULT_BACKEND
) # unused
def compare_tracing_methods_error(
module,
*inputs,
fusible_ops=None,
fusion_blocklist=None,
fp16=False,
):
if not isinstance(module, torch.nn.Module):
raise AssertionError("to_glow only supports nn.Modules")
def trace(mod, ins):
return torch.jit.trace(mod, ins)
with torch.no_grad():
with ephemeral_torchglow_settings(
fusion=True, fp16=fp16, blocklist=fusion_blocklist
):
fusion_inputs = deepcopy(inputs)
try:
fusion_trace = trace(module, fusion_inputs)
assert_fused(
fusion_trace.graph_for(*fusion_inputs),
                    (fusible_ops or []),
accept_any=fusible_ops is None,
)
fusion_trace(*fusion_inputs)
except Exception:
pass
else:
raise AssertionError("Error expected (fusion), but none were received")
with ephemeral_torchglow_settings(fusion=False, fp16=fp16):
try:
torchscript_inputs = deepcopy(inputs)
torchscript_trace = trace(module, torchscript_inputs)
torchscript_trace(*torchscript_inputs)
except Exception:
pass
else:
raise AssertionError(
"Error expected (torchscript), but none were received"
)
with ephemeral_torchglow_settings(fusion=False, fp16=fp16):
try:
glow_inputs = deepcopy(inputs)
glow_spec = torch_glow.lower(
model=module,
example_inputs=glow_inputs,
backend=DEFAULT_BACKEND,
)
glow_trace = torch_glow.to_glow(trace(module, glow_inputs), glow_spec)
glow_trace(*glow_inputs)
except Exception:
pass
else:
raise AssertionError("Error expected (glow), but none were received")
def assert_fused(fused_graph, ops, accept_any=False, strict=False):
expected = set(ops)
fused = set()
with torch.no_grad():
for node in fused_graph.nodes():
kind = node.kind()
if kind == GLOW_FUSION_GROUP:
fused.update(map(lambda n: n.kind(), node.g(SUBGRAPH_ATTR).nodes()))
else:
assert (
kind not in expected
), f"Expected {kind} to be fused in graph\n{fused_graph}"
missing = set() if (accept_any and fused) else expected - fused
unexpected = set() if (accept_any or not strict) else fused - expected
assert (
not unexpected
), f"Expected fusion of {expected}, but {fused} was fused in graph\n{fused_graph}"
assert (
not missing
), f"Expected fusion of {expected}, but only {fused} was fused in graph\n{fused_graph}"
def graph_contains_str(graph, substr):
return graph.str().find(substr) >= 0
# Verifies equal modules for save-load tests.
def assertModulesEqual(case, mod1, mod2, message=None):
for p1, p2 in itertools.zip_longest(mod1.parameters(), mod2.parameters()):
case.assertTrue(p1.equal(p2), message)
def save_and_reload_model(model):
buf = BytesIO()
print("saving ...")
torch.jit.save(model, buf)
print("done")
print("reloading....")
buf.seek(0)
reloaded_model = torch.jit.load(buf)
print("done")
return reloaded_model
class TorchGlowTestCase(unittest.TestCase):
"""
Base class for torch_glow tests that ensure that torch.manual_seed is
called before each test.
NOTE: this won't effect arguments to the test case so make sure that test
cases generate their own inputs to the test network within the test case not
outside of it.
"""
def setUp(self):
torch.manual_seed(0)
np.random.seed(0)
print("running the setup for TorchGlowTest")
def deterministic_expand(params):
"""Takes params as a list of lambdas where each lambda produces a tuple of
unique parameters for the test"""
torch.manual_seed(0)
np.random.seed(0)
return parameterized.expand([p() for p in params])
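# Illustrative usage (hypothetical test, not from this file):
#   class TestFoo(TorchGlowTestCase):
#       @deterministic_expand([lambda: ("case_a", torch.randn(2, 3))])
#       def test_foo(self, name, x):
#           ...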
|